179 files changed, 24881 insertions, 1939 deletions
@@ -49,7 +49,10 @@ scripts/exp/udp_64B_vm7.pcap
 scripts/exp/udp_64B_vm8.pcap
 scripts/exp/udp_64B_vm9.pcap
 scripts/exp/stl_syn_attack.pcap
-
+scripts/exp/stl_vm_inc_size_64_128.pcap
+scripts/exp/stl_vm_rand_size_512B_64_128.pcap
+scripts/exp/stl_vm_rand_size_64_128.pcap
+scripts/exp/stl_vm_random_size_64_128.pcap
@@ -1,4 +1,6 @@
-v1.84
+v1.85
+
+
diff --git a/linux/ws_main.py b/linux/ws_main.py
index 71914630..a41fab1e 100755
--- a/linux/ws_main.py
+++ b/linux/ws_main.py
@@ -257,6 +257,7 @@ bp =SrcGroups([
 cxxflags_base =['-DWIN_UCODE_SIM',
+                '-DTREX_SIM',
                 '-D_BYTE_ORDER',
                 '-D_LITTLE_ENDIAN',
                 '-DLINUX',
diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py
index 4c8d821d..604b1c1a 100755
--- a/linux_dpdk/ws_main.py
+++ b/linux_dpdk/ws_main.py
@@ -95,6 +95,7 @@ main_src = SrcGroup(dir='src',
         'utl_term_io.cpp',
         'global_io_mode.cpp',
         'main_dpdk.cpp',
+        'debug.cpp',
         'bp_sim.cpp',
         'latency.cpp',
         'platform_cfg.cpp',
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py
new file mode 100755
index 00000000..9c81a3a0
--- /dev/null
+++ b/scripts/automation/regression/CPlatform.py
@@ -0,0 +1,908 @@
+#!/router/bin/python
+
+from interfaces_e import IFType
+from platform_cmd_link import *
+import CustomLogger
+import misc_methods
+import re
+import time
+import CProgressDisp
+from CShowParser import CShowParser
+
+class CPlatform(object):
+    def __init__(self, silent_mode):
+        self.if_mngr = CIfManager()
+        self.cmd_link = CCommandLink(silent_mode)
+        self.nat_config = None
+        self.stat_route_config = None
+        self.running_image = None
+        self.needed_image_path = None
+        self.tftp_cfg = None
+        self.config_history = { 'basic_if_config' : False, 'tftp_server_config' : False }
+
+    def configure_basic_interfaces(self):
+
+        cache = CCommandCache()
+        for dual_if in self.if_mngr.get_dual_if_list():
+            client_if_command_set = []
+            server_if_command_set = []
+
+            client_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.client_if.get_src_mac_addr()) )
+            client_if_command_set.append ('mtu 4000')
+            client_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.client_if.get_ipv4_addr() ))
+            client_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.client_if.get_ipv6_addr() ))
+
+            cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+
+            server_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.server_if.get_src_mac_addr()) )
+            server_if_command_set.append ('mtu 4000')
+            server_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.server_if.get_ipv4_addr() ))
+            server_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.server_if.get_ipv6_addr() ))
+
+            cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
+
+        self.cmd_link.run_single_command(cache)
+        self.config_history['basic_if_config'] = True
+
+
+
+    def configure_basic_filtered_interfaces(self, intf_list):
+
+        cache = CCommandCache()
+        for intf in intf_list:
+            if_command_set = []
+
+            if_command_set.append ('mac-address {mac}'.format( mac = intf.get_src_mac_addr()) )
+            if_command_set.append ('mtu 4000')
+            if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = intf.get_ipv4_addr() ))
+            if_command_set.append ('ipv6 address {ip}/64'.format( ip = intf.get_ipv6_addr() ))
+
+            cache.add('IF', if_command_set, intf.get_name())
+
+        self.cmd_link.run_single_command(cache)
+
+
+    def load_clean_config (self, config_filename = "clean_config.cfg", cfg_drive = "bootflash"):
self.clear_nat_translations() + + cache = CCommandCache() + cache.add('EXEC', "configure replace {drive}:{file} force".format(drive = cfg_drive, file = config_filename)) + self.cmd_link.run_single_command(cache) + + def config_pbr (self, mode = 'config'): + idx = 1 + unconfig_str = '' if mode=='config' else 'no ' + + cache = CCommandCache() + pre_commit_cache = CCommandCache() + pre_commit_set = set([]) + + for dual_if in self.if_mngr.get_dual_if_list(): + client_if_command_set = [] + server_if_command_set = [] + conf_t_command_set = [] + client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv4_addr() ) + server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv4_addr() ) + + if dual_if.is_duplicated(): + # define the relevant VRF name + pre_commit_set.add('{mode}ip vrf {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) ) + + # assign VRF to interfaces, config interfaces with relevant route-map + client_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) ) + client_if_command_set.append ('{mode}ip policy route-map {dup}_{p1}_to_{p2}'.format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + server_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) ) + server_if_command_set.append ('{mode}ip policy route-map {dup}_{p2}_to_{p1}'.format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + + # config route-map routing + conf_t_command_set.append('{mode}route-map {dup}_{p1}_to_{p2} permit 10'.format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + if mode == 'config': + conf_t_command_set.append('set ip next-hop {next_hop}'.format( + next_hop = client_net_next_hop) ) + conf_t_command_set.append('{mode}route-map {dup}_{p2}_to_{p1} permit 10'.format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + if mode == 'config': + conf_t_command_set.append('set ip next-hop {next_hop}'.format( + next_hop = server_net_next_hop) ) + + # config global arp to interfaces net address and vrf + conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + next_hop = server_net_next_hop, + dest_mac = dual_if.client_if.get_dest_mac())) + conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + next_hop = client_net_next_hop, + dest_mac = dual_if.server_if.get_dest_mac())) + else: + # config interfaces with relevant route-map + client_if_command_set.append ('{mode}ip policy route-map {p1}_to_{p2}'.format( + mode = unconfig_str, + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + server_if_command_set.append ('{mode}ip policy route-map {p2}_to_{p1}'.format( + mode = unconfig_str, + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + + # config route-map routing + conf_t_command_set.append('{mode}route-map {p1}_to_{p2} permit 10'.format( + mode = unconfig_str, + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + if mode == 'config': + conf_t_command_set.append('set ip next-hop {next_hop}'.format( + next_hop = client_net_next_hop) ) + conf_t_command_set.append('{mode}route-map {p2}_to_{p1} permit 10'.format( + mode = unconfig_str, + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + 
if mode == 'config': + conf_t_command_set.append('set ip next-hop {next_hop}'.format( + next_hop = server_net_next_hop) ) + + # config global arp to interfaces net address + conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + next_hop = server_net_next_hop, + dest_mac = dual_if.client_if.get_dest_mac())) + conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + next_hop = client_net_next_hop, + dest_mac = dual_if.server_if.get_dest_mac())) + + # assign generated config list to cache + cache.add('IF', server_if_command_set, dual_if.server_if.get_name()) + cache.add('IF', client_if_command_set, dual_if.client_if.get_name()) + cache.add('CONF', conf_t_command_set) + idx += 2 + + # finish handling pre-config cache + pre_commit_set = list(pre_commit_set) +# pre_commit_set.append('exit') + pre_commit_cache.add('CONF', pre_commit_set ) + # deploy the configs (order is important!) + self.cmd_link.run_command( [pre_commit_cache, cache] ) + if self.config_history['basic_if_config']: + # in this case, duplicated interfaces will lose its ip address. + # re-config IPv4 addresses + self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() ) + + def config_no_pbr (self): + self.config_pbr(mode = 'unconfig') + + def config_static_routing (self, stat_route_obj, mode = 'config'): + + if mode == 'config': + self.stat_route_config = stat_route_obj # save the latest static route config for future removal purposes + + unconfig_str = '' if mode=='config' else 'no ' + cache = CCommandCache() + pre_commit_cache = CCommandCache() + pre_commit_set = set([]) + current_dup_intf = None + # client_net = None + # server_net = None + client_net = stat_route_obj.client_net_start + server_net = stat_route_obj.server_net_start + conf_t_command_set = [] + + for dual_if in self.if_mngr.get_dual_if_list(): + + # handle duplicated addressing generation + if dual_if.is_duplicated(): + if dual_if.get_vrf_name() != current_dup_intf: + # if this is a dual interfaces, and it is different from the one we proccessed so far, reset static route addressing + current_dup_intf = dual_if.get_vrf_name() + client_net = stat_route_obj.client_net_start + server_net = stat_route_obj.server_net_start + else: + if current_dup_intf is not None: + current_dup_intf = None + client_net = stat_route_obj.client_net_start + server_net = stat_route_obj.server_net_start + + client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv4_addr() ) + server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv4_addr() ) + + # handle static route configuration for the interfaces + if dual_if.is_duplicated(): + client_if_command_set = [] + server_if_command_set = [] + + # define the relevant VRF name + pre_commit_set.add('{mode}ip vrf {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) ) + + # assign VRF to interfaces, config interfaces with relevant route-map + client_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) ) + server_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) ) + + conf_t_command_set.append( "{mode}ip route vrf {dup} {next_net} {dest_mask} {next_hop}".format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + next_net = client_net, + dest_mask = stat_route_obj.client_mask, + next_hop = client_net_next_hop)) + conf_t_command_set.append( "{mode}ip 
route vrf {dup} {next_net} {dest_mask} {next_hop}".format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + next_net = server_net, + dest_mask = stat_route_obj.server_mask, + next_hop = server_net_next_hop)) + + # config global arp to interfaces net address and vrf + conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + next_hop = server_net_next_hop, + dest_mac = dual_if.client_if.get_dest_mac())) + conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + dup = dual_if.get_vrf_name(), + next_hop = client_net_next_hop, + dest_mac = dual_if.server_if.get_dest_mac())) + + # assign generated interfaces config list to cache + cache.add('IF', server_if_command_set, dual_if.server_if.get_name()) + cache.add('IF', client_if_command_set, dual_if.client_if.get_name()) + + else: + conf_t_command_set.append( "{mode}ip route {next_net} {dest_mask} {next_hop}".format( + mode = unconfig_str, + next_net = client_net, + dest_mask = stat_route_obj.client_mask, + next_hop = server_net_next_hop)) + conf_t_command_set.append( "{mode}ip route {next_net} {dest_mask} {next_hop}".format( + mode = unconfig_str, + next_net = server_net, + dest_mask = stat_route_obj.server_mask, + next_hop = client_net_next_hop)) + + # config global arp to interfaces net address + conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + next_hop = server_net_next_hop, + dest_mac = dual_if.client_if.get_dest_mac())) + conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + next_hop = client_net_next_hop, + dest_mac = dual_if.server_if.get_dest_mac())) + + # bump up to the next client network address + client_net = misc_methods.get_single_net_client_addr(client_net, stat_route_obj.net_increment) + server_net = misc_methods.get_single_net_client_addr(server_net, stat_route_obj.net_increment) + + + # finish handling pre-config cache + pre_commit_set = list(pre_commit_set) +# pre_commit_set.append('exit') + pre_commit_cache.add('CONF', pre_commit_set ) + # assign generated config list to cache + cache.add('CONF', conf_t_command_set) + # deploy the configs (order is important!) + self.cmd_link.run_command( [pre_commit_cache, cache] ) + if self.config_history['basic_if_config']: + # in this case, duplicated interfaces will lose its ip address. + # re-config IPv4 addresses + self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() ) + + + def config_no_static_routing (self, stat_route_obj = None): + + if stat_route_obj is None and self.stat_route_config is not None: + self.config_static_routing(self.stat_route_config, mode = 'unconfig') + self.stat_route_config = None # reverse current static route config back to None (no nat config is known to run). 
+ elif stat_route_obj is not None: + self.config_static_routing(stat_route_obj, mode = 'unconfig') + else: + raise UserWarning('No static route configuration is available for removal.') + + def config_nbar_pd (self, mode = 'config'): + unconfig_str = '' if mode=='config' else 'no ' + cache = CCommandCache() + + for intf in self.if_mngr.get_if_list(if_type = IFType.Client): + cache.add('IF', "{mode}ip nbar protocol-discovery".format( mode = unconfig_str ), intf.get_name()) + + self.cmd_link.run_single_command( cache ) + + def config_no_nbar_pd (self): + self.config_nbar_pd (mode = 'unconfig') + + + def config_nat_verify (self, mode = 'config'): + + # toggle all duplicate interfaces + # dup_ifs = self.if_mngr.get_duplicated_if() + if mode=='config': + self.toggle_duplicated_intf(action = 'down') + # self.__toggle_interfaces(dup_ifs, action = 'down' ) + else: + # if we're in 'unconfig', toggle duplicated interfaces back up + self.toggle_duplicated_intf(action = 'up') + # self.__toggle_interfaces(dup_ifs) + + def config_no_nat_verify (self): + self.config_nat_verify(mode = 'unconfig') + + def config_nat (self, nat_obj, mode = 'config'): + + if mode == 'config': + self.nat_config = nat_obj # save the latest nat config for future removal purposes + + cache = CCommandCache() + conf_t_command_set = [] + client_net = nat_obj.clients_net_start + pool_net = nat_obj.nat_pool_start + unconfig_str = '' if mode=='config' else 'no ' + + # toggle all duplicate interfaces + # dup_ifs = self.if_mngr.get_duplicated_if() + if mode=='config': + self.toggle_duplicated_intf(action = 'down') + # self.__toggle_interfaces(dup_ifs, action = 'down' ) + else: + # if we're in 'unconfig', toggle duplicated interfaces back up + self.toggle_duplicated_intf(action = 'up') + # self.__toggle_interfaces(dup_ifs) + + for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False): + cache.add('IF', "{mode}ip nat inside".format( mode = unconfig_str ), dual_if.client_if.get_name()) + cache.add('IF', "{mode}ip nat outside".format( mode = unconfig_str ), dual_if.server_if.get_name()) + pool_id = dual_if.get_id() + 1 + + conf_t_command_set.append("{mode}ip nat pool pool{pool_num} {start_addr} {end_addr} netmask {mask}".format( + mode = unconfig_str, + pool_num = pool_id, + start_addr = pool_net, + end_addr = CNatConfig.calc_pool_end(pool_net, nat_obj.nat_netmask), + mask = nat_obj.nat_netmask)) + + conf_t_command_set.append("{mode}ip nat inside source list {num} pool pool{pool_num} overload".format( + mode = unconfig_str, + num = pool_id, + pool_num = pool_id )) + conf_t_command_set.append("{mode}access-list {num} permit {net_addr} {net_wildcard}".format( + mode = unconfig_str, + num = pool_id, + net_addr = client_net, + net_wildcard = nat_obj.client_acl_wildcard)) + + # bump up to the next client address + client_net = misc_methods.get_single_net_client_addr(client_net, nat_obj.net_increment) + pool_net = misc_methods.get_single_net_client_addr(pool_net, nat_obj.net_increment) + + + # assign generated config list to cache + cache.add('CONF', conf_t_command_set) + + # deploy the configs (order is important!) + self.cmd_link.run_single_command( cache ) + + + def config_no_nat (self, nat_obj = None): + # first, clear all nat translations + self.clear_nat_translations() + + # then, decompose the known config + if nat_obj is None and self.nat_config is not None: + self.config_nat(self.nat_config, mode = 'unconfig') + self.nat_config = None # reverse current NAT config back to None (no nat config is known to run). 
+ elif nat_obj is not None: + self.config_nat(nat_obj, mode = 'unconfig') + else: + raise UserWarning('No NAT configuration is available for removal.') + + + def config_zbf (self, mode = 'config'): + cache = CCommandCache() + pre_commit_cache = CCommandCache() + conf_t_command_set = [] + + # toggle all duplicate interfaces down + self.toggle_duplicated_intf(action = 'down') + # dup_ifs = self.if_mngr.get_duplicated_if() + # self.__toggle_interfaces(dup_ifs, action = 'down' ) + + # define security zones and security service policy to be applied on the interfaces + conf_t_command_set.append('class-map type inspect match-any c1') + conf_t_command_set.append('match protocol tcp') + conf_t_command_set.append('match protocol udp') + conf_t_command_set.append('policy-map type inspect p1') + conf_t_command_set.append('class type inspect c1') + conf_t_command_set.append('inspect') + conf_t_command_set.append('class class-default') + conf_t_command_set.append('pass') + + conf_t_command_set.append('zone security z_in') + conf_t_command_set.append('zone security z_out') + + conf_t_command_set.append('zone-pair security in2out source z_in destination z_out') + conf_t_command_set.append('service-policy type inspect p1') + conf_t_command_set.append('zone-pair security out2in source z_out destination z_in') + conf_t_command_set.append('service-policy type inspect p1') + conf_t_command_set.append('exit') + + pre_commit_cache.add('CONF', conf_t_command_set) + + for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False): + cache.add('IF', "zone-member security z_in", dual_if.client_if.get_name() ) + cache.add('IF', "zone-member security z_out", dual_if.server_if.get_name() ) + + self.cmd_link.run_command( [pre_commit_cache, cache] ) + + def config_no_zbf (self): + cache = CCommandCache() + conf_t_command_set = [] + + # define security zones and security service policy to be applied on the interfaces + conf_t_command_set.append('no zone-pair security in2out source z_in destination z_out') + conf_t_command_set.append('no zone-pair security out2in source z_out destination z_in') + + conf_t_command_set.append('no policy-map type inspect p1') + conf_t_command_set.append('no class-map type inspect match-any c1') + + conf_t_command_set.append('no zone security z_in') + conf_t_command_set.append('no zone security z_out') + + cache.add('CONF', conf_t_command_set) + + for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False): + cache.add('IF', "no zone-member security z_in", dual_if.client_if.get_name() ) + cache.add('IF', "no zone-member security z_out", dual_if.server_if.get_name() ) + + self.cmd_link.run_command( [cache] ) + # toggle all duplicate interfaces back up + self.toggle_duplicated_intf(action = 'up') + # dup_ifs = self.if_mngr.get_duplicated_if() + # self.__toggle_interfaces(dup_ifs) + + + def config_ipv6_pbr (self, mode = 'config'): + idx = 1 + unconfig_str = '' if mode=='config' else 'no ' + cache = CCommandCache() + conf_t_command_set = [] + + conf_t_command_set.append('{mode}ipv6 unicast-routing'.format(mode = unconfig_str) ) + + for dual_if in self.if_mngr.get_dual_if_list(): + client_if_command_set = [] + server_if_command_set = [] + + client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' ) + server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' ) + + + client_if_command_set.append ('{mode}ipv6 enable'.format(mode = unconfig_str)) + 
server_if_command_set.append ('{mode}ipv6 enable'.format(mode = unconfig_str)) + + if dual_if.is_duplicated(): + prefix = 'ipv6_' + dual_if.get_vrf_name() + else: + prefix = 'ipv6' + + # config interfaces with relevant route-map + client_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p1}_to_{p2}'.format( + mode = unconfig_str, + pre = prefix, + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + server_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p2}_to_{p1}'.format( + mode = unconfig_str, + pre = prefix, + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + + # config global arp to interfaces net address and vrf + conf_t_command_set.append('{mode}ipv6 neighbor {next_hop} {intf} {dest_mac}'.format( + mode = unconfig_str, + next_hop = server_net_next_hop, + intf = dual_if.client_if.get_name(), + dest_mac = dual_if.client_if.get_dest_mac())) + conf_t_command_set.append('{mode}ipv6 neighbor {next_hop} {intf} {dest_mac}'.format( + mode = unconfig_str, + next_hop = client_net_next_hop, + intf = dual_if.server_if.get_name(), + dest_mac = dual_if.server_if.get_dest_mac())) + + conf_t_command_set.append('{mode}route-map {pre}_{p1}_to_{p2} permit 10'.format( + mode = unconfig_str, + pre = prefix, + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + if (mode == 'config'): + conf_t_command_set.append('set ipv6 next-hop {next_hop}'.format(next_hop = client_net_next_hop ) ) + conf_t_command_set.append('{mode}route-map {pre}_{p2}_to_{p1} permit 10'.format( + mode = unconfig_str, + pre = prefix, + p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) ) + if (mode == 'config'): + conf_t_command_set.append('set ipv6 next-hop {next_hop}'.format(next_hop = server_net_next_hop ) ) + conf_t_command_set.append('exit') + + # assign generated config list to cache + cache.add('IF', server_if_command_set, dual_if.server_if.get_name()) + cache.add('IF', client_if_command_set, dual_if.client_if.get_name()) + idx += 2 + + cache.add('CONF', conf_t_command_set) + + # deploy the configs (order is important!) 
+ self.cmd_link.run_command( [cache] ) + + def config_no_ipv6_pbr (self): + self.config_ipv6_pbr(mode = 'unconfig') + + # show methods + def get_cpu_util (self): + response = self.cmd_link.run_single_command('show platform hardware qfp active datapath utilization | inc Load') + return CShowParser.parse_cpu_util_stats(response) + + def get_cft_stats (self): + response = self.cmd_link.run_single_command('test platform hardware qfp active infrastructure cft datapath function cft-cpp-show-all-instances') + return CShowParser.parse_cft_stats(response) + + def get_nbar_stats (self): + per_intf_stats = {} + for intf in self.if_mngr.get_if_list(if_type = IFType.Client): + response = self.cmd_link.run_single_command("show ip nbar protocol-discovery interface {interface} stats packet-count protocol".format( interface = intf.get_name() ), flush_first = True) + per_intf_stats[intf.get_name()] = CShowParser.parse_nbar_stats(response) + return per_intf_stats + + def get_nbar_profiling_stats (self): + response = self.cmd_link.run_single_command("show platform hardware qfp active feature nbar profiling") + return CShowParser.parse_nbar_profiling_stats(response) + + def get_drop_stats (self): + + response = self.cmd_link.run_single_command('show platform hardware qfp active interface all statistics', flush_first = True) + # print response + # response = self.cmd_link.run_single_command('show platform hardware qfp active interface all statistics') + # print response + if_list_by_name = map( lambda x: x.get_name(), self.if_mngr.get_if_list() ) + return CShowParser.parse_drop_stats(response, if_list_by_name ) + + def get_nat_stats (self): + response = self.cmd_link.run_single_command('show ip nat statistics') + return CShowParser.parse_nat_stats(response) + + def get_cvla_memory_usage(self): + response = self.cmd_link.run_single_command('show platform hardware qfp active infrastructure cvla client handles') + # (res, res2) = CShowParser.parse_cvla_memory_usage(response) + return CShowParser.parse_cvla_memory_usage(response) + + + # clear methods + def clear_nat_translations(self): + pre_commit_cache = CCommandCache() + pre_commit_cache.add('EXEC', 'clear ip nat translation *') + self.cmd_link.run_single_command( pre_commit_cache ) + + def clear_cft_counters (self): + """ clear_cft_counters(self) -> None + + Clears the CFT counters on the platform + """ + self.cmd_link.run_single_command('test platform hardware qfp active infrastructure cft datapath function cft-cpp-clear-instance-stats') + + def clear_counters(self): + """ clear_counters(self) -> None + + Clears the platform counters + """ + + pre_commit_cache = CCommandCache() + pre_commit_cache.add('EXEC', ['clear counters','\r'] ) + self.cmd_link.run_single_command( pre_commit_cache ) + + def clear_nbar_stats(self): + """ clear_nbar_stats(self) -> None + + Clears the NBAR-PD classification stats + """ + pre_commit_cache = CCommandCache() + pre_commit_cache.add('EXEC', ['clear ip nbar protocol-discovery','\r'] ) + self.cmd_link.run_single_command( pre_commit_cache ) + + def clear_packet_drop_stats(self): + """ clear_packet_drop_stats(self) -> None + + Clears packet-drop stats + """ +# command = "show platform hardware qfp active statistics drop clear" + self.cmd_link.run_single_command('show platform hardware qfp active interface all statistics clear_drop') + + ########################################### + # file transfer and image loading methods # + ########################################### + def get_running_image_details (self): + """ 
get_running_image_details() -> dict + + Check for the currently running image file on the platform. + Returns a dictionary, where 'drive' key is the drive in which the image is installed, + and 'image' key is the actual image file used. + """ + response = self.cmd_link.run_single_command('show version | include System image') + parsed_info = CShowParser.parse_show_image_version(response) + self.running_image = parsed_info + return parsed_info + + + def check_image_existence (self, img_name): + """ check_image_existence(self, img_name) -> boolean + + Parameters + ---------- + img_name : str + a string represents the image name. + + Check if the image file defined in the platform_config already loaded into the platform. + """ + search_drives = ['bootflash', 'harddisk', self.running_image['drive']] + for search_drive in search_drives: + command = "dir {drive}: | include {image}".format(drive = search_drive, image = img_name) + response = self.cmd_link.run_single_command(command, timeout = 10) + if CShowParser.parse_image_existence(response, img_name): + self.needed_image_path = '%s:%s' % (search_drive, img_name) + print 'Found image in platform:', self.needed_image_path + return True + return False + + def config_tftp_server(self, device_cfg_obj, external_tftp_config = None, applyToPlatform = False): + """ configure_tftp_server(self, external_tftp_config, applyToPlatform) -> str + + Parameters + ---------- + external_tftp_config : dict (Not is use) + A path to external tftp config file other than using the one defined in the instance. + applyToPlatform : boolean + set to True in order to apply the config into the platform + + Configures the tftp server on an interface of the platform. + """ +# tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_server_config + self.tftp_cfg = device_cfg_obj.get_tftp_info() + cache = CCommandCache() + + command = "ip tftp source-interface {intf}".format( intf = device_cfg_obj.get_mgmt_interface() ) + cache.add('CONF', command ) + self.cmd_link.run_single_command(cache) + self.config_history['tftp_server_config'] = True + + def load_platform_image(self, img_filename, external_tftp_config = None): + """ load_platform_image(self, img_filename, external_tftp_config) -> None + + Parameters + ---------- + external_tftp_config : dict + A path to external tftp config file other than using the one defined in the instance. + img_filename : str + image name to be saved into the platforms drive. + + This method loads the configured image into the platform's harddisk (unless it is already loaded), + and sets that image to be the boot_image of the platform. 
+ """ + if not self.check_image_existence(img_filename): # check if this image isn't already saved in platform + #tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_cfg + + if self.config_history['tftp_server_config']: # make sure a TFTP configuration has been loaded + cache = CCommandCache() + if self.running_image is None: + self.get_running_image_details() + + command = "copy tftp://{tftp_ip}/{img_path}/{image} harddisk:".format( + tftp_ip = self.tftp_cfg['ip_address'], + img_path = self.tftp_cfg['images_path'], + image = img_filename) + cache.add('EXEC', [command, '\r', '\r']) + + progress_thread = CProgressDisp.ProgressThread(notifyMessage = "Copying image via tftp, this may take a while...\n") + progress_thread.start() + + response = self.cmd_link.run_single_command(cache, timeout = 900, read_until = ['\?', '\#']) + print "RESPONSE:" + print response + progress_thread.join() + copy_ok = CShowParser.parse_file_copy(response) + + if not copy_ok: + raise UserWarning('Image file loading failed. Please make sure the accessed image exists and has read privileges') + else: + raise UserWarning('TFTP configuration is not available. Please make sure a valid TFTP configuration has been provided') + + def set_boot_image(self, boot_image): + """ set_boot_image(self, boot_image) -> None + + Parameters + ---------- + boot_image : str + An image file to be set as boot_image + + Configures boot_image as the boot image of the platform into the running-config + config-register + """ + cache = CCommandCache() + if self.needed_image_path is None: + if not self.check_image_existence(boot_image): + raise Exception("Trying to set boot image that's not found in router, please copy it first.") + + boot_img_cmd = "boot system flash %s" % self.needed_image_path + config_register_cmd = "config-register 0x2021" + cache.add('CONF', ["no boot system", boot_img_cmd, config_register_cmd]) + self.cmd_link.run_single_command( cache ) + self.save_config_to_startup_config() + + def is_image_matches(self, needed_image): + """ set_boot_image(self, needed_image) -> boolean + + Parameters + ---------- + needed_image : str + An image file to compare router running image + + Compares image name to router running image, returns match result. + + """ + if self.running_image is None: + self.get_running_image_details() + needed_image = needed_image.lower() + current_image = self.running_image['image'].lower() + if needed_image.find(current_image) != -1: + return True + if current_image.find(needed_image) != -1: + return True + return False + + # misc class related methods + + def load_platform_data_from_file (self, device_cfg_obj): + self.if_mngr.load_config(device_cfg_obj) + + def launch_connection (self, device_cfg_obj): + self.running_image = None # clear the image name "cache" + self.cmd_link.launch_platform_connectivity(device_cfg_obj) + + def reload_connection (self, device_cfg_obj): + self.cmd_link.close_platform_connection() + self.launch_connection(device_cfg_obj) + + def save_config_to_startup_config (self): + """ save_config_to_startup_config(self) -> None + + Copies running-config into startup-config. + """ + self.cmd_link.run_single_command('wr') + + def reload_platform(self, device_cfg_obj): + """ reload_platform(self) -> None + + Reloads the platform. 
+ """ + from subprocess import call + import os + i = 0 + sleep_time = 30 # seconds + + try: + cache = CCommandCache() + + cache.add('EXEC', ['reload','n\r','\r'] ) + self.cmd_link.run_single_command( cache ) + + progress_thread = CProgressDisp.ProgressThread(notifyMessage = "Reloading the platform, this may take a while...\n") + progress_thread.start() + time.sleep(60) # need delay for device to shut down before polling it + # poll the platform until ping response is received. + while True: + time.sleep(sleep_time) + try: + x = call(["ping", "-c 1", device_cfg_obj.get_ip_address()], stdout = open(os.devnull, 'wb')) + except: + x = 1 + if x == 0: + break + elif i > 20: + raise TimeoutError('Platform failed to reload after reboot for over {minutes} minutes!'.format(minutes = round(1 + i * sleep_time / 60))) + else: + i += 1 + + time.sleep(30) + self.reload_connection(device_cfg_obj) + finally: + progress_thread.join() + + def get_if_manager(self): + return self.if_mngr + + def dump_obj_config (self, object_name): + if object_name=='nat' and self.nat_config is not None: + self.nat_config.dump_config() + elif object_name=='static_route' and self.stat_route_config is not None: + self.stat_route_config.dump_config() + else: + raise UserWarning('No known configuration exists.') + + def toggle_duplicated_intf(self, action = 'down'): + + dup_ifs = self.if_mngr.get_duplicated_if() + self.__toggle_interfaces( dup_ifs, action = action ) + + + def __toggle_interfaces (self, intf_list, action = 'up'): + cache = CCommandCache() + mode_str = 'no ' if action == 'up' else '' + + for intf_obj in intf_list: + cache.add('IF', '{mode}shutdown'.format(mode = mode_str), intf_obj.get_name()) + + self.cmd_link.run_single_command( cache ) + + +class CStaticRouteConfig(object): + + def __init__(self, static_route_dict): + self.clients_start = static_route_dict['clients_start'] + self.servers_start = static_route_dict['servers_start'] + self.net_increment = misc_methods.gen_increment_dict(static_route_dict['dual_port_mask']) + self.client_mask = static_route_dict['client_destination_mask'] + self.server_mask = static_route_dict['server_destination_mask'] + self.client_net_start = self.extract_net_addr(self.clients_start, self.client_mask) + self.server_net_start = self.extract_net_addr(self.servers_start, self.server_mask) + self.static_route_dict = static_route_dict + + def extract_net_addr (self, ip_addr, ip_mask): + addr_lst = ip_addr.split('.') + mask_lst = ip_mask.split('.') + mask_lst = map(lambda x,y: int(x) & int(y), addr_lst, mask_lst ) + masked_str = map(lambda x: str(x), mask_lst ) + return '.'.join(masked_str) + + def dump_config (self): + import yaml + print yaml.dump( self.static_route_dict , default_flow_style=False) + + +class CNatConfig(object): + def __init__(self, nat_dict): + self.clients_net_start = nat_dict['clients_net_start'] + self.client_acl_wildcard= nat_dict['client_acl_wildcard_mask'] + self.net_increment = misc_methods.gen_increment_dict(nat_dict['dual_port_mask']) + self.nat_pool_start = nat_dict['pool_start'] + self.nat_netmask = nat_dict['pool_netmask'] + self.nat_dict = nat_dict + + @staticmethod + def calc_pool_end (nat_pool_start, netmask): + pool_start_lst = map(lambda x: int(x), nat_pool_start.split('.') ) + pool_end_lst = list( pool_start_lst ) # create new list object, don't point to the original one + mask_lst = map(lambda x: int(x), netmask.split('.')) + curr_octet = 3 # start with the LSB octet + inc_val = 1 + + while True: + tmp_masked = inc_val & mask_lst[curr_octet] + if 
tmp_masked == 0: + if (inc_val << 1) > 255: + inc_val = 1 + pool_end_lst[curr_octet] = 255 + curr_octet -= 1 + else: + inc_val <<= 1 + else: + pool_end_lst[curr_octet] += (inc_val - 1) + break + return '.'.join(map(lambda x: str(x), pool_end_lst)) + + def dump_config (self): + import yaml + print yaml.dump( self.nat_dict , default_flow_style=False) + + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/CProgressDisp.py b/scripts/automation/regression/CProgressDisp.py new file mode 100755 index 00000000..ec7920c3 --- /dev/null +++ b/scripts/automation/regression/CProgressDisp.py @@ -0,0 +1,87 @@ +#!/router/bin/python + +import threading +import sys +import time +import outer_packages +import termstyle +import progressbar + + +class ProgressThread(threading.Thread): + def __init__(self, notifyMessage = None): + super(ProgressThread, self).__init__() + self.stoprequest = threading.Event() + self.notifyMessage = notifyMessage + + def run(self): + if self.notifyMessage is not None: + print(self.notifyMessage), + + while not self.stoprequest.is_set(): + print "\b.", + sys.stdout.flush() + time.sleep(5) + + def join(self, timeout=None): + if self.notifyMessage is not None: + print termstyle.green("Done!\n"), + self.stoprequest.set() + super(ProgressThread, self).join(timeout) + + +class TimedProgressBar(threading.Thread): + def __init__(self, time_in_secs): + super(TimedProgressBar, self).__init__() + self.stoprequest = threading.Event() + self.stopFlag = False + self.time_in_secs = time_in_secs + 15 # 80 # taking 15 seconds extra + widgets = ['Running T-Rex: ', progressbar.Percentage(), ' ', + progressbar.Bar(marker='>',left='[',right=']'), + ' ', progressbar.ETA()] + self.pbar = progressbar.ProgressBar(widgets=widgets, maxval=self.time_in_secs*2) + + + def run (self): + # global g_stop + print + self.pbar.start() + + try: + for i in range(0, self.time_in_secs*2 + 1): + if (self.stopFlag == True): + break + time.sleep(0.5) + self.pbar.update(i) + # self.pbar.finish() + + except KeyboardInterrupt: + # self.pbar.finish() + print "\nInterrupted by user!!" 
+ self.join() + finally: + print + + def join(self, isPlannedStop = True, timeout=None): + if isPlannedStop: + self.pbar.update(self.time_in_secs*2) + self.stopFlag = True + else: + self.stopFlag = True # Stop the progress bar in its current location + self.stoprequest.set() + super(TimedProgressBar, self).join(timeout) + + +def timedProgressBar(time_in_secs): + widgets = ['Running T-Rex: ', progressbar.Percentage(), ' ', + Bar(marker='>',left='[',right=']'), + ' ', progressbar.ETA()] + pbar = progressbar.ProgressBar(widgets=widgets, maxval=time_in_secs*2) + pbar.start() + for i in range(0, time_in_secs*2 + 1): + time.sleep(0.5) + pbar.update(i) + pbar.finish() + print + + diff --git a/scripts/automation/regression/CShowParser.py b/scripts/automation/regression/CShowParser.py new file mode 100755 index 00000000..b3120eb1 --- /dev/null +++ b/scripts/automation/regression/CShowParser.py @@ -0,0 +1,228 @@ +#!/router/bin/python-2.7.4 + +import re +import misc_methods + +class PlatformResponseMissmatch(Exception): + def __init__(self, message): + # Call the base class constructor with the parameters it needs + super(PlatformResponseMissmatch, self).__init__(message + ' is not available for given platform state and data.\nPlease make sure the relevant features are turned on in the platform.') + +class PlatformResponseAmbiguity(Exception): + def __init__(self, message): + # Call the base class constructor with the parameters it needs + super(PlatformResponseAmbiguity, self).__init__(message + ' found more than one file matching the provided filename.\nPlease provide more distinct filename.') + + +class CShowParser(object): + + @staticmethod + def parse_drop_stats (query_response, interfaces_list): + res = {'total_drops' : 0} + response_lst = query_response.split('\r\n') + mtch_found = 0 + + for line in response_lst: + mtch = re.match("^\s*(\w+/\d/\d)\s+(\d+)\s+(\d+)", line) + if mtch: + mtch_found += 1 + if (mtch.group(1) in interfaces_list): + res[mtch.group(1)] = (int(mtch.group(2)) + int(mtch.group(3))) + res['total_drops'] += (int(mtch.group(2)) + int(mtch.group(3))) +# if mtch_found == 0: # no matches found at all +# raise PlatformResponseMissmatch('Drop stats') +# else: +# return res + return res + + @staticmethod + def parse_nbar_stats (query_response): + response_lst = query_response.split('\r\n') + stats = {} + final_stats = {} + mtch_found = 0 + + for line in response_lst: + mtch = re.match("\s*([\w-]+)\s*(\d+)\s*(\d+)\s+", line) + if mtch: + mtch_found += 1 + key = mtch.group(1) + pkt_in = int(mtch.group(2)) + pkt_out = int(mtch.group(3)) + + avg_pkt_cnt = ( pkt_in + pkt_out )/2 + if avg_pkt_cnt == 0.0: + # escaping zero division case + continue + if stats.has_key(key) : + stats[key] += avg_pkt_cnt + else: + stats[key] = avg_pkt_cnt + + # Normalize the results to percents + for protocol in stats: + protocol_norm_stat = int(stats[protocol]*10000/stats['Total'])/100.0 # round the result to x.xx format + if (protocol_norm_stat != 0.0): + final_stats[protocol] = protocol_norm_stat + + if mtch_found == 0: # no matches found at all + raise PlatformResponseMissmatch('NBAR classification stats') + else: + return { 'percentage' : final_stats, 'packets' : stats } + + @staticmethod + def parse_nat_stats (query_response): + response_lst = query_response.split('\r\n') + res = {} + mtch_found = 0 + + for line in response_lst: + mtch = re.match("Total (active translations):\s+(\d+).*(\d+)\s+static,\s+(\d+)\s+dynamic", line) + if mtch: + mtch_found += 1 + res['total_active_trans'] = int(mtch.group(2)) 
+ res['static_active_trans'] = int(mtch.group(3)) + res['dynamic_active_trans'] = int(mtch.group(4)) + continue + + mtch = re.match("(Hits):\s+(\d+)\s+(Misses):\s+(\d+)", line) + if mtch: + mtch_found += 1 + res['num_of_hits'] = int(mtch.group(2)) + res['num_of_misses'] = int(mtch.group(4)) + + if mtch_found == 0: # no matches found at all + raise PlatformResponseMissmatch('NAT translations stats') + else: + return res + + @staticmethod + def parse_cpu_util_stats (query_response): + response_lst = query_response.split('\r\n') + res = { 'cpu0' : 0, + 'cpu1' : 0 } + mtch_found = 0 + for line in response_lst: + mtch = re.match("\W*Processing: Load\D*(\d+)\D*(\d+)\D*(\d+)\D*(\d+)\D*", line) + if mtch: + mtch_found += 1 + res['cpu0'] += float(mtch.group(1)) + res['cpu1'] += float(mtch.group(2)) + + if mtch_found == 0: # no matches found at all + raise PlatformResponseMissmatch('CPU utilization processing') + else: + res['cpu0'] = res['cpu0']/mtch_found + res['cpu1'] = res['cpu1']/mtch_found + return res + + @staticmethod + def parse_cft_stats (query_response): + response_lst = query_response.split('\r\n') + res = {} + mtch_found = 0 + for line in response_lst: + mtch = re.match("\W*(\w+)\W*([:]|[=])\W*(\d+)", line) + if mtch: + mtch_found += 1 + res[ str( mix_string(m.group(1)) )] = float(m.group(3)) + if mtch_found == 0: # no matches found at all + raise PlatformResponseMissmatch('CFT counters stats') + else: + return res + + + @staticmethod + def parse_cvla_memory_usage(query_response): + response_lst = query_response.split('\r\n') + res = {} + res2 = {} + cnt = 0 + state = 0 + name = '' + number = 0.0 + + for line in response_lst: + if state == 0: + mtch = re.match("\W*Entity name:\W*(\w[^\r\n]+)", line) + if mtch: + name = misc_methods.mix_string(mtch.group(1)) + state = 1 + cnt += 1 + elif state == 1: + mtch = re.match("\W*Handle:\W*(\d+)", line) + if mtch: + state = state + 1 + else: + state = 0; + elif state == 2: + mtch = re.match("\W*Number of allocations:\W*(\d+)", line) + if mtch: + state = state + 1 + number=float(mtch.group(1)) + else: + state = 0; + elif state == 3: + mtch = re.match("\W*Memory allocated:\W*(\d+)", line) + if mtch: + state = 0 + res[name] = float(mtch.group(1)) + res2[name] = number + else: + state = 0 + if cnt == 0: + raise PlatformResponseMissmatch('CVLA memory usage stats') + + return (res,res2) + + + @staticmethod + def parse_show_image_version(query_response): + response_lst = query_response.split('\r\n') + res = {} + + for line in response_lst: + mtch = re.match("System image file is \"(\w+):(.*/)?(.+)\"", line) + if mtch: + res['drive'] = mtch.group(1) + res['image'] = mtch.group(3) + return res + + raise PlatformResponseMissmatch('Running image info') + + + @staticmethod + def parse_image_existence(query_response, img_name): + response_lst = query_response.split('\r\n') + cnt = 0 + + for line in response_lst: + regex = re.compile(".* (?!include) %s" % img_name ) + mtch = regex.match(line) + if mtch: + cnt += 1 + if cnt == 1: + return True + elif cnt > 1: + raise PlatformResponseAmbiguity('Image existence') + else: + return False + + @staticmethod + def parse_file_copy (query_response): + rev_response_lst = reversed(query_response.split('\r\n')) + lines_parsed = 0 + + for line in rev_response_lst: + mtch = re.match("\[OK - (\d+) bytes\]", line) + if mtch: + return True + lines_parsed += 1 + + if lines_parsed > 5: + return False + return False + + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/CustomLogger.py 
b/scripts/automation/regression/CustomLogger.py new file mode 100755 index 00000000..14ef1362 --- /dev/null +++ b/scripts/automation/regression/CustomLogger.py @@ -0,0 +1,36 @@ + +import sys +import os +import logging + + +# def setup_custom_logger(name, log_path = None): +# logging.basicConfig(level = logging.INFO, +# format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s', +# datefmt = '%m-%d %H:%M') + + +def setup_custom_logger(name, log_path = None): + # first make sure path availabe + if log_path is None: + log_path = os.getcwd()+'/trex_log.log' + else: + directory = os.path.dirname(log_path) + if not os.path.exists(directory): + os.makedirs(directory) + logging.basicConfig(level = logging.DEBUG, + format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s', + datefmt = '%m-%d %H:%M', + filename= log_path, + filemode= 'w') + + # define a Handler which writes INFO messages or higher to the sys.stderr + consoleLogger = logging.StreamHandler() + consoleLogger.setLevel(logging.ERROR) + # set a format which is simpler for console use + formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s') + # tell the handler to use this format + consoleLogger.setFormatter(formatter) + + # add the handler to the logger + logging.getLogger(name).addHandler(consoleLogger)
\ No newline at end of file diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py new file mode 100755 index 00000000..cab19d09 --- /dev/null +++ b/scripts/automation/regression/aggregate_results.py @@ -0,0 +1,548 @@ +# -*- coding: utf-8 -*- +import xml.etree.ElementTree as ET +import argparse +import glob +from pprint import pprint +import sys, os +from collections import OrderedDict +import copy +import datetime, time +import cPickle as pickle +import subprocess, shlex + +FUNCTIONAL_CATEGORY = 'Functional' # how to display those categories +ERROR_CATEGORY = 'Error' + + +def pad_tag(text, tag): + return '<%s>%s</%s>' % (tag, text, tag) + +def is_functional_test_name(testname): + if testname.startswith('platform_') or testname.startswith('misc_methods_'): + return True + return False + +def is_good_status(text): + return text in ('Successful', 'Fixed', 'Passed', 'True', 'Pass') + +# input: xml element with test result +# output string: 'error', 'failure', 'skipped', 'passed' +def get_test_result(test): + for child in test.getchildren(): + if child.tag in ('error', 'failure', 'skipped'): + return child.tag + return 'passed' + +# returns row of table with <th> and <td> columns - key: value +def add_th_td(key, value): + return '<tr><th>%s</th><td>%s</td></tr>\n' % (key, value) + +# returns row of table with <td> and <td> columns - key: value +def add_td_td(key, value): + return '<tr><td>%s</td><td>%s</td></tr>\n' % (key, value) + +# returns row of table with <th> and <th> columns - key: value +def add_th_th(key, value): + return '<tr><th>%s</th><th>%s</th></tr>\n' % (key, value) + +# returns <div> with table of tests under given category. +# category - string with name of category +# hidden - bool, true = <div> is hidden by CSS +# tests - list of tests, derived from aggregated xml report, changed a little to get easily stdout etc. +# category_info_dir - folder to search for category info file +# expanded - bool, false = outputs (stdout etc.) 
of tests are hidden by CSS +# brief - bool, true = cut some part of tests outputs (useful for errors section with expanded flag) +def add_category_of_tests(category, tests, hidden = False, category_info_dir = None, expanded = False, brief = False): + is_actual_category = category not in (FUNCTIONAL_CATEGORY, ERROR_CATEGORY) + html_output = '<div style="display:%s;" id="cat_tglr_%s">\n' % ('none' if hidden else 'block', category) + + if is_actual_category: + html_output += '<br><table class="reference">\n' + + if category_info_dir: + category_info_file = '%s/report_%s.info' % (category_info_dir, category) + if os.path.exists(category_info_file): + with open(category_info_file) as f: + for info_line in f.readlines(): + key_value = info_line.split(':', 1) + if key_value[0].startswith('User'): # always 'hhaim', no need to show + continue + html_output += add_th_td('%s:' % key_value[0], key_value[1]) + else: + html_output += add_th_td('Info:', 'No info') + print 'add_category_of_tests: no category info %s' % category_info_file + if len(tests): + total_duration = 0.0 + for test in tests: + total_duration += float(test.attrib['time']) + html_output += add_th_td('Tests duration:', datetime.timedelta(seconds = int(total_duration))) + html_output += '</table>\n' + + if not len(tests): + return html_output + pad_tag('<br><font color=red>No tests!</font>', 'b') + '</div>' + html_output += '<br>\n<table class="reference">\n<tr><th align="left">' + + if category == ERROR_CATEGORY: + html_output += 'Setup</th><th align="left">Failed tests:' + else: + html_output += '%s tests:' % category + html_output += '</th><th align="center">Final Result</th>\n<th align="center">Time (s)</th>\n</tr>\n' + for test in tests: + functional_test = is_functional_test_name(test.attrib['name']) + if functional_test and is_actual_category: + continue + if category == ERROR_CATEGORY: + test_id = ('err_' + test.attrib['classname'] + test.attrib['name']).replace('.', '_') + else: + test_id = (category + test.attrib['name']).replace('.', '_') + if expanded: + html_output += '<tr>\n<th>' + else: + html_output += '<tr onclick=tgl_test("%s") class=linktr>\n<td class=linktext>' % test_id + if category == ERROR_CATEGORY: + html_output += FUNCTIONAL_CATEGORY if functional_test else test.attrib['classname'] + if expanded: + html_output += '</th><td>' + else: + html_output += '</td><td class=linktext>' + html_output += '%s</td>\n<td align="center">' % test.attrib['name'] + test_result = get_test_result(test) + if test_result == 'error': + html_output += '<font color="red"><b>ERROR</b></font></td>' + elif test_result == 'failure': + html_output += '<font color="red"><b>FAILED</b></font></td>' + elif test_result == 'skipped': + html_output += '<font color="blue"><b>SKIPPED</b></font></td>' + else: + html_output += '<font color="green"><b>PASSED</b></font></td>' + html_output += '<td align="center"> '+ test.attrib['time'] + '</td></center></tr>' + + result, result_text = test.attrib.get('result', ('', '')) + if result_text: + result_text = '<b style="color:000080;">%s:</b><br>%s<br><br>' % (result.capitalize(), result_text.replace('\n', '<br>')) + stderr = '' if brief and result_text else test.get('stderr', '') + if stderr: + stderr = '<b style="color:000080;"><text color=000080>Stderr</text>:</b><br>%s<br><br>\n' % stderr.replace('\n', '<br>') + stdout = '' if brief and result_text else test.get('stdout', '') + if stdout: + if brief: # cut off server logs + stdout = stdout.split('>>>>>>>>>>>>>>>', 1)[0] + stdout = '<b 
style="color:000080;">Stdout:</b><br>%s<br><br>\n' % stdout.replace('\n', '<br>') + + html_output += '<tr style="%scolor:603000;" id="%s"><td colspan=%s>' % ('' if expanded else 'display:none;', test_id, 4 if category == ERROR_CATEGORY else 3) + if result_text or stderr or stdout: + html_output += '%s%s%s</td></tr>' % (result_text, stderr, stdout) + else: + html_output += '<b style="color:000080;">No output</b></td></tr>' + + html_output += '\n</table>\n</div>' + return html_output + +style_css = """ +html {overflow-y:scroll;} + +body { + font-size:12px; + color:#000000; + background-color:#ffffff; + margin:0px; + font-family:verdana,helvetica,arial,sans-serif; +} + +div {width:100%;} + +table,th,td,input,textarea { + font-size:100%; +} + +table.reference, table.reference_fail { + background-color:#ffffff; + border:1px solid #c3c3c3; + border-collapse:collapse; + vertical-align:middle; +} + +table.reference th { + background-color:#e5eecc; + border:1px solid #c3c3c3; + padding:3px; +} + +table.reference_fail th { + background-color:#ffcccc; + border:1px solid #c3c3c3; + padding:3px; +} + + +table.reference td, table.reference_fail td { + border:1px solid #c3c3c3; + padding:3px; +} + +a.example {font-weight:bold} + +#a:link,a:visited {color:#900B09; background-color:transparent} +#a:hover,a:active {color:#FF0000; background-color:transparent} + +.linktr { + cursor: pointer; +} + +.linktext { + color:#0000FF; + text-decoration: underline; +} +""" + + +# main +if __name__ == '__main__': + + # deal with input args + argparser = argparse.ArgumentParser(description='Aggregate test results of from ./reports dir, produces xml, html, mail report.') + argparser.add_argument('--input_dir', default='./reports', + help='Directory with xmls/setups info. Filenames: report_<setup name>.xml/report_<setup name>.info') + argparser.add_argument('--output_xml', default='./reports/aggregated_tests.xml', + dest = 'output_xmlfile', help='Name of output xml file with aggregated results.') + argparser.add_argument('--output_html', default='./reports/aggregated_tests.html', + dest = 'output_htmlfile', help='Name of output html file with aggregated results.') + argparser.add_argument('--output_mail', default='./reports/aggregated_tests_mail.html', + dest = 'output_mailfile', help='Name of output html file with aggregated results for mail.') + argparser.add_argument('--output_title', default='./reports/aggregated_tests_title.txt', + dest = 'output_titlefile', help='Name of output file to contain title of mail.') + argparser.add_argument('--build_status_file', default='./reports/build_status', + dest = 'build_status_file', help='Name of output file to save scenaries build results (should not be wiped).') + args = argparser.parse_args() + + +##### get input variables/TRex commit info + + scenario = os.environ.get('SCENARIO') + build_url = os.environ.get('BUILD_URL') + build_id = os.environ.get('BUILD_ID') + trex_last_commit_hash = os.environ.get('TREX_LAST_COMMIT_HASH') # TODO: remove it, take from setups info + trex_repo = os.environ.get('TREX_CORE_REPO') + if not scenario: + print 'Warning: no environment variable SCENARIO, using default' + scenario = 'TRex regression' + if not build_url: + print 'Warning: no environment variable BUILD_URL' + if not build_id: + print 'Warning: no environment variable BUILD_ID' + trex_last_commit_info = '' + if scenario == 'trex_build' and trex_last_commit_hash and trex_repo: + try: + print 'Getting TRex commit with hash %s' % trex_last_commit_hash + command = 'timeout 10 git --git-dir 
%s show %s --quiet' % (trex_repo, trex_last_commit_hash) + print 'Executing: %s' % command + proc = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (trex_last_commit_info, stderr) = proc.communicate() + print 'Stdout:\n\t' + trex_last_commit_info.replace('\n', '\n\t') + print 'Stderr:', stderr + print 'Return code:', proc.returncode + trex_last_commit_info = trex_last_commit_info.replace('\n', '<br>') + except Exception as e: + print 'Error getting last commit: %s' % e + +##### get xmls: report_<setup name>.xml + + err = [] + jobs_list = [] + jobs_file = '%s/jobs_list.info' % args.input_dir + if os.path.exists(jobs_file): + with open('%s/jobs_list.info' % args.input_dir) as f: + for line in f.readlines(): + line = line.strip() + if line: + jobs_list.append(line) + else: + message = '%s does not exist!' % jobs_file + print message + err.append(message) + +##### aggregate results to 1 single tree + aggregated_root = ET.Element('testsuite') + setups = {} + for job in jobs_list: + xml_file = '%s/report_%s.xml' % (args.input_dir, job) + if not os.path.exists(xml_file): + message = '%s referenced in jobs_list.info does not exist!' % xml_file + print message + err.append(message) + continue + if os.path.basename(xml_file) == os.path.basename(args.output_xmlfile): + continue + setups[job] = [] + print('Processing setup: %s' % job) + tree = ET.parse(xml_file) + root = tree.getroot() + for key, value in root.attrib.items(): + if key in aggregated_root.attrib and value.isdigit(): # sum total number of failed tests etc. + aggregated_root.attrib[key] = str(int(value) + int(aggregated_root.attrib[key])) + else: + aggregated_root.attrib[key] = value + tests = root.getchildren() + if not len(tests): # there should be tests: + message = 'No tests in xml %s' % xml_file + print message + err.append(message) + for test in tests: + setups[job].append(test) + test.attrib['name'] = test.attrib['classname'] + '.' 
+ test.attrib['name'] + test.attrib['classname'] = job + aggregated_root.append(test) + +##### save output xml + + print('Writing output file: %s' % args.output_xmlfile) + ET.ElementTree(aggregated_root).write(args.output_xmlfile) + + +##### build output html + error_tests = [] + functional_tests = OrderedDict() + # categorize and get output of each test + for test in aggregated_root.getchildren(): # each test in xml + if is_functional_test_name(test.attrib['name']): + functional_tests[test.attrib['name']] = test + result_tuple = None + for child in test.getchildren(): # <system-out>, <system-err> (<failure>, <error>, <skipped> other: passed) +# if child.tag in ('failure', 'error'): + #temp = copy.deepcopy(test) + #print temp._children + #print test._children +# error_tests.append(test) + if child.tag == 'failure': + error_tests.append(test) + result_tuple = ('failure', child.text) + elif child.tag == 'error': + error_tests.append(test) + result_tuple = ('error', child.text) + elif child.tag == 'skipped': + result_tuple = ('skipped', child.text) + elif child.tag == 'system-out': + test.attrib['stdout'] = child.text + elif child.tag == 'system-err': + test.attrib['stderr'] = child.text + if result_tuple: + test.attrib['result'] = result_tuple + + html_output = '''\ +<html> +<head> +<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> +<style type="text/css"> +''' + html_output += style_css + html_output +=''' +</style> +</head> + +<body> +<table class="reference"> +''' + html_output += add_th_td('Scenario:', scenario.capitalize()) + start_time_file = '%s/start_time.info' % args.input_dir + if os.path.exists(start_time_file): + with open(start_time_file) as f: + start_time = int(f.read()) + total_time = int(time.time()) - start_time + html_output += add_th_td('Started:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S')) + html_output += add_th_td('Total duration:', datetime.timedelta(seconds = total_time)) + if trex_last_commit_info: + html_output += add_th_td('Last commit:', trex_last_commit_info) + html_output += '</table><br>\n' + if err: + html_output += '<font color=red>%s<font><br><br>\n' % '\n<br>'.join(err) + +#<table style="width:100%;"> +# <tr> +# <td>Summary:</td>\ +#''' + #passed_quantity = len(result_types['passed']) + #failed_quantity = len(result_types['failed']) + #error_quantity = len(result_types['error']) + #skipped_quantity = len(result_types['skipped']) + + #html_output += '<td>Passed: %s</td>' % passed_quantity + #html_output += '<td>Failed: %s</td>' % (pad_tag(failed_quantity, 'b') if failed_quantity else '0') + #html_output += '<td>Error: %s</td>' % (pad_tag(error_quantity, 'b') if error_quantity else '0') + #html_output += '<td>Skipped: %s</td>' % (pad_tag(skipped_quantity, 'b') if skipped_quantity else '0') +# html_output += ''' +# </tr> +#</table>''' + + category_arr = [FUNCTIONAL_CATEGORY, ERROR_CATEGORY] + +# Adding buttons + # Error button + if len(error_tests): + html_output += '\n<button onclick=tgl_cat("cat_tglr_{error}")>{error}</button>'.format(error = ERROR_CATEGORY) + # Setups buttons + for category, tests in setups.items(): + category_arr.append(category) + html_output += '\n<button onclick=tgl_cat("cat_tglr_%s")>%s</button>' % (category_arr[-1], category) + # Functional buttons + if len(functional_tests): + html_output += '\n<button onclick=tgl_cat("cat_tglr_%s")>%s</button>' % (FUNCTIONAL_CATEGORY, FUNCTIONAL_CATEGORY) + +# Adding tests + # Error tests + if len(error_tests): + html_output += 
add_category_of_tests(ERROR_CATEGORY, error_tests, hidden=False) + # Setups tests + for category, tests in setups.items(): + html_output += add_category_of_tests(category, tests, hidden=True, category_info_dir=args.input_dir) + # Functional tests + if len(functional_tests): + html_output += add_category_of_tests(FUNCTIONAL_CATEGORY, functional_tests.values(), hidden=True) + + html_output += '\n\n<script type="text/javascript">\n var category_arr = %s\n' % ['cat_tglr_%s' % x for x in category_arr] + html_output += ''' + function tgl_cat(id) + { + for(var i=0; i<category_arr.length; i++) + { + var e = document.getElementById(category_arr[i]); + if (id == category_arr[i]) + { + if(e.style.display == 'block') + e.style.display = 'none'; + else + e.style.display = 'block'; + } + else + { + if (e) e.style.display = 'none'; + } + } + } + function tgl_test(id) + { + var e = document.getElementById(id); + if(e.style.display == 'table-row') + e.style.display = 'none'; + else + e.style.display = 'table-row'; + } +</script> +</body> +</html>\ +''' + +# mail report (only error tests, expanded) + + mail_output = '''\ +<html> +<head> +<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> +<style type="text/css"> +''' + mail_output += style_css + mail_output +=''' +</style> +</head> + +<body> +<table class="reference"> +''' + mail_output += add_th_td('Scenario:', scenario.capitalize()) + if build_url: + mail_output += add_th_td('Full HTML report:', '<a class="example" href="%s/HTML_Report">link</a>' % build_url) + start_time_file = '%s/start_time.info' % args.input_dir + if os.path.exists(start_time_file): + with open(start_time_file) as f: + start_time = int(f.read()) + total_time = int(time.time()) - start_time + mail_output += add_th_td('Started:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M:%S')) + mail_output += add_th_td('Total duration:', datetime.timedelta(seconds = total_time)) + if trex_last_commit_info: + mail_output += add_th_td('Last commit:', trex_last_commit_info) + mail_output += '</table><br>\n<table width=100%><tr><td>\n' + + for category in setups.keys(): + failing_category = False + for test in error_tests: + if test.attrib['classname'] == category: + failing_category = True + if failing_category or not len(setups[category]): + mail_output += '<table class="reference_fail" align=left style="Margin-bottom:10;Margin-right:10;">\n' + else: + mail_output += '<table class="reference" align=left style="Margin-bottom:10;Margin-right:10;">\n' + mail_output += add_th_th('Setup:', pad_tag(category.replace('.', '/'), 'b')) + category_info_file = '%s/report_%s.info' % (args.input_dir, category.replace('.', '_')) + if os.path.exists(category_info_file): + with open(category_info_file) as f: + for info_line in f.readlines(): + key_value = info_line.split(':', 1) + if key_value[0].startswith('User'): # always 'hhaim', no need to show + continue + mail_output += add_th_td('%s:' % key_value[0], key_value[1]) + else: + mail_output += add_th_td('Info:', 'No info') + mail_output += '</table>\n' + mail_output += '</td></tr></table>\n' + + # Error tests + if len(error_tests) or err: + if err: + mail_output += '<font color=red>%s<font>' % '\n<br>'.join(err) + if len(error_tests) > 5: + mail_output += '\n<br><font color=red>More than 5 failed tests, showing brief output.<font>\n<br>' + # show only brief version (cut some info) + mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, hidden=False, expanded=True, brief=True) + else: + mail_output += 
add_category_of_tests(ERROR_CATEGORY, error_tests, hidden=False, expanded=True) + else: + mail_output += '<table><tr style="font-size:120;color:green;font-family:arial"><td>☺</td><td style="font-size:20">All passed.</td></tr></table>\n' + mail_output += '\n</body>\n</html>' + +##### save outputs + +# html + with open(args.output_htmlfile, 'w') as f: + print('Writing output file: %s' % args.output_htmlfile) + f.write(html_output) + +# mail content + with open(args.output_mailfile, 'w') as f: + print('Writing output file: %s' % args.output_mailfile) + f.write(mail_output) + +# build status + category_dict_status = {} + if os.path.exists(args.build_status_file): + with open(args.build_status_file) as f: + print('Reading: %s' % args.build_status_file) + category_dict_status = pickle.load(f) + if type(category_dict_status) is not dict: + print '%s is corrupt, truncating' % args.build_status_file + category_dict_status = {} + + last_status = category_dict_status.get(scenario, 'Successful') # assume last is passed if no history + if err or len(error_tests): # has fails + if is_good_status(last_status): + current_status = 'Failure' + else: + current_status = 'Still Failing' + else: + if is_good_status(last_status): + current_status = 'Successful' + else: + current_status = 'Fixed' + category_dict_status[scenario] = current_status + + with open(args.build_status_file, 'w') as f: + print('Writing output file: %s' % args.build_status_file) + pickle.dump(category_dict_status, f) + +# mail title + mailtitle_output = scenario.capitalize() + if build_id: + mailtitle_output += ' - Build #%s' % build_id + mailtitle_output += ' - %s!' % current_status + + with open(args.output_titlefile, 'w') as f: + print('Writing output file: %s' % args.output_titlefile) + f.write(mailtitle_output) diff --git a/scripts/automation/regression/functional_unit_tests.py b/scripts/automation/regression/functional_unit_tests.py new file mode 100755 index 00000000..30e915c4 --- /dev/null +++ b/scripts/automation/regression/functional_unit_tests.py @@ -0,0 +1,78 @@ +#!/router/bin/python + +__copyright__ = "Copyright 2014" + + + +import os +import sys +import outer_packages +import nose +from nose.plugins import Plugin +import logging +from rednose import RedNose +import termstyle + + + + +def set_report_dir (report_dir): + if not os.path.exists(report_dir): + os.mkdir(report_dir) + +if __name__ == "__main__": + + # setting defaults. By default we run all the test suite + specific_tests = False + disableLogCapture = False + long_test = False + report_dir = "reports" + + nose_argv= sys.argv + ['-s', '-v', '--exe', '--rednose', '--detailed-errors'] + +# for arg in sys.argv: +# if 'unit_tests/' in arg: +# specific_tests = True +# if 'log-path' in arg: +# disableLogCapture = True +# if arg=='--collect-only': # this is a user trying simply to view the available tests. removing xunit param from nose args +# nose_argv[5:7] = [] + + + + try: + result = nose.run(argv = nose_argv, addplugins = [RedNose()]) + + if (result == True): + print termstyle.green(""" + ..::''''::.. + .;'' ``;. + :: :: :: :: + :: :: :: :: + :: :: :: :: + :: .:' :: :: `:. :: + :: : : :: + :: `:. 
.:' :: + `;..``::::''..;' + ``::,,,,::'' + + ___ ___ __________ + / _ \/ _ | / __/ __/ / + / ___/ __ |_\ \_\ \/_/ + /_/ /_/ |_/___/___(_) + + """) + sys.exit(0) + else: + sys.exit(-1) + + finally: + pass + + + + + + + + diff --git a/scripts/automation/regression/interactive_platform b/scripts/automation/regression/interactive_platform new file mode 100755 index 00000000..5c5e920e --- /dev/null +++ b/scripts/automation/regression/interactive_platform @@ -0,0 +1,4 @@ +#!/bin/bash +/router/bin/python-2.7.4 interactive_platform.py $@ +sts=$? +exit $sts
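The functional runner above reduces to handing nose a fixed argument list and mapping its boolean result onto a process exit code (0 on success, -1 otherwise). A minimal sketch of that pattern follows; it is not part of the patch and assumes nose and rednose are importable (in this tree they are pulled in from external_libs via outer_packages):

import sys
import nose
from rednose import RedNose

def run_suite(extra_args = None):
    # same switches functional_unit_tests.py appends: no capture, verbose, rednose coloring
    argv = [sys.argv[0], '-s', '-v', '--exe', '--rednose', '--detailed-errors']
    if extra_args:
        argv += extra_args
    ok = nose.run(argv = argv, addplugins = [RedNose()])   # True only if every test passed
    return 0 if ok else -1

if __name__ == '__main__':
    sys.exit(run_suite(sys.argv[1:]))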
\ No newline at end of file diff --git a/scripts/automation/regression/interactive_platform.py b/scripts/automation/regression/interactive_platform.py new file mode 100755 index 00000000..bfedd37d --- /dev/null +++ b/scripts/automation/regression/interactive_platform.py @@ -0,0 +1,338 @@ +#!/router/bin/python-2.7.4 + +from CPlatform import * +import cmd +import outer_packages +import termstyle +import os +from misc_methods import load_object_config_file +from optparse import OptionParser +from CShowParser import PlatformResponseMissmatch, PlatformResponseAmbiguity + +class InteractivePlatform(cmd.Cmd): + + intro = termstyle.green("\nInteractive shell to control a remote Cisco IOS platform.\nType help to view available pre-defined configurations\n(c) All rights reserved.\n") + prompt = '> ' + + def __init__(self, cfg_yaml_path = None, silent_mode = False, virtual_mode = False ): +# super(InteractivePlatform, self).__init__() + cmd.Cmd.__init__(self) + self.virtual_mode = virtual_mode + self.platform = CPlatform(silent_mode) + if cfg_yaml_path is None: + try: + cfg_yaml_path = raw_input(termstyle.cyan("Please enter a readable .yaml configuration file path: ")) + cfg_yaml_path = os.path.abspath(cfg_yaml_path) + except KeyboardInterrupt: + exit(-1) + try: + self.device_cfg = CDeviceCfg(cfg_yaml_path) + self.platform.load_platform_data_from_file(self.device_cfg) + if not virtual_mode: + # if not virtual mode, try to establish a phyisical connection to platform + self.platform.launch_connection(self.device_cfg) + + except Exception as inst: + print termstyle.magenta(inst) + exit(-1) + + def do_show_cfg (self, line): + """Outputs the loaded interface configuration""" + self.platform.get_if_manager().dump_if_config() + print termstyle.green("*** End of interface configuration ***") + + def do_show_nat_cfg (self, line): + """Outputs the loaded nat provided configuration""" + try: + self.platform.dump_obj_config('nat') + print termstyle.green("*** End of nat configuration ***") + except UserWarning as inst: + print termstyle.magenta(inst) + + + def do_show_static_route_cfg (self, line): + """Outputs the loaded static route configuration""" + try: + self.platform.dump_obj_config('static_route') + print termstyle.green("*** End of static route configuration ***") + except UserWarning as inst: + print termstyle.magenta(inst) + + def do_switch_cfg (self, cfg_file_path): + """Switch the current platform interface configuration with another one""" + if cfg_file_path: + cfg_yaml_path = os.path.abspath(cfg_file_path) + self.device_cfg = CDeviceCfg(cfg_yaml_path) + self.platform.load_platform_data_from_file(self.device_cfg) + if not self.virtual_mode: + self.platform.reload_connection(self.device_cfg) + print termstyle.green("Configuration switching completed successfully.") + else: + print termstyle.magenta("Configuration file is missing. Please try again.") + + def do_load_clean (self, arg): + """Loads a clean configuration file onto the platform + Specify no arguments will load 'clean_config.cfg' file from bootflash disk + First argument is clean config filename + Second argument is platform file's disk""" + if arg: + in_val = arg.split(' ') + if len(in_val)==2: + self.platform.load_clean_config(in_val[0], in_val[1]) + else: + print termstyle.magenta("One of the config inputs is missing.") + else: + self.platform.load_clean_config() +# print termstyle.magenta("Configuration file definition is missing. 
use 'help load_clean' for further info.") + + def do_basic_if_config(self, line): + """Apply basic interfaces configuartion to all platform interfaces""" + self.platform.configure_basic_interfaces() + print termstyle.green("Basic interfaces configuration applied successfully.") + + def do_pbr(self, line): + """Apply IPv4 PBR configuration on all interfaces""" + self.platform.config_pbr() + print termstyle.green("IPv4 PBR configuration applied successfully.") + + def do_no_pbr(self, line): + """Removes IPv4 PBR configuration from all interfaces""" + self.platform.config_no_pbr() + print termstyle.green("IPv4 PBR configuration removed successfully.") + + def do_nbar(self, line): + """Apply NBAR PD configuration on all interfaces""" + self.platform.config_nbar_pd() + print termstyle.green("NBAR configuration applied successfully.") + + def do_no_nbar(self, line): + """Removes NBAR PD configuration from all interfaces""" + self.platform.config_no_nbar_pd() + print termstyle.green("NBAR configuration removed successfully.") + + def do_static_route(self, arg): + """Apply IPv4 static routing configuration on all interfaces + Specify no arguments will apply static routing with following config: + 1. clients_start - 16.0.0.1 + 2. servers_start - 48.0.0.1 + 3. dual_port_mask - 1.0.0.0 + 4. client_destination_mask - 255.0.0.0 + 5. server_destination_mask - 255.0.0.0 + """ + if arg: + stat_route_dict = load_object_config_file(arg) +# else: +# print termstyle.magenta("Unknown configutaion option requested. use 'help static_route' for further info.") + else: + stat_route_dict = { 'clients_start' : '16.0.0.1', + 'servers_start' : '48.0.0.1', + 'dual_port_mask': '1.0.0.0', + 'client_destination_mask' : '255.0.0.0', + 'server_destination_mask' : '255.0.0.0' } + stat_route_obj = CStaticRouteConfig(stat_route_dict) + self.platform.config_static_routing(stat_route_obj) + print termstyle.green("IPv4 static routing configuration applied successfully.") +# print termstyle.magenta("Specific configutaion is missing. use 'help static_route' for further info.") + + def do_no_static_route(self, line): + """Removes IPv4 static route configuration from all non-duplicated interfaces""" + try: + self.platform.config_no_static_routing() + print termstyle.green("IPv4 static routing configuration removed successfully.") + except UserWarning as inst: + print termstyle.magenta(inst) + + def do_nat(self, arg): + """Apply NAT configuration on all non-duplicated interfaces + Specify no arguments will apply NAT with following config: + 1. clients_net_start - 16.0.0.0 + 2. client_acl_wildcard_mask - 0.0.0.255 + 3. dual_port_mask - 1.0.0.0 + 4. pool_start - 200.0.0.0 + 5. pool_netmask - 255.255.255.0 + """ + if arg: + nat_dict = load_object_config_file(arg) +# else: +# print termstyle.magenta("Unknown nat configutaion option requested. use 'help nat' for further info.") + else: +# print termstyle.magenta("Specific nat configutaion is missing. 
use 'help nat' for further info.") + nat_dict = { 'clients_net_start' : '16.0.0.0', + 'client_acl_wildcard_mask' : '0.0.0.255', + 'dual_port_mask' : '1.0.0.0', + 'pool_start' : '200.0.0.0', + 'pool_netmask' : '255.255.255.0' } + nat_obj = CNatConfig(nat_dict) + self.platform.config_nat(nat_obj) + print termstyle.green("NAT configuration applied successfully.") + + def do_no_nat(self, arg): + """Removes NAT configuration from all non-duplicated interfaces""" + try: + self.platform.config_no_nat() + print termstyle.green("NAT configuration removed successfully.") + except UserWarning as inst: + print termstyle.magenta(inst) + + + def do_ipv6_pbr(self, line): + """Apply IPv6 PBR configuration on all interfaces""" + self.platform.config_ipv6_pbr() + print termstyle.green("IPv6 PBR configuration applied successfully.") + + def do_no_ipv6_pbr(self, line): + """Removes IPv6 PBR configuration from all interfaces""" + self.platform.config_no_ipv6_pbr() + print termstyle.green("IPv6 PBR configuration removed successfully.") + + def do_zbf(self, line): + """Apply Zone-Based policy Firewall configuration on all interfaces""" + self.platform.config_zbf() + print termstyle.green("Zone-Based policy Firewall configuration applied successfully.") + + def do_no_zbf(self, line): + """Removes Zone-Based policy Firewall configuration from all interfaces""" + self.platform.config_no_zbf() + print termstyle.green("Zone-Based policy Firewall configuration removed successfully.") + + def do_show_cpu_util(self, line): + """Fetches CPU utilization stats from the platform""" + try: + print self.platform.get_cpu_util() + print termstyle.green("*** End of show_cpu_util output ***") + except PlatformResponseMissmatch as inst: + print termstyle.magenta(inst) + + def do_show_drop_stats(self, line): + """Fetches packet drop stats from the platform.\nDrop are summed and presented for both input and output traffic of each interface""" + print self.platform.get_drop_stats() + print termstyle.green("*** End of show_drop_stats output ***") + + def do_show_nbar_stats(self, line): + """Fetches NBAR classification stats from the platform.\nStats are available both as raw data and as percentage data.""" + try: + print self.platform.get_nbar_stats() + print termstyle.green("*** End of show_nbar_stats output ***") + except PlatformResponseMissmatch as inst: + print termstyle.magenta(inst) + + def do_show_nat_stats(self, line): + """Fetches NAT translations stats from the platform""" + print self.platform.get_nat_stats() + print termstyle.green("*** End of show_nat_stats output ***") + + def do_show_cft_stats(self, line): + """Fetches CFT stats from the platform""" + print self.platform.get_cft_stats() + print termstyle.green("*** End of show_sft_stats output ***") + + def do_show_cvla_memory_usage(self, line): + """Fetches CVLA memory usage stats from the platform""" + (res, res2) = self.platform.get_cvla_memory_usage() + print res + print res2 + print termstyle.green("*** End of show_cvla_memory_usage output ***") + + def do_clear_counters(self, line): + """Clears interfaces counters""" + self.platform.clear_counters() + print termstyle.green("*** clear counters completed ***") + + def do_clear_nbar_stats(self, line): + """Clears interfaces counters""" + self.platform.clear_nbar_stats() + print termstyle.green("*** clear nbar stats completed ***") + + def do_clear_cft_counters(self, line): + """Clears interfaces counters""" + self.platform.clear_cft_counters() + print termstyle.green("*** clear cft counters completed ***") + + def 
do_clear_drop_stats(self, line): + """Clears interfaces counters""" + self.platform.clear_packet_drop_stats() + print termstyle.green("*** clear packet drop stats completed ***") + + def do_clear_nat_translations(self, line): + """Clears nat translations""" + self.platform.clear_nat_translations() + print termstyle.green("*** clear nat translations completed ***") + + def do_set_tftp_server (self, line): + """Configures TFTP access on platform""" + self.platform.config_tftp_server(self.device_cfg) + print termstyle.green("*** TFTP config deployment completed ***") + + def do_show_running_image (self, line): + """Fetches currently loaded image of the platform""" + res = self.platform.get_running_image_details() + print res + print termstyle.green("*** Show running image completed ***") + + def do_check_image_existence(self, arg): + """Check if specific image file (usually *.bin) is already stored in platform drive""" + if arg: + try: + res = self.platform.check_image_existence(arg.split(' ')[0]) + print res + print termstyle.green("*** Check image existence completed ***") + except PlatformResponseAmbiguity as inst: + print termstyle.magenta(inst) + else: + print termstyle.magenta("Please provide an image name in order to check for existance.") + + def do_load_image (self, arg): + """Loads a given image filename from tftp server (if not available on disk) and sets it as the boot image on the platform""" + if arg: + try: + self.platform.load_platform_image('asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150324_100047-std.bin')#arg.split(' ')[0]) + except UserWarning as inst: + print termstyle.magenta(inst) + else: + print termstyle.magenta("Image filename is missing.") + + def do_reload (self, line): + """Reloads the platform""" + + ans = misc_methods.query_yes_no('This will reload the platform. Are you sure?', default = None) + if ans: + # user confirmed he wishes to reload the platform + self.platform.reload_platform(self.device_cfg) + print termstyle.green("*** Platform reload completed ***") + else: + print termstyle.green("*** Platform reload aborted ***") + + def do_quit(self, arg): + """Quits the application""" + return True + + def do_exit(self, arg): + """Quits the application""" + return True + + def do_all(self, arg): + """Configures bundle of commands to set PBR routing""" + self.do_load_clean('') + self.do_set_tftp_server('') + self.do_basic_if_config('') + self.do_pbr('') + self.do_ipv6_pbr('') + + + +if __name__ == "__main__": + parser = OptionParser(version="%prog 1.0 \t (C) Cisco Systems Inc.\n") + parser.add_option("-c", "--config-file", dest="cfg_yaml_path", + action="store", help="Define the interface configuration to load the applicatino with.", metavar="FILE_PATH") + parser.add_option("-s", "--silent", dest="silent_mode", default = False, + action="store_true", help="Silence the generated input when commands launched.") + parser.add_option("-v", "--virtual", dest="virtual_mode", default = False, + action="store_true", help="Interact with a virtual router, no actual link will apply. 
Show commands are NOT available in this mode.") + (options, args) = parser.parse_args() + + try: + InteractivePlatform(**vars(options)).cmdloop() + + except KeyboardInterrupt: + exit(-1) + diff --git a/scripts/automation/regression/interfaces_e.py b/scripts/automation/regression/interfaces_e.py new file mode 100755 index 00000000..15301623 --- /dev/null +++ b/scripts/automation/regression/interfaces_e.py @@ -0,0 +1,8 @@ +#!/router/bin/python + +import outer_packages +from enum import Enum + + +# define the states in which a T-Rex can hold during its lifetime +IFType = Enum('IFType', 'Client Server All') diff --git a/scripts/automation/regression/misc_methods.py b/scripts/automation/regression/misc_methods.py new file mode 100755 index 00000000..2341b9be --- /dev/null +++ b/scripts/automation/regression/misc_methods.py @@ -0,0 +1,280 @@ +#!/router/bin/python + +import ConfigParser +import outer_packages +import yaml +import sys +from collections import namedtuple +import subprocess, shlex +import os + +TRexConfig = namedtuple('TRexConfig', 'trex, router, tftp') + +# debug/development purpose, lists object's attributes and their values +def print_r(obj): + for attr in dir(obj): + print 'obj.%s %s' % (attr, getattr(obj, attr)) + +def mix_string (str): + """Convert all string to lowercase letters, and replaces spaces with '_' char""" + return str.replace(' ', '_').lower() + +# executes given command, returns tuple (return_code, stdout, stderr) +def run_command(cmd): + print 'Running command:', cmd + proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdout, stderr) = proc.communicate() + if stdout: + print 'Stdout:\n%s' % stdout + if stderr: + print 'Stderr:\n%s' % stderr + print 'Return code: %s' % proc.returncode + return (proc.returncode, stdout, stderr) + + +def run_remote_command(host, passwd, command_string): + cmd = 'ssh -tt %s \'sudo sh -c "%s"\'' % (host, command_string) + print 'Trying connection with ssh...' + return_code, stdout, stderr = run_command(cmd) + if return_code == 0: + return (return_code, stdout, stderr) + elif passwd is not None: + print 'Trying connection with expect + sshpass.exp...' + cmd = 'sshpass.exp %s %s root "%s"' % (passwd, host, command_string) + return_code, stdout, stderr = run_command(cmd) + return (return_code, stdout, stderr) + + +def generate_intf_lists (interfacesList): + retDict = { + 'relevant_intf' : [], + 'relevant_ip_addr' : [], + 'relevant_mac_addr' : [], + 'total_pairs' : None + } + + for intf in interfacesList: + retDict['relevant_intf'].append(intf['client']) + retDict['relevant_ip_addr'].append(intf['client_config']['ip_addr']) + retDict['relevant_mac_addr'].append(intf['client_config']['mac_addr']) + retDict['relevant_intf'].append(intf['server']) + retDict['relevant_ip_addr'].append(intf['server_config']['ip_addr']) + retDict['relevant_mac_addr'].append(intf['server_config']['mac_addr']) + + retDict['total_pairs'] = len(interfacesList) + + return retDict + +def get_single_net_client_addr (ip_addr, octetListDict = {'3' : 1}, ip_type = 'ipv4'): + """ get_single_net_client_addr(ip_addr, octetListDict, ip_type) -> str + + Parameters + ---------- + ip_addr : str + a string an IP address (by default, of type A.B.C.D) + octetListDict : dict + a ditionary representing the octets on which to act such that ip[octet_key] = ip[octet_key] + octet_value + ip_type : str + a string that defines the ip type to parse. 
possible inputs are 'ipv4', 'ipv6' + + By default- Returns a new ip address - A.B.C.(D+1) + """ + if ip_type == 'ipv4': + ip_lst = ip_addr.split('.') + + for octet,increment in octetListDict.iteritems(): + int_octet = int(octet) + if ((int_octet < 0) or (int_octet > 3)): + raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) ) + else: + if (int(ip_lst[int_octet]) + increment) < 255: + ip_lst[int_octet] = str(int(ip_lst[int_octet]) + increment) + else: + raise ValueError('the requested increment exceeds 255 client address limit') + + return '.'.join(ip_lst) + + else: # this is a ipv6 address, handle accordingly + ip_lst = ip_addr.split(':') + + for octet,increment in octetListDict.iteritems(): + int_octet = int(octet) + if ((int_octet < 0) or (int_octet > 7)): + raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) ) + else: + if (int(ip_lst[int_octet]) + increment) < 65535: + ip_lst[int_octet] = format( int(ip_lst[int_octet], 16) + increment, 'X') + else: + raise ValueError('the requested increment exceeds 65535 client address limit') + + return ':'.join(ip_lst) + + +def load_complete_config_file (filepath): + """load_complete_config_file(filepath) -> list + + Loads a configuration file (.yaml) for both trex config and router config + Returns a list with a dictionary to each of the configurations + """ + + # create response dictionaries + trex_config = {} + rtr_config = {} + tftp_config = {} + + try: + with open(filepath, 'r') as f: + config = yaml.load(f) + + # Handle T-Rex configuration + trex_config['trex_name'] = config["trex"]["hostname"] + trex_config['trex_password'] = config["trex"].get("password") + #trex_config['trex_is_dual'] = config["trex"]["is_dual"] + trex_config['trex_cores'] = int(config["trex"]["cores"]) + #trex_config['trex_latency'] = int(config["trex"]["latency"]) +# trex_config['trex_version_path'] = config["trex"]["version_path"] + trex_config['modes'] = config['trex'].get('modes', []) + + if 'loopback' not in trex_config['modes']: + trex_config['router_interface'] = config["router"]["ip_address"] + + # Handle Router configuration + rtr_config['model'] = config["router"]["model"] + rtr_config['hostname'] = config["router"]["hostname"] + rtr_config['ip_address'] = config["router"]["ip_address"] + rtr_config['image'] = config["router"]["image"] + rtr_config['line_pswd'] = config["router"]["line_password"] + rtr_config['en_pswd'] = config["router"]["en_password"] + rtr_config['interfaces'] = config["router"]["interfaces"] + rtr_config['clean_config'] = config["router"]["clean_config"] + rtr_config['intf_masking'] = config["router"]["intf_masking"] + rtr_config['ipv6_mask'] = config["router"]["ipv6_mask"] + rtr_config['mgmt_interface'] = config["router"]["mgmt_interface"] + + # Handle TFTP configuration + tftp_config['hostname'] = config["tftp"]["hostname"] + tftp_config['ip_address'] = config["tftp"]["ip_address"] + tftp_config['images_path'] = config["tftp"]["images_path"] + + if rtr_config['clean_config'] is None: + raise ValueError('A clean router configuration wasn`t provided.') + + except ValueError: + print '!!!!!' 
+ raise + + except Exception as inst: + print "\nBad configuration file provided: '{0}'\n".format(filepath) + raise inst + + return TRexConfig(trex_config, rtr_config, tftp_config) + +def load_object_config_file (filepath): + try: + with open(filepath, 'r') as f: + config = yaml.load(f) + return config + except Exception as inst: + print "\nBad configuration file provided: '{0}'\n".format(filepath) + print inst + exit(-1) + + +def query_yes_no(question, default="yes"): + """Ask a yes/no question via raw_input() and return their answer. + + "question" is a string that is presented to the user. + "default" is the presumed answer if the user just hits <Enter>. + It must be "yes" (the default), "no" or None (meaning + an answer is required of the user). + + The "answer" return value is True for "yes" or False for "no". + """ + valid = { "yes": True, "y": True, "ye": True, + "no": False, "n": False } + if default is None: + prompt = " [y/n] " + elif default == "yes": + prompt = " [Y/n] " + elif default == "no": + prompt = " [y/N] " + else: + raise ValueError("invalid default answer: '%s'" % default) + + while True: + sys.stdout.write(question + prompt) + choice = raw_input().lower() + if default is not None and choice == '': + return valid[default] + elif choice in valid: + return valid[choice] + else: + sys.stdout.write("Please respond with 'yes' or 'no' " + "(or 'y' or 'n').\n") + + +def load_benchmark_config_file (filepath): + """load_benchmark_config_file(filepath) -> list + + Loads a configuration file (.yaml) for both trex config and router config + Returns a list with a dictionary to each of the configurations + """ + + # create response dictionary + benchmark_config = {} + + try: + with open(filepath, 'r') as f: + benchmark_config = yaml.load(f) + + except Exception as inst: + print "\nBad configuration file provided: '{0}'\n".format(filepath) + print inst + exit(-1) + + return benchmark_config + + +def get_benchmark_param (benchmark_path, test_name, param, sub_param = None): + + config = load_benchmark_config_file(benchmark_path) + if sub_param is None: + return config[test_name][param] + else: + return config[test_name][param][sub_param] + +def gen_increment_dict (dual_port_mask): + addr_lst = dual_port_mask.split('.') + result = {} + for idx, octet_increment in enumerate(addr_lst): + octet_int = int(octet_increment) + if octet_int>0: + result[str(idx)] = octet_int + + return result + + +def get_network_addr (ip_type = 'ipv4'): + ipv4_addr = [1, 1, 1, 0] # base ipv4 address to start generating from- 1.1.1.0 + ipv6_addr = ['2001', 'DB8', 0, '2222', 0, 0, 0, 0] # base ipv6 address to start generating from- 2001:DB8:1111:2222:0:0 + while True: + if ip_type == 'ipv4': + if (ipv4_addr[2] < 255): + yield [".".join( map(str, ipv4_addr) ), '255.255.255.0'] + ipv4_addr[2] += 1 + else: # reached defined maximum limit of address allocation + return + else: # handling ipv6 addressing + if (ipv6_addr[2] < 4369): + tmp_ipv6_addr = list(ipv6_addr) + tmp_ipv6_addr[2] = hex(tmp_ipv6_addr[2])[2:] + yield ":".join( map(str, tmp_ipv6_addr) ) + ipv6_addr[2] += 1 + else: # reached defined maximum limit of address allocation + return + + + + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/outer_packages.py b/scripts/automation/regression/outer_packages.py new file mode 100755 index 00000000..05bedc71 --- /dev/null +++ b/scripts/automation/regression/outer_packages.py @@ -0,0 +1,38 @@ +#!/router/bin/python + +import sys, site +import platform, os + +CURRENT_PATH = 
os.path.dirname(os.path.realpath(__file__)) # alternate use with: os.getcwd() +TREX_PATH = os.getenv('TREX_UNDER_TEST') # path to <trex-core>/scripts directory, env. variable TREX_UNDER_TEST should override it. +if not TREX_PATH or not os.path.isfile('%s/trex_daemon_server' % TREX_PATH): + TREX_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir)) +PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(TREX_PATH, 'external_libs')) +PATH_TO_CTRL_PLANE = os.path.abspath(os.path.join(TREX_PATH, 'automation', 'trex_control_plane')) + +NIGHTLY_MODULES = ['enum34-1.0.4', + 'nose-1.3.4', + 'rednose-0.4.1', + 'progressbar-2.2', + 'termstyle', + 'dpkt-1.8.6', + 'yaml-3.11', + ] + +def import_nightly_modules (): + sys.path.append(TREX_PATH) + sys.path.append(PATH_TO_CTRL_PLANE) + import_module_list(NIGHTLY_MODULES) + +def import_module_list (modules_list): + assert(isinstance(modules_list, list)) + for p in modules_list: + full_path = os.path.join(PATH_TO_PYTHON_LIB, p) + fix_path = os.path.normcase(full_path) #CURRENT_PATH+p) + sys.path.insert(1, full_path) + +import_nightly_modules() + + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/platform_cmd_link.py b/scripts/automation/regression/platform_cmd_link.py new file mode 100755 index 00000000..3d577baf --- /dev/null +++ b/scripts/automation/regression/platform_cmd_link.py @@ -0,0 +1,442 @@ +#!/router/bin/python + +from interfaces_e import IFType +import CustomLogger +import misc_methods +import telnetlib +import socket + +class CCommandCache(object): + def __init__(self): + self.__gen_clean_data_structure() + + def __gen_clean_data_structure (self): + self.cache = {"IF" : {}, + "CONF" : [], + "EXEC" : []} + + def __list_append (self, dest_list, cmd): + if isinstance(cmd, list): + dest_list.extend( cmd ) + else: + dest_list.append( cmd ) + + def add (self, cmd_type, cmd, interface = None): + + if interface is not None: # this is an interface ("IF") config command + if interface in self.cache['IF']: + # interface commands already exists + self.__list_append(self.cache['IF'][interface], cmd) + else: + # no chached commands for this interface + self.cache['IF'][interface] = [] + self.__list_append(self.cache['IF'][interface], cmd) + else: # this is either a CONF or EXEC command + self.__list_append(self.cache[cmd_type.upper()], cmd) + + def dump_config (self): + # dump IF config: + print "configure terminal" + for intf, intf_cmd_list in self.cache['IF'].iteritems(): + print "interface {if_name}".format( if_name = intf ) + print '\n'.join(intf_cmd_list) + + if self.cache['IF']: + # add 'exit' note only if if config actually took place + print 'exit' # exit to global config mode + + # dump global config + if self.cache['CONF']: + print '\n'.join(self.cache['CONF']) + + # exit back to en mode + print "exit" + + # dump exec config + if self.cache['EXEC']: + print '\n'.join(self.cache['EXEC']) + + def get_config_list (self): + conf_list = [] + + conf_list.append("configure terminal") + for intf, intf_cmd_list in self.cache['IF'].iteritems(): + conf_list.append( "interface {if_name}".format( if_name = intf ) ) + conf_list.extend( intf_cmd_list ) + if len(conf_list)>1: + # add 'exit' note only if if config actually took place + conf_list.append("exit") + + conf_list.extend( self.cache['CONF'] ) + conf_list.append("exit") + conf_list.extend( self.cache['EXEC'] ) + + + return conf_list + + def clear_cache (self): + # clear all pointers to cache data (erase the data structure) + self.cache.clear() + # Re-initialize 
the cache + self.__gen_clean_data_structure() + + pass + + +class CCommandLink(object): + def __init__(self, silent_mode = False): + self.history = [] + self.virtual_mode = True + self.silent_mode = silent_mode + self.telnet_con = None + + + def __transmit (self, cmd_list, **kwargs): + self.history.extend(cmd_list) + if not self.silent_mode: + print '\n'.join(cmd_list) # prompting the pushed platform commands + if not self.virtual_mode: + # transmit the command to platform. + return self.telnet_con.write_ios_cmd(cmd_list, **kwargs) + + def run_command (self, cmd_list, **kwargs): + response = '' + for cmd in cmd_list: + + # check which type of cmd we handle + if isinstance(cmd, CCommandCache): + tmp_response = self.__transmit( cmd.get_config_list(), **kwargs ) # join the commands with new-line delimiter + else: + tmp_response = self.__transmit([cmd], **kwargs) + if not self.virtual_mode: + response += tmp_response + return response + + def run_single_command (self, cmd, **kwargs): + return self.run_command([cmd], **kwargs) + + def get_history (self, as_string = False): + if as_string: + return '\n'.join(self.history) + else: + return self.history + + def clear_history (self): + # clear all pointers to history data (erase the data structure) + del self.history[:] + # Re-initialize the histoyr with clear one + self.history = [] + + def launch_platform_connectivity (self, device_config_obj): + connection_info = device_config_obj.get_platform_connection_data() + self.telnet_con = CIosTelnet( **connection_info ) + self.virtual_mode = False # if physical connectivity was successful, toggle virtual mode off + + def close_platform_connection(self): + if self.telnet_con is not None: + self.telnet_con.close() + + + +class CDeviceCfg(object): + def __init__(self, cfg_yaml_path = None): + if cfg_yaml_path is not None: + (self.platform_cfg, self.tftp_cfg) = misc_methods.load_complete_config_file(cfg_yaml_path)[1:3] + + self.interfaces_cfg = self.platform_cfg['interfaces'] # extract only the router interface configuration + + def set_platform_config(self, config_dict): + self.platform_cfg = config_dict + self.interfaces_cfg = self.platform_cfg['interfaces'] + + def set_tftp_config(self, tftp_cfg): + self.tftp_cfg = tftp_cfg + + def get_interfaces_cfg (self): + return self.interfaces_cfg + + def get_ip_address (self): + return self.__get_attr('ip_address') + + def get_line_password (self): + return self.__get_attr('line_pswd') + + def get_en_password (self): + return self.__get_attr('en_pswd') + + def get_mgmt_interface (self): + return self.__get_attr('mgmt_interface') + + def get_platform_connection_data (self): + return { 'host' : self.get_ip_address(), 'line_pass' : self.get_line_password(), 'en_pass' : self.get_en_password() } + + def get_tftp_info (self): + return self.tftp_cfg + + def get_image_name (self): + return self.__get_attr('image') + + def __get_attr (self, attr): + return self.platform_cfg[attr] + + def dump_config (self): + import yaml + print yaml.dump(self.interfaces_cfg, default_flow_style=False) + +class CIfObj(object): + _obj_id = 0 + + def __init__(self, if_name, ipv4_addr, ipv6_addr, src_mac_addr, dest_mac_addr, if_type): + self.__get_and_increment_id() + self.if_name = if_name + self.if_type = if_type + self.src_mac_addr = src_mac_addr + self.dest_mac_addr = dest_mac_addr + self.ipv4_addr = ipv4_addr + self.ipv6_addr = ipv6_addr + self.pair_parent = None # a pointer to CDualIfObj which holds this interface and its pair-complement + + def __get_and_increment_id (self): + 
self._obj_id = CIfObj._obj_id + CIfObj._obj_id += 1 + + def get_name (self): + return self.if_name + + def get_src_mac_addr (self): + return self.src_mac_addr + + def get_dest_mac (self): + return self.dest_mac_addr + + def get_id (self): + return self._obj_id + + def get_if_type (self): + return self.if_type + + def get_ipv4_addr (self): + return self.ipv4_addr + + def get_ipv6_addr (self): + return self.ipv6_addr + + def set_ipv4_addr (self, addr): + self.ipv4_addr = addr + + def set_ipv6_addr (self, addr): + self.ipv6_addr = addr + + def set_pair_parent (self, dual_if_obj): + self.pair_parent = dual_if_obj + + def get_pair_parent (self): + return self.pair_parent + + def is_client (self): + return (self.if_type == IFType.Client) + + def is_server (self): + return (self.if_type == IFType.Server) + + pass + + +class CDualIfObj(object): + _obj_id = 0 + + def __init__(self, vrf_name, client_if_obj, server_if_obj): + self.__get_and_increment_id() + self.vrf_name = vrf_name + self.client_if = client_if_obj + self.server_if = server_if_obj + + # link if_objects to its parent dual_if + self.client_if.set_pair_parent(self) + self.server_if.set_pair_parent(self) + pass + + def __get_and_increment_id (self): + self._obj_id = CDualIfObj._obj_id + CDualIfObj._obj_id += 1 + + def get_id (self): + return self._obj_id + + def get_vrf_name (self): + return self.vrf_name + + def is_duplicated (self): + return self.vrf_name != None + +class CIfManager(object): + _ipv4_gen = misc_methods.get_network_addr() + _ipv6_gen = misc_methods.get_network_addr(ip_type = 'ipv6') + + def __init__(self): + self.interfarces = {} + self.dual_intf = [] + self.full_device_cfg = None + + def __add_if_to_manager (self, if_obj): + self.interfarces[if_obj.get_name()] = if_obj + + def __add_dual_if_to_manager (self, dual_if_obj): + self.dual_intf.append(dual_if_obj) + + def __get_ipv4_net_client_addr(self, ipv4_addr): + return misc_methods.get_single_net_client_addr (ipv4_addr) + + def __get_ipv6_net_client_addr(self, ipv6_addr): + return misc_methods.get_single_net_client_addr (ipv6_addr, {'7' : 1}, ip_type = 'ipv6') + + def load_config (self, device_config_obj): + self.full_device_cfg = device_config_obj + # first, erase all current config + self.interfarces.clear() + del self.dual_intf[:] + + # than, load the configuration + intf_config = device_config_obj.get_interfaces_cfg() + + # finally, parse the information into data-structures + for intf_pair in intf_config: + # generate network addresses for client side, and initialize client if object + tmp_ipv4_addr = self.__get_ipv4_net_client_addr (next(CIfManager._ipv4_gen)[0]) + tmp_ipv6_addr = self.__get_ipv6_net_client_addr (next(CIfManager._ipv6_gen)) + + client_obj = CIfObj(if_name = intf_pair['client']['name'], + ipv4_addr = tmp_ipv4_addr, + ipv6_addr = tmp_ipv6_addr, + src_mac_addr = intf_pair['client']['src_mac_addr'], + dest_mac_addr = intf_pair['client']['dest_mac_addr'], + if_type = IFType.Client) + + # generate network addresses for server side, and initialize server if object + tmp_ipv4_addr = self.__get_ipv4_net_client_addr (next(CIfManager._ipv4_gen)[0]) + tmp_ipv6_addr = self.__get_ipv6_net_client_addr (next(CIfManager._ipv6_gen)) + + server_obj = CIfObj(if_name = intf_pair['server']['name'], + ipv4_addr = tmp_ipv4_addr, + ipv6_addr = tmp_ipv6_addr, + src_mac_addr = intf_pair['server']['src_mac_addr'], + dest_mac_addr = intf_pair['server']['dest_mac_addr'], + if_type = IFType.Server) + + dual_intf_obj = CDualIfObj(vrf_name = intf_pair['vrf_name'], + client_if_obj 
= client_obj, + server_if_obj = server_obj) + + # update single interfaces pointers + client_obj.set_pair_parent(dual_intf_obj) + server_obj.set_pair_parent(dual_intf_obj) + + # finally, update the data-structures with generated objects + self.__add_if_to_manager(client_obj) + self.__add_if_to_manager(server_obj) + self.__add_dual_if_to_manager(dual_intf_obj) + + + def get_if_list (self, if_type = IFType.All, is_duplicated = None): + result = [] + for if_name,if_obj in self.interfarces.iteritems(): + if (if_type == IFType.All) or ( if_obj.get_if_type() == if_type) : + if (is_duplicated is None) or (if_obj.get_pair_parent().is_duplicated() == is_duplicated): + # append this if_obj only if matches both IFType and is_duplicated conditions + result.append(if_obj) + return result + + def get_duplicated_if (self): + result = [] + for dual_if_obj in self.dual_intf: + if dual_if_obj.get_vrf_name() is not None : + result.extend( (dual_if_obj.client_if, dual_if_obj.server_if) ) + return result + + def get_dual_if_list (self, is_duplicated = None): + result = [] + for dual_if in self.dual_intf: + if (is_duplicated is None) or (dual_if.is_duplicated() == is_duplicated): + result.append(dual_if) + return result + + def dump_if_config (self): + if self.full_device_cfg is None: + print "Device configuration isn't loaded.\nPlease load config and try again." + else: + self.full_device_cfg.dump_config() + + +class AuthError(Exception): + pass + +class CIosTelnet(telnetlib.Telnet): + AuthError = AuthError + def __init__ (self, host, line_pass, en_pass, port = 23, str_wait = "#"): + telnetlib.Telnet.__init__(self) + self.host = host + self.port = port + self.line_passwd = line_pass + self.enable_passwd = en_pass + self.pr = str_wait +# self.set_debuglevel (1) + try: + self.open(self.host,self.port, timeout = 5) + self.read_until("word:",1) + self.write("{line_pass}\n".format(line_pass = self.line_passwd) ) + res = self.read_until(">",1) + if 'Password' in res: + raise AuthError('Invalid line password was provided') + self.write("enable 15\n") + self.read_until("d:",1) + self.write("{en_pass}\n".format(en_pass = self.enable_passwd) ) + res = self.read_until(self.pr,1) + if 'Password' in res: + raise AuthError('Invalid en password was provided') + self.write_ios_cmd(['terminal length 0']) + + except socket.timeout: + raise socket.timeout('A timeout error has occured.\nCheck platform connectivity or the hostname defined in the config file') + except Exception as inst: + raise + + def write_ios_cmd (self, cmd_list, result_from = 0, timeout = 1, **kwargs): + assert (isinstance (cmd_list, list) == True) + + if 'flush_first' in kwargs: + self.read_until(self.pr, timeout) # clear any accumulated data in telnet session + + res = '' + wf = '' + if 'read_until' in kwargs: + wf = kwargs['read_until'] + else: + wf = self.pr + + for idx, cmd in enumerate(cmd_list): + self.write(cmd+'\r\n') + if idx < result_from: + # don't care for return string + if type(wf) is list: + self.expect(wf, timeout)[2] + else: + self.read_until(wf, timeout) + else: + # care for return string + if type(wf) is list: + res += self.expect(wf, timeout)[2] + else: + res += self.read_until(wf, timeout) +# return res.split('\r\n') + return res # return the received response as a string, each line is seperated by '\r\n'. 
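The command classes above are designed to compose: commands are staged per interface (or as global CONF / EXEC commands) in a CCommandCache, and a CCommandLink expands the cache into an IOS command list and pushes it, over telnet once a connection is launched or, by default, in virtual mode where the commands are only printed and recorded. A minimal sketch of that flow, assuming the regression package imports resolve; the interface name and addresses are placeholders, not taken from the patch:

from platform_cmd_link import CCommandCache, CCommandLink

cache = CCommandCache()
cache.add('IF', ['mtu 4000', 'ip address 1.1.1.1 255.255.255.0'], 'GigabitEthernet0/0/0')
cache.add('conf', 'ip routing')                  # cmd_type is upper-cased into the CONF bucket
cache.add('exec', 'show ip interface brief')

link = CCommandLink()                            # no launch_platform_connectivity(): stays in virtual mode
link.run_single_command(cache)                   # expands the cache and prints the resulting command list
print link.get_history(as_string = True)         # the same commands are kept for later inspection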
+ + +if __name__ == "__main__": +# dev_cfg = CDeviceCfg('config/config.yaml') +# print dev_cfg.get_platform_connection_data() +# telnet = CIosTelnet( **(dev_cfg.get_platform_connection_data() ) ) + +# if_mng = CIfManager() +# if_mng.load_config(dev_cfg) +# if_mng.dump_config() + pass diff --git a/scripts/automation/regression/reports/.keep b/scripts/automation/regression/reports/.keep new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/scripts/automation/regression/reports/.keep diff --git a/scripts/automation/regression/setups/dave/benchmark.yaml b/scripts/automation/regression/setups/dave/benchmark.yaml new file mode 100755 index 00000000..0427f9a1 --- /dev/null +++ b/scripts/automation/regression/setups/dave/benchmark.yaml @@ -0,0 +1,118 @@ +################################################################ +#### T-Rex benchmark configuration file #### +################################################################ + +test_nbar_simple : + multiplier : 0.5 + cores : 4 + exp_gbps : 0.5 + cpu_to_core_ratio : 37270000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + exp_max_latency : 1000 + + nbar_classification: + http : 29.95 + rtp_audio : 20.75 + oracle_sqlnet : 11.09 + rtp : 10.9 + exchange : 8.16 + citrix : 5.54 + rtsp : 2.85 + sctp : 3.83 + ssl : 2.41 + sip : 0.09 + dns : 1.92 + smtp : 0.56 + pop3 : 0.36 + unknown : 3.15 + +test_rx_check : + multiplier : 25 + cores : 4 + rx_sample_rate : 128 + exp_gbps : 0.5 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + +test_nat_simple : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + multiplier : 400 + cpu_to_core_ratio : 37270000 + cores : 4 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_nat_learning : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 400 + cores : 4 + nat_opened : 100000 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_routing_imix_64 : + multiplier : 2500 + cores : 4 + cpu_to_core_ratio : 8900 + exp_latency : 1 + +test_routing_imix : + multiplier : 70 + cores : 2 + cpu_to_core_ratio : 8900 + exp_latency : 1 + +test_static_routing_imix : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 70 + cores : 2 + cpu_to_core_ratio : 3766666 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 36 + cores : 1 + cpu_to_core_ratio : 3766666 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 36 + cores : 4 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + + + diff --git a/scripts/automation/regression/setups/dave/config.yaml b/scripts/automation/regression/setups/dave/config.yaml new file mode 100755 index 00000000..66e92097 --- /dev/null +++ b/scripts/automation/regression/setups/dave/config.yaml @@ -0,0 +1,94 @@ 
+################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * virtual - virtual OS (accept low CPU utilization in tests) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. +# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : cpp-rtp-trex-01 + cores : 4 + +router: + model : ESP100 + hostname : cpp-rtp-ts-15 + ip_address : 172.18.4.34 + port : 2054 + image : trex_regression_v155_315.bin + line_password : cisco + en_password : cisco + mgmt_interface : dummy + clean_config : dummy + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : TenGigabitEthernet0/0/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : TenGigabitEthernet0/1/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : TenGigabitEthernet0/2/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : TenGigabitEthernet0/3/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : TenGigabitEthernet1/0/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : TenGigabitEthernet1/1/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : TenGigabitEthernet1/2/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : TenGigabitEthernet1/3/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.128.23 + root_dir : /auto/avc-devtest/images/ + images_path : /images/RP1/ diff --git a/scripts/automation/regression/setups/dummy/config.yaml b/scripts/automation/regression/setups/dummy/config.yaml new file mode 100644 index 00000000..8426ec6c --- /dev/null +++ b/scripts/automation/regression/setups/dummy/config.yaml @@ -0,0 +1,11 @@ +################################################################ +#### T-Rex nightly test configuration file #### 
+################################################################ + + +# dummy setup, all Trex tests are expected to be skipped + +trex: + hostname : csi-trex-04 + cores : 2 + modes : [loopback, virtual, dummy_mode] diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml new file mode 100644 index 00000000..b50662e1 --- /dev/null +++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml @@ -0,0 +1,138 @@ +################################################################ +#### T-Rex benchmark configuration file #### +################################################################ + +test_nbar_simple : + multiplier : 20 + cores : 2 + exp_gbps : 4.5 + cpu_to_core_ratio : 37270000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + exp_max_latency : 1000 + + nbar_classification: + http : 30.41 + rtp_audio : 21.22 + rtp : 11.4 + oracle_sqlnet : 11.3 + exchange : 10.95 + citrix : 5.65 + rtsp : 2.67 + dns : 1.95 + smtp : 0.57 + pop3 : 0.36 + sctp : 0.09 + sip : 0.09 + ssl : 0.06 + unknown : 3.2 + +test_rx_check : + multiplier : 25 + cores : 4 + rx_sample_rate : 128 + exp_gbps : 0.5 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + +test_nat_simple : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + multiplier : 400 + cpu_to_core_ratio : 37270000 + cores : 4 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_nat_learning : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 400 + cores : 4 + nat_opened : 100000 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_routing_imix_64 : + multiplier : 2500 + cores : 4 + cpu_to_core_ratio : 8900 + exp_latency : 1 + +test_routing_imix : + multiplier : 36 + cores : 4 + cpu_to_core_ratio : 8900 + exp_latency : 1 + +test_static_routing_imix : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 36 + cores : 4 + cpu_to_core_ratio : 3766666 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 19 + cores : 4 + cpu_to_core_ratio : 3766666 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 36 + cores : 4 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + + +test_rx_check_sfr: + multiplier : 25 + cores : 4 + rx_sample_rate : 32 + +test_rx_check_http: + multiplier : 40000 + cores : 2 + rx_sample_rate : 32 + +test_rx_check_sfr_ipv6: + multiplier : 25 + cores : 4 + rx_sample_rate : 32 + +test_rx_check_http_ipv6: + multiplier : 40000 + cores : 2 + rx_sample_rate : 32 + + diff --git a/scripts/automation/regression/setups/kiwi02/config.yaml b/scripts/automation/regression/setups/kiwi02/config.yaml new file mode 100644 index 00000000..1154b558 --- /dev/null +++ b/scripts/automation/regression/setups/kiwi02/config.yaml @@ -0,0 
+1,95 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. +# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : 10.56.217.210 #10.56.192.189 + cores : 4 + +router: + model : ESP100 + hostname : csi-mcp-asr1k-40 + ip_address : 10.56.192.57 + image : BLD_V155_2_S_XE315_THROTTLE_LATEST_20150424_100040-std.bin # is in harddisk of router + #image : asr1000rp2-adventerprisek9.2014-11-10_18.33_etis.bin + line_password : cisco + en_password : cisco + mgmt_interface : GigabitEthernet0 + clean_config : /tmp/asr1001_TRex_clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : TenGigabitEthernet0/0/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : TenGigabitEthernet0/1/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : duplicate + - client : + name : TenGigabitEthernet0/2/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : TenGigabitEthernet0/3/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : duplicate + - client : + name : TenGigabitEthernet1/0/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : TenGigabitEthernet1/1/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + - client : + name : TenGigabitEthernet1/2/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : TenGigabitEthernet1/3/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : + + +tftp: + hostname : kiwi02_tftp_server + ip_address : 10.56.217.7 + root_dir : /scratch/tftp/ + images_path : hhaim/ diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml 
new file mode 100644 index 00000000..419fe7b3 --- /dev/null +++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml @@ -0,0 +1,153 @@ +############################################################### +#### TRex benchmark configuration file #### +############################################################### + +test_nbar_simple : + multiplier : 1.5 + cores : 1 + exp_gbps : 0.5 + cpu_to_core_ratio : 20800000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + exp_max_latency : 1000 + + nbar_classification: + http : 30.3 + rtp_audio : 21.06 + oracle_sqlnet : 11.25 + rtp : 11.1 + exchange : 10.16 + citrix : 5.6 + rtsp : 2.84 + sctp : 0.65 + ssl : 0.8 + sip : 0.09 + dns : 1.95 + smtp : 0.57 + pop3 : 0.36 + unknown : 3.19 + +test_rx_check : + multiplier : 0.8 + cores : 1 + rx_sample_rate : 128 + exp_gbps : 0.5 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + +test_nat_simple : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + multiplier : 150 + cores : 1 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_nat_learning : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 150 + cores : 1 + nat_opened : 40000 + cpu_to_core_ratio : 270 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_routing_imix_64 : + multiplier : 28 + cores : 1 + cpu_to_core_ratio : 280 + exp_latency : 1 + +test_routing_imix : + multiplier : 1 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.7 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 1.5 + cores : 1 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + + +test_rx_check_sfr: + multiplier : 1.7 + cores : 2 + rx_sample_rate : 16 + +test_rx_check_http: + multiplier : 2200 + cores : 1 + rx_sample_rate : 16 + +test_rx_check_sfr_ipv6: + multiplier : 1.7 + cores : 2 + rx_sample_rate : 16 + +test_rx_check_http_ipv6: + multiplier : 2200 + cores : 1 + rx_sample_rate : 16 + +test_rx_check_http_negative: + multiplier : 2200 + cores : 1 + rx_sample_rate : 16 + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0
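The nbar_classification block above is the golden per-protocol traffic breakdown (in percent) for this setup's test_nbar_simple run. The patch does not show how the comparison against the router's reported NBAR statistics is performed, so the following is only a rough sketch of that kind of check; the per-protocol tolerance value is chosen arbitrarily for illustration:

    def check_nbar_classification(measured, golden, tolerance=1.0):
        # measured / golden: protocol name -> share of traffic in percent,
        # e.g. {'http': 30.3, 'rtp_audio': 21.06, ...} as in benchmark.yaml above.
        errors = []
        for proto, expected in golden.items():
            got = measured.get(proto, 0.0)
            if abs(got - expected) > tolerance:
                errors.append('%s: expected %.2f%%, measured %.2f%%' % (proto, expected, got))
        return errors

    # Example, using a subset of the golden values above:
    golden = {'http': 30.3, 'rtp_audio': 21.06, 'oracle_sqlnet': 11.25}
    print(check_nbar_classification({'http': 29.9, 'rtp_audio': 21.4, 'oracle_sqlnet': 14.1}, golden))
    # -> ['oracle_sqlnet: expected 11.25%, measured 14.10%']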
\ No newline at end of file diff --git a/scripts/automation/regression/setups/trex-dan/config.yaml b/scripts/automation/regression/setups/trex-dan/config.yaml new file mode 100644 index 00000000..ae60f9ad --- /dev/null +++ b/scripts/automation/regression/setups/trex-dan/config.yaml @@ -0,0 +1,69 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. +# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : trex-dan +# version_path : /auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.57/ #/auto/srg-sce-swinfra-usr/emb/users/danklei/Work/asr1k/emb/private/bpsim/main/scripts + cores : 2 + modes : [VM] + +router: + model : 1RU + hostname : ASR1001_T-Rex + ip_address : 10.56.199.247 + image : asr1001-universalk9.BLD_V155_1_S_XE314_THROTTLE_LATEST_20141112_090734-std.bin + #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150121_110036-std.bin + #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150324_100047-std.bin + line_password : lab + en_password : lab + mgmt_interface : GigabitEthernet0/0/0 + clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : GigabitEthernet0/0/1 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/0/2 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : null + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.128.23 + root_dir : /auto/avc-devtest/ + images_path : /images/1RU/ diff --git a/scripts/automation/regression/setups/trex04/benchmark.yaml b/scripts/automation/regression/setups/trex04/benchmark.yaml new file mode 100644 index 00000000..d448910e --- /dev/null +++ b/scripts/automation/regression/setups/trex04/benchmark.yaml @@ -0,0 +1,60 @@ 
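Each setup directory added in this patch pairs a config.yaml like the one just above with a benchmark.yaml; the nose plugin further down loads them through misc_methods.load_complete_config_file and load_benchmark_config_file, which are not part of this diff. As a rough sketch of the documented layout only, assuming plain PyYAML and treating the parsed result as nested dicts (run from the regression directory):

    import yaml  # assumes PyYAML is available on the regression host

    def load_setup(setup_dir):
        # Parse the per-setup files into plain dicts keyed by the sections
        # documented in the comment header: trex, router (optional), tftp (optional).
        with open('%s/config.yaml' % setup_dir) as f:
            config = yaml.safe_load(f)
        with open('%s/benchmark.yaml' % setup_dir) as f:
            benchmark = yaml.safe_load(f)
        return config, benchmark

    config, benchmark = load_setup('setups/trex-dan')
    print(config['trex']['hostname'])                     # trex-dan
    print(config['trex'].get('modes', []))                # ['VM']
    print(benchmark['test_routing_imix']['multiplier'])   # 1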
+################################################################ +#### T-Rex benchmark configuration file #### +################################################################ + + +test_rx_check : + multiplier : 0.8 + cores : 1 + rx_sample_rate : 128 + exp_gbps : 0.5 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + + +test_routing_imix_64 : + multiplier : 28 + cores : 1 + cpu_to_core_ratio : 280 + exp_latency : 1 + +test_routing_imix : + multiplier : 0.8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 0.5 + cores : 1 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + + + diff --git a/scripts/automation/regression/setups/trex04/config.yaml b/scripts/automation/regression/setups/trex04/config.yaml new file mode 100644 index 00000000..f9cc21df --- /dev/null +++ b/scripts/automation/regression/setups/trex04/config.yaml @@ -0,0 +1,39 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the Trex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc. have their limitations in tests) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. 
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-04 + cores : 2 + modes : [loopback, virt_nics, VM] diff --git a/scripts/automation/regression/setups/trex08/benchmark.yaml b/scripts/automation/regression/setups/trex08/benchmark.yaml new file mode 100644 index 00000000..3676abf3 --- /dev/null +++ b/scripts/automation/regression/setups/trex08/benchmark.yaml @@ -0,0 +1,37 @@ +############################################################### +#### TRex benchmark configuration file #### +############################################################### + +test_routing_imix_64 : + multiplier : 8000 + cores : 7 + cpu_to_core_ratio : 280 + exp_latency : 1 + +test_routing_imix : + multiplier : 80 + cores : 4 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 70 + cores : 3 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 80 + cores : 7 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + + + diff --git a/scripts/automation/regression/setups/trex08/config.yaml b/scripts/automation/regression/setups/trex08/config.yaml new file mode 100644 index 00000000..fd3a6a78 --- /dev/null +++ b/scripts/automation/regression/setups/trex08/config.yaml @@ -0,0 +1,40 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. 
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-08 + cores : 2 + modes : ['loopback'] + diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml new file mode 100644 index 00000000..3f7b9a95 --- /dev/null +++ b/scripts/automation/regression/setups/trex09/benchmark.yaml @@ -0,0 +1,118 @@ +############################################################### +#### TRex benchmark configuration file #### +############################################################### + +test_nbar_simple : + multiplier : 1.5 + cores : 1 + exp_gbps : 0.5 + cpu_to_core_ratio : 20800000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + exp_max_latency : 1000 + + nbar_classification: + http : 30.3 + rtp_audio : 21.06 + oracle_sqlnet : 11.25 + rtp : 11.1 + exchange : 10.16 + citrix : 5.6 + rtsp : 2.84 + sctp : 0.65 + ssl : 0.8 + sip : 0.09 + dns : 1.95 + smtp : 0.57 + pop3 : 0.36 + unknown : 3.19 + +test_rx_check : + multiplier : 0.8 + cores : 1 + rx_sample_rate : 128 + exp_gbps : 0.5 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + +test_nat_simple : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + multiplier : 150 + cores : 1 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_nat_learning : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 150 + cores : 1 + nat_opened : 40000 + cpu_to_core_ratio : 270 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_routing_imix_64 : + multiplier : 28 + cores : 1 + cpu_to_core_ratio : 280 + exp_latency : 1 + +test_routing_imix : + multiplier : 1 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.7 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 1.5 + cores : 1 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + + + diff --git a/scripts/automation/regression/setups/trex09/config.yaml b/scripts/automation/regression/setups/trex09/config.yaml new file mode 100644 index 00000000..9820ce6e --- /dev/null +++ b/scripts/automation/regression/setups/trex09/config.yaml @@ -0,0 +1,67 @@ 
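The modes list (for example ['loopback'] on csi-trex-08 just above, or [VM] on trex-dan) tags what a setup can and cannot do; the configuring plugin added later in this patch stores it as CTRexScenario.modes and already uses the 'loopback' tag to decide whether router configuration is needed. A sketch of how an individual test might consult the same tags follows; the helper name and skip policy are illustrative, not taken from the patch:

    from nose.plugins.skip import SkipTest
    from unit_tests.trex_general_test import CTRexScenario

    def require_modes(present=(), absent=()):
        # Skip the calling test unless the setup carries all `present` tags
        # and none of the `absent` ones (CTRexScenario.modes is a set of strings).
        modes = CTRexScenario.modes
        missing = set(present) - modes
        if missing:
            raise SkipTest('setup lacks required modes: %s' % sorted(missing))
        clashing = modes & set(absent)
        if clashing:
            raise SkipTest('setup has incompatible modes: %s' % sorted(clashing))

    # e.g. a router-dependent test could start with:
    # require_modes(absent = ('loopback', 'virt_nics'))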
+################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * virtual - virtual OS (accept low CPU utilization in tests) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. +# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-09 + cores : 2 + +router: + model : 1RU + hostname : ASR1001_T-Rex + ip_address : 10.56.199.247 + image : asr1001-universalk9.BLD_V155_1_S_XE314_THROTTLE_LATEST_20141112_090734-std.bin + #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150121_110036-std.bin + #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150324_100047-std.bin + line_password : lab + en_password : lab + mgmt_interface : GigabitEthernet0/0/0 + clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : GigabitEthernet0/0/1 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/0/2 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : null + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.128.23 + root_dir : /auto/avc-devtest/ + images_path : /images/1RU/ diff --git a/scripts/automation/regression/setups/trex10/benchmark.yaml b/scripts/automation/regression/setups/trex10/benchmark.yaml new file mode 100644 index 00000000..999eff3d --- /dev/null +++ b/scripts/automation/regression/setups/trex10/benchmark.yaml @@ -0,0 +1,60 @@ +################################################################ +#### T-Rex benchmark configuration file #### +################################################################ + + +test_rx_check : + multiplier : 0.8 + cores : 1 + rx_sample_rate : 128 + exp_gbps : 0.5 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + + +test_routing_imix_64 : + multiplier : 37 + cores : 1 + cpu_to_core_ratio : 280 + exp_latency : 1 + +test_routing_imix : + multiplier : 0.8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + 
+test_static_routing_imix : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 0.5 + cores : 1 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + + + diff --git a/scripts/automation/regression/setups/trex10/config.yaml b/scripts/automation/regression/setups/trex10/config.yaml new file mode 100644 index 00000000..093911a9 --- /dev/null +++ b/scripts/automation/regression/setups/trex10/config.yaml @@ -0,0 +1,38 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the Trex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * virtual - virtual OS (accept low CPU utilization in tests) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. 
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-10 + cores : 2 + modes : [loopback, virtual] diff --git a/scripts/automation/regression/setups/trex11/benchmark.yaml b/scripts/automation/regression/setups/trex11/benchmark.yaml new file mode 100644 index 00000000..7280cede --- /dev/null +++ b/scripts/automation/regression/setups/trex11/benchmark.yaml @@ -0,0 +1,133 @@ +############################################################### +#### TRex benchmark configuration file #### +############################################################### + +test_nbar_simple : + multiplier : 1.5 + cores : 1 + exp_gbps : 0.5 + cpu_to_core_ratio : 20800000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + exp_max_latency : 1000 + + nbar_classification: + http : 30.3 + rtp_audio : 21.06 + oracle_sqlnet : 11.25 + rtp : 11.1 + exchange : 10.16 + citrix : 5.6 + rtsp : 2.84 + sctp : 0.65 + ssl : 0.8 + sip : 0.09 + dns : 1.95 + smtp : 0.57 + pop3 : 0.36 + unknown : 3.19 + +test_rx_check : + multiplier : 0.8 + cores : 1 + rx_sample_rate : 128 + exp_gbps : 0.5 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + +test_nat_simple : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + multiplier : 150 + cores : 1 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_nat_learning : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 150 + cores : 1 + nat_opened : 40000 + cpu_to_core_ratio : 270 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_routing_imix_64 : + multiplier : 28 + cores : 1 + cpu_to_core_ratio : 280 + exp_latency : 1 + +test_routing_imix : + multiplier : 1 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.7 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 0.8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 1.5 + cores : 1 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + +test_rx_check_http_negative: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 
+ pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + multiplier : 4000 + cores : 1 + rx_sample_rate : 32 + diff --git a/scripts/automation/regression/setups/trex11/config.yaml b/scripts/automation/regression/setups/trex11/config.yaml new file mode 100644 index 00000000..876a1afd --- /dev/null +++ b/scripts/automation/regression/setups/trex11/config.yaml @@ -0,0 +1,69 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * virtual - virtual OS (accept low CPU utilization in tests) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. +# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-11 + cores : 2 + modes : ['loopback', 'virtual'] + +router: + model : 1RU + hostname : ASR1001_T-Rex +# ip_address : 10.56.199.247 + ip_address : 10.56.199.247123123123 + image : asr1001-universalk9.BLD_V155_1_S_XE314_THROTTLE_LATEST_20141112_090734-std.bin + #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150121_110036-std.bin + #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150324_100047-std.bin + line_password : lab + en_password : lab + mgmt_interface : GigabitEthernet0/0/0 + clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : GigabitEthernet0/0/1 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : GigabitEthernet0/0/2 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : null + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.128.23 + root_dir : /auto/avc-devtest/ + images_path : /images/1RU/ diff --git a/scripts/automation/regression/setups/trex12/benchmark.yaml b/scripts/automation/regression/setups/trex12/benchmark.yaml new file mode 100644 index 00000000..98f7215e --- /dev/null +++ b/scripts/automation/regression/setups/trex12/benchmark.yaml @@ -0,0 +1,161 @@ +############################################################### +#### TRex benchmark configuration 
file #### +############################################################### + +test_nbar_simple : + multiplier : 7.5 + cores : 2 + exp_gbps : 3.5 + cpu_to_core_ratio : 20800000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + exp_max_latency : 1000 + + nbar_classification: + http : 30.18 + rtp-audio : 21.27 + rtp : 11.26 + oracle_sqlnet : 11.2 + exchange : 10.78 + citrix : 5.61 + rtsp : 2.82 + dns : 1.94 + smtp : 0.57 + pop3 : 0.36 + ssl : 0.16 + sctp : 0.13 + sip : 0.09 + unknown : 3.54 + +test_rx_check : + multiplier : 13 + cores : 4 + rx_sample_rate : 128 + exp_gbps : 6 + cpu_to_core_ratio : 37270000 + exp_bw : 13 + exp_latency : 1 + +test_nat_simple : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + multiplier : 12000 + cores : 1 + cpu_to_core_ratio : 37270000 + exp_bw : 1 + exp_latency : 1 + allow_timeout_dev : YES + +test_nat_learning : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 12000 + cores : 1 + nat_opened : 40000 + cpu_to_core_ratio : 270 + exp_bw : 8 + exp_latency : 1 + allow_timeout_dev : YES + +test_routing_imix_64 : + multiplier : 430 + cores : 1 + cpu_to_core_ratio : 280 + exp_latency : 1 + +test_routing_imix : + multiplier : 10 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix : + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 10 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_static_routing_imix_asymmetric: + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + multiplier : 8 + cores : 1 + cpu_to_core_ratio : 1800 + exp_latency : 1 + +test_ipv6_simple : + multiplier : 18 + cores : 4 + cpu_to_core_ratio : 30070000 + cpu2core_custom_dev: YES + cpu2core_dev : 0.07 + + +test_rx_check_sfr: + multiplier : 15 + cores : 3 + rx_sample_rate : 16 + # allow 0.03% errors, bad router + error_tolerance : 0.03 + +test_rx_check_http: + multiplier : 15000 + cores : 1 + rx_sample_rate : 16 + # allow 0.03% errors, bad router + error_tolerance : 0.03 + +test_rx_check_sfr_ipv6: + multiplier : 15 + cores : 3 + rx_sample_rate : 16 + # allow 0.03% errors, bad router + error_tolerance : 0.03 + +test_rx_check_http_ipv6: + multiplier : 15000 + cores : 1 + rx_sample_rate : 16 + # allow 0.03% errors, bad router + error_tolerance : 0.03 + +test_rx_check_http_negative: + multiplier : 13000 + cores : 1 + rx_sample_rate : 16 + stat_route_dict : + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + nat_dict : + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 diff --git a/scripts/automation/regression/setups/trex12/config.yaml b/scripts/automation/regression/setups/trex12/config.yaml new file mode 100644 index 00000000..af17db45 --- /dev/null +++ 
b/scripts/automation/regression/setups/trex12/config.yaml @@ -0,0 +1,68 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. +# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-12 +# version_path : /auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.57/ #/auto/srg-sce-swinfra-usr/emb/users/danklei/Work/asr1k/emb/private/bpsim/main/scripts + cores : 1 + modes : [VM] + +router: + model : ASR1001x + hostname : csi-asr-01 + ip_address : 10.56.216.103 + image : asr1001x-universalk9_npe.BLD_V155_2_S_XE315_THROTTLE_LATEST_20151121_110441-std_2.SSA.bin + line_password : cisco + en_password : cisco + mgmt_interface : GigabitEthernet0 + clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : Te0/0/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : Te0/0/1 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : null + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.128.23 + root_dir : /auto/avc-devtest/ + images_path : /images/RP2/ diff --git a/scripts/automation/regression/sshpass.exp b/scripts/automation/regression/sshpass.exp new file mode 100755 index 00000000..2262290f --- /dev/null +++ b/scripts/automation/regression/sshpass.exp @@ -0,0 +1,17 @@ +#!/usr/cisco/bin/expect -f +# sample command: ./ssh.exp password 192.168.1.11 id * +set pass [lrange $argv 0 0] +set server [lrange $argv 1 1] +set name [lrange $argv 2 2] +set cmd [lrange $argv 3 10] + +set cmd_str [join $cmd] + +spawn ssh -t $name@$server $cmd_str +match_max 100000 +expect "*?assword:*" +send -- "$pass\r" +send -- "\r" +expect eof +wait +#interact diff --git a/scripts/automation/regression/stateless_example.py b/scripts/automation/regression/stateless_example.py new 
file mode 100755 index 00000000..93fb2703 --- /dev/null +++ b/scripts/automation/regression/stateless_example.py @@ -0,0 +1,47 @@ +#!/router/bin/python + +import outer_packages +from client.trex_hltapi import CTRexHltApi +import traceback +import sys, time + +def fail(reason): + print 'Encountered error:\n%s' % reason + sys.exit(1) + +if __name__ == "__main__": + port_list = [0, 1] + #port_list = 1 + try: + print 'init' + hlt_client = CTRexHltApi() + + print 'connecting' + con = hlt_client.connect("localhost", port_list, "danklei", sync_port = 4501, async_port = 4500, break_locks=True, reset=True)#, port=6666) + print 'connected?', hlt_client.connected + if not hlt_client.trex_client or not hlt_client.connected: + fail(con['log']) + print 'connect result:', con + + res = hlt_client.traffic_config("create", 0)#, ip_src_addr="2000.2.2") + print 'traffic_config result:', res + + res = hlt_client.traffic_config("create", 1)#, ip_src_addr="2000.2.2") + print res + print 'got to running!' + #sys.exit(0) + res = hlt_client.traffic_control("run", 1, mul = {'type': 'raw', 'op': 'abs', 'value': 1}, duration = 15)#, ip_src_addr="2000.2.2") + print res + time.sleep(2) + res = hlt_client.traffic_control("stop", 1)#, ip_src_addr="2000.2.2") + print res + + + + except Exception as e: + raise + finally: + #pass + if hlt_client.trex_client: + res = hlt_client.cleanup_session(port_list) + print res diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py new file mode 100644 index 00000000..b9fd87ec --- /dev/null +++ b/scripts/automation/regression/trex.py @@ -0,0 +1,427 @@ +#!/router/bin/python + +import os +import sys +import subprocess +import misc_methods +import re +import signal +import time +from CProgressDisp import TimedProgressBar +import unit_tests.trex_general_test +from unit_tests.tests_exceptions import TRexInUseError +import datetime + +class CTRexRunner: + """This is an instance for generating a CTRexRunner""" + + def __init__ (self, config_dict, yaml): + self.trex_config = config_dict#misc_methods.load_config_file(config_file) + self.yaml = yaml + + + def get_config (self): + """ get_config() -> dict + + Returns the stored configuration of the T-Rex server of the CTRexRunner instance as a dictionary + """ + return self.trex_config + + def set_yaml_file (self, yaml_path): + """ update_yaml_file (self, yaml_path) -> None + + Defines the yaml file to be used by the T-Rex. + """ + self.yaml = yaml_path + + + def generate_run_cmd (self, multiplier, cores, duration, nc = True, export_path="/tmp/trex.txt", **kwargs): + """ generate_run_cmd(self, multiplier, duration, export_path) -> str + + Generates a custom running command for the kick-off of the T-Rex traffic generator. + Returns a command (string) to be issued on the trex server + + Parameters + ---------- + multiplier : float + Defines the T-Rex multiplier factor (platform dependant) + duration : int + Defines the duration of the test + export_path : str + a full system path to which the results of the trex-run will be logged. 
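        Example (illustrative only, not output captured from a real run): with the
        kwargs-to-flag mapping implemented below, a call such as
        generate_run_cmd(10, 2, 100, nc = True, p = True, latency = 1000)
        builds a T-Rex command line resembling
        './t-rex-64 -c 2 -m 10.000000 -d 100 -f <yaml>  --nc  -p --latency 1000'
        (single-letter kwargs become '-x', longer ones become '--key', underscores
        turn into dashes, and True-valued flags are emitted without a value), and
        then wraps it in an sshpass.exp invocation that runs it on the remote
        T-Rex server and redirects the output to export_path.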
+ + """ + fileName, fileExtension = os.path.splitext(self.yaml) + if self.yaml == None: + raise ValueError('T-Rex yaml file is not defined') + elif fileExtension != '.yaml': + raise TypeError('yaml path is not referencing a .yaml file') + + if 'results_file_path' in kwargs: + export_path = kwargs['results_file_path'] + + trex_cmd_str = './t-rex-64 -c %d -m %f -d %d -f %s ' + + if nc: + trex_cmd_str = trex_cmd_str + ' --nc ' + + trex_cmd = trex_cmd_str % (cores, + multiplier, + duration, + self.yaml) + # self.trex_config['trex_latency']) + + for key, value in kwargs.iteritems(): + tmp_key = key.replace('_','-') + dash = ' -' if (len(key)==1) else ' --' + if value == True: + trex_cmd += (dash + tmp_key) + else: + trex_cmd += (dash + '{k} {val}'.format( k = tmp_key, val = value )) + + print "\nT-REX COMMAND: ", trex_cmd + + cmd = 'sshpass.exp %s %s root "cd %s; %s > %s"' % (self.trex_config['trex_password'], + self.trex_config['trex_name'], + self.trex_config['trex_version_path'], + trex_cmd, + export_path) + + return cmd; + + def generate_fetch_cmd (self, result_file_full_path="/tmp/trex.txt"): + """ generate_fetch_cmd(self, result_file_full_path) -> str + + Generates a custom command for which will enable to fetch the resutls of the T-Rex run. + Returns a command (string) to be issued on the trex server. + + Example use: fetch_trex_results() - command that will fetch the content from the default log file- /tmp/trex.txt + fetch_trex_results("/tmp/trex_secondary_file.txt") - command that will fetch the content from a custom log file- /tmp/trex_secondary_file.txt + """ + #dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + script_running_dir = os.path.dirname(os.path.realpath(__file__)) # get the current script working directory so that the sshpass could be accessed. + cmd = script_running_dir + '/sshpass.exp %s %s root "cat %s"' % (self.trex_config['trex_password'], + self.trex_config['trex_name'], + result_file_full_path); + return cmd; + + + + def run (self, multiplier, cores, duration, **kwargs): + """ run(self, multiplier, duration, results_file_path) -> CTRexResults + + Running the T-Rex server based on the config file. + Returns a CTRexResults object containing the results of the run. + + Parameters + ---------- + multiplier : float + Defines the T-Rex multiplier factor (platform dependant) + duration : int + Defines the duration of the test + results_file_path : str + a full system path to which the results of the trex-run will be logged and fetched from. 
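        Notes (summarizing the checks implemented below): the measured wall-clock
        time of the run is compared against duration + 15 seconds; a deviation of
        more than 60% marks the run as failed, and if it ended within roughly two
        seconds a TRexInUseError is raised on the assumption that the server is
        occupied or unreachable, otherwise the scenario is flagged as a T-Rex crash.
        On success the remote log is fetched with generate_fetch_cmd() and parsed
        into the returned CTRexResult.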
+ + """ + tmp_path = None + # print kwargs + if 'export_path' in kwargs: + tmp_path = kwargs['export_path'] + del kwargs['export_path'] + cmd = self.generate_run_cmd(multiplier, cores, duration, tmp_path, **kwargs) + else: + cmd = self.generate_run_cmd(multiplier, cores, duration, **kwargs) + +# print 'T-REx complete command to be used:' +# print cmd + # print kwargs + + progress_thread = TimedProgressBar(duration) + progress_thread.start() + interrupted = False + try: + start_time = time.time() + start = datetime.datetime.now() + results = subprocess.call(cmd, shell = True, stdout = open(os.devnull, 'wb')) + end_time = time.time() + fin = datetime.datetime.now() + # print "Time difference : ", fin-start + runtime_deviation = abs(( (end_time - start_time)/ (duration+15) ) - 1) + print "runtime_deviation: %2.0f %%" % ( runtime_deviation*100.0) + if ( runtime_deviation > 0.6 ) : + # If the run stopped immediately - classify as Trex in use or reachability issue + interrupted = True + if ((end_time - start_time) < 2): + raise TRexInUseError ('T-Rex run failed since T-Rex is used by another process, or due to reachability issues') + else: + unit_tests.trex_general_test.CTRexScenario.trex_crashed = True + # results = subprocess.Popen(cmd, stdout = open(os.devnull, 'wb'), + # shell=True, preexec_fn=os.setsid) + except KeyboardInterrupt: + print "\nT-Rex test interrupted by user during traffic generation!!" + results.killpg(results.pid, signal.SIGTERM) # Send the kill signal to all the process groups + interrupted = True + raise RuntimeError + finally: + progress_thread.join(isPlannedStop = (not interrupted) ) + + if results!=0: + sys.stderr.write("T-Rex run failed. Please Contact trex-dev mailer for further details") + sys.stderr.flush() + return None + elif interrupted: + sys.stderr.write("T-Rex run failed due user-interruption.") + sys.stderr.flush() + return None + else: + + if tmp_path: + cmd = self.generate_fetch_cmd( tmp_path )#**kwargs)#results_file_path) + else: + cmd = self.generate_fetch_cmd() + + try: + run_log = subprocess.check_output(cmd, shell = True) + trex_result = CTRexResult(None, run_log) + trex_result.load_file_lines() + trex_result.parse() + + return trex_result + + except subprocess.CalledProcessError: + sys.stderr.write("TRex result fetching failed. Please Contact trex-dev mailer for further details") + sys.stderr.flush() + return None + +class CTRexResult(): + """This is an instance for generating a CTRexResult""" + def __init__ (self, file, buffer = None): + self.file = file + self.buffer = buffer + self.result = {} + + + def load_file_lines (self): + """ load_file_lines(self) -> None + + Loads into the self.lines the content of self.file + """ + if self.buffer: + self.lines = self.buffer.split("\n") + else: + f = open(self.file,'r') + self.lines = f.readlines() + f.close() + + + def dump (self): + """ dump(self) -> None + + Prints nicely the content of self.result dictionary into the screen + """ + for key, value in self.result.items(): + print "{0:20} : \t{1}".format(key, float(value)) + + def update (self, key, val, _str): + """ update (self, key, val, _str) -> None + + Updates the self.result[key] with a possibly new value representation of val + Example: 15K might be updated into 15000.0 + + Parameters + ---------- + key : + Key of the self.result dictionary of the TRexResult instance + val : float + Key of the self.result dictionary of the TRexResult instance + _str : str + a represntation of the BW (. 
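        A new sample only replaces a stored positive value when it is more than
        97% of it (see the ratio check below), so a briefly sagging reading near
        the end of a run does not overwrite the previously recorded rate.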
+ + """ + + s = _str.strip() + + if s[0]=="G": + val = val*1E9 + elif s[0]=="M": + val = val*1E6 + elif s[0]=="K": + val = val*1E3 + + if self.result.has_key(key): + if self.result[key] > 0: + if (val/self.result[key] > 0.97 ): + self.result[key]= val + else: + self.result[key] = val + else: + self.result[key] = val + + + + def parse (self): + """ parse(self) -> None + + Parse the content of the result file from the TRex test and upload the data into + """ + stop_read = False + d = { + 'total-tx' : 0, + 'total-rx' : 0, + 'total-pps' : 0, + 'total-cps' : 0, + + 'expected-pps' : 0, + 'expected-cps' : 0, + 'expected-bps' : 0, + 'active-flows' : 0, + 'open-flows' : 0 + } + + self.error = "" + + # Parse the output of the test, line by line (each line matches another RegEx and as such + # different rules apply + for line in self.lines: + match = re.match(".*/var/run/.rte_config.*", line) + if match: + stop_read = True + continue + + #Total-Tx : 462.42 Mbps Nat_time_out : 0 ==> we try to parse the next decimal in this case Nat_time_out +# match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+[.]\d+)\W*\w+\W+(\w+)\W*([:]|[=])\W*(\d+)(.*)", line); +# if match: +# key = misc_methods.mix_string(match.group(5)) +# val = float(match.group(7)) +# # continue to parse !! we try the second +# self.result[key] = val #update latest + + # check if we need to stop reading + match = re.match(".*latency daemon has stopped.*", line) + if match: + stop_read = True + continue + + match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+[.]\d+)(.*ps)\s+(\w+)\W*([:]|[=])\W*(\d+)", line) + if match: + key = misc_methods.mix_string(match.group(1)) + val = float(match.group(4)) + if d.has_key(key): + if stop_read == False: + self.update (key, val, match.group(5)) + else: + self.result[key] = val # update latest + key2 = misc_methods.mix_string(match.group(6)) + val2 = int(match.group(8)) + self.result[key2] = val2 # always take latest + + + match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+[.]\d+)(.*)", line) + if match: + key = misc_methods.mix_string(match.group(1)) + val = float(match.group(4)) + if d.has_key(key): + if stop_read == False: + self.update (key, val, match.group(5)) + else: + self.result[key] = val # update latest + continue + + match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+)(.*)", line) + if match: + key = misc_methods.mix_string(match.group(1)) + val = float(match.group(4)) + self.result[key] = val #update latest + continue + + match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(OK)(.*)", line) + if match: + key = misc_methods.mix_string(match.group(1)) + val = 0 # valid + self.result[key] = val #update latest + continue + + match = re.match("\W*(Cpu Utilization)\W*([:]|[=])\W*(\d+[.]\d+) %(.*)", line) + if match: + key = misc_methods.mix_string(match.group(1)) + val = float(match.group(3)) + if self.result.has_key(key): + if (self.result[key] < val): # update only if larger than previous value + self.result[key] = val + else: + self.result[key] = val + continue + + match = re.match(".*(rx_check\s.*)\s+:\s+(\w+)", line) + if match: + key = misc_methods.mix_string(match.group(1)) + try: + val = int(match.group(2)) + except ValueError: # corresponds with rx_check validation case + val = match.group(2) + finally: + self.result[key] = val + continue + + + def get_status (self, drop_expected = False): + if (self.error != ""): + print self.error + return (self.STATUS_ERR_FATAL) + + d = self.result + + # test for latency + latency_limit = 5000 + if ( d['maximum-latency'] > latency_limit ): + 
self.reason="Abnormal latency measured (higher than %s" % latency_limit + return self.STATUS_ERR_LATENCY + + # test for drops + if drop_expected == False: + if ( d['total-pkt-drop'] > 0 ): + self.reason=" At least one packet dropped " + return self.STATUS_ERR_DROP + + # test for rx/tx distance + rcv_vs_tx = d['total-tx']/d['total-rx'] + if ( (rcv_vs_tx >1.2) or (rcv_vs_tx <0.9) ): + self.reason="rx and tx should be close" + return self.STATUS_ERR_RX_TX_DISTANCE + + # expected measurement + expect_vs_measued=d['total-tx']/d['expected-bps'] + if ( (expect_vs_measued >1.1) or (expect_vs_measued < 0.9) ) : + print expect_vs_measued + print d['total-tx'] + print d['expected-bps'] + self.reason="measure is not as expected" + return self.STATUS_ERR_BAD_EXPECTED_MEASUREMENT + + if ( d['latency-any-error'] !=0 ): + self.reason=" latency-any-error has error" + return self.STATUS_ERR_LATENCY_ANY_ERROR + + return self.STATUS_OK + + # return types + STATUS_OK = 0 + STATUS_ERR_FATAL = 1 + STATUS_ERR_LATENCY = 2 + STATUS_ERR_DROP = 3 + STATUS_ERR_RX_TX_DISTANCE = 4 + STATUS_ERR_BAD_EXPECTED_MEASUREMENT = 5, + STATUS_ERR_LATENCY_ANY_ERROR = 6 + +def test_TRex_result_parser(): + t=CTRexResult('trex.txt'); + t.load_file_lines() + t.parse() + print t.result + + + + +if __name__ == "__main__": + #test_TRex_result_parser(); + pass diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py new file mode 100755 index 00000000..f38eb04e --- /dev/null +++ b/scripts/automation/regression/trex_unit_test.py @@ -0,0 +1,273 @@ +#!/router/bin/python + +__copyright__ = "Copyright 2014" + +""" +Name: + trex_unit_test.py + + +Description: + + This script creates the functionality to test the performance of the T-Rex traffic generator + The tested scenario is a T-Rex TG directly connected to a Cisco router. + +:: + + Topology: + + ------- -------- + | | Tx---1gig/10gig----Rx | | + | T-Rex | | router | + | | Rx---1gig/10gig----Tx | | + ------- -------- + +""" + +import os +import sys +import outer_packages +import nose +from nose.plugins import Plugin +import logging +import CustomLogger +import misc_methods +from rednose import RedNose +import termstyle +from unit_tests.trex_general_test import CTRexScenario +from client.trex_client import * +from common.trex_exceptions import * +import trex +import socket +from pprint import pprint +import subprocess +import re + +def check_trex_path(trex_path): + if os.path.isfile('%s/trex_daemon_server' % trex_path): + return os.path.abspath(trex_path) + +def check_setup_path(setup_path): + if os.path.isfile('%s/config.yaml' % setup_path): + return os.path.abspath(setup_path) + + +def get_trex_path(): + latest_build_path = check_trex_path(os.getenv('TREX_UNDER_TEST')) # TREX_UNDER_TEST is env var pointing to <trex-core>/scripts + if not latest_build_path: + latest_build_path = check_trex_path(os.path.join(os.pardir, os.pardir)) + if not latest_build_path: + raise Exception('Could not determine trex_under_test folder, try setting env.var. 
TREX_UNDER_TEST') + return latest_build_path + +DAEMON_STOP_COMMAND = 'cd %s; ./trex_daemon_server stop; sleep 1; ./trex_daemon_server stop;' % get_trex_path() +DAEMON_START_COMMAND = DAEMON_STOP_COMMAND + 'sleep 1; rm /var/log/trex/trex_daemon_server.log; ./trex_daemon_server start; sleep 2; ./trex_daemon_server show' + +def _start_stop_trex_remote_server(trex_data, command): + # start t-rex server as daemon process + # subprocess.call(["/usr/bin/python", "trex_daemon_server", "restart"], cwd = trex_latest_build) + misc_methods.run_remote_command(trex_data['trex_name'], + trex_data['trex_password'], + command) + +def start_trex_remote_server(trex_data, kill_running = False): + if kill_running: + (return_code, stdout, stderr) = misc_methods.run_remote_command(trex_data['trex_name'], + trex_data['trex_password'], + 'ps -u root --format comm,pid,cmd | grep t-rex-64') + if stdout: + for process in stdout.split('\n'): + try: + proc_name, pid, full_cmd = re.split('\s+', process, maxsplit=2) + if proc_name.find('t-rex-64') >= 0: + print 'Killing remote process: %s' % full_cmd + misc_methods.run_remote_command(trex_data['trex_name'], + trex_data['trex_password'], + 'kill %s' % pid) + except: + continue + + _start_stop_trex_remote_server(trex_data, DAEMON_START_COMMAND) + +def stop_trex_remote_server(trex_data): + _start_stop_trex_remote_server(trex_data, DAEMON_STOP_COMMAND) + +class CTRexTestConfiguringPlugin(Plugin): + def options(self, parser, env = os.environ): + super(CTRexTestConfiguringPlugin, self).options(parser, env) + parser.add_option('--cfg', '--trex-scenario-config', action='store', + dest='config_path', + help='Specify path to folder with config.yaml and benchmark.yaml') + parser.add_option('--skip-clean', '--skip_clean', action='store_true', + dest='skip_clean_config', + help='Skip the clean configuration replace on the platform.') + parser.add_option('--load-image', '--load_image', action='store_true', default = False, + dest='load_image', + help='Install image specified in config file on router.') + parser.add_option('--log-path', '--log_path', action='store', + dest='log_path', + help='Specify path for the tests` log to be saved at. Once applied, logs capturing by nose will be disabled.') # Default is CURRENT/WORKING/PATH/trex_log/trex_log.log') + parser.add_option('--verbose-mode', '--verbose_mode', action="store_true", default = False, + dest="verbose_mode", + help="Print RPC command and router commands.") + parser.add_option('--server-logs', '--server_logs', action="store_true", default = False, + dest="server_logs", + help="Print server side (TRex and trex_daemon) logs per test.") + parser.add_option('--kill-running', '--kill_running', action="store_true", default = False, + dest="kill_running", + help="Kills running TRex process on remote server (useful for regression).") + parser.add_option('--dave', action="store_true", default = False, + dest="dave", + help="Dave's setup (temporary workaround flag, remove it ASAP).") + + def configure(self, options, conf): + if CTRexScenario.setup_dir and options.config_path: + raise Exception('Please either define --cfg or use env. 
variable SETUP_DIR, not both.') + if not options.config_path and CTRexScenario.setup_dir: + options.config_path = CTRexScenario.setup_dir + if options.config_path: + self.configuration = misc_methods.load_complete_config_file(os.path.join(options.config_path, 'config.yaml')) + self.benchmark = misc_methods.load_benchmark_config_file(os.path.join(options.config_path, 'benchmark.yaml')) + self.enabled = True + else: + raise Exception('Please specify path to config.yaml using --cfg parameter or env. variable SETUP_DIR') + self.modes = self.configuration.trex.get('modes', []) + self.kill_running = options.kill_running + self.load_image = options.load_image + self.verbose_mode = options.verbose_mode + self.clean_config = False if options.skip_clean_config else True + self.server_logs = options.server_logs + self.dave = options.dave + + if options.log_path: + self.loggerPath = options.log_path + + def begin (self): + # launch t-rex server on relevant setup + if not self.dave: + start_trex_remote_server(self.configuration.trex, self.kill_running) + + # initialize CTRexScenario global testing class, to be used by all tests + CTRexScenario.configuration = self.configuration + CTRexScenario.benchmark = self.benchmark + CTRexScenario.modes = set(self.modes) + CTRexScenario.server_logs = self.server_logs + CTRexScenario.trex = CTRexClient(trex_host = self.configuration.trex['trex_name'], verbose = self.verbose_mode) + if 'loopback' not in self.modes: + CTRexScenario.router_cfg = dict( config_dict = self.configuration.router, + forceImageReload = self.load_image, + silent_mode = not self.verbose_mode, + forceCleanConfig = self.clean_config, + tftp_config_dict = self.configuration.tftp ) + try: + CustomLogger.setup_custom_logger('TRexLogger', self.loggerPath) + except AttributeError: + CustomLogger.setup_custom_logger('TRexLogger') + + def finalize(self, result): + CTRexScenario.is_init = False + if not self.dave: + stop_trex_remote_server(self.configuration.trex) + + +def save_setup_info(): + try: + if CTRexScenario.setup_name and CTRexScenario.trex_version: + setup_info = '' + for key, value in CTRexScenario.trex_version.items(): + setup_info += '{0:8}: {1}\n'.format(key, value) + cfg = CTRexScenario.configuration + setup_info += 'Server: %s, Modes: %s' % (cfg.trex.get('trex_name'), cfg.trex.get('modes')) + if cfg.router: + setup_info += '\nRouter: Model: %s, Image: %s' % (cfg.router.get('model'), CTRexScenario.router_image) + with open('%s/report_%s.info' % (CTRexScenario.report_dir, CTRexScenario.setup_name), 'w') as f: + f.write(setup_info) + except Exception as err: + print 'Error saving setup info: %s ' % err + + +def set_report_dir (report_dir): + if not os.path.exists(report_dir): + os.mkdir(report_dir) + + +if __name__ == "__main__": + + # setting defaults. 
By default we run all the test suite + specific_tests = False + disableLogCapture = False + long_test = False + xml_name = 'unit_test.xml' + CTRexScenario.report_dir = 'reports' + setup_dir = os.getenv('SETUP_DIR', '').rstrip('/') + CTRexScenario.setup_dir = check_setup_path(setup_dir) + if not CTRexScenario.setup_dir: + CTRexScenario.setup_dir = check_setup_path(os.path.join('setups', setup_dir)) + + if CTRexScenario.setup_dir: + CTRexScenario.setup_name = os.path.basename(CTRexScenario.setup_dir) + xml_name = 'report_%s.xml' % CTRexScenario.setup_name + + nose_argv= sys.argv + ['-s', '-v', '--exe', '--rednose', '--detailed-errors', '--with-xunit', '--xunit-file=%s/%s' % (CTRexScenario.report_dir, xml_name)] + + for arg in sys.argv: + if 'unit_tests/' in arg: + specific_tests = True + if 'log-path' in arg: + disableLogCapture = True + if arg == '--collect-only': # this is a user trying simply to view the available tests. removing xunit param from nose args + nose_argv[-3:-1] = [] + CTRexScenario.is_test_list = True + else: + set_report_dir(CTRexScenario.report_dir) + + # Run all of the unit tests or just the selected ones + if not specific_tests: + nose_argv += ['unit_tests'] + if disableLogCapture: + nose_argv += ['--nologcapture'] + + try: + config_plugin = CTRexTestConfiguringPlugin() + red_nose = RedNose() + try: + result = nose.run(argv = nose_argv, addplugins = [red_nose, config_plugin]) + except socket.error: # handle consecutive tests exception, try once again + print "TRex connectivity error identified. Possibly due to consecutive nightly runs.\nRetrying..." + result = nose.run(argv = nose_argv, addplugins = [red_nose, config_plugin]) + finally: + save_setup_info() + + if (result == True and not CTRexScenario.is_test_list): + print termstyle.green(""" + ..::''''::.. + .;'' ``;. + :: :: :: :: + :: :: :: :: + :: :: :: :: + :: .:' :: :: `:. :: + :: : : :: + :: `:. .:' :: + `;..``::::''..;' + ``::,,,,::'' + + ___ ___ __________ + / _ \/ _ | / __/ __/ / + / ___/ __ |_\ \_\ \/_/ + /_/ /_/ |_/___/___(_) + + """) + sys.exit(0) + else: + sys.exit(-1) + + finally: + pass + + + + + + + + diff --git a/scripts/automation/regression/unit_tests/__init__.py b/scripts/automation/regression/unit_tests/__init__.py new file mode 100755 index 00000000..8b137891 --- /dev/null +++ b/scripts/automation/regression/unit_tests/__init__.py @@ -0,0 +1 @@ + diff --git a/scripts/automation/regression/unit_tests/functional_tests/config.yaml b/scripts/automation/regression/unit_tests/functional_tests/config.yaml new file mode 100644 index 00000000..4f4c7c40 --- /dev/null +++ b/scripts/automation/regression/unit_tests/functional_tests/config.yaml @@ -0,0 +1,74 @@ +################################################################ +#### T-Rex nightly test configuration file #### +################################################################ + + +### T-Rex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? 
+# version_path - path to the t-rex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. +# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : hostname + password : root password + version_path : not used + cores : 1 + +router: + model : device model + hostname : device hostname + ip_address : device ip + image : device image name + line_password : telnet pass + en_password : enable pass + mgmt_interface : GigabitEthernet0/0/0 + clean_config : path to clean_config file + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : GigabitEthernet0/0/1 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.1000.0000 + server : + name : GigabitEthernet0/0/2 + src_mac_addr : 0000.0002.0000 + dest_mac_addr : 0000.2000.0000 + vrf_name : null + - client : + name : GigabitEthernet0/0/3 + src_mac_addr : 0000.0003.0000 + dest_mac_addr : 0000.3000.0000 + server : + name : GigabitEthernet0/0/4 + src_mac_addr : 0000.0004.0000 + dest_mac_addr : 0000.4000.0000 + vrf_name : dup + + +tftp: + hostname : tftp hostname + ip_address : tftp ip + root_dir : tftp root dir + images_path : path related to root dir diff --git a/scripts/automation/regression/unit_tests/functional_tests/functional_general_test.py b/scripts/automation/regression/unit_tests/functional_tests/functional_general_test.py new file mode 100755 index 00000000..525b58d2 --- /dev/null +++ b/scripts/automation/regression/unit_tests/functional_tests/functional_general_test.py @@ -0,0 +1,22 @@ +#!/router/bin/python + +from nose.tools import assert_equal +from nose.tools import assert_not_equal +from nose.tools import assert_raises +from nose.tools import raises + + +class CGeneralFunctional_Test(object): + def __init__(self): + pass + + + def setUp(self): + pass + + + def tearDown(self): + pass + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/unit_tests/functional_tests/misc_methods_test.py b/scripts/automation/regression/unit_tests/functional_tests/misc_methods_test.py new file mode 100755 index 00000000..096f86d8 --- /dev/null +++ b/scripts/automation/regression/unit_tests/functional_tests/misc_methods_test.py @@ -0,0 +1,61 @@ +#!/router/bin/python + +import functional_general_test +import misc_methods +from nose.tools import assert_equal +from nose.tools import assert_not_equal +from nose.tools import assert_raises +from nose.tools import raises + + +class MiscMethods_Test(functional_general_test.CGeneralFunctional_Test): + + def setUp(self): + self.ipv4_gen = 
misc_methods.get_network_addr() + self.ipv6_gen = misc_methods.get_network_addr(ip_type = 'ipv6') + pass + + def test_ipv4_gen(self): + for i in range(1, 255): + assert_equal( next(self.ipv4_gen), [".".join( map(str, [1, 1, i, 0])), '255.255.255.0'] ) + + def test_ipv6_gen(self): + tmp_ipv6_addr = ['2001', 'DB8', 0, '2222', 0, 0, 0, 0] + for i in range(0, 255): + tmp_ipv6_addr[2] = hex(i)[2:] + assert_equal( next(self.ipv6_gen), ":".join( map(str, tmp_ipv6_addr)) ) + + def test_get_ipv4_client_addr(self): + tmp_ipv4_addr = next(self.ipv4_gen)[0] + assert_equal ( misc_methods.get_single_net_client_addr(tmp_ipv4_addr), '1.1.1.1') + assert_raises (ValueError, misc_methods.get_single_net_client_addr, tmp_ipv4_addr, {'3' : 255} ) + + def test_get_ipv6_client_addr(self): + tmp_ipv6_addr = next(self.ipv6_gen) + assert_equal ( misc_methods.get_single_net_client_addr(tmp_ipv6_addr, {'7' : 1}, ip_type = 'ipv6'), '2001:DB8:0:2222:0:0:0:1') + assert_equal ( misc_methods.get_single_net_client_addr(tmp_ipv6_addr, {'7' : 2}, ip_type = 'ipv6'), '2001:DB8:0:2222:0:0:0:2') + assert_raises (ValueError, misc_methods.get_single_net_client_addr, tmp_ipv6_addr, {'7' : 70000} ) + + + @raises(ValueError) + def test_ipv4_client_addr_exception(self): + tmp_ipv4_addr = next(self.ipv4_gen)[0] + misc_methods.get_single_net_client_addr(tmp_ipv4_addr, {'4' : 1}) + + @raises(ValueError) + def test_ipv6_client_addr_exception(self): + tmp_ipv6_addr = next(self.ipv6_gen) + misc_methods.get_single_net_client_addr(tmp_ipv6_addr, {'8' : 1}, ip_type = 'ipv6') + + @raises(StopIteration) + def test_gen_ipv4_to_limit (self): + while(True): + next(self.ipv4_gen) + + @raises(StopIteration) + def test_gen_ipv6_to_limit (self): + while(True): + next(self.ipv6_gen) + + def tearDown(self): + pass diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_cache_test.py b/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_cache_test.py new file mode 100755 index 00000000..24ccf7a5 --- /dev/null +++ b/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_cache_test.py @@ -0,0 +1,60 @@ +#!/router/bin/python + +from platform_cmd_link import * +import functional_general_test +from nose.tools import assert_equal +from nose.tools import assert_not_equal + + +class CCommandCache_Test(functional_general_test.CGeneralFunctional_Test): + + def setUp(self): + self.cache = CCommandCache() + self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/1') + self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/2') + self.cache.add('conf', "arp 1.1.1.1 0000.0001.0000 arpa") + self.cache.add('conf', "arp 1.1.2.1 0000.0002.0000 arpa") + self.cache.add('exec', "show ip nbar protocol-discovery stats packet-count") + + def test_add(self): + assert_equal(self.cache.cache['IF'], + {'GigabitEthernet0/0/1' : ['ip nbar protocol-discovery'], + 'GigabitEthernet0/0/2' : ['ip nbar protocol-discovery'] + }) + assert_equal(self.cache.cache['CONF'], + ["arp 1.1.1.1 0000.0001.0000 arpa", + "arp 1.1.2.1 0000.0002.0000 arpa"] + ) + assert_equal(self.cache.cache['EXEC'], + ["show ip nbar protocol-discovery stats packet-count"]) + + def test_dump_config (self): + import sys + from StringIO import StringIO + saved_stdout = sys.stdout + try: + out = StringIO() + sys.stdout = out + self.cache.dump_config() + output = out.getvalue().strip() + assert_equal(output, + "configure terminal\ninterface GigabitEthernet0/0/1\nip nbar protocol-discovery\ninterface GigabitEthernet0/0/2\nip nbar 
protocol-discovery\nexit\narp 1.1.1.1 0000.0001.0000 arpa\narp 1.1.2.1 0000.0002.0000 arpa\nexit\nshow ip nbar protocol-discovery stats packet-count" + ) + finally: + sys.stdout = saved_stdout + + def test_get_config_list (self): + assert_equal(self.cache.get_config_list(), + ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"] + ) + + def test_clear_cache (self): + self.cache.clear_cache() + assert_equal(self.cache.cache, + {"IF" : {}, + "CONF" : [], + "EXEC" : []} + ) + + def tearDown(self): + self.cache.clear_cache() diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_link_test.py b/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_link_test.py new file mode 100755 index 00000000..7a31815b --- /dev/null +++ b/scripts/automation/regression/unit_tests/functional_tests/platform_cmd_link_test.py @@ -0,0 +1,62 @@ +#!/router/bin/python + +from platform_cmd_link import * +import functional_general_test +from nose.tools import assert_equal +from nose.tools import assert_not_equal + + +class CCommandLink_Test(functional_general_test.CGeneralFunctional_Test): + + def setUp(self): + self.cache = CCommandCache() + self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/1') + self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/2') + self.cache.add('conf', "arp 1.1.1.1 0000.0001.0000 arpa") + self.cache.add('conf', "arp 1.1.2.1 0000.0002.0000 arpa") + self.cache.add('exec', "show ip nbar protocol-discovery stats packet-count") + self.com_link = CCommandLink() + + def test_transmit(self): + # test here future implemntatin of platform physical link + pass + + def test_run_cached_command (self): + self.com_link.run_command([self.cache]) + + assert_equal (self.com_link.get_history(), + ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"] + ) + + self.com_link.clear_history() + self.com_link.run_single_command(self.cache) + assert_equal (self.com_link.get_history(), + ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"] + ) + + def test_run_single_command(self): + self.com_link.run_single_command("show ip nbar protocol-discovery stats packet-count") + assert_equal (self.com_link.get_history(), + ["show ip nbar protocol-discovery stats packet-count"] + ) + + def test_run_mixed_commands (self): + self.com_link.run_single_command("show ip nbar protocol-discovery stats packet-count") + self.com_link.run_command([self.cache]) + self.com_link.run_command(["show ip interface brief"]) + + assert_equal (self.com_link.get_history(), + ["show ip nbar protocol-discovery stats packet-count", + "configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show 
ip nbar protocol-discovery stats packet-count", + "show ip interface brief"] + ) + + def test_clear_history (self): + self.com_link.run_command(["show ip interface brief"]) + self.com_link.clear_history() + assert_equal (self.com_link.get_history(), []) + + def tearDown(self): + self.cache.clear_cache() + + diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_device_cfg_test.py b/scripts/automation/regression/unit_tests/functional_tests/platform_device_cfg_test.py new file mode 100755 index 00000000..890d0cb9 --- /dev/null +++ b/scripts/automation/regression/unit_tests/functional_tests/platform_device_cfg_test.py @@ -0,0 +1,20 @@ +#!/router/bin/python + +from platform_cmd_link import * +import functional_general_test +from nose.tools import assert_equal +from nose.tools import assert_not_equal + + +class CDeviceCfg_Test(functional_general_test.CGeneralFunctional_Test): + + def setUp(self): + self.dev_cfg = CDeviceCfg('./unit_tests/functional_tests/config.yaml') + + def test_get_interfaces_cfg(self): + assert_equal (self.dev_cfg.get_interfaces_cfg(), + [{'client': {'src_mac_addr': '0000.0001.0000', 'name': 'GigabitEthernet0/0/1', 'dest_mac_addr': '0000.1000.0000'}, 'vrf_name': None, 'server': {'src_mac_addr': '0000.0002.0000', 'name': 'GigabitEthernet0/0/2', 'dest_mac_addr': '0000.2000.0000'}}, {'client': {'src_mac_addr': '0000.0003.0000', 'name': 'GigabitEthernet0/0/3', 'dest_mac_addr': '0000.3000.0000'}, 'vrf_name': 'dup', 'server': {'src_mac_addr': '0000.0004.0000', 'name': 'GigabitEthernet0/0/4', 'dest_mac_addr': '0000.4000.0000'}}] + ) + + def tearDown(self): + pass diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_dual_if_obj_test.py b/scripts/automation/regression/unit_tests/functional_tests/platform_dual_if_obj_test.py new file mode 100755 index 00000000..ff54b9ee --- /dev/null +++ b/scripts/automation/regression/unit_tests/functional_tests/platform_dual_if_obj_test.py @@ -0,0 +1,31 @@ +#!/router/bin/python + +from platform_cmd_link import * +import functional_general_test +from nose.tools import assert_equal +from nose.tools import assert_not_equal + + +class CDualIfObj_Test(functional_general_test.CGeneralFunctional_Test): + + def setUp(self): + self.if_1 = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', IFType.Client) + self.if_2 = CIfObj('gig0/0/2', '1.1.2.1', '2001:DB8:1:2222:0:0:0:1', '0000.0002.0000', '0000.0002.0000', IFType.Server) + self.if_3 = CIfObj('gig0/0/3', '1.1.3.1', '2001:DB8:2:2222:0:0:0:1', '0000.0003.0000', '0000.0003.0000', IFType.Client) + self.if_4 = CIfObj('gig0/0/4', '1.1.4.1', '2001:DB8:3:2222:0:0:0:1', '0000.0004.0000', '0000.0004.0000', IFType.Server) + self.dual_1 = CDualIfObj(None, self.if_1, self.if_2) + self.dual_2 = CDualIfObj('dup', self.if_3, self.if_4) + + def test_id_allocation(self): + assert (self.dual_1.get_id() < self.dual_2.get_id() < CDualIfObj._obj_id) + + def test_get_vrf_name (self): + assert_equal ( self.dual_1.get_vrf_name() , None ) + assert_equal ( self.dual_2.get_vrf_name() , 'dup' ) + + def test_is_duplicated (self): + assert_equal ( self.dual_1.is_duplicated() , False ) + assert_equal ( self.dual_2.is_duplicated() , True ) + + def tearDown(self): + pass
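The id-allocation assertions in CDualIfObj_Test above (and in CIfObj_Test further down) check that consecutively created objects receive increasing ids that stay below the class-wide _obj_id counter. A minimal sketch of such a counter, using an illustrative class name (the real CIfObj/CDualIfObj implementations live in platform_cmd_link and are not shown here):

    class CCountedObj(object):          # illustrative stand-in, not the real CIfObj/CDualIfObj
        _obj_id = 0                     # class-wide counter shared by all instances

        def __init__(self):
            self._id = CCountedObj._obj_id
            CCountedObj._obj_id += 1    # the next constructed object gets a larger id

        def get_id(self):
            return self._id

    first, second = CCountedObj(), CCountedObj()
    assert first.get_id() < second.get_id() < CCountedObj._obj_id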
\ No newline at end of file diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_if_manager_test.py b/scripts/automation/regression/unit_tests/functional_tests/platform_if_manager_test.py new file mode 100755 index 00000000..7ba6e66e --- /dev/null +++ b/scripts/automation/regression/unit_tests/functional_tests/platform_if_manager_test.py @@ -0,0 +1,40 @@ +#!/router/bin/python + +from platform_cmd_link import * +import functional_general_test +from nose.tools import assert_equal +from nose.tools import assert_not_equal + + +class CIfManager_Test(functional_general_test.CGeneralFunctional_Test): + + def setUp(self): + self.dev_cfg = CDeviceCfg('./unit_tests/functional_tests/config.yaml') + self.if_mng = CIfManager() + + # main testing method to check the entire class + def test_load_config (self): + self.if_mng.load_config(self.dev_cfg) + + # check the number of items in each qeury + assert_equal( len(self.if_mng.get_if_list()), 4 ) + assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client)), 2 ) + assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client, is_duplicated = True)), 1 ) + assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client, is_duplicated = False)), 1 ) + assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server)), 2 ) + assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server, is_duplicated = True)), 1 ) + assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server, is_duplicated = False)), 1 ) + assert_equal( len(self.if_mng.get_duplicated_if()), 2 ) + assert_equal( len(self.if_mng.get_dual_if_list()), 2 ) + + # check the classification with intf name + assert_equal( map(CIfObj.get_name, self.if_mng.get_if_list() ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2','GigabitEthernet0/0/3','GigabitEthernet0/0/4'] ) + assert_equal( map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = True) ), ['GigabitEthernet0/0/3','GigabitEthernet0/0/4'] ) + assert_equal( map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = False) ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2'] ) + assert_equal( map(CIfObj.get_name, self.if_mng.get_duplicated_if() ), ['GigabitEthernet0/0/3', 'GigabitEthernet0/0/4'] ) + + # check the classification with vrf name + assert_equal( map(CDualIfObj.get_vrf_name, self.if_mng.get_dual_if_list() ), [None, 'dup'] ) + + def tearDown(self): + pass diff --git a/scripts/automation/regression/unit_tests/functional_tests/platform_if_obj_test.py b/scripts/automation/regression/unit_tests/functional_tests/platform_if_obj_test.py new file mode 100755 index 00000000..534d4170 --- /dev/null +++ b/scripts/automation/regression/unit_tests/functional_tests/platform_if_obj_test.py @@ -0,0 +1,49 @@ +#!/router/bin/python + +from platform_cmd_link import * +import functional_general_test +from nose.tools import assert_equal +from nose.tools import assert_not_equal + + +class CIfObj_Test(functional_general_test.CGeneralFunctional_Test): + test_idx = 1 + + def setUp(self): + self.if_1 = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', IFType.Client) + self.if_2 = CIfObj('TenGig0/0/0', '1.1.2.1', '2001:DB8:1:2222:0:0:0:1', '0000.0002.0000', '0000.0002.0000', IFType.Server) + CIfObj_Test.test_idx += 1 + + def test_id_allocation(self): + assert (self.if_1.get_id() < self.if_2.get_id() < CIfObj._obj_id) + + def test_isClient(self): + assert_equal (self.if_1.is_client(), True) + + def test_isServer(self): + assert_equal (self.if_2.is_server(), True) + + def 
test_get_name (self): + assert_equal (self.if_1.get_name(), 'gig0/0/1') + assert_equal (self.if_2.get_name(), 'TenGig0/0/0') + + def test_get_src_mac_addr (self): + assert_equal (self.if_1.get_src_mac_addr(), '0000.0001.0000') + + def test_get_dest_mac (self): + assert_equal (self.if_2.get_dest_mac(), '0000.0002.0000') + + def test_get_ipv4_addr (self): + assert_equal (self.if_1.get_ipv4_addr(), '1.1.1.1' ) + assert_equal (self.if_2.get_ipv4_addr(), '1.1.2.1' ) + + def test_get_ipv6_addr (self): + assert_equal (self.if_1.get_ipv6_addr(), '2001:DB8:0:2222:0:0:0:1' ) + assert_equal (self.if_2.get_ipv6_addr(), '2001:DB8:1:2222:0:0:0:1' ) + + def test_get_type (self): + assert_equal (self.if_1.get_if_type(), IFType.Client) + assert_equal (self.if_2.get_if_type(), IFType.Server) + + def tearDown(self): + pass diff --git a/scripts/automation/regression/unit_tests/tests_exceptions.py b/scripts/automation/regression/unit_tests/tests_exceptions.py new file mode 100755 index 00000000..604efcc8 --- /dev/null +++ b/scripts/automation/regression/unit_tests/tests_exceptions.py @@ -0,0 +1,37 @@ +#!/router/bin/python + +class TRexInUseError(Exception): + def __init__(self, value): + self.value = value + def __str__(self): + return repr(self.value) + +class TRexRunFailedError(Exception): + def __init__(self, value): + self.value = value + def __str__(self): + return repr(self.value) + +class TRexIncompleteRunError(Exception): + def __init__(self, value): + self.value = value + def __str__(self): + return repr(self.value) + +class TRexLowCpuUtilError(Exception): + def __init__(self, value): + self.value = value + def __str__(self): + return repr(self.value) + +class AbnormalResultError(Exception): + def __init__(self, value): + self.value = value + def __str__(self): + return repr(self.value) + +class ClassificationMissmatchError(Exception): + def __init__(self, value): + self.value = value + def __str__(self): + return repr(self.value) diff --git a/scripts/automation/regression/unit_tests/trex_general_test.py b/scripts/automation/regression/unit_tests/trex_general_test.py new file mode 100755 index 00000000..6a6ad79c --- /dev/null +++ b/scripts/automation/regression/unit_tests/trex_general_test.py @@ -0,0 +1,328 @@ +#!/router/bin/python + +__copyright__ = "Copyright 2014" + +""" +Name: + trex_general_test.py + + +Description: + + This script creates the functionality to test the performance of the T-Rex traffic generator + The tested scenario is a T-Rex TG directly connected to a Cisco router. + +:: + + Topology: + + ------- -------- + | | Tx---1gig/10gig----Rx | | + | T-Rex | | router | + | | Rx---1gig/10gig----Tx | | + ------- -------- + +""" +from nose.plugins import Plugin +from nose.plugins.skip import SkipTest +import trex +import misc_methods +import sys +import os +# from CPlatformUnderTest import * +from CPlatform import * +import termstyle +import threading +from tests_exceptions import * +from platform_cmd_link import * +import unittest + + +class CTRexScenario(): + modes = set() # list of modes of this setup: loopback, virtual etc. 
+ server_logs = False + is_test_list = False + is_init = False + trex_crashed = False + configuration = None + trex = None + router = None + router_cfg = None + daemon_log_lines = 0 + setup_name = None + setup_dir = None + router_image = None + trex_version = None + report_dir = 'reports' + # logger = None + +#scenario = CTRexScenario() + +def setUpModule(module): +# print ("") # this is to get a newline after the dots +# print ("setup_module before anything in this file") +# # ff = CTRexScenario() +# scenario.configuration = misc_methods.load_complete_config_file('config/config.yaml') +# scenario.trex = trex.CTRexRunner(scenario.configuration[0], None) +# scenario.router = CPlatform(scenario.configuration[1], False, scenario.configuration[2]) +# scenario.router.platform.preCheck() +# print "Done instantiating trex scenario!" + pass + +def tearDownModule(module): +# print ("") # this is to get a newline after the dots +# scenario.router.platform.postCheck() +# print ("teardown_module after anything in this file") + pass + + + +class CTRexGeneral_Test(unittest.TestCase): + """This class defines the general testcase of the T-Rex traffic generator""" + def __init__ (self, *args, **kwargs): + unittest.TestCase.__init__(self, *args, **kwargs) + # Point test object to scenario global object + self.configuration = CTRexScenario.configuration + self.benchmark = CTRexScenario.benchmark + self.trex = CTRexScenario.trex + self.trex_crashed = CTRexScenario.trex_crashed + self.modes = CTRexScenario.modes + self.skipping = False + self.fail_reasons = [] + if not hasattr(self, 'unsupported_modes'): + self.unsupported_modes = [] + self.is_loopback = True if 'loopback' in self.modes else False + self.is_virt_nics = True if 'virt_nics' in self.modes else False + self.is_VM = True if 'VM' in self.modes else False + + if not CTRexScenario.is_init: + CTRexScenario.trex_version = self.trex.get_trex_version() + if not self.is_loopback: + # initilize the scenario based on received configuration, once per entire testing session + CTRexScenario.router = CPlatform(CTRexScenario.router_cfg['silent_mode']) + device_cfg = CDeviceCfg() + device_cfg.set_platform_config(CTRexScenario.router_cfg['config_dict']) + device_cfg.set_tftp_config(CTRexScenario.router_cfg['tftp_config_dict']) + CTRexScenario.router.load_platform_data_from_file(device_cfg) + CTRexScenario.router.launch_connection(device_cfg) + running_image = CTRexScenario.router.get_running_image_details()['image'] + print 'Current router image: %s' % running_image + if CTRexScenario.router_cfg['forceImageReload']: + needed_image = device_cfg.get_image_name() + if not CTRexScenario.router.is_image_matches(needed_image): + print 'Setting router image: %s' % needed_image + CTRexScenario.router.config_tftp_server(device_cfg) + CTRexScenario.router.load_platform_image(needed_image) + CTRexScenario.router.set_boot_image(needed_image) + CTRexScenario.router.reload_platform(device_cfg) + CTRexScenario.router.launch_connection(device_cfg) + running_image = CTRexScenario.router.get_running_image_details()['image'] # verify image + if not CTRexScenario.router.is_image_matches(needed_image): + self.fail('Unable to set router image: %s, current image is: %s' % (needed_image, running_image)) + else: + print 'Matches needed image: %s' % needed_image + CTRexScenario.router_image = running_image + + + if self.modes: + print termstyle.green('\t!!!\tRunning with modes: %s, not suitable tests will be skipped.\t!!!' 
% list(self.modes)) + + CTRexScenario.is_init = True + print termstyle.green("Done instantiating T-Rex scenario!\n") + +# raise RuntimeError('CTRexScenario class is not initialized!') + self.router = CTRexScenario.router + + + +# def assert_dict_eq (self, dict, key, val, error=''): +# v1 = int(dict[key])) +# self.assertEqual(v1, int(val), error) +# +# def assert_dict_gt (self, d, key, val, error=''): +# v1 = int(dict[key]) +# self.assert_gt(v1, int(val), error) + + def assertEqual(self, v1, v2, s): + if v1 != v2: + error='ERROR '+str(v1)+' != '+str(v2)+ ' '+s; + self.fail(error) + + def assert_gt(self, v1, v2, s): + if not v1 > v2: + error='ERROR {big} > {small} {str}'.format(big = v1, small = v2, str = s) + self.fail(error) + + def check_results_eq (self,res,name,val): + if res is None: + self.fail('TRex results cannot be None !') + return + + if name not in res: + self.fail('TRex results does not include key %s' % name) + return + + if res[name] != float(val): + self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val)) + + def check_CPU_benchmark (self, trex_res, err): + #cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util")) + cpu_util = sum([float(x) for x in trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]]) / 3 # mean of 3 values before last + + if cpu_util < 30 and not self.is_virt_nics: + self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util ) + + cores = self.get_benchmark_param('cores') + trex_tx_bps = trex_res.get_last_value("trex-global.data.m_total_tx_bytes") + test_norm_cpu = 100.0*(trex_tx_bps/(cores*cpu_util))/1e6 + + print "TRex CPU utilization: %g%%, norm_cpu is : %d Mb/core" % (round(cpu_util), int(test_norm_cpu)) + + #expected_norm_cpu = self.get_benchmark_param('cpu_to_core_ratio') + + #calc_error_precent = abs(100.0*(test_norm_cpu/expected_norm_cpu)-100.0) + +# if calc_error_precent > err: +# msg ='Normalized bandwidth to CPU utilization ratio is %2.0f Mb/core expected %2.0f Mb/core more than %2.0f %% - ERROR' % (test_norm_cpu, expected_norm_cpu, err) +# raise AbnormalResultError(msg) +# else: +# msg ='Normalized bandwidth to CPU utilization ratio is %2.0f Mb/core expected %2.0f Mb/core less than %2.0f %% - OK' % (test_norm_cpu, expected_norm_cpu, err) +# print msg + + + def check_results_gt (self, res, name, val): + if res is None: + self.fail('TRex results canot be None !') + return + + if name not in res: + self.fail('TRex results does not include key %s' % name) + return + + if res[name]< float(val): + self.fail('TRex results[%s]<%f and not as expected greater than %f ' % (name, res[name], val)) + + def check_for_trex_crash(self): + pass + + def get_benchmark_param (self, param, sub_param = None, test_name = None): + if not test_name: + test_name = self.get_name() + if test_name not in self.benchmark: + self.skip('No data in benchmark.yaml for test %s, skipping.' % test_name) + if sub_param: + return self.benchmark[test_name][param].get(sub_param) + else: + return self.benchmark[test_name].get(param) + + def check_general_scenario_results (self, trex_res, check_latency = True): + + try: + # check if test is valid + if not trex_res.is_done_warmup(): + self.fail('T-Rex did not reach warm-up situtaion. 
Results are not valid.') + + # check T-Rex number of drops + trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_pkts") + trex_drops = trex_res.get_total_drops() + trex_drop_rate = trex_res.get_drop_rate() + if ( (trex_drops/trex_tx_pckt) > 0.001) and (trex_drop_rate > 0.0): # deliberately mask kickoff drops when T-Rex first initiated + self.fail('Number of packet drops larger than 0.1% of all traffic') + + # # check T-Rex expected counters + #trex_exp_rate = trex_res.get_expected_tx_rate().get('m_tx_expected_bps') + #assert trex_exp_rate is not None + #trex_exp_gbps = trex_exp_rate/(10**9) + + if check_latency: + # check that max latency does not exceed 1 msec in regular setup or 20ms in VM + allowed_latency = 20000 if self.is_VM else 1000 + if max(trex_res.get_max_latency().values()) > allowed_latency: + print 'LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency + #raise AbnormalResultError('Maximal latency above 1ms') + + # check that avg latency does not exceed 1 msec in regular setup or 3ms in VM + allowed_latency = 3000 if self.is_VM else 1000 + if max(trex_res.get_avg_latency().values()) > allowed_latency: + print 'LatencyError: Average latency exceeds %s (usec)' % allowed_latency + #raise AbnormalResultError('Maximal latency above 1ms') + + if not self.is_loopback: + # check router number of drops --> deliberately masked- need to be figured out!!!!! + pkt_drop_stats = self.router.get_drop_stats() +# assert pkt_drop_stats['total_drops'] < 20 + + # check for trex-router packet consistency + # TODO: check if it's ok + print 'router drop stats: %s' % pkt_drop_stats + print 'TRex drop stats: %s' % trex_drops + #self.assertEqual(pkt_drop_stats, trex_drops, "TRex's and router's drop stats don't match.") + + except KeyError as e: + self.fail(e) + #assert False + + # except AssertionError as e: + # e.args += ('T-Rex has crashed!') + # raise + + # We encountered error, don't fail the test immediately + def fail(self, reason = 'Unknown error'): + print 'Error: %s' % reason + self.fail_reasons.append(reason) + + # skip running of the test, counts as 'passed' but prints 'skipped' + def skip(self, message = ''): + self.skipping = True + raise SkipTest(message) + + # get name of currently running test + def get_name(self): + return self._testMethodName + + def setUp(self): + test_setup_modes_conflict = self.modes & set(self.unsupported_modes) + if test_setup_modes_conflict: + self.skip("The test can't run with following modes of given setup: %s " % test_setup_modes_conflict) + if not self.trex.is_idle(): + print 'Warning: TRex is not idle at setUp, trying to stop it.' + self.trex.force_kill(confirm = False) + if not self.is_loopback: + print '' + self.router.load_clean_config() + self.router.clear_counters() + self.router.clear_packet_drop_stats() + + ######################################################################## + #### DO NOT ADD TESTS TO THIS FILE #### + #### Added tests here will held once for EVERY test sub-class #### + ######################################################################## + + # masked example to such test. uncomment to watch how it affects # +# def test_isInitialized(self): +# assert CTRexScenario.is_init == True + def tearDown(self): + if not self.trex.is_idle(): + print 'Warning: TRex is not idle at tearDown, trying to stop it.' 
+ self.trex.force_kill(confirm = False) + if not self.skipping: + # print server logs of test run + if CTRexScenario.server_logs: + try: + print termstyle.green('\n>>>>>>>>>>>>>>> Daemon log <<<<<<<<<<<<<<<') + daemon_log = self.trex.get_trex_daemon_log() + log_size = len(daemon_log) + print ''.join(daemon_log[CTRexScenario.daemon_log_lines:]) + CTRexScenario.daemon_log_lines = log_size + except Exception as e: + print "Can't get TRex daemon log:", e + try: + print termstyle.green('>>>>>>>>>>>>>>>> Trex log <<<<<<<<<<<<<<<<') + print ''.join(self.trex.get_trex_log()) + except Exception as e: + print "Can't get TRex log:", e + if len(self.fail_reasons): + raise Exception('The test is failed, reasons:\n%s' % '\n'.join(self.fail_reasons)) + + def check_for_trex_crash(self): + pass diff --git a/scripts/automation/regression/unit_tests/trex_imix_test.py b/scripts/automation/regression/unit_tests/trex_imix_test.py new file mode 100755 index 00000000..b56f7f4e --- /dev/null +++ b/scripts/automation/regression/unit_tests/trex_imix_test.py @@ -0,0 +1,176 @@ + +#!/router/bin/python +from trex_general_test import CTRexGeneral_Test +from CPlatform import CStaticRouteConfig +from tests_exceptions import * +#import sys +import time; + +class CTRexIMIX_Test(CTRexGeneral_Test): + """This class defines the IMIX testcase of the T-Rex traffic generator""" + def __init__(self, *args, **kwargs): + # super(CTRexIMIX_Test, self).__init__() + CTRexGeneral_Test.__init__(self, *args, **kwargs) + pass + + def setUp(self): + super(CTRexIMIX_Test, self).setUp() # launch super test class setUp process + # CTRexGeneral_Test.setUp(self) # launch super test class setUp process + # self.router.clear_counters() + pass + + def test_routing_imix_64(self): + # test initializtion + if not self.is_loopback: + self.router.configure_basic_interfaces() + self.router.config_pbr(mode = "config") + +# self.trex.set_yaml_file('cap2/imix_64.yaml') + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + +# trex_res = self.trex.run(multiplier = mult, cores = core, duration = 30, l = 1000, p = True) + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + d = 30, + f = 'cap2/imix_64.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + # trex_res is a CTRexResult instance- and contains the summary of the test results + # you may see all the results keys by simply calling here for 'print trex_res.result' + print ("\nLATEST RESULT OBJECT:") + print trex_res + + self.check_general_scenario_results(trex_res) + + self.check_CPU_benchmark(trex_res, 10.0) + + # the name intentionally not matches nose default pattern, including the test should be specified explicitly + def dummy(self): + self.assertEqual(1, 2, 'boo') + self.assertEqual(2, 2, 'boo') + self.assertEqual(2, 3, 'boo') + #print '' + #print dir(self) + #print locals() + #print '' + #print_r(unittest.TestCase) + #print '' + #print_r(self) + print '' + #print unittest.TestCase.shortDescription(self) + #self.skip("I'm just a dummy test") + + + def test_routing_imix (self): + # test initializtion + if not self.is_loopback: + self.router.configure_basic_interfaces() + self.router.config_pbr(mode = "config") + +# self.trex.set_yaml_file('cap2/imix_fast_1g.yaml') + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + d = 60, + f = 'cap2/imix_fast_1g.yaml', + l = 1000) + + trex_res = 
self.trex.sample_to_run_finish() + + # trex_res is a CTRexResult instance- and contains the summary of the test results + # you may see all the results keys by simply calling here for 'print trex_res.result' + print ("\nLATEST RESULT OBJECT:") + print trex_res + + self.check_general_scenario_results(trex_res) + + self.check_CPU_benchmark(trex_res, 10.0) + + + def test_static_routing_imix (self): + if self.is_loopback: # in loopback mode this test acts same as test_routing_imix, disable to avoid duplication + self.skip() + # test initializtion + if not self.is_loopback: + self.router.configure_basic_interfaces() + + # Configure static routing based on benchmark data input + stat_route_dict = self.get_benchmark_param('stat_route_dict') + stat_route_obj = CStaticRouteConfig(stat_route_dict) + self.router.config_static_routing(stat_route_obj, mode = "config") + + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + d = 60, + f = 'cap2/imix_fast_1g.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + # trex_res is a CTRexResult instance- and contains the summary of the test results + # you may see all the results keys by simply calling here for 'print trex_res.result' + print ("\nLATEST RESULT OBJECT:") + print trex_res + print ("\nLATEST DUMP:") + print trex_res.get_latest_dump() + + self.check_general_scenario_results(trex_res) + self.check_CPU_benchmark(trex_res, 10) + + + def test_static_routing_imix_asymmetric (self): + # test initializtion + if not self.is_loopback: + self.router.configure_basic_interfaces() + + # Configure static routing based on benchmark data input + stat_route_dict = self.get_benchmark_param('stat_route_dict') + stat_route_obj = CStaticRouteConfig(stat_route_dict) + self.router.config_static_routing(stat_route_obj, mode = "config") + + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + + ret = self.trex.start_trex( + c = core, + m = mult, + nc = True, + d = 100, + f = 'cap2/imix_fast_1g.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + # trex_res is a CTRexResults instance- and contains the summary of the test results + # you may see all the results keys by simply calling here for 'print trex_res.result' + print ("\nLATEST RESULT OBJECT:") + print trex_res + + self.check_general_scenario_results(trex_res) + + self.check_CPU_benchmark(trex_res, 10) + + def tearDown(self): + CTRexGeneral_Test.tearDown(self) + # remove nbar config here + pass + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/unit_tests/trex_ipv6_test.py b/scripts/automation/regression/unit_tests/trex_ipv6_test.py new file mode 100755 index 00000000..bffb4754 --- /dev/null +++ b/scripts/automation/regression/unit_tests/trex_ipv6_test.py @@ -0,0 +1,102 @@ +#!/router/bin/python +from trex_general_test import CTRexGeneral_Test +from tests_exceptions import * +import time +from nose.tools import assert_equal + +class CTRexIPv6_Test(CTRexGeneral_Test): + """This class defines the IPv6 testcase of the T-Rex traffic generator""" + def __init__(self, *args, **kwargs): + super(CTRexIPv6_Test, self).__init__(*args, **kwargs) + pass + + def setUp(self): + super(CTRexIPv6_Test, self).setUp() # launch super test class setUp process +# print " before sleep setup !!" 
+# time.sleep(100000); +# pass + + def test_ipv6_simple(self): + if self.is_virt_nics: + self.skip('--ipv6 flag does not work correctly in with virtual NICs') # TODO: fix + # test initializtion + if not self.is_loopback: + self.router.configure_basic_interfaces() + + self.router.config_pbr(mode = "config") + self.router.config_ipv6_pbr(mode = "config") + + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + ipv6 = True, + d = 60, + f = 'avl/sfr_delay_10_1g.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + # trex_res is a CTRexResult instance- and contains the summary of the test results + # you may see all the results keys by simply calling here for 'print trex_res.result' + print ("\nLATEST RESULT OBJECT:") + print trex_res + + self.check_general_scenario_results(trex_res) + + self.check_CPU_benchmark (trex_res, 10.0) + + assert True + + + def test_ipv6_negative (self): + if self.is_loopback: + self.skip('The test checks ipv6 drops by device and we are in loopback setup') + # test initializtion + self.router.configure_basic_interfaces() + + # NOT CONFIGURING IPv6 INTENTIONALLY TO GET DROPS! + self.router.config_pbr(mode = "config") + + # same params as test_ipv6_simple + mult = self.get_benchmark_param('multiplier', test_name = 'test_ipv6_simple') + core = self.get_benchmark_param('cores', test_name = 'test_ipv6_simple') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + ipv6 = True, + d = 60, + f = 'avl/sfr_delay_10_1g.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + # trex_res is a CTRexResult instance- and contains the summary of the test results + # you may see all the results keys by simply calling here for 'print trex_res.result' + print ("\nLATEST RESULT OBJECT:") + print trex_res + + trex_tx_pckt = float(trex_res.get_last_value("trex-global.data.m_total_tx_pkts")) + trex_drops = int(trex_res.get_total_drops()) + + trex_drop_rate = trex_res.get_drop_rate() + + # make sure that at least 50% of the total transmitted packets failed + self.assert_gt((trex_drops/trex_tx_pckt), 0.5, 'packet drop ratio is not high enough') + + + + def tearDown(self): + CTRexGeneral_Test.tearDown(self) + # remove config here + pass + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/unit_tests/trex_nat_test.py b/scripts/automation/regression/unit_tests/trex_nat_test.py new file mode 100755 index 00000000..452f7ecf --- /dev/null +++ b/scripts/automation/regression/unit_tests/trex_nat_test.py @@ -0,0 +1,164 @@ +#!/router/bin/python +from trex_general_test import CTRexGeneral_Test +from tests_exceptions import * +import time +from CPlatform import CStaticRouteConfig, CNatConfig +from nose.tools import assert_equal + + +class CTRexNoNat_Test(CTRexGeneral_Test):#(unittest.TestCase): + """This class defines the NAT testcase of the T-Rex traffic generator""" + def __init__(self, *args, **kwargs): + super(CTRexNoNat_Test, self).__init__(*args, **kwargs) + self.unsupported_modes = ['loopback'] # NAT requires device + pass + + def setUp(self): + super(CTRexNoNat_Test, self).setUp() # launch super test class setUp process + pass + + def check_nat_stats (self, nat_stats): + pass + + + def test_nat_learning(self): + # test initializtion + self.router.configure_basic_interfaces() + + stat_route_dict = self.get_benchmark_param('stat_route_dict') + stat_route_obj = CStaticRouteConfig(stat_route_dict) + 
self.router.config_static_routing(stat_route_obj, mode = "config") + + self.router.config_nat_verify() # shutdown duplicate interfaces + +# self.trex.set_yaml_file('cap2/http_simple.yaml') + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + +# trex_res = self.trex.run(multiplier = mult, cores = core, duration = 100, l = 1000, learn_verify = True) + ret = self.trex.start_trex( + c = core, + m = mult, + learn_verify = True, + d = 100, + f = 'cap2/http_simple.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + print ("\nLATEST RESULT OBJECT:") + print trex_res + print ("\nLATEST DUMP:") + print trex_res.get_latest_dump() + + + expected_nat_opened = self.get_benchmark_param('nat_opened') + learning_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data + + if self.get_benchmark_param('allow_timeout_dev'): + nat_timeout_ratio = learning_stats['m_total_nat_time_out']/learning_stats['m_total_nat_open'] + if nat_timeout_ratio > 0.005: + self.fail('TRex nat_timeout ratio %f > 0.005 (0.5%) and not as expected to be less than 0.5%' %(nat_timeout_ratio)) + else: + self.check_results_eq (learning_stats, 'm_total_nat_time_out', 0.0) + self.check_results_eq (learning_stats, 'm_total_nat_no_fid', 0.0) + self.check_results_gt (learning_stats, 'm_total_nat_learn_error', 0.0) +# + self.check_results_gt (learning_stats, 'm_total_nat_open', expected_nat_opened) + + self.check_general_scenario_results(trex_res) + + # self.check_CPU_benchmark(trex_res, 10) + + def tearDown(self): + CTRexGeneral_Test.tearDown(self) + pass + + +class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase): + """This class defines the NAT testcase of the T-Rex traffic generator""" + def __init__(self, *args, **kwargs): + super(CTRexNat_Test, self).__init__(*args, **kwargs) + self.unsupported_modes = ['loopback'] # NAT requires device + pass + + def setUp(self): + super(CTRexNat_Test, self).setUp() # launch super test class setUp process + # config nat here + + + def check_nat_stats (self, nat_stats): + pass + + + def test_nat_simple(self): + # test initializtion + self.router.configure_basic_interfaces() + + + stat_route_dict = self.get_benchmark_param('stat_route_dict') + stat_route_obj = CStaticRouteConfig(stat_route_dict) + self.router.config_static_routing(stat_route_obj, mode = "config") + + nat_dict = self.get_benchmark_param('nat_dict') + nat_obj = CNatConfig(nat_dict) + self.router.config_nat(nat_obj) + +# self.trex.set_yaml_file('cap2/http_simple.yaml') + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + +# trex_res = self.trex.run(nc=False,multiplier = mult, cores = core, duration = 100, l = 1000, learn = True) + ret = self.trex.start_trex( + c = core, + m = mult, + learn = True, + d = 100, + f = 'cap2/http_simple.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + print ("\nLATEST RESULT OBJECT:") + print trex_res + print ("\nLATEST DUMP:") + print trex_res.get_latest_dump() + + trex_nat_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data + if self.get_benchmark_param('allow_timeout_dev'): + nat_timeout_ratio = trex_nat_stats['m_total_nat_time_out']/trex_nat_stats['m_total_nat_open'] + if nat_timeout_ratio > 0.005: + self.fail('TRex nat_timeout ratio %f > 0.005 (0.5%) and not as expected to be less than 0.5%' %(nat_timeout_ratio)) + else: + self.check_results_eq (trex_nat_stats,'m_total_nat_time_out', 0.0) + self.check_results_eq 
(trex_nat_stats,'m_total_nat_no_fid', 0.0) + self.check_results_gt (trex_nat_stats,'m_total_nat_open', 6000) + + + self.check_general_scenario_results(trex_res) +## test_norm_cpu = 2*(trex_res.result['total-tx']/(core*trex_res.result['cpu_utilization'])) +# trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_bps") +# cpu_util = int(trex_res.get_last_value("trex-global.data.m_cpu_util")) +# test_norm_cpu = 2*(trex_tx_pckt/(core*cpu_util)) +# print "test_norm_cpu is: ", test_norm_cpu + + # self.check_CPU_benchmark(trex_res, 10) + + #if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > 0.03): + # raiseraise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds 3%') + + nat_stats = self.router.get_nat_stats() + print nat_stats + + self.assert_gt(nat_stats['total_active_trans'], 5000, 'total active translations is not high enough') + self.assert_gt(nat_stats['dynamic_active_trans'], 5000, 'total dynamic active translations is not high enough') + self.assertEqual(nat_stats['static_active_trans'], 0, "NAT statistics nat_stats['static_active_trans'] should be zero") + self.assert_gt(nat_stats['num_of_hits'], 50000, 'total nat hits is not high enough') + + def tearDown(self): + CTRexGeneral_Test.tearDown(self) + self.router.clear_nat_translations() + + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/unit_tests/trex_nbar_test.py b/scripts/automation/regression/unit_tests/trex_nbar_test.py new file mode 100755 index 00000000..e4f7eb37 --- /dev/null +++ b/scripts/automation/regression/unit_tests/trex_nbar_test.py @@ -0,0 +1,193 @@ +#!/router/bin/python +from trex_general_test import CTRexGeneral_Test +from tests_exceptions import * +from interfaces_e import IFType +from nose.tools import nottest +from misc_methods import print_r + +class CTRexNbar_Test(CTRexGeneral_Test): + """This class defines the NBAR testcase of the T-Rex traffic generator""" + def __init__(self, *args, **kwargs): + super(CTRexNbar_Test, self).__init__(*args, **kwargs) + self.unsupported_modes = ['loopback'] # obviously no NBar in loopback + pass + + def setUp(self): + super(CTRexNbar_Test, self).setUp() # launch super test class setUp process +# self.router.kill_nbar_flows() + self.router.clear_cft_counters() + self.router.clear_nbar_stats() + + def match_classification (self): + nbar_benchmark = self.get_benchmark_param("nbar_classification") + test_classification = self.router.get_nbar_stats() + print "TEST CLASSIFICATION:" + print test_classification + missmatchFlag = False + missmatchMsg = "NBAR classification contians a missmatch on the following protocols:" + fmt = '\n\t{0:15} | Expected: {1:>3.2f}%, Got: {2:>3.2f}%' + noise_level = 0.045 # percents + + for cl_intf in self.router.get_if_manager().get_if_list(if_type = IFType.Client): + client_intf = cl_intf.get_name() + + # removing noise classifications + for key, value in test_classification[client_intf]['percentage'].items(): + if value <= noise_level: + print 'Removing noise classification: %s' % key + del test_classification[client_intf]['percentage'][key] + + if len(test_classification[client_intf]['percentage']) != (len(nbar_benchmark) + 1): # adding 'total' key to nbar_benchmark + raise ClassificationMissmatchError ('The total size of classification result does not match the provided benchmark.') + + for protocol, bench in nbar_benchmark.iteritems(): + if protocol != 'total': + try: + bench = float(bench) + protocol = protocol.replace('_','-') + protocol_test_res = 
test_classification[client_intf]['percentage'][protocol] + deviation = 100 * abs(bench/protocol_test_res - 1) # percents + difference = abs(bench - protocol_test_res) + if (deviation > 10 and difference > noise_level): # allowing 10% deviation and 'noise_level'% difference + missmatchFlag = True + missmatchMsg += fmt.format(protocol, bench, protocol_test_res) + except KeyError as e: + missmatchFlag = True + print e + print "Changes missmatchFlag to True. ", "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf ) + missmatchMsg += "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf ) + except ZeroDivisionError as e: + print "ZeroDivisionError: %s" % protocol + pass + if missmatchFlag: + self.fail(missmatchMsg) + + + def test_nbar_simple(self): + # test initializtion + deviation_compare_value = 0.03 # default value of deviation - 3% + self.router.configure_basic_interfaces() + + self.router.config_pbr(mode = "config") + self.router.config_nbar_pd() + + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + d = 100, + f = 'avl/sfr_delay_10_1g.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + # trex_res is a CTRexResult instance- and contains the summary of the test results + # you may see all the results keys by simply calling here for 'print trex_res.result' + print ("\nLATEST RESULT OBJECT:") + print trex_res + print ("\nLATEST DUMP:") + print trex_res.get_latest_dump() + + + self.check_general_scenario_results(trex_res, check_latency = False) + # test_norm_cpu = 2*(trex_res.result['total-tx']/(core*trex_res.result['cpu_utilization'])) + trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_pkts") + cpu_util = trex_res.get_last_value("trex-global.data.m_cpu_util") + cpu_util_hist = trex_res.get_value_list("trex-global.data.m_cpu_util") + print "cpu util is:", cpu_util + print cpu_util_hist + test_norm_cpu = 2 * trex_tx_pckt / (core * cpu_util) + print "test_norm_cpu is:", test_norm_cpu + + + if self.get_benchmark_param('cpu2core_custom_dev'): + # check this test by custom deviation + deviation_compare_value = self.get_benchmark_param('cpu2core_dev') + print "Comparing test with custom deviation value- {dev_val}%".format( dev_val = int(deviation_compare_value*100) ) + + # need to be fixed ! 
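# The commented-out check below compares test_norm_cpu against the benchmarked
# 'cpu_to_core_ratio' by relative deviation. A stand-alone sketch of that comparison
# (helper name and numbers are illustrative only, not part of the suite):
#
#     def deviates_too_much(measured, expected, allowed=0.03):
#         """True when the measured value is more than `allowed` (fraction) away from expected."""
#         return abs(measured / float(expected) - 1) > allowed
#
#     # e.g. measured 190 Mb/core vs. expected 200 Mb/core: |190/200 - 1| = 0.05 > 0.03 -> deviates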
+ #if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > deviation_compare_value): + # raise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds benchmark boundaries') + + self.match_classification() + + assert True + + @nottest + def test_rx_check (self): + # test initializtion + self.router.configure_basic_interfaces() + + self.router.config_pbr(mode = "config") + self.router.config_nbar_pd() + + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + sample_rate = self.get_benchmark_param('rx_sample_rate') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + rx_check = sample_rate, + d = 100, + f = 'cap2/sfr.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + # trex_res is a CTRexResult instance- and contains the summary of the test results + # you may see all the results keys by simply calling here for 'print trex_res.result' + print ("\nLATEST RESULT OBJECT:") + print trex_res + print ("\nLATEST DUMP:") + print trex_res.get_latest_dump() + + self.check_general_scenario_results(trex_res) + + self.check_CPU_benchmark(trex_res, 10) + +# if trex_res.result['rx_check_tx']==trex_res.result['rx_check_rx']: # rx_check verification shoud pass +# assert trex_res.result['rx_check_verification'] == "OK" +# else: +# assert trex_res.result['rx_check_verification'] == "FAIL" + + # the name intentionally not matches nose default pattern, including the test should be specified explicitly + def NBarLong(self): + self.router.configure_basic_interfaces() + self.router.config_pbr(mode = "config") + self.router.config_nbar_pd() + + mult = self.get_benchmark_param('multiplier') + core = self.get_benchmark_param('cores') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + d = 18000, # 5 hours + f = 'avl/sfr_delay_10_1g.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + # trex_res is a CTRexResult instance- and contains the summary of the test results + # you may see all the results keys by simply calling here for 'print trex_res.result' + print ("\nLATEST RESULT OBJECT:") + print trex_res + + self.check_general_scenario_results(trex_res, check_latency = False) + + + def tearDown(self): + CTRexGeneral_Test.tearDown(self) + pass + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/unit_tests/trex_rx_test.py b/scripts/automation/regression/unit_tests/trex_rx_test.py new file mode 100755 index 00000000..a37615c4 --- /dev/null +++ b/scripts/automation/regression/unit_tests/trex_rx_test.py @@ -0,0 +1,285 @@ +#!/router/bin/python +from trex_general_test import CTRexGeneral_Test +from CPlatform import CStaticRouteConfig, CNatConfig +from tests_exceptions import * +#import sys +import time +import copy +from nose.tools import nottest +import traceback + +class CTRexRx_Test(CTRexGeneral_Test): + """This class defines the rx testcase of the T-Rex traffic generator""" + def __init__(self, *args, **kwargs): + CTRexGeneral_Test.__init__(self, *args, **kwargs) + self.unsupported_modes = ['virt_nics'] # TODO: fix + pass + + def setUp(self): + CTRexGeneral_Test.setUp(self) + pass + + + def check_rx_errors(self, trex_res): + try: + # counters to check + + latency_counters_display = {'m_unsup_prot': 0, 'm_no_magic': 0, 'm_no_id': 0, 'm_seq_error': 0, 'm_length_error': 0, 'm_no_ipv4_option': 0, 'm_tx_pkt_err': 0} + rx_counters = {'m_err_drop': 0, 'm_err_aged': 0, 'm_err_no_magic': 0, 'm_err_wrong_pkt_id': 0, 
'm_err_fif_seen_twice': 0, 'm_err_open_with_no_fif_pkt': 0, 'm_err_oo_dup': 0, 'm_err_oo_early': 0, 'm_err_oo_late': 0, 'm_err_flow_length_changed': 0} + + # get relevant TRex results + + try: + ports_names = trex_res.get_last_value('trex-latecny-v2.data', 'port\-\d+') + if not ports_names: + raise AbnormalResultError('Could not find ports info in TRex results, path: trex-latecny-v2.data.port-*') + for port_name in ports_names: + path = 'trex-latecny-v2.data.%s.stats' % port_name + port_result = trex_res.get_last_value(path) + if not port_result: + raise AbnormalResultError('Could not find port stats in TRex results, path: %s' % path) + for key in latency_counters_display: + latency_counters_display[key] += port_result[key] + + # using -k flag in TRex produces 1 error per port in latency counter m_seq_error, allow it until issue resolved. For comparing use dict with reduces m_seq_error number. + latency_counters_compare = copy.deepcopy(latency_counters_display) + latency_counters_compare['m_seq_error'] = max(0, latency_counters_compare['m_seq_error'] - len(ports_names)) + + path = 'rx-check.data.stats' + rx_check_results = trex_res.get_last_value(path) + if not rx_check_results: + raise AbnormalResultError('No TRex results by path: %s' % path) + for key in rx_counters: + rx_counters[key] = rx_check_results[key] + + path = 'rx-check.data.stats.m_total_rx' + total_rx = trex_res.get_last_value(path) + if not total_rx: + raise AbnormalResultError('No TRex results by path: %s' % path) + + + print 'Total packets checked: %s' % total_rx + print 'Latency counters: %s' % latency_counters_display + print 'rx_check counters: %s' % rx_counters + + except KeyError as e: + self.fail('Expected key in TRex result was not found.\n%s' % traceback.print_exc()) + + # the check. in loopback expect 0 problems, at others allow errors <error_tolerance>% of total_rx + + total_errors = sum(rx_counters.values()) + sum(latency_counters_compare.values()) + error_tolerance = self.get_benchmark_param('error_tolerance') + if not error_tolerance: + error_tolerance = 0 + error_percentage = float(total_errors) * 100 / total_rx + + if total_errors > 0: + if self.is_loopback or error_percentage > error_tolerance: + self.fail('Too much errors in rx_check. (~%s%% of traffic)' % error_percentage) + else: + print 'There are errors in rx_check (%f%%), not exceeding allowed limit (%s%%)' % (error_percentage, error_tolerance) + else: + print 'No errors in rx_check.' 
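# Illustrative sketch (not part of the patch): the pass/fail rule applied
# above, pulled out as a standalone helper. The names here are invented.
def rx_errors_acceptable(total_errors, total_rx, is_loopback, tolerance_pct=0.0):
    # loopback setups must be completely clean; otherwise errors are tolerated
    # up to <tolerance_pct> percent of all rx-checked packets
    if total_errors == 0:
        return True
    if is_loopback:
        return False
    return float(total_errors) * 100 / total_rx <= tolerance_pct

print rx_errors_acceptable(0, 1000000, True)          # True  - clean run
print rx_errors_acceptable(50, 1000000, False, 0.5)   # True  - 0.005% <= 0.5%
print rx_errors_acceptable(50, 1000000, True)         # False - loopback must be clean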
+ except Exception as e: + print traceback.print_exc() + self.fail('Errors in rx_check: %s' % e) + + def test_rx_check_sfr(self): + if not self.is_loopback: + self.router.configure_basic_interfaces() + self.router.config_pbr(mode = 'config') + + core = self.get_benchmark_param('cores') + mult = self.get_benchmark_param('multiplier') + sample_rate = self.get_benchmark_param('rx_sample_rate') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + rx_check = sample_rate, + d = 100, + f = 'avl/sfr_delay_10_1g_no_bundeling.yaml', + l = 1000, + k = 10, + learn_verify = True, + l_pkt_mode = 2) + + trex_res = self.trex.sample_to_run_finish() + + print ("\nLATEST RESULT OBJECT:") + print trex_res + #print ("\nLATEST DUMP:") + #print trex_res.get_latest_dump() + + self.check_general_scenario_results(trex_res) + self.check_CPU_benchmark(trex_res, 10) + self.check_rx_errors(trex_res) + + + def test_rx_check_http(self): + if not self.is_loopback: + # TODO: skip as test_rx_check_http_negative will cover it + #self.skip('This test is covered by test_rx_check_http_negative') + self.router.configure_basic_interfaces() + self.router.config_pbr(mode = "config") + + core = self.get_benchmark_param('cores') + mult = self.get_benchmark_param('multiplier') + sample_rate = self.get_benchmark_param('rx_sample_rate') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + rx_check = sample_rate, + d = 100, + f = 'cap2/http_simple.yaml', + l = 1000, + k = 10, + learn_verify = True, + l_pkt_mode = 2) + + trex_res = self.trex.sample_to_run_finish() + + print ("\nLATEST RESULT OBJECT:") + print trex_res + + self.check_general_scenario_results(trex_res) + self.check_CPU_benchmark(trex_res, 10) + self.check_rx_errors(trex_res) + + + def test_rx_check_sfr_ipv6(self): + if not self.is_loopback: + self.router.configure_basic_interfaces() + self.router.config_pbr(mode = 'config') + self.router.config_ipv6_pbr(mode = "config") + + core = self.get_benchmark_param('cores') + mult = self.get_benchmark_param('multiplier') + sample_rate = self.get_benchmark_param('rx_sample_rate') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + rx_check = sample_rate, + d = 100, + f = 'avl/sfr_delay_10_1g_no_bundeling.yaml', + l = 1000, + k = 10, + ipv6 = True) + + trex_res = self.trex.sample_to_run_finish() + + print ("\nLATEST RESULT OBJECT:") + print trex_res + #print ("\nLATEST DUMP:") + #print trex_res.get_latest_dump() + + self.check_general_scenario_results(trex_res) + self.check_CPU_benchmark(trex_res, 10) + self.check_rx_errors(trex_res) + + + def test_rx_check_http_ipv6(self): + if not self.is_loopback: + self.router.configure_basic_interfaces() + self.router.config_pbr(mode = "config") + self.router.config_ipv6_pbr(mode = "config") + + core = self.get_benchmark_param('cores') + mult = self.get_benchmark_param('multiplier') + sample_rate = self.get_benchmark_param('rx_sample_rate') + + ret = self.trex.start_trex( + c = core, + m = mult, + p = True, + nc = True, + rx_check = sample_rate, + d = 100, + f = 'cap2/http_simple.yaml', + l = 1000, + k = 10, + ipv6 = True) + + trex_res = self.trex.sample_to_run_finish() + + print ("\nLATEST RESULT OBJECT:") + print trex_res + + self.check_general_scenario_results(trex_res) + self.check_CPU_benchmark(trex_res, 10) + self.check_rx_errors(trex_res) + + @nottest + def test_rx_check_http_negative(self): + if self.is_loopback: + self.skip('This test uses NAT, not relevant for loopback') + + 
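# Illustrative sketch (not part of the patch): the rx_check tests above all
# build the same start_trex() call; a hypothetical helper using the parameter
# names seen in those calls could capture the shared shape.
def start_rx_check_run(trex, core, mult, sample_rate, yaml_file, ipv6=False):
    kwargs = dict(c=core, m=mult, p=True, nc=True,
                  rx_check=sample_rate, d=100, f=yaml_file, l=1000, k=10)
    if ipv6:
        kwargs['ipv6'] = True
    else:
        # the IPv4 variants additionally verify NAT learning and use
        # latency packet mode 2
        kwargs.update(learn_verify=True, l_pkt_mode=2)
    return trex.start_trex(**kwargs)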
self.router.configure_basic_interfaces() + stat_route_dict = self.get_benchmark_param('stat_route_dict') + stat_route_obj = CStaticRouteConfig(stat_route_dict) + self.router.config_static_routing(stat_route_obj, mode = "config") + + core = self.get_benchmark_param('cores') + mult = self.get_benchmark_param('multiplier') + sample_rate = self.get_benchmark_param('rx_sample_rate') + + ret = self.trex.start_trex( + c = core, + m = mult, + #p = True, + #nc = True, + rx_check = sample_rate, + d = 80, + f = 'cap2/http_simple.yaml', + l = 1000, + k = 10, + learn_verify = True, + l_pkt_mode = 2) + + print 'Run for 2 minutes, expect no errors' + trex_res = self.trex.sample_x_seconds(60) + print ("\nLATEST RESULT OBJECT:") + print trex_res + self.check_general_scenario_results(trex_res) + self.check_CPU_benchmark(trex_res, 10) + self.check_rx_errors(trex_res) + + try: + # TODO: add nat/zbf config for router + nat_dict = self.get_benchmark_param('nat_dict') + nat_obj = CNatConfig(nat_dict) + self.router.config_nat(nat_obj) + self.router.config_nat_verify() + self.router.config_zbf() + + print 'Run until finish, expect errors' + trex_res = self.trex.sample_to_run_finish() + + self.router.config_no_zbf() + self.router.clear_nat_translations() + print ("\nLATEST RESULT OBJECT:") + print trex_res + nat_stats = self.router.get_nat_stats() + print nat_stats + self.check_general_scenario_results(trex_res) + self.check_CPU_benchmark(trex_res, 10) + self.check_rx_errors(trex_res) + self.fail('Expected errors here, got none.') + except Exception as e: + print 'Got errors as expected: %s' % e + pass + + def tearDown(self): + CTRexGeneral_Test.tearDown(self) + pass + +if __name__ == "__main__": + pass diff --git a/scripts/automation/trex_control_plane/client/trex_hltapi.py b/scripts/automation/trex_control_plane/client/trex_hltapi.py index 92768ca4..848d5a9e 100755 --- a/scripts/automation/trex_control_plane/client/trex_hltapi.py +++ b/scripts/automation/trex_control_plane/client/trex_hltapi.py @@ -17,14 +17,16 @@ class CTRexHltApi(object): self._port_data = {} # ----- session functions ----- # - - def connect(self, device, port_list, username, port=5050, reset=False, break_locks=False): + # sync = RPC, async = ZMQ + def connect(self, device, port_list, username, sync_port = 4501, async_port = 4500, reset=False, break_locks=False): ret_dict = {"status": 0} - self.trex_client = CTRexStatelessClient(username, device, port) - res_ok, msg = self.trex_client.connect() - if not res_ok: + self.trex_client = CTRexStatelessClient(username, device, sync_port, async_port) + + rc = self.trex_client.connect() + if rc.bad(): + self.trex_client = None - ret_dict.update({"log": msg}) + ret_dict.update({"log": rc.err()}) return ret_dict # arrived here, connection successfully created with server # next, try acquiring ports of TRex @@ -70,7 +72,6 @@ class CTRexHltApi(object): port_list = self.parse_port_list(port_list) response = self.trex_client.release(port_list) res_ok, log = CTRexHltApi.process_response(port_list, response) - print log if not res_ok: ret_dict.update({"log": log}) return ret_dict @@ -89,11 +90,13 @@ class CTRexHltApi(object): return {"status": 1, "log": None} # ----- traffic functions ----- # - def traffic_config(self, mode, port_handle, + def traffic_config(self, mode, port_list, l2_encap="ethernet_ii", mac_src="00:00:01:00:00:01", mac_dst="00:00:00:00:00:00", l3_protocol="ipv4", ip_src_addr="0.0.0.0", ip_dst_addr="192.0.0.1", l3_length=110, transmit_mode="continuous", rate_pps=100, **kwargs): + if 
type(port_list) is not list(): + port_list = [port_list] ALLOWED_MODES = ["create", "modify", "remove", "enable", "disable", "reset"] if mode not in ALLOWED_MODES: raise ValueError("mode must be one of the following values: {modes}".format(modes=ALLOWED_MODES)) @@ -119,45 +122,55 @@ class CTRexHltApi(object): except Exception as e: # some exception happened during the stream creation return {"status": 0, "log": str(e)} - # try adding the stream, until free stream_id is found - port_data = self._port_data.get(port_handle) - id_candidate = None - # TODO: change this to better implementation - while True: - id_candidate = port_data["stream_id_gen"].next() - response = self.trex_client.add_stream(stream_id=id_candidate, - stream_obj=stream_obj, - port_id=port_handle) - res_ok, log = CTRexHltApi.process_response(port_handle, response) - if res_ok: - # found non-taken stream_id on server - # save it for modifying needs - port_data["streams"].update({id_candidate: stream_obj}) - break - else: - # proceed to another iteration to use another id - continue - return {"status": 1, - "stream_id": id_candidate, - "log": None} + # try adding the stream per port, until free stream_id is found + for port_id in port_list: + port_data = self._port_data.get(port_id) + id_candidate = None + # TODO: change this to better implementation + while True: + id_candidate = port_data["stream_id_gen"].next() + response = self.trex_client.add_stream(stream_id=id_candidate, + stream_obj=stream_obj.dump(), + port_id_list=port_id) + res_ok, log = CTRexHltApi.process_response(port_id, response) + if res_ok: + # found non-taken stream_id on server + # save it for modifying needs + port_data["streams"].update({id_candidate: stream_obj}) + break + else: + print log + # proceed to another iteration to use another id + print 'need another iteration?' 
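# Illustrative sketch (not part of the patch): the loop above keeps drawing
# candidate ids from the per-port generator until the server accepts one.
# A roughly equivalent stand-alone helper (names invented for the example):
def allocate_stream_id(id_gen, try_add_stream):
    # try_add_stream(stream_id) -> (ok, log); returns the first accepted id
    while True:
        candidate = id_gen.next()
        ok, log = try_add_stream(candidate)
        if ok:
            return candidate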
+ continue + return {"status": 1, + "stream_id": id_candidate, + "log": None} + else: raise NotImplementedError("mode '{0}' is not supported yet on TRex".format(mode)) - def traffic_control(self, action, port_handle): + def traffic_control(self, action, port_handle, **kwargs): ALLOWED_ACTIONS = ["clear_stats", "run", "stop", "sync_run"] if action not in ALLOWED_ACTIONS: raise ValueError("action must be one of the following values: {actions}".format(actions=ALLOWED_ACTIONS)) # ret_dict = {"status": 0, "stopped": 1} port_list = self.parse_port_list(port_handle) + if type(port_list) is not list(): + port_list = [port_list] if action == "run": - response = self.trex_client.start_traffic(port_id=port_list) + if not set(kwargs.keys()) >= set(['mul', 'duration']): + raise ValueError("For 'run' action should be specified mul and duration arguments") + response = self.trex_client.start_traffic(kwargs['mul'], kwargs['duration'], port_id_list=port_list) res_ok, log = CTRexHltApi.process_response(port_list, response) if res_ok: return {"status": 1, "stopped": 0, "log": None} + else: + print log elif action == "stop": - response = self.trex_client.stop_traffic(port_id=port_list) + response = self.trex_client.stop_traffic(port_id_list=port_list) res_ok, log = CTRexHltApi.process_response(port_list, response) if res_ok: return {"status": 1, @@ -236,13 +249,10 @@ class CTRexHltApi(object): @staticmethod def process_response(port_list, response): + log = response.data() if response.good() else response.err() if isinstance(port_list, list): - res_ok, response = response - log = CTRexHltApi.join_batch_response(response) - else: - res_ok = response.success - log = str(response) - return res_ok, log + log = CTRexHltApi.join_batch_response(log) + return response.good(), log @staticmethod def parse_port_list(port_list): @@ -257,8 +267,9 @@ class CTRexHltApi(object): @staticmethod def join_batch_response(responses): - return "\n".join([str(response) - for response in responses]) + if type(responses) is list(): + return "\n". 
join([str(response) for response in responses]) + return responses @staticmethod def generate_stream(l2_encap, mac_src, mac_dst, diff --git a/scripts/automation/trex_control_plane/client/trex_stateless_sim.py b/scripts/automation/trex_control_plane/client/trex_stateless_sim.py index 7655b27c..d38411a3 100644 --- a/scripts/automation/trex_control_plane/client/trex_stateless_sim.py +++ b/scripts/automation/trex_control_plane/client/trex_stateless_sim.py @@ -26,6 +26,8 @@ except ImportError: from client_utils.jsonrpc_client import JsonRpcClient, BatchMessage from client_utils.packet_builder import CTRexPktBuilder +from client_utils import parsing_opts + import json from common.trex_streams import * @@ -34,32 +36,57 @@ import argparse import tempfile import subprocess import os +from dpkt import pcap +from operator import itemgetter + +class BpSimException(Exception): + pass + +def merge_cap_files (pcap_file_list, out_filename, delete_src = False): + + out_pkts = [] + + # read all packets to a list + for src in pcap_file_list: + f = open(src, 'r') + reader = pcap.Reader(f) + pkts = reader.readpkts() + out_pkts += pkts + f.close() + if delete_src: + os.unlink(src) + + # sort by the timestamp + out_pkts = sorted(out_pkts, key=itemgetter(0)) + + + out = open(out_filename, 'w') + out_writer = pcap.Writer(out) + + for ts, pkt in out_pkts: + out_writer.writepkt(pkt, ts) + + out.close() + class SimRun(object): - def __init__ (self, yaml_file, dp_core_count, core_index, packet_limit, output_filename, is_valgrind, is_gdb): + def __init__ (self, options): - self.yaml_file = yaml_file - self.output_filename = output_filename - self.dp_core_count = dp_core_count - self.core_index = core_index - self.packet_limit = packet_limit - self.is_valgrind = is_valgrind - self.is_gdb = is_gdb + self.options = options # dummies self.handler = 0 self.port_id = 0 - self.mul = {"op": "abs", - "type": "raw", - "value": 1} + + self.mul = options.mult self.duration = -1 def load_yaml_file (self): streams_db = CStreamsDB() - stream_list = streams_db.load_yaml_file(self.yaml_file) + stream_list = streams_db.load_yaml_file(self.options.input_file) streams_json = [] for stream in stream_list.compiled: @@ -99,18 +126,73 @@ class SimRun(object): f.close() try: - cmd = ['bp-sim-64-debug', '--sl', '--cores', str(self.dp_core_count), '--core_index', str(self.core_index), '-f', f.name, '-o', self.output_filename] - if self.is_valgrind: - cmd = ['valgrind', '--leak-check=full'] + cmd - elif self.is_gdb: - cmd = ['gdb', '--args'] + cmd - - subprocess.call(cmd) - + if self.options.json: + with open(f.name) as file: + data = "\n".join(file.readlines()) + print json.dumps(json.loads(data), indent = 4, separators=(',', ': '), sort_keys = True) + else: + self.execute_bp_sim(f.name) finally: os.unlink(f.name) + + def execute_bp_sim (self, json_filename): + exe = 'bp-sim-64' if self.options.release else 'bp-sim-64-debug' + if not os.path.exists(exe): + print "cannot find executable '{0}'".format(exe) + exit(-1) + + cmd = [exe, + '--pcap', + '--sl', + '--cores', + str(self.options.cores), + '--limit', + str(self.options.limit), + '-f', + json_filename, + '-o', + self.options.output_file] + + if self.options.dry: + cmd += ['--dry'] + + if self.options.core_index != None: + cmd += ['--core_index', str(self.options.core_index)] + + if self.options.valgrind: + cmd = ['valgrind', '--leak-check=full', '--error-exitcode=1'] + cmd + + elif self.options.gdb: + cmd = ['gdb', '--args'] + cmd + + print "executing command: '{0}'".format(" ".join(cmd)) + 
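# Illustrative sketch (not part of the patch): what the command assembled
# above comes out as for a 4-core debug run with the default packet limit;
# the input/output file names are placeholders for this example only.
example_cmd = ['bp-sim-64-debug', '--pcap', '--sl',
               '--cores', '4', '--limit', '5000',
               '-f', 'profile.json', '-o', 'generated.pcap']
print "executing command: '{0}'".format(" ".join(example_cmd))
# optionally prefixed by 'valgrind --leak-check=full --error-exitcode=1'
# or 'gdb --args' when the corresponding option is set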
rc = subprocess.call(cmd) + if rc != 0: + raise BpSimException() + + self.merge_results() + + + def merge_results (self): + if self.options.dry: + return + + if self.options.cores == 1: + return + + if self.options.core_index != None: + return + + + print "Mering cores output to a single pcap file...\n" + inputs = ["{0}-{1}".format(self.options.output_file, index) for index in xrange(0, self.options.cores)] + merge_cap_files(inputs, self.options.output_file, delete_src = True) + + + + def is_valid_file(filename): if not os.path.isfile(filename): raise argparse.ArgumentTypeError("The file '%s' does not exist" % filename) @@ -120,8 +202,8 @@ def is_valid_file(filename): def unsigned_int (x): x = int(x) - if x <= 0: - raise argparse.ArgumentTypeError("argument must be >= 1") + if x < 0: + raise argparse.ArgumentTypeError("argument must be >= 0") return x @@ -143,20 +225,30 @@ def setParserOptions(): choices = xrange(1, 9)) parser.add_argument("-n", "--core_index", - help = "DP core index to examine [default is 0]", - default = 0, + help = "Record only a specific core", + default = None, type = int) - parser.add_argument("-j", "--join", - help = "run and join output from 0..core_count [default is False]", - default = False, - type = bool) + parser.add_argument("-r", "--release", + help = "runs on release image instead of debug [default is False]", + action = "store_true", + default = False) + + parser.add_argument("-s", "--dry", + help = "dry run only (nothing will be written to the file) [default is False]", + action = "store_true", + default = False) parser.add_argument("-l", "--limit", help = "limit test total packet count [default is 5000]", default = 5000, type = unsigned_int) + parser.add_argument('-m', '--multiplier', + help = parsing_opts.match_multiplier_help, + dest = 'mult', + default = {'type':'raw', 'value':1, 'op': 'abs'}, + type = parsing_opts.match_multiplier_strict) group = parser.add_mutually_exclusive_group() @@ -170,13 +262,23 @@ def setParserOptions(): action = "store_true", default = False) + group.add_argument("--json", + help = "generate JSON output only to stdout [default is False]", + action = "store_true", + default = False) + return parser def validate_args (parser, options): - if options.core_index < 0 or options.core_index >= options.cores: - parser.error("DP core index valid range is 0 to {0}".format(options.cores - 1)) + if options.core_index: + if not options.core_index in xrange(0, options.cores): + parser.error("DP core index valid range is 0 to {0}".format(options.cores - 1)) + + # zero is ok - no limit, but other values must be at least as the number of cores + if (options.limit != 0) and options.limit < options.cores: + parser.error("limit cannot be lower than number of DP cores") def main (): @@ -185,16 +287,19 @@ def main (): validate_args(parser, options) - r = SimRun(options.input_file, - options.cores, - options.core_index, - options.limit, - options.output_file, - options.valgrind, - options.gdb) + r = SimRun(options) + + try: + r.run() + except KeyboardInterrupt as e: + print "\n\n*** Caught Ctrl + C... 
Exiting...\n\n" + exit(1) - r.run() + except BpSimException as e: + print "\n\n*** BP sim exit code was non zero\n\n" + exit(1) + exit(0) if __name__ == '__main__': main() diff --git a/scripts/automation/trex_control_plane/client_utils/external_packages.py b/scripts/automation/trex_control_plane/client_utils/external_packages.py index 3982a1b2..9d8c4dcf 100755 --- a/scripts/automation/trex_control_plane/client_utils/external_packages.py +++ b/scripts/automation/trex_control_plane/client_utils/external_packages.py @@ -8,7 +8,7 @@ ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs')) CLIENT_UTILS_MODULES = ['dpkt-1.8.6', - 'PyYAML-3.01/lib', + 'yaml-3.11', 'texttable-0.8.4' ] diff --git a/scripts/automation/trex_control_plane/client_utils/packet_builder.py b/scripts/automation/trex_control_plane/client_utils/packet_builder.py index 19ac7f32..e7fdb5d9 100755 --- a/scripts/automation/trex_control_plane/client_utils/packet_builder.py +++ b/scripts/automation/trex_control_plane/client_utils/packet_builder.py @@ -730,7 +730,7 @@ class CTRexPktBuilder(object): None """ super(CTRexPktBuilder.CTRexVM, self).__init__() - self.vm_variables = {} + self.vm_variables = {'instructions': [], 'split_by_var': ""} self._inst_by_offset = {} # this data structure holds only offset-related instructions, ordered in tuples self._off_inst_by_name = {} @@ -845,6 +845,10 @@ class CTRexPktBuilder(object): list holds variables data of VM """ + + return self.vm_variables + # !!! TODO: review code below !!! + # at first, dump all CTRexVMFlowVariable instructions ret_val = [var.dump() for key, var in self.vm_variables.items()] diff --git a/scripts/automation/trex_control_plane/common/external_packages.py b/scripts/automation/trex_control_plane/common/external_packages.py index 62121d4f..7353c397 100755 --- a/scripts/automation/trex_control_plane/common/external_packages.py +++ b/scripts/automation/trex_control_plane/common/external_packages.py @@ -7,7 +7,7 @@ CURRENT_PATH = os.path.dirname(os.path.realpath(__file__)) ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs')) -CLIENT_UTILS_MODULES = ['PyYAML-3.01/lib' +CLIENT_UTILS_MODULES = ['yaml-3.11' ] def import_common_modules(): diff --git a/scripts/automation/trex_control_plane/common/trex_streams.py b/scripts/automation/trex_control_plane/common/trex_streams.py index 44731088..800b6d49 100755 --- a/scripts/automation/trex_control_plane/common/trex_streams.py +++ b/scripts/automation/trex_control_plane/common/trex_streams.py @@ -288,10 +288,10 @@ class CStreamsDB(object): loaded_obj, [StreamPack(v.stream_id, v.stream.dump()) for k, v in compiled_streams.items()])) - except Exception as e: return None + return self.get_stream_pack(stream_pack_name) def load_streams(self, LoadedStreamList_obj): diff --git a/scripts/cap2/imix_9k.yaml b/scripts/cap2/imix_9k.yaml new file mode 100644 index 00000000..0f194e67 --- /dev/null +++ b/scripts/cap2/imix_9k.yaml @@ -0,0 +1,70 @@ +# +# Simple IMIX test 64B +# +- duration : 3 + generator : + distribution : "seq" + clients_start : "16.0.0.1" + clients_end : "16.0.0.255" + servers_start : "48.0.0.1" + servers_end : "48.0.255.255" + clients_per_gb : 201 + min_clients : 101 + dual_port_mask : "1.0.0.0" + tcp_aging : 0 + udp_aging : 0 + mac : [0x0,0x0,0x0,0x1,0x0,0x00] +# +# the 
templates are duplicated in purpose , to utilized all DRAM BW and get better performance, we should do it automaticly +# but for now it like this , you should have at least 8 +# + cap_info : + - name: stl/ipv4_udp_9k.pcap + cps : 1000.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 200 + - name: stl/ipv4_udp_9k.pcap + cps : 1000.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 200 + - name: stl/ipv4_udp_9k.pcap + cps : 1000.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 200 + - name: stl/ipv4_udp_9k.pcap + cps : 1000.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 200 + - name: stl/ipv4_udp_9k.pcap + cps : 1000.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 200 + - name: stl/ipv4_udp_9k.pcap + cps : 1000.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 200 + - name: stl/ipv4_udp_9k.pcap + cps : 1000.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 200 + - name: stl/ipv4_udp_9k.pcap + cps : 1000.0 + ipg : 10000 + rtt : 10000 + w : 1 + limit : 200 + diff --git a/scripts/exp/stl_vm_inc_size_64_128-ex.pcap b/scripts/exp/stl_vm_inc_size_64_128-ex.pcap Binary files differnew file mode 100644 index 00000000..1e44c30f --- /dev/null +++ b/scripts/exp/stl_vm_inc_size_64_128-ex.pcap diff --git a/scripts/exp/stl_vm_rand_size_512B_64_128-ex.pcap b/scripts/exp/stl_vm_rand_size_512B_64_128-ex.pcap Binary files differnew file mode 100644 index 00000000..324ffb1c --- /dev/null +++ b/scripts/exp/stl_vm_rand_size_512B_64_128-ex.pcap diff --git a/scripts/exp/stl_vm_rand_size_64_128-ex.pcap b/scripts/exp/stl_vm_rand_size_64_128-ex.pcap Binary files differnew file mode 100644 index 00000000..b0b01def --- /dev/null +++ b/scripts/exp/stl_vm_rand_size_64_128-ex.pcap diff --git a/scripts/exp/stl_vm_rand_size_64_128.pcap-ex.pcap b/scripts/exp/stl_vm_rand_size_64_128.pcap-ex.pcap Binary files differnew file mode 100644 index 00000000..b0b01def --- /dev/null +++ b/scripts/exp/stl_vm_rand_size_64_128.pcap-ex.pcap diff --git a/scripts/external_libs/PyYAML-3.01/LICENSE b/scripts/external_libs/PyYAML-3.01/LICENSE deleted file mode 100644 index 050ced23..00000000 --- a/scripts/external_libs/PyYAML-3.01/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
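The imix_9k.yaml profile added above repeats the same 9k-byte UDP template eight times on purpose. As a rough illustration (not part of the patch), the repeated cap_info block could be generated with the yaml module instead of copying it by hand:

import yaml

template = {'name': 'stl/ipv4_udp_9k.pcap', 'cps': 1000.0,
            'ipg': 10000, 'rtt': 10000, 'w': 1, 'limit': 200}
cap_info = [dict(template) for _ in range(8)]
print yaml.dump({'cap_info': cap_info}, default_flow_style=False)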
diff --git a/scripts/external_libs/PyYAML-3.01/PKG-INFO b/scripts/external_libs/PyYAML-3.01/PKG-INFO deleted file mode 100644 index 6ec73b1f..00000000 --- a/scripts/external_libs/PyYAML-3.01/PKG-INFO +++ /dev/null @@ -1,28 +0,0 @@ -Metadata-Version: 1.0 -Name: PyYAML -Version: 3.01 -Summary: YAML parser and emitter for Python -Home-page: http://pyyaml.org/wiki/PyYAML -Author: Kirill Simonov -Author-email: xi@resolvent.net -License: MIT -Download-URL: http://pyyaml.org/download/pyyaml/PyYAML-3.01.tar.gz -Description: YAML is a data serialization format designed for human readability and - interaction with scripting languages. PyYAML is a YAML parser and - emitter for Python. - - PyYAML features a complete YAML 1.1 parser, Unicode support, pickle - support, capable extension API, and sensible error messages. PyYAML - supports standard YAML tags and provides Python-specific tags that allow - to represent an arbitrary Python object. - - PyYAML is applicable for a broad range of tasks from complex - configuration files to object serialization and persistance. -Platform: Any -Classifier: Development Status :: 4 - Beta -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup diff --git a/scripts/external_libs/PyYAML-3.01/README b/scripts/external_libs/PyYAML-3.01/README deleted file mode 100644 index 8a6dec77..00000000 --- a/scripts/external_libs/PyYAML-3.01/README +++ /dev/null @@ -1,18 +0,0 @@ -PyYAML 3000 - The next generation YAML parser and emitter for Python. - -To install, type 'python setup.py install'. - -For more information, check the PyYAML homepage: -'http://pyyaml.org/wiki/PyYAML'. - -Documentation (rough and incomplete though): -'http://pyyaml.org/wiki/PyYAMLDocumentation'. - -Post your questions and opinions to the YAML-Core mailing list: -'http://lists.sourceforge.net/lists/listinfo/yaml-core'. - -Submit bug reports and feature requests to the PyYAML bug tracker: -'http://pyyaml.org/newticket?component=pyyaml'. - -PyYAML 3000 is written by Kirill Simonov <xi@resolvent.net>. It is released -under the MIT license. See the file LICENSE for more details. diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py deleted file mode 100644 index 2aec0fe3..00000000 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py +++ /dev/null @@ -1,484 +0,0 @@ - -# YAML can be parsed by an LL(1) parser! -# -# We use the following production rules: -# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END? -# implicit_document ::= block_node DOCUMENT-END? -# block_node ::= ALIAS | properties? block_content -# flow_node ::= ALIAS | properties? flow_content -# properties ::= TAG ANCHOR? | ANCHOR TAG? -# block_content ::= block_collection | flow_collection | SCALAR -# flow_content ::= flow_collection | SCALAR -# block_collection ::= block_sequence | block_mapping -# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -# block_mapping ::= BLOCK-MAPPING_START ((KEY block_node_or_indentless_sequence?)? (VALUE block_node_or_indentless_sequence?)?)* BLOCK-END -# block_node_or_indentless_sequence ::= ALIAS | properties? 
(block_content | indentless_block_sequence) -# indentless_block_sequence ::= (BLOCK-ENTRY block_node?)+ -# flow_collection ::= flow_sequence | flow_mapping -# flow_sequence ::= FLOW-SEQUENCE-START (flow_sequence_entry FLOW-ENTRY)* flow_sequence_entry? FLOW-SEQUENCE-END -# flow_mapping ::= FLOW-MAPPING-START (flow_mapping_entry FLOW-ENTRY)* flow_mapping_entry? FLOW-MAPPING-END -# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -# TODO: support for BOM within a stream. -# stream ::= (BOM? implicit_document)? (BOM? explicit_document)* STREAM-END - -# FIRST sets: -# stream: { STREAM-START } -# explicit_document: { DIRECTIVE DOCUMENT-START } -# implicit_document: FIRST(block_node) -# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } -# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# block_sequence: { BLOCK-SEQUENCE-START } -# block_mapping: { BLOCK-MAPPING-START } -# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } -# indentless_sequence: { ENTRY } -# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } -# flow_sequence: { FLOW-SEQUENCE-START } -# flow_mapping: { FLOW-MAPPING-START } -# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } -# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } - -__all__ = ['Parser', 'ParserError'] - -from error import MarkedYAMLError -from tokens import * -from events import * -from scanner import * - -class ParserError(MarkedYAMLError): - pass - -class Parser: - # Since writing a recursive-descendant parser is a straightforward task, we - # do not give many comments here. - # Note that we use Python generators. If you rewrite the parser in another - # language, you may replace all 'yield'-s with event handler calls. - - DEFAULT_TAGS = { - u'!': u'!', - u'!!': u'tag:yaml.org,2002:', - } - - def __init__(self): - self.current_event = None - self.yaml_version = None - self.tag_handles = {} - self.event_generator = self.parse_stream() - - def check_event(self, *choices): - # Check the type of the next event. - if self.current_event is None: - try: - self.current_event = self.event_generator.next() - except StopIteration: - pass - if self.current_event is not None: - if not choices: - return True - for choice in choices: - if isinstance(self.current_event, choice): - return True - return False - - def peek_event(self): - # Get the next event. - if self.current_event is None: - try: - self.current_event = self.event_generator.next() - except StopIteration: - pass - return self.current_event - - def get_event(self): - # Get the next event. - if self.current_event is None: - try: - self.current_event = self.event_generator.next() - except StopIteration: - pass - value = self.current_event - self.current_event = None - return value - - def __iter__(self): - # Iterator protocol. - return self.event_generator - - def parse_stream(self): - # STREAM-START implicit_document? 
explicit_document* STREAM-END - - # Parse start of stream. - token = self.get_token() - yield StreamStartEvent(token.start_mark, token.end_mark, - encoding=token.encoding) - - # Parse implicit document. - if not self.check_token(DirectiveToken, DocumentStartToken, - StreamEndToken): - self.tag_handles = self.DEFAULT_TAGS - token = self.peek_token() - start_mark = end_mark = token.start_mark - yield DocumentStartEvent(start_mark, end_mark, - explicit=False) - for event in self.parse_block_node(): - yield event - token = self.peek_token() - start_mark = end_mark = token.start_mark - explicit = False - while self.check_token(DocumentEndToken): - token = self.get_token() - end_mark = token.end_mark - explicit = True - yield DocumentEndEvent(start_mark, end_mark, - explicit=explicit) - - # Parse explicit documents. - while not self.check_token(StreamEndToken): - token = self.peek_token() - start_mark = token.start_mark - version, tags = self.process_directives() - if not self.check_token(DocumentStartToken): - raise ParserError(None, None, - "expected '<document start>', but found %r" - % self.peek_token().id, - self.peek_token().start_mark) - token = self.get_token() - end_mark = token.end_mark - yield DocumentStartEvent(start_mark, end_mark, - explicit=True, version=version, tags=tags) - if self.check_token(DirectiveToken, - DocumentStartToken, DocumentEndToken, StreamEndToken): - yield self.process_empty_scalar(token.end_mark) - else: - for event in self.parse_block_node(): - yield event - token = self.peek_token() - start_mark = end_mark = token.start_mark - explicit = False - while self.check_token(DocumentEndToken): - token = self.get_token() - end_mark = token.end_mark - explicit=True - yield DocumentEndEvent(start_mark, end_mark, - explicit=explicit) - - # Parse end of stream. - token = self.get_token() - yield StreamEndEvent(token.start_mark, token.end_mark) - - def process_directives(self): - # DIRECTIVE* - self.yaml_version = None - self.tag_handles = {} - while self.check_token(DirectiveToken): - token = self.get_token() - if token.name == u'YAML': - if self.yaml_version is not None: - raise ParserError(None, None, - "found duplicate YAML directive", token.start_mark) - major, minor = token.value - if major != 1: - raise ParserError(None, None, - "found incompatible YAML document (version 1.* is required)", - token.start_mark) - self.yaml_version = token.value - elif token.name == u'TAG': - handle, prefix = token.value - if handle in self.tag_handles: - raise ParserError(None, None, - "duplicate tag handle %r" % handle.encode('utf-8'), - token.start_mark) - self.tag_handles[handle] = prefix - if self.tag_handles: - value = self.yaml_version, self.tag_handles.copy() - else: - value = self.yaml_version, None - for key in self.DEFAULT_TAGS: - if key not in self.tag_handles: - self.tag_handles[key] = self.DEFAULT_TAGS[key] - return value - - def parse_block_node(self): - return self.parse_node(block=True) - - def parse_flow_node(self): - return self.parse_node() - - def parse_block_node_or_indentless_sequence(self): - return self.parse_node(block=True, indentless_sequence=True) - - def parse_node(self, block=False, indentless_sequence=False): - # block_node ::= ALIAS | properties? block_content - # flow_node ::= ALIAS | properties? flow_content - # properties ::= TAG ANCHOR? | ANCHOR TAG? 
- # block_content ::= block_collection | flow_collection | SCALAR - # flow_content ::= flow_collection | SCALAR - # block_collection ::= block_sequence | block_mapping - # block_node_or_indentless_sequence ::= ALIAS | properties? - # (block_content | indentless_block_sequence) - if self.check_token(AliasToken): - token = self.get_token() - yield AliasEvent(token.value, token.start_mark, token.end_mark) - else: - anchor = None - tag = None - start_mark = end_mark = tag_mark = None - if self.check_token(AnchorToken): - token = self.get_token() - start_mark = token.start_mark - end_mark = token.end_mark - anchor = token.value - if self.check_token(TagToken): - token = self.get_token() - tag_mark = token.start_mark - end_mark = token.end_mark - tag = token.value - elif self.check_token(TagToken): - token = self.get_token() - start_mark = tag_mark = token.start_mark - end_mark = token.end_mark - tag = token.value - if self.check_token(AnchorToken): - token = self.get_token() - end_mark = token.end_mark - anchor = token.value - if tag is not None and tag != u'!': - handle, suffix = tag - if handle is not None: - if handle not in self.tag_handles: - raise ParserError("while parsing a node", start_mark, - "found undefined tag handle %r" % handle.encode('utf-8'), - tag_mark) - tag = self.tag_handles[handle]+suffix - else: - tag = suffix - #if tag == u'!': - # raise ParserError("while parsing a node", start_mark, - # "found non-specific tag '!'", tag_mark, - # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") - if start_mark is None: - start_mark = end_mark = self.peek_token().start_mark - event = None - collection_events = None - implicit = (tag is None or tag == u'!') - if indentless_sequence and self.check_token(BlockEntryToken): - end_mark = self.peek_token().end_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark) - collection_events = self.parse_indentless_sequence() - else: - if self.check_token(ScalarToken): - token = self.get_token() - end_mark = token.end_mark - if (token.plain and tag is None) or tag == u'!': - implicit = (True, False) - elif tag is None: - implicit = (False, True) - else: - implicit = (False, False) - event = ScalarEvent(anchor, tag, implicit, token.value, - start_mark, end_mark, style=token.style) - elif self.check_token(FlowSequenceStartToken): - end_mark = self.peek_token().end_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=True) - collection_events = self.parse_flow_sequence() - elif self.check_token(FlowMappingStartToken): - end_mark = self.peek_token().end_mark - event = MappingStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=True) - collection_events = self.parse_flow_mapping() - elif block and self.check_token(BlockSequenceStartToken): - end_mark = self.peek_token().start_mark - event = SequenceStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=False) - collection_events = self.parse_block_sequence() - elif block and self.check_token(BlockMappingStartToken): - end_mark = self.peek_token().start_mark - event = MappingStartEvent(anchor, tag, implicit, - start_mark, end_mark, flow_style=False) - collection_events = self.parse_block_mapping() - elif anchor is not None or tag is not None: - # Empty scalars are allowed even if a tag or an anchor is - # specified. 
- event = ScalarEvent(anchor, tag, (implicit, False), u'', - start_mark, end_mark) - else: - if block: - node = 'block' - else: - node = 'flow' - token = self.peek_token() - raise ParserError("while scanning a %s node" % node, start_mark, - "expected the node content, but found %r" % token.id, - token.start_mark) - yield event - if collection_events is not None: - for event in collection_events: - yield event - - def parse_block_sequence(self): - # BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END - token = self.get_token() - start_mark = token.start_mark - while self.check_token(BlockEntryToken): - token = self.get_token() - if not self.check_token(BlockEntryToken, BlockEndToken): - for event in self.parse_block_node(): - yield event - else: - yield self.process_empty_scalar(token.end_mark) - if not self.check_token(BlockEndToken): - token = self.peek_token() - raise ParserError("while scanning a block collection", start_mark, - "expected <block end>, but found %r" % token.id, token.start_mark) - token = self.get_token() - yield SequenceEndEvent(token.start_mark, token.end_mark) - - def parse_indentless_sequence(self): - # (BLOCK-ENTRY block_node?)+ - while self.check_token(BlockEntryToken): - token = self.get_token() - if not self.check_token(BlockEntryToken, - KeyToken, ValueToken, BlockEndToken): - for event in self.parse_block_node(): - yield event - else: - yield self.process_empty_scalar(token.end_mark) - token = self.peek_token() - yield SequenceEndEvent(token.start_mark, token.start_mark) - - def parse_block_mapping(self): - # BLOCK-MAPPING_START - # ((KEY block_node_or_indentless_sequence?)? - # (VALUE block_node_or_indentless_sequence?)?)* - # BLOCK-END - token = self.get_token() - start_mark = token.start_mark - while self.check_token(KeyToken, ValueToken): - if self.check_token(KeyToken): - token = self.get_token() - if not self.check_token(KeyToken, ValueToken, BlockEndToken): - for event in self.parse_block_node_or_indentless_sequence(): - yield event - else: - yield self.process_empty_scalar(token.end_mark) - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(KeyToken, ValueToken, BlockEndToken): - for event in self.parse_block_node_or_indentless_sequence(): - yield event - else: - yield self.process_empty_scalar(token.end_mark) - else: - token = self.peek_token() - yield self.process_empty_scalar(token.start_mark) - if not self.check_token(BlockEndToken): - token = self.peek_token() - raise ParserError("while scanning a block mapping", start_mark, - "expected <block end>, but found %r" % token.id, token.start_mark) - token = self.get_token() - yield MappingEndEvent(token.start_mark, token.end_mark) - - def parse_flow_sequence(self): - # flow_sequence ::= FLOW-SEQUENCE-START - # (flow_sequence_entry FLOW-ENTRY)* - # flow_sequence_entry? - # FLOW-SEQUENCE-END - # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - # - # Note that while production rules for both flow_sequence_entry and - # flow_mapping_entry are equal, their interpretations are different. - # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` - # generate an inline mapping (set syntax). 
- token = self.get_token() - start_mark = token.start_mark - while not self.check_token(FlowSequenceEndToken): - if self.check_token(KeyToken): - token = self.get_token() - yield MappingStartEvent(None, None, True, - token.start_mark, token.end_mark, - flow_style=True) - if not self.check_token(ValueToken, - FlowEntryToken, FlowSequenceEndToken): - for event in self.parse_flow_node(): - yield event - else: - yield self.process_empty_scalar(token.end_mark) - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowSequenceEndToken): - for event in self.parse_flow_node(): - yield event - else: - yield self.process_empty_scalar(token.end_mark) - else: - token = self.peek_token() - yield self.process_empty_scalar(token.start_mark) - token = self.peek_token() - yield MappingEndEvent(token.start_mark, token.start_mark) - else: - for event in self.parse_flow_node(): - yield event - if not self.check_token(FlowEntryToken, FlowSequenceEndToken): - token = self.peek_token() - raise ParserError("while scanning a flow sequence", start_mark, - "expected ',' or ']', but got %r" % token.id, token.start_mark) - if self.check_token(FlowEntryToken): - self.get_token() - token = self.get_token() - yield SequenceEndEvent(token.start_mark, token.end_mark) - - def parse_flow_mapping(self): - # flow_mapping ::= FLOW-MAPPING-START - # (flow_mapping_entry FLOW-ENTRY)* - # flow_mapping_entry? - # FLOW-MAPPING-END - # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - token = self.get_token() - start_mark = token.start_mark - while not self.check_token(FlowMappingEndToken): - if self.check_token(KeyToken): - token = self.get_token() - if not self.check_token(ValueToken, - FlowEntryToken, FlowMappingEndToken): - for event in self.parse_flow_node(): - yield event - else: - yield self.process_empty_scalar(token.end_mark) - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowMappingEndToken): - for event in self.parse_flow_node(): - yield event - else: - yield self.process_empty_scalar(token.end_mark) - else: - token = self.peek_token() - yield self.process_empty_scalar(token.start_mark) - else: - for event in self.parse_flow_node(): - yield event - yield self.process_empty_scalar(self.peek_token().start_mark) - if not self.check_token(FlowEntryToken, FlowMappingEndToken): - token = self.peek_token() - raise ParserError("while scanning a flow mapping", start_mark, - "expected ',' or '}', but got %r" % token.id, token.start_mark) - if self.check_token(FlowEntryToken): - self.get_token() - if not self.check_token(FlowMappingEndToken): - token = self.peek_token() - raise ParserError("while scanning a flow mapping", start_mark, - "expected '}', but found %r" % token.id, token.start_mark) - token = self.get_token() - yield MappingEndEvent(token.start_mark, token.end_mark) - - def process_empty_scalar(self, mark): - return ScalarEvent(None, None, (True, False), u'', mark, mark) - diff --git a/scripts/external_libs/PyYAML-3.01/setup.py b/scripts/external_libs/PyYAML-3.01/setup.py deleted file mode 100644 index 23c1efac..00000000 --- a/scripts/external_libs/PyYAML-3.01/setup.py +++ /dev/null @@ -1,52 +0,0 @@ - -NAME = 'PyYAML' -VERSION = '3.01' -DESCRIPTION = "YAML parser and emitter for Python" -LONG_DESCRIPTION = """\ -YAML is a data serialization format designed for human readability and -interaction with scripting languages. PyYAML is a YAML parser and -emitter for Python. 
- -PyYAML features a complete YAML 1.1 parser, Unicode support, pickle -support, capable extension API, and sensible error messages. PyYAML -supports standard YAML tags and provides Python-specific tags that allow -to represent an arbitrary Python object. - -PyYAML is applicable for a broad range of tasks from complex -configuration files to object serialization and persistance.""" -AUTHOR = "Kirill Simonov" -AUTHOR_EMAIL = 'xi@resolvent.net' -LICENSE = "MIT" -PLATFORMS = "Any" -URL = "http://pyyaml.org/wiki/PyYAML" -DOWNLOAD_URL = "http://pyyaml.org/download/pyyaml/%s-%s.tar.gz" % (NAME, VERSION) -CLASSIFIERS = [ - "Development Status :: 4 - Beta", - "Intended Audience :: Developers", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - "Programming Language :: Python", - "Topic :: Software Development :: Libraries :: Python Modules", - "Topic :: Text Processing :: Markup", -] - - -from distutils.core import setup - -setup( - name=NAME, - version=VERSION, - description=DESCRIPTION, - long_description=LONG_DESCRIPTION, - author=AUTHOR, - author_email=AUTHOR_EMAIL, - license=LICENSE, - platforms=PLATFORMS, - url=URL, - download_url=DOWNLOAD_URL, - classifiers=CLASSIFIERS, - - package_dir={'': 'lib'}, - packages=['yaml'], -) - diff --git a/scripts/external_libs/nose-1.3.4/AUTHORS b/scripts/external_libs/nose-1.3.4/AUTHORS new file mode 100755 index 00000000..5414bcda --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/AUTHORS @@ -0,0 +1,27 @@ +Jason Pellerin +Kumar McMillan +Mika Eloranta +Jay Parlar +Scot Doyle +James Casbon +Antoine Pitrou +John J Lee +Allen Bierbaum +Pam Zerbinos +Augie Fackler +Peter Fein +Kevin Mitchell +Alex Stewart +Timothee Peignier +Thomas Kluyver +Heng Liu +Rosen Diankov +Buck Golemon +Bobby Impollonia +Takafumi Arakaki +Peter Bengtsson +Gary Donovan +Brendan McCollam +Erik Rose +Sascha Peilicke +Andre Caron diff --git a/scripts/external_libs/nose-1.3.4/PKG-INFO b/scripts/external_libs/nose-1.3.4/PKG-INFO new file mode 100755 index 00000000..dea3d585 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/PKG-INFO @@ -0,0 +1,38 @@ +Metadata-Version: 1.1 +Name: nose +Version: 1.3.4 +Summary: nose extends unittest to make testing easier +Home-page: http://readthedocs.org/docs/nose/ +Author: Jason Pellerin +Author-email: jpellerin+nose@gmail.com +License: GNU LGPL +Description: nose extends the test loading and running features of unittest, making + it easier to write, find and run tests. + + By default, nose will run tests in files or directories under the current + working directory whose names include "test" or "Test" at a word boundary + (like "test_this" or "functional_test" or "TestClass" but not + "libtest"). Test output is similar to that of unittest, but also includes + captured stdout output from failing tests, for easy print-style debugging. + + These features, and many more, are customizable through the use of + plugins. Plugins included with nose provide support for doctest, code + coverage and profiling, flexible attribute-based test selection, + output capture and more. 
More information about writing plugins may be + found on in the nose API documentation, here: + http://readthedocs.org/docs/nose/ + + If you have recently reported a bug marked as fixed, or have a craving for + the very latest, you may want the development version instead: + https://github.com/nose-devs/nose/tarball/master#egg=nose-dev + +Keywords: test unittest doctest automatic discovery +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Software Development :: Testing diff --git a/scripts/external_libs/nose-1.3.4/lgpl.txt b/scripts/external_libs/nose-1.3.4/lgpl.txt new file mode 100755 index 00000000..8add30ad --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/lgpl.txt @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. 
+ + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". 
The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. 
+ + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. 
A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. 
If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + <one line to give the library's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + <signature of Ty Coon>, 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/scripts/external_libs/nose-1.3.4/nose/__init__.py b/scripts/external_libs/nose-1.3.4/nose/__init__.py new file mode 100755 index 00000000..8ab010bf --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/__init__.py @@ -0,0 +1,15 @@ +from nose.core import collector, main, run, run_exit, runmodule +# backwards compatibility +from nose.exc import SkipTest, DeprecatedTest +from nose.tools import with_setup + +__author__ = 'Jason Pellerin' +__versioninfo__ = (1, 3, 4) +__version__ = '.'.join(map(str, __versioninfo__)) + +__all__ = [ + 'main', 'run', 'run_exit', 'runmodule', 'with_setup', + 'SkipTest', 'DeprecatedTest', 'collector' + ] + + diff --git a/scripts/external_libs/nose-1.3.4/nose/__main__.py b/scripts/external_libs/nose-1.3.4/nose/__main__.py new file mode 100755 index 00000000..b402d9df --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/__main__.py @@ -0,0 +1,8 @@ +import sys + +from nose.core import run_exit + +if sys.argv[0].endswith('__main__.py'): + sys.argv[0] = '%s -m nose' % sys.executable + +run_exit() diff --git a/scripts/external_libs/nose-1.3.4/nose/case.py b/scripts/external_libs/nose-1.3.4/nose/case.py new file mode 100755 index 00000000..cffa4ab4 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/case.py @@ -0,0 +1,397 @@ +"""nose unittest.TestCase subclasses. It is not necessary to subclass these +classes when writing tests; they are used internally by nose.loader.TestLoader +to create test cases from test functions and methods in test classes. +""" +import logging +import sys +import unittest +from inspect import isfunction +from nose.config import Config +from nose.failure import Failure # for backwards compatibility +from nose.util import resolve_name, test_address, try_run + +log = logging.getLogger(__name__) + + +__all__ = ['Test'] + + +class Test(unittest.TestCase): + """The universal test case wrapper. + + When a plugin sees a test, it will always see an instance of this + class. To access the actual test case that will be run, access the + test property of the nose.case.Test instance. + """ + __test__ = False # do not collect + def __init__(self, test, config=None, resultProxy=None): + # sanity check + if not callable(test): + raise TypeError("nose.case.Test called with argument %r that " + "is not callable. A callable is required." 
+ % test) + self.test = test + if config is None: + config = Config() + self.config = config + self.tbinfo = None + self.capturedOutput = None + self.resultProxy = resultProxy + self.plugins = config.plugins + self.passed = None + unittest.TestCase.__init__(self) + + def __call__(self, *arg, **kwarg): + return self.run(*arg, **kwarg) + + def __str__(self): + name = self.plugins.testName(self) + if name is not None: + return name + return str(self.test) + + def __repr__(self): + return "Test(%r)" % self.test + + def afterTest(self, result): + """Called after test is complete (after result.stopTest) + """ + try: + afterTest = result.afterTest + except AttributeError: + pass + else: + afterTest(self.test) + + def beforeTest(self, result): + """Called before test is run (before result.startTest) + """ + try: + beforeTest = result.beforeTest + except AttributeError: + pass + else: + beforeTest(self.test) + + def exc_info(self): + """Extract exception info. + """ + exc, exv, tb = sys.exc_info() + return (exc, exv, tb) + + def id(self): + """Get a short(er) description of the test + """ + return self.test.id() + + def address(self): + """Return a round-trip name for this test, a name that can be + fed back as input to loadTestByName and (assuming the same + plugin configuration) result in the loading of this test. + """ + if hasattr(self.test, 'address'): + return self.test.address() + else: + # not a nose case + return test_address(self.test) + + def _context(self): + try: + return self.test.context + except AttributeError: + pass + try: + return self.test.__class__ + except AttributeError: + pass + try: + return resolve_name(self.test.__module__) + except AttributeError: + pass + return None + context = property(_context, None, None, + """Get the context object of this test (if any).""") + + def run(self, result): + """Modified run for the test wrapper. + + From here we don't call result.startTest or stopTest or + addSuccess. The wrapper calls addError/addFailure only if its + own setup or teardown fails, or running the wrapped test fails + (eg, if the wrapped "test" is not callable). + + Two additional methods are called, beforeTest and + afterTest. These give plugins a chance to modify the wrapped + test before it is called and do cleanup after it is + called. They are called unconditionally. + """ + if self.resultProxy: + result = self.resultProxy(result, self) + try: + try: + self.beforeTest(result) + self.runTest(result) + except KeyboardInterrupt: + raise + except: + err = sys.exc_info() + result.addError(self, err) + finally: + self.afterTest(result) + + def runTest(self, result): + """Run the test. Plugins may alter the test by returning a + value from prepareTestCase. The value must be callable and + must accept one argument, the result instance. + """ + test = self.test + plug_test = self.config.plugins.prepareTestCase(self) + if plug_test is not None: + test = plug_test + test(result) + + def shortDescription(self): + desc = self.plugins.describeTest(self) + if desc is not None: + return desc + # work around bug in unittest.TestCase.shortDescription + # with multiline docstrings. + test = self.test + try: + test._testMethodDoc = test._testMethodDoc.strip()# 2.5 + except AttributeError: + try: + # 2.4 and earlier + test._TestCase__testMethodDoc = \ + test._TestCase__testMethodDoc.strip() + except AttributeError: + pass + # 2.7 compat: shortDescription() always returns something + # which is a change from 2.6 and below, and breaks the + # testName plugin call. 
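The run() and runTest() docstrings above describe the beforeTest/afterTest hook points and note that plugins always see the nose.case.Test wrapper rather than the raw test. As a rough, hypothetical sketch (not part of the vendored nose sources in this diff; the plugin name is made up), a plugin using those hooks might look like:

    # Illustration only -- not part of the nose sources added by this diff.
    import time
    from nose.plugins import Plugin

    class TimingSketch(Plugin):
        """Report how long each wrapped test takes (assumes the
        beforeTest/afterTest hooks described in nose.case.Test.run)."""
        name = 'timing-sketch'   # typically enabled with --with-timing-sketch

        def beforeTest(self, test):
            # 'test' is the nose.case.Test wrapper; test.test is the real case
            self._start = time.time()

        def afterTest(self, test):
            print("%s took %.3fs" % (test, time.time() - self._start))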
+ try: + desc = self.test.shortDescription() + except Exception: + # this is probably caused by a problem in test.__str__() and is + # only triggered by python 3.1's unittest! + pass + try: + if desc == str(self.test): + return + except Exception: + # If str() triggers an exception then ignore it. + # see issue 422 + pass + return desc + + +class TestBase(unittest.TestCase): + """Common functionality for FunctionTestCase and MethodTestCase. + """ + __test__ = False # do not collect + + def id(self): + return str(self) + + def runTest(self): + self.test(*self.arg) + + def shortDescription(self): + if hasattr(self.test, 'description'): + return self.test.description + func, arg = self._descriptors() + doc = getattr(func, '__doc__', None) + if not doc: + doc = str(self) + return doc.strip().split("\n")[0].strip() + + +class FunctionTestCase(TestBase): + """TestCase wrapper for test functions. + + Don't use this class directly; it is used internally in nose to + create test cases for test functions. + """ + __test__ = False # do not collect + + def __init__(self, test, setUp=None, tearDown=None, arg=tuple(), + descriptor=None): + """Initialize the MethodTestCase. + + Required argument: + + * test -- the test function to call. + + Optional arguments: + + * setUp -- function to run at setup. + + * tearDown -- function to run at teardown. + + * arg -- arguments to pass to the test function. This is to support + generator functions that yield arguments. + + * descriptor -- the function, other than the test, that should be used + to construct the test name. This is to support generator functions. + """ + + self.test = test + self.setUpFunc = setUp + self.tearDownFunc = tearDown + self.arg = arg + self.descriptor = descriptor + TestBase.__init__(self) + + def address(self): + """Return a round-trip name for this test, a name that can be + fed back as input to loadTestByName and (assuming the same + plugin configuration) result in the loading of this test. + """ + if self.descriptor is not None: + return test_address(self.descriptor) + else: + return test_address(self.test) + + def _context(self): + return resolve_name(self.test.__module__) + context = property(_context, None, None, + """Get context (module) of this test""") + + def setUp(self): + """Run any setup function attached to the test function + """ + if self.setUpFunc: + self.setUpFunc() + else: + names = ('setup', 'setUp', 'setUpFunc') + try_run(self.test, names) + + def tearDown(self): + """Run any teardown function attached to the test function + """ + if self.tearDownFunc: + self.tearDownFunc() + else: + names = ('teardown', 'tearDown', 'tearDownFunc') + try_run(self.test, names) + + def __str__(self): + func, arg = self._descriptors() + if hasattr(func, 'compat_func_name'): + name = func.compat_func_name + else: + name = func.__name__ + name = "%s.%s" % (func.__module__, name) + if arg: + name = "%s%s" % (name, arg) + # FIXME need to include the full dir path to disambiguate + # in cases where test module of the same name was seen in + # another directory (old fromDirectory) + return name + __repr__ = __str__ + + def _descriptors(self): + """Get the descriptors of the test function: the function and + arguments that will be used to construct the test name. In + most cases, this is the function itself and no arguments. For + tests generated by generator functions, the original + (generator) function and args passed to the generated function + are returned. 
+ """ + if self.descriptor: + return self.descriptor, self.arg + else: + return self.test, self.arg + + +class MethodTestCase(TestBase): + """Test case wrapper for test methods. + + Don't use this class directly; it is used internally in nose to + create test cases for test methods. + """ + __test__ = False # do not collect + + def __init__(self, method, test=None, arg=tuple(), descriptor=None): + """Initialize the MethodTestCase. + + Required argument: + + * method -- the method to call, may be bound or unbound. In either + case, a new instance of the method's class will be instantiated to + make the call. Note: In Python 3.x, if using an unbound method, you + must wrap it using pyversion.unbound_method. + + Optional arguments: + + * test -- the test function to call. If this is passed, it will be + called instead of getting a new bound method of the same name as the + desired method from the test instance. This is to support generator + methods that yield inline functions. + + * arg -- arguments to pass to the test function. This is to support + generator methods that yield arguments. + + * descriptor -- the function, other than the test, that should be used + to construct the test name. This is to support generator methods. + """ + self.method = method + self.test = test + self.arg = arg + self.descriptor = descriptor + if isfunction(method): + raise ValueError("Unbound methods must be wrapped using pyversion.unbound_method before passing to MethodTestCase") + self.cls = method.im_class + self.inst = self.cls() + if self.test is None: + method_name = self.method.__name__ + self.test = getattr(self.inst, method_name) + TestBase.__init__(self) + + def __str__(self): + func, arg = self._descriptors() + if hasattr(func, 'compat_func_name'): + name = func.compat_func_name + else: + name = func.__name__ + name = "%s.%s.%s" % (self.cls.__module__, + self.cls.__name__, + name) + if arg: + name = "%s%s" % (name, arg) + return name + __repr__ = __str__ + + def address(self): + """Return a round-trip name for this test, a name that can be + fed back as input to loadTestByName and (assuming the same + plugin configuration) result in the loading of this test. + """ + if self.descriptor is not None: + return test_address(self.descriptor) + else: + return test_address(self.method) + + def _context(self): + return self.cls + context = property(_context, None, None, + """Get context (class) of this test""") + + def setUp(self): + try_run(self.inst, ('setup', 'setUp')) + + def tearDown(self): + try_run(self.inst, ('teardown', 'tearDown')) + + def _descriptors(self): + """Get the descriptors of the test method: the method and + arguments that will be used to construct the test name. In + most cases, this is the method itself and no arguments. For + tests generated by generator methods, the original + (generator) method and args passed to the generated method + or function are returned. 
+ """ + if self.descriptor: + return self.descriptor, self.arg + else: + return self.method, self.arg diff --git a/scripts/external_libs/nose-1.3.4/nose/commands.py b/scripts/external_libs/nose-1.3.4/nose/commands.py new file mode 100755 index 00000000..ef0e9cae --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/commands.py @@ -0,0 +1,172 @@ +""" +nosetests setuptools command +---------------------------- + +The easiest way to run tests with nose is to use the `nosetests` setuptools +command:: + + python setup.py nosetests + +This command has one *major* benefit over the standard `test` command: *all +nose plugins are supported*. + +To configure the `nosetests` command, add a [nosetests] section to your +setup.cfg. The [nosetests] section can contain any command line arguments that +nosetests supports. The differences between issuing an option on the command +line and adding it to setup.cfg are: + +* In setup.cfg, the -- prefix must be excluded +* In setup.cfg, command line flags that take no arguments must be given an + argument flag (1, T or TRUE for active, 0, F or FALSE for inactive) + +Here's an example [nosetests] setup.cfg section:: + + [nosetests] + verbosity=1 + detailed-errors=1 + with-coverage=1 + cover-package=nose + debug=nose.loader + pdb=1 + pdb-failures=1 + +If you commonly run nosetests with a large number of options, using +the nosetests setuptools command and configuring with setup.cfg can +make running your tests much less tedious. (Note that the same options +and format supported in setup.cfg are supported in all other config +files, and the nosetests script will also load config files.) + +Another reason to run tests with the command is that the command will +install packages listed in your `tests_require`, as well as doing a +complete build of your package before running tests. For packages with +dependencies or that build C extensions, using the setuptools command +can be more convenient than building by hand and running the nosetests +script. + +Bootstrapping +------------- + +If you are distributing your project and want users to be able to run tests +without having to install nose themselves, add nose to the setup_requires +section of your setup():: + + setup( + # ... + setup_requires=['nose>=1.0'] + ) + +This will direct setuptools to download and activate nose during the setup +process, making the ``nosetests`` command available. 
+ +""" +try: + from setuptools import Command +except ImportError: + Command = nosetests = None +else: + from nose.config import Config, option_blacklist, user_config_files, \ + flag, _bool + from nose.core import TestProgram + from nose.plugins import DefaultPluginManager + + + def get_user_options(parser): + """convert a optparse option list into a distutils option tuple list""" + opt_list = [] + for opt in parser.option_list: + if opt._long_opts[0][2:] in option_blacklist: + continue + long_name = opt._long_opts[0][2:] + if opt.action not in ('store_true', 'store_false'): + long_name = long_name + "=" + short_name = None + if opt._short_opts: + short_name = opt._short_opts[0][1:] + opt_list.append((long_name, short_name, opt.help or "")) + return opt_list + + + class nosetests(Command): + description = "Run unit tests using nosetests" + __config = Config(files=user_config_files(), + plugins=DefaultPluginManager()) + __parser = __config.getParser() + user_options = get_user_options(__parser) + + def initialize_options(self): + """create the member variables, but change hyphens to + underscores + """ + + self.option_to_cmds = {} + for opt in self.__parser.option_list: + cmd_name = opt._long_opts[0][2:] + option_name = cmd_name.replace('-', '_') + self.option_to_cmds[option_name] = cmd_name + setattr(self, option_name, None) + self.attr = None + + def finalize_options(self): + """nothing to do here""" + pass + + def run(self): + """ensure tests are capable of being run, then + run nose.main with a reconstructed argument list""" + if getattr(self.distribution, 'use_2to3', False): + # If we run 2to3 we can not do this inplace: + + # Ensure metadata is up-to-date + build_py = self.get_finalized_command('build_py') + build_py.inplace = 0 + build_py.run() + bpy_cmd = self.get_finalized_command("build_py") + build_path = bpy_cmd.build_lib + + # Build extensions + egg_info = self.get_finalized_command('egg_info') + egg_info.egg_base = build_path + egg_info.run() + + build_ext = self.get_finalized_command('build_ext') + build_ext.inplace = 0 + build_ext.run() + else: + self.run_command('egg_info') + + # Build extensions in-place + build_ext = self.get_finalized_command('build_ext') + build_ext.inplace = 1 + build_ext.run() + + if self.distribution.install_requires: + self.distribution.fetch_build_eggs( + self.distribution.install_requires) + if self.distribution.tests_require: + self.distribution.fetch_build_eggs( + self.distribution.tests_require) + + ei_cmd = self.get_finalized_command("egg_info") + argv = ['nosetests', '--where', ei_cmd.egg_base] + for (option_name, cmd_name) in self.option_to_cmds.items(): + if option_name in option_blacklist: + continue + value = getattr(self, option_name) + if value is not None: + argv.extend( + self.cfgToArg(option_name.replace('_', '-'), value)) + TestProgram(argv=argv, config=self.__config) + + def cfgToArg(self, optname, value): + argv = [] + long_optname = '--' + optname + opt = self.__parser.get_option(long_optname) + if opt.action in ('store_true', 'store_false'): + if not flag(value): + raise ValueError("Invalid value '%s' for '%s'" % ( + value, optname)) + if _bool(value): + argv.append(long_optname) + else: + argv.extend([long_optname, value]) + return argv diff --git a/scripts/external_libs/nose-1.3.4/nose/config.py b/scripts/external_libs/nose-1.3.4/nose/config.py new file mode 100755 index 00000000..4214c2d6 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/config.py @@ -0,0 +1,661 @@ +import logging +import optparse +import os +import 
re +import sys +import ConfigParser +from optparse import OptionParser +from nose.util import absdir, tolist +from nose.plugins.manager import NoPlugins +from warnings import warn, filterwarnings + +log = logging.getLogger(__name__) + +# not allowed in config files +option_blacklist = ['help', 'verbose'] + +config_files = [ + # Linux users will prefer this + "~/.noserc", + # Windows users will prefer this + "~/nose.cfg" + ] + +# plaforms on which the exe check defaults to off +# Windows and IronPython +exe_allowed_platforms = ('win32', 'cli') + +filterwarnings("always", category=DeprecationWarning, + module=r'(.*\.)?nose\.config') + +class NoSuchOptionError(Exception): + def __init__(self, name): + Exception.__init__(self, name) + self.name = name + + +class ConfigError(Exception): + pass + + +class ConfiguredDefaultsOptionParser(object): + """ + Handler for options from commandline and config files. + """ + def __init__(self, parser, config_section, error=None, file_error=None): + self._parser = parser + self._config_section = config_section + if error is None: + error = self._parser.error + self._error = error + if file_error is None: + file_error = lambda msg, **kw: error(msg) + self._file_error = file_error + + def _configTuples(self, cfg, filename): + config = [] + if self._config_section in cfg.sections(): + for name, value in cfg.items(self._config_section): + config.append((name, value, filename)) + return config + + def _readFromFilenames(self, filenames): + config = [] + for filename in filenames: + cfg = ConfigParser.RawConfigParser() + try: + cfg.read(filename) + except ConfigParser.Error, exc: + raise ConfigError("Error reading config file %r: %s" % + (filename, str(exc))) + config.extend(self._configTuples(cfg, filename)) + return config + + def _readFromFileObject(self, fh): + cfg = ConfigParser.RawConfigParser() + try: + filename = fh.name + except AttributeError: + filename = '<???>' + try: + cfg.readfp(fh) + except ConfigParser.Error, exc: + raise ConfigError("Error reading config file %r: %s" % + (filename, str(exc))) + return self._configTuples(cfg, filename) + + def _readConfiguration(self, config_files): + try: + config_files.readline + except AttributeError: + filename_or_filenames = config_files + if isinstance(filename_or_filenames, basestring): + filenames = [filename_or_filenames] + else: + filenames = filename_or_filenames + config = self._readFromFilenames(filenames) + else: + fh = config_files + config = self._readFromFileObject(fh) + return config + + def _processConfigValue(self, name, value, values, parser): + opt_str = '--' + name + option = parser.get_option(opt_str) + if option is None: + raise NoSuchOptionError(name) + else: + option.process(opt_str, value, values, parser) + + def _applyConfigurationToValues(self, parser, config, values): + for name, value, filename in config: + if name in option_blacklist: + continue + try: + self._processConfigValue(name, value, values, parser) + except NoSuchOptionError, exc: + self._file_error( + "Error reading config file %r: " + "no such option %r" % (filename, exc.name), + name=name, filename=filename) + except optparse.OptionValueError, exc: + msg = str(exc).replace('--' + name, repr(name), 1) + self._file_error("Error reading config file %r: " + "%s" % (filename, msg), + name=name, filename=filename) + + def parseArgsAndConfigFiles(self, args, config_files): + values = self._parser.get_default_values() + try: + config = self._readConfiguration(config_files) + except ConfigError, exc: + self._error(str(exc)) + 
else: + try: + self._applyConfigurationToValues(self._parser, config, values) + except ConfigError, exc: + self._error(str(exc)) + return self._parser.parse_args(args, values) + + +class Config(object): + """nose configuration. + + Instances of Config are used throughout nose to configure + behavior, including plugin lists. Here are the default values for + all config keys:: + + self.env = env = kw.pop('env', {}) + self.args = () + self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + self.addPaths = not env.get('NOSE_NOPATH', False) + self.configSection = 'nosetests' + self.debug = env.get('NOSE_DEBUG') + self.debugLog = env.get('NOSE_DEBUG_LOG') + self.exclude = None + self.getTestCaseNamesCompat = False + self.includeExe = env.get('NOSE_INCLUDE_EXE', + sys.platform in exe_allowed_platforms) + self.ignoreFiles = (re.compile(r'^\.'), + re.compile(r'^_'), + re.compile(r'^setup\.py$') + ) + self.include = None + self.loggingConfig = None + self.logStream = sys.stderr + self.options = NoOptions() + self.parser = None + self.plugins = NoPlugins() + self.srcDirs = ('lib', 'src') + self.runOnInit = True + self.stopOnError = env.get('NOSE_STOP', False) + self.stream = sys.stderr + self.testNames = () + self.verbosity = int(env.get('NOSE_VERBOSE', 1)) + self.where = () + self.py3where = () + self.workingDir = None + """ + + def __init__(self, **kw): + self.env = env = kw.pop('env', {}) + self.args = () + self.testMatchPat = env.get('NOSE_TESTMATCH', + r'(?:^|[\b_\.%s-])[Tt]est' % os.sep) + self.testMatch = re.compile(self.testMatchPat) + self.addPaths = not env.get('NOSE_NOPATH', False) + self.configSection = 'nosetests' + self.debug = env.get('NOSE_DEBUG') + self.debugLog = env.get('NOSE_DEBUG_LOG') + self.exclude = None + self.getTestCaseNamesCompat = False + self.includeExe = env.get('NOSE_INCLUDE_EXE', + sys.platform in exe_allowed_platforms) + self.ignoreFilesDefaultStrings = [r'^\.', + r'^_', + r'^setup\.py$', + ] + self.ignoreFiles = map(re.compile, self.ignoreFilesDefaultStrings) + self.include = None + self.loggingConfig = None + self.logStream = sys.stderr + self.options = NoOptions() + self.parser = None + self.plugins = NoPlugins() + self.srcDirs = ('lib', 'src') + self.runOnInit = True + self.stopOnError = env.get('NOSE_STOP', False) + self.stream = sys.stderr + self.testNames = [] + self.verbosity = int(env.get('NOSE_VERBOSE', 1)) + self.where = () + self.py3where = () + self.workingDir = os.getcwd() + self.traverseNamespace = False + self.firstPackageWins = False + self.parserClass = OptionParser + self.worker = False + + self._default = self.__dict__.copy() + self.update(kw) + self._orig = self.__dict__.copy() + + def __getstate__(self): + state = self.__dict__.copy() + del state['stream'] + del state['_orig'] + del state['_default'] + del state['env'] + del state['logStream'] + # FIXME remove plugins, have only plugin manager class + state['plugins'] = self.plugins.__class__ + return state + + def __setstate__(self, state): + plugincls = state.pop('plugins') + self.update(state) + self.worker = True + # FIXME won't work for static plugin lists + self.plugins = plugincls() + self.plugins.loadPlugins() + # needed so .can_configure gets set appropriately + dummy_parser = self.parserClass() + self.plugins.addOptions(dummy_parser, {}) + self.plugins.configure(self.options, self) + + def __repr__(self): + d = self.__dict__.copy() + # don't expose env, could include sensitive info + d['env'] = {} + keys = [ k for k in d.keys() + if not k.startswith('_') ] + keys.sort() + 
return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k]) + for k in keys ]) + __str__ = __repr__ + + def _parseArgs(self, argv, cfg_files): + def warn_sometimes(msg, name=None, filename=None): + if (hasattr(self.plugins, 'excludedOption') and + self.plugins.excludedOption(name)): + msg = ("Option %r in config file %r ignored: " + "excluded by runtime environment" % + (name, filename)) + warn(msg, RuntimeWarning) + else: + raise ConfigError(msg) + parser = ConfiguredDefaultsOptionParser( + self.getParser(), self.configSection, file_error=warn_sometimes) + return parser.parseArgsAndConfigFiles(argv[1:], cfg_files) + + def configure(self, argv=None, doc=None): + """Configure the nose running environment. Execute configure before + collecting tests with nose.TestCollector to enable output capture and + other features. + """ + env = self.env + if argv is None: + argv = sys.argv + + cfg_files = getattr(self, 'files', []) + options, args = self._parseArgs(argv, cfg_files) + # If -c --config has been specified on command line, + # load those config files and reparse + if getattr(options, 'files', []): + options, args = self._parseArgs(argv, options.files) + + self.options = options + if args: + self.testNames = args + if options.testNames is not None: + self.testNames.extend(tolist(options.testNames)) + + if options.py3where is not None: + if sys.version_info >= (3,): + options.where = options.py3where + + # `where` is an append action, so it can't have a default value + # in the parser, or that default will always be in the list + if not options.where: + options.where = env.get('NOSE_WHERE', None) + + # include and exclude also + if not options.ignoreFiles: + options.ignoreFiles = env.get('NOSE_IGNORE_FILES', []) + if not options.include: + options.include = env.get('NOSE_INCLUDE', []) + if not options.exclude: + options.exclude = env.get('NOSE_EXCLUDE', []) + + self.addPaths = options.addPaths + self.stopOnError = options.stopOnError + self.verbosity = options.verbosity + self.includeExe = options.includeExe + self.traverseNamespace = options.traverseNamespace + self.debug = options.debug + self.debugLog = options.debugLog + self.loggingConfig = options.loggingConfig + self.firstPackageWins = options.firstPackageWins + self.configureLogging() + + if not options.byteCompile: + sys.dont_write_bytecode = True + + if options.where is not None: + self.configureWhere(options.where) + + if options.testMatch: + self.testMatch = re.compile(options.testMatch) + + if options.ignoreFiles: + self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles)) + log.info("Ignoring files matching %s", options.ignoreFiles) + else: + log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings) + + if options.include: + self.include = map(re.compile, tolist(options.include)) + log.info("Including tests matching %s", options.include) + + if options.exclude: + self.exclude = map(re.compile, tolist(options.exclude)) + log.info("Excluding tests matching %s", options.exclude) + + # When listing plugins we don't want to run them + if not options.showPlugins: + self.plugins.configure(options, self) + self.plugins.begin() + + def configureLogging(self): + """Configure logging for nose, or optionally other packages. Any logger + name may be set with the debug option, and that logger will be set to + debug level and be assigned the same handler as the nose loggers, unless + it already has a handler. 
+ """ + if self.loggingConfig: + from logging.config import fileConfig + fileConfig(self.loggingConfig) + return + + format = logging.Formatter('%(name)s: %(levelname)s: %(message)s') + if self.debugLog: + handler = logging.FileHandler(self.debugLog) + else: + handler = logging.StreamHandler(self.logStream) + handler.setFormatter(format) + + logger = logging.getLogger('nose') + logger.propagate = 0 + + # only add our default handler if there isn't already one there + # this avoids annoying duplicate log messages. + found = False + if self.debugLog: + debugLogAbsPath = os.path.abspath(self.debugLog) + for h in logger.handlers: + if type(h) == logging.FileHandler and \ + h.baseFilename == debugLogAbsPath: + found = True + else: + for h in logger.handlers: + if type(h) == logging.StreamHandler and \ + h.stream == self.logStream: + found = True + if not found: + logger.addHandler(handler) + + # default level + lvl = logging.WARNING + if self.verbosity >= 5: + lvl = 0 + elif self.verbosity >= 4: + lvl = logging.DEBUG + elif self.verbosity >= 3: + lvl = logging.INFO + logger.setLevel(lvl) + + # individual overrides + if self.debug: + # no blanks + debug_loggers = [ name for name in self.debug.split(',') + if name ] + for logger_name in debug_loggers: + l = logging.getLogger(logger_name) + l.setLevel(logging.DEBUG) + if not l.handlers and not logger_name.startswith('nose'): + l.addHandler(handler) + + def configureWhere(self, where): + """Configure the working directory or directories for the test run. + """ + from nose.importer import add_path + self.workingDir = None + where = tolist(where) + warned = False + for path in where: + if not self.workingDir: + abs_path = absdir(path) + if abs_path is None: + raise ValueError("Working directory %s not found, or " + "not a directory" % path) + log.info("Set working dir to %s", abs_path) + self.workingDir = abs_path + if self.addPaths and \ + os.path.exists(os.path.join(abs_path, '__init__.py')): + log.info("Working directory %s is a package; " + "adding to sys.path" % abs_path) + add_path(abs_path) + continue + if not warned: + warn("Use of multiple -w arguments is deprecated and " + "support may be removed in a future release. You can " + "get the same behavior by passing directories without " + "the -w argument on the command line, or by using the " + "--tests argument in a configuration file.", + DeprecationWarning) + warned = True + self.testNames.append(path) + + def default(self): + """Reset all config values to defaults. + """ + self.__dict__.update(self._default) + + def getParser(self, doc=None): + """Get the command line option parser. + """ + if self.parser: + return self.parser + env = self.env + parser = self.parserClass(doc) + parser.add_option( + "-V","--version", action="store_true", + dest="version", default=False, + help="Output nose version and exit") + parser.add_option( + "-p", "--plugins", action="store_true", + dest="showPlugins", default=False, + help="Output list of available plugins and exit. Combine with " + "higher verbosity for greater detail") + parser.add_option( + "-v", "--verbose", + action="count", dest="verbosity", + default=self.verbosity, + help="Be more verbose. 
[NOSE_VERBOSE]") + parser.add_option( + "--verbosity", action="store", dest="verbosity", + metavar='VERBOSITY', + type="int", help="Set verbosity; --verbosity=2 is " + "the same as -v") + parser.add_option( + "-q", "--quiet", action="store_const", const=0, dest="verbosity", + help="Be less verbose") + parser.add_option( + "-c", "--config", action="append", dest="files", + metavar="FILES", + help="Load configuration from config file(s). May be specified " + "multiple times; in that case, all config files will be " + "loaded and combined") + parser.add_option( + "-w", "--where", action="append", dest="where", + metavar="WHERE", + help="Look for tests in this directory. " + "May be specified multiple times. The first directory passed " + "will be used as the working directory, in place of the current " + "working directory, which is the default. Others will be added " + "to the list of tests to execute. [NOSE_WHERE]" + ) + parser.add_option( + "--py3where", action="append", dest="py3where", + metavar="PY3WHERE", + help="Look for tests in this directory under Python 3.x. " + "Functions the same as 'where', but only applies if running under " + "Python 3.x or above. Note that, if present under 3.x, this " + "option completely replaces any directories specified with " + "'where', so the 'where' option becomes ineffective. " + "[NOSE_PY3WHERE]" + ) + parser.add_option( + "-m", "--match", "--testmatch", action="store", + dest="testMatch", metavar="REGEX", + help="Files, directories, function names, and class names " + "that match this regular expression are considered tests. " + "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat, + default=self.testMatchPat) + parser.add_option( + "--tests", action="store", dest="testNames", default=None, + metavar='NAMES', + help="Run these tests (comma-separated list). This argument is " + "useful mainly from configuration files; on the command line, " + "just pass the tests to run as additional arguments with no " + "switch.") + parser.add_option( + "-l", "--debug", action="store", + dest="debug", default=self.debug, + help="Activate debug logging for one or more systems. " + "Available debug loggers: nose, nose.importer, " + "nose.inspector, nose.plugins, nose.result and " + "nose.selector. Separate multiple names with a comma.") + parser.add_option( + "--debug-log", dest="debugLog", action="store", + default=self.debugLog, metavar="FILE", + help="Log debug messages to this file " + "(default: sys.stderr)") + parser.add_option( + "--logging-config", "--log-config", + dest="loggingConfig", action="store", + default=self.loggingConfig, metavar="FILE", + help="Load logging config from this file -- bypasses all other" + " logging config settings.") + parser.add_option( + "-I", "--ignore-files", action="append", dest="ignoreFiles", + metavar="REGEX", + help="Completely ignore any file that matches this regular " + "expression. Takes precedence over any other settings or " + "plugins. " + "Specifying this option will replace the default setting. 
" + "Specify this option multiple times " + "to add more regular expressions [NOSE_IGNORE_FILES]") + parser.add_option( + "-e", "--exclude", action="append", dest="exclude", + metavar="REGEX", + help="Don't run tests that match regular " + "expression [NOSE_EXCLUDE]") + parser.add_option( + "-i", "--include", action="append", dest="include", + metavar="REGEX", + help="This regular expression will be applied to files, " + "directories, function names, and class names for a chance " + "to include additional tests that do not match TESTMATCH. " + "Specify this option multiple times " + "to add more regular expressions [NOSE_INCLUDE]") + parser.add_option( + "-x", "--stop", action="store_true", dest="stopOnError", + default=self.stopOnError, + help="Stop running tests after the first error or failure") + parser.add_option( + "-P", "--no-path-adjustment", action="store_false", + dest="addPaths", + default=self.addPaths, + help="Don't make any changes to sys.path when " + "loading tests [NOSE_NOPATH]") + parser.add_option( + "--exe", action="store_true", dest="includeExe", + default=self.includeExe, + help="Look for tests in python modules that are " + "executable. Normal behavior is to exclude executable " + "modules, since they may not be import-safe " + "[NOSE_INCLUDE_EXE]") + parser.add_option( + "--noexe", action="store_false", dest="includeExe", + help="DO NOT look for tests in python modules that are " + "executable. (The default on the windows platform is to " + "do so.)") + parser.add_option( + "--traverse-namespace", action="store_true", + default=self.traverseNamespace, dest="traverseNamespace", + help="Traverse through all path entries of a namespace package") + parser.add_option( + "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins", + action="store_true", default=False, dest="firstPackageWins", + help="nose's importer will normally evict a package from sys." + "modules if it sees a package with the same name in a different " + "location. Set this option to disable that behavior.") + parser.add_option( + "--no-byte-compile", + action="store_false", default=True, dest="byteCompile", + help="Prevent nose from byte-compiling the source into .pyc files " + "while nose is scanning for and running tests.") + + self.plugins.loadPlugins() + self.pluginOpts(parser) + + self.parser = parser + return parser + + def help(self, doc=None): + """Return the generated help message + """ + return self.getParser(doc).format_help() + + def pluginOpts(self, parser): + self.plugins.addOptions(parser, self.env) + + def reset(self): + self.__dict__.update(self._orig) + + def todict(self): + return self.__dict__.copy() + + def update(self, d): + self.__dict__.update(d) + + +class NoOptions(object): + """Options container that returns None for all options. + """ + def __getstate__(self): + return {} + + def __setstate__(self, state): + pass + + def __getnewargs__(self): + return () + + def __nonzero__(self): + return False + + +def user_config_files(): + """Return path to any existing user config files + """ + return filter(os.path.exists, + map(os.path.expanduser, config_files)) + + +def all_config_files(): + """Return path to any existing user config files, plus any setup.cfg + in the current working directory. 
+ """ + user = user_config_files() + if os.path.exists('setup.cfg'): + return user + ['setup.cfg'] + return user + + +# used when parsing config files +def flag(val): + """Does the value look like an on/off flag?""" + if val == 1: + return True + elif val == 0: + return False + val = str(val) + if len(val) > 5: + return False + return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF') + + +def _bool(val): + return str(val).upper() in ('1', 'T', 'TRUE', 'ON') diff --git a/scripts/external_libs/nose-1.3.4/nose/core.py b/scripts/external_libs/nose-1.3.4/nose/core.py new file mode 100755 index 00000000..49e7939b --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/core.py @@ -0,0 +1,341 @@ +"""Implements nose test program and collector. +""" +from __future__ import generators + +import logging +import os +import sys +import time +import unittest + +from nose.config import Config, all_config_files +from nose.loader import defaultTestLoader +from nose.plugins.manager import PluginManager, DefaultPluginManager, \ + RestrictedPluginManager +from nose.result import TextTestResult +from nose.suite import FinalizingSuiteWrapper +from nose.util import isclass, tolist + + +log = logging.getLogger('nose.core') +compat_24 = sys.version_info >= (2, 4) + +__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector', + 'TextTestRunner'] + + +class TextTestRunner(unittest.TextTestRunner): + """Test runner that uses nose's TextTestResult to enable errorClasses, + as well as providing hooks for plugins to override or replace the test + output stream, results, and the test case itself. + """ + def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1, + config=None): + if config is None: + config = Config() + self.config = config + unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity) + + + def _makeResult(self): + return TextTestResult(self.stream, + self.descriptions, + self.verbosity, + self.config) + + def run(self, test): + """Overrides to provide plugin hooks and defer all output to + the test result class. + """ + wrapper = self.config.plugins.prepareTest(test) + if wrapper is not None: + test = wrapper + + # plugins can decorate or capture the output stream + wrapped = self.config.plugins.setOutputStream(self.stream) + if wrapped is not None: + self.stream = wrapped + + result = self._makeResult() + start = time.time() + try: + test(result) + except KeyboardInterrupt: + pass + stop = time.time() + result.printErrors() + result.printSummary(start, stop) + self.config.plugins.finalize(result) + return result + + +class TestProgram(unittest.TestProgram): + """Collect and run tests, returning success or failure. + + The arguments to TestProgram() are the same as to + :func:`main()` and :func:`run()`: + + * module: All tests are in this module (default: None) + * defaultTest: Tests to load (default: '.') + * argv: Command line arguments (default: None; sys.argv is read) + * testRunner: Test runner instance (default: None) + * testLoader: Test loader instance (default: None) + * env: Environment; ignored if config is provided (default: None; + os.environ is read) + * config: :class:`nose.config.Config` instance (default: None) + * suite: Suite or list of tests to run (default: None). Passing a + suite or lists of tests will bypass all test discovery and + loading. 
*ALSO NOTE* that if you pass a unittest.TestSuite + instance as the suite, context fixtures at the class, module and + package level will not be used, and many plugin hooks will not + be called. If you want normal nose behavior, either pass a list + of tests, or a fully-configured :class:`nose.suite.ContextSuite`. + * exit: Exit after running tests and printing report (default: True) + * plugins: List of plugins to use; ignored if config is provided + (default: load plugins with DefaultPluginManager) + * addplugins: List of **extra** plugins to use. Pass a list of plugin + instances in this argument to make custom plugins available while + still using the DefaultPluginManager. + """ + verbosity = 1 + + def __init__(self, module=None, defaultTest='.', argv=None, + testRunner=None, testLoader=None, env=None, config=None, + suite=None, exit=True, plugins=None, addplugins=None): + if env is None: + env = os.environ + if config is None: + config = self.makeConfig(env, plugins) + if addplugins: + config.plugins.addPlugins(extraplugins=addplugins) + self.config = config + self.suite = suite + self.exit = exit + extra_args = {} + version = sys.version_info[0:2] + if version >= (2,7) and version != (3,0): + extra_args['exit'] = exit + unittest.TestProgram.__init__( + self, module=module, defaultTest=defaultTest, + argv=argv, testRunner=testRunner, testLoader=testLoader, + **extra_args) + + def getAllConfigFiles(self, env=None): + env = env or {} + if env.get('NOSE_IGNORE_CONFIG_FILES', False): + return [] + else: + return all_config_files() + + def makeConfig(self, env, plugins=None): + """Load a Config, pre-filled with user config files if any are + found. + """ + cfg_files = self.getAllConfigFiles(env) + if plugins: + manager = PluginManager(plugins=plugins) + else: + manager = DefaultPluginManager() + return Config( + env=env, files=cfg_files, plugins=manager) + + def parseArgs(self, argv): + """Parse argv and env and configure running environment. + """ + self.config.configure(argv, doc=self.usage()) + log.debug("configured %s", self.config) + + # quick outs: version, plugins (optparse would have already + # caught and exited on help) + if self.config.options.version: + from nose import __version__ + sys.stdout = sys.__stdout__ + print "%s version %s" % (os.path.basename(sys.argv[0]), __version__) + sys.exit(0) + + if self.config.options.showPlugins: + self.showPlugins() + sys.exit(0) + + if self.testLoader is None: + self.testLoader = defaultTestLoader(config=self.config) + elif isclass(self.testLoader): + self.testLoader = self.testLoader(config=self.config) + plug_loader = self.config.plugins.prepareTestLoader(self.testLoader) + if plug_loader is not None: + self.testLoader = plug_loader + log.debug("test loader is %s", self.testLoader) + + # FIXME if self.module is a string, add it to self.testNames? not sure + + if self.config.testNames: + self.testNames = self.config.testNames + else: + self.testNames = tolist(self.defaultTest) + log.debug('defaultTest %s', self.defaultTest) + log.debug('Test names are %s', self.testNames) + if self.config.workingDir is not None: + os.chdir(self.config.workingDir) + self.createTests() + + def createTests(self): + """Create the tests to run. If a self.suite + is set, then that suite will be used. Otherwise, tests will be + loaded from the given test names (self.testNames) using the + test loader. + """ + log.debug("createTests called with %s", self.suite) + if self.suite is not None: + # We were given an explicit suite to run. 
Make sure it's + # loaded and wrapped correctly. + self.test = self.testLoader.suiteClass(self.suite) + else: + self.test = self.testLoader.loadTestsFromNames(self.testNames) + + def runTests(self): + """Run Tests. Returns true on success, false on failure, and sets + self.success to the same value. + """ + log.debug("runTests called") + if self.testRunner is None: + self.testRunner = TextTestRunner(stream=self.config.stream, + verbosity=self.config.verbosity, + config=self.config) + plug_runner = self.config.plugins.prepareTestRunner(self.testRunner) + if plug_runner is not None: + self.testRunner = plug_runner + result = self.testRunner.run(self.test) + self.success = result.wasSuccessful() + if self.exit: + sys.exit(not self.success) + return self.success + + def showPlugins(self): + """Print list of available plugins. + """ + import textwrap + + class DummyParser: + def __init__(self): + self.options = [] + def add_option(self, *arg, **kw): + self.options.append((arg, kw.pop('help', ''))) + + v = self.config.verbosity + self.config.plugins.sort() + for p in self.config.plugins: + print "Plugin %s" % p.name + if v >= 2: + print " score: %s" % p.score + print '\n'.join(textwrap.wrap(p.help().strip(), + initial_indent=' ', + subsequent_indent=' ')) + if v >= 3: + parser = DummyParser() + p.addOptions(parser) + if len(parser.options): + print + print " Options:" + for opts, help in parser.options: + print ' %s' % (', '.join(opts)) + if help: + print '\n'.join( + textwrap.wrap(help.strip(), + initial_indent=' ', + subsequent_indent=' ')) + print + + def usage(cls): + import nose + try: + ld = nose.__loader__ + text = ld.get_data(os.path.join( + os.path.dirname(__file__), 'usage.txt')) + except AttributeError: + f = open(os.path.join( + os.path.dirname(__file__), 'usage.txt'), 'r') + try: + text = f.read() + finally: + f.close() + # Ensure that we return str, not bytes. + if not isinstance(text, str): + text = text.decode('utf-8') + return text + usage = classmethod(usage) + +# backwards compatibility +run_exit = main = TestProgram + + +def run(*arg, **kw): + """Collect and run tests, returning success or failure. + + The arguments to `run()` are the same as to `main()`: + + * module: All tests are in this module (default: None) + * defaultTest: Tests to load (default: '.') + * argv: Command line arguments (default: None; sys.argv is read) + * testRunner: Test runner instance (default: None) + * testLoader: Test loader instance (default: None) + * env: Environment; ignored if config is provided (default: None; + os.environ is read) + * config: :class:`nose.config.Config` instance (default: None) + * suite: Suite or list of tests to run (default: None). Passing a + suite or lists of tests will bypass all test discovery and + loading. *ALSO NOTE* that if you pass a unittest.TestSuite + instance as the suite, context fixtures at the class, module and + package level will not be used, and many plugin hooks will not + be called. If you want normal nose behavior, either pass a list + of tests, or a fully-configured :class:`nose.suite.ContextSuite`. + * plugins: List of plugins to use; ignored if config is provided + (default: load plugins with DefaultPluginManager) + * addplugins: List of **extra** plugins to use. Pass a list of plugin + instances in this argument to make custom plugins available while + still using the DefaultPluginManager. + + With the exception that the ``exit`` argument is always set + to False. 
+ """ + kw['exit'] = False + return TestProgram(*arg, **kw).success + + +def runmodule(name='__main__', **kw): + """Collect and run tests in a single module only. Defaults to running + tests in __main__. Additional arguments to TestProgram may be passed + as keyword arguments. + """ + main(defaultTest=name, **kw) + + +def collector(): + """TestSuite replacement entry point. Use anywhere you might use a + unittest.TestSuite. The collector will, by default, load options from + all config files and execute loader.loadTestsFromNames() on the + configured testNames, or '.' if no testNames are configured. + """ + # plugins that implement any of these methods are disabled, since + # we don't control the test runner and won't be able to run them + # finalize() is also not called, but plugins that use it aren't disabled, + # because capture needs it. + setuptools_incompat = ('report', 'prepareTest', + 'prepareTestLoader', 'prepareTestRunner', + 'setOutputStream') + + plugins = RestrictedPluginManager(exclude=setuptools_incompat) + conf = Config(files=all_config_files(), + plugins=plugins) + conf.configure(argv=['collector']) + loader = defaultTestLoader(conf) + + if conf.testNames: + suite = loader.loadTestsFromNames(conf.testNames) + else: + suite = loader.loadTestsFromNames(('.',)) + return FinalizingSuiteWrapper(suite, plugins.finalize) + + + +if __name__ == '__main__': + main() diff --git a/scripts/external_libs/nose-1.3.4/nose/exc.py b/scripts/external_libs/nose-1.3.4/nose/exc.py new file mode 100755 index 00000000..8b780db0 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/exc.py @@ -0,0 +1,9 @@ +"""Exceptions for marking tests as skipped or deprecated. + +This module exists to provide backwards compatibility with previous +versions of nose where skipped and deprecated tests were core +functionality, rather than being provided by plugins. It may be +removed in a future release. +""" +from nose.plugins.skip import SkipTest +from nose.plugins.deprecated import DeprecatedTest diff --git a/scripts/external_libs/nose-1.3.4/nose/ext/__init__.py b/scripts/external_libs/nose-1.3.4/nose/ext/__init__.py new file mode 100755 index 00000000..5fd1516a --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/ext/__init__.py @@ -0,0 +1,3 @@ +""" +External or vendor files +""" diff --git a/scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py b/scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py new file mode 100755 index 00000000..332cf08c --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py @@ -0,0 +1,2272 @@ +# Module doctest. +# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). +# Major enhancements and refactoring by: +# Jim Fulton +# Edward Loper + +# Provided as-is; use at your own risk; no warranty; no promises; enjoy! +# +# Modified for inclusion in nose to provide support for DocFileTest in +# python 2.3: +# +# - all doctests removed from module (they fail under 2.3 and 2.5) +# - now handles the $py.class extension when ran under Jython + +r"""Module doctest -- a framework for running examples in docstrings. 
+ +In simplest use, end each module M to be tested with: + +def _test(): + import doctest + doctest.testmod() + +if __name__ == "__main__": + _test() + +Then running the module as a script will cause the examples in the +docstrings to get executed and verified: + +python M.py + +This won't display anything unless an example fails, in which case the +failing example(s) and the cause(s) of the failure(s) are printed to stdout +(why not stderr? because stderr is a lame hack <0.2 wink>), and the final +line of output is "Test failed.". + +Run it with the -v switch instead: + +python M.py -v + +and a detailed report of all examples tried is printed to stdout, along +with assorted summaries at the end. + +You can force verbose mode by passing "verbose=True" to testmod, or prohibit +it by passing "verbose=False". In either of those cases, sys.argv is not +examined by testmod. + +There are a variety of other ways to run doctests, including integration +with the unittest framework, and support for running non-Python text +files containing doctests. There are also many ways to override parts +of doctest's default behaviors. See the Library Reference Manual for +details. +""" + +__docformat__ = 'reStructuredText en' + +__all__ = [ + # 0, Option Flags + 'register_optionflag', + 'DONT_ACCEPT_TRUE_FOR_1', + 'DONT_ACCEPT_BLANKLINE', + 'NORMALIZE_WHITESPACE', + 'ELLIPSIS', + 'IGNORE_EXCEPTION_DETAIL', + 'COMPARISON_FLAGS', + 'REPORT_UDIFF', + 'REPORT_CDIFF', + 'REPORT_NDIFF', + 'REPORT_ONLY_FIRST_FAILURE', + 'REPORTING_FLAGS', + # 1. Utility Functions + 'is_private', + # 2. Example & DocTest + 'Example', + 'DocTest', + # 3. Doctest Parser + 'DocTestParser', + # 4. Doctest Finder + 'DocTestFinder', + # 5. Doctest Runner + 'DocTestRunner', + 'OutputChecker', + 'DocTestFailure', + 'UnexpectedException', + 'DebugRunner', + # 6. Test Functions + 'testmod', + 'testfile', + 'run_docstring_examples', + # 7. Tester + 'Tester', + # 8. Unittest Support + 'DocTestSuite', + 'DocFileSuite', + 'set_unittest_reportflags', + # 9. Debugging Support + 'script_from_examples', + 'testsource', + 'debug_src', + 'debug', +] + +import __future__ + +import sys, traceback, inspect, linecache, os, re +import unittest, difflib, pdb, tempfile +import warnings +from StringIO import StringIO + +# Don't whine about the deprecated is_private function in this +# module's tests. +warnings.filterwarnings("ignore", "is_private", DeprecationWarning, + __name__, 0) + +# There are 4 basic classes: +# - Example: a <source, want> pair, plus an intra-docstring line number. +# - DocTest: a collection of examples, parsed from a docstring, plus +# info about where the docstring came from (name, filename, lineno). +# - DocTestFinder: extracts DocTests from a given object's docstring and +# its contained objects' docstrings. +# - DocTestRunner: runs DocTest cases, and accumulates statistics. +# +# So the basic picture is: +# +# list of: +# +------+ +---------+ +-------+ +# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| +# +------+ +---------+ +-------+ +# | Example | +# | ... | +# | Example | +# +---------+ + +# Option constants. + +OPTIONFLAGS_BY_NAME = {} +def register_optionflag(name): + # Create a new flag unless `name` is already known. 
+ return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME)) + +DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') +DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') +NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') +ELLIPSIS = register_optionflag('ELLIPSIS') +IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') + +COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | + DONT_ACCEPT_BLANKLINE | + NORMALIZE_WHITESPACE | + ELLIPSIS | + IGNORE_EXCEPTION_DETAIL) + +REPORT_UDIFF = register_optionflag('REPORT_UDIFF') +REPORT_CDIFF = register_optionflag('REPORT_CDIFF') +REPORT_NDIFF = register_optionflag('REPORT_NDIFF') +REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') + +REPORTING_FLAGS = (REPORT_UDIFF | + REPORT_CDIFF | + REPORT_NDIFF | + REPORT_ONLY_FIRST_FAILURE) + +# Special string markers for use in `want` strings: +BLANKLINE_MARKER = '<BLANKLINE>' +ELLIPSIS_MARKER = '...' + +###################################################################### +## Table of Contents +###################################################################### +# 1. Utility Functions +# 2. Example & DocTest -- store test cases +# 3. DocTest Parser -- extracts examples from strings +# 4. DocTest Finder -- extracts test cases from objects +# 5. DocTest Runner -- runs test cases +# 6. Test Functions -- convenient wrappers for testing +# 7. Tester Class -- for backwards compatibility +# 8. Unittest Support +# 9. Debugging Support +# 10. Example Usage + +###################################################################### +## 1. Utility Functions +###################################################################### + +def is_private(prefix, base): + """prefix, base -> true iff name prefix + "." + base is "private". + + Prefix may be an empty string, and base does not contain a period. + Prefix is ignored (although functions you write conforming to this + protocol may make use of it). + Return true iff base begins with an (at least one) underscore, but + does not both begin and end with (at least) two underscores. + """ + warnings.warn("is_private is deprecated; it wasn't useful; " + "examine DocTestFinder.find() lists instead", + DeprecationWarning, stacklevel=2) + return base[:1] == "_" and not base[:2] == "__" == base[-2:] + +def _extract_future_flags(globs): + """ + Return the compiler-flags associated with the future features that + have been imported into the given namespace (globs). + """ + flags = 0 + for fname in __future__.all_feature_names: + feature = globs.get(fname, None) + if feature is getattr(__future__, fname): + flags |= feature.compiler_flag + return flags + +def _normalize_module(module, depth=2): + """ + Return the module specified by `module`. In particular: + - If `module` is a module, then return module. + - If `module` is a string, then import and return the + module with that name. + - If `module` is None, then return the calling module. + The calling module is assumed to be the module of + the stack frame at the given depth in the call stack. 
+ """ + if inspect.ismodule(module): + return module + elif isinstance(module, (str, unicode)): + return __import__(module, globals(), locals(), ["*"]) + elif module is None: + return sys.modules[sys._getframe(depth).f_globals['__name__']] + else: + raise TypeError("Expected a module, string, or None") + +def _indent(s, indent=4): + """ + Add the given number of space characters to the beginning every + non-blank line in `s`, and return the result. + """ + # This regexp matches the start of non-blank lines: + return re.sub('(?m)^(?!$)', indent*' ', s) + +def _exception_traceback(exc_info): + """ + Return a string containing a traceback message for the given + exc_info tuple (as returned by sys.exc_info()). + """ + # Get a traceback message. + excout = StringIO() + exc_type, exc_val, exc_tb = exc_info + traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) + return excout.getvalue() + +# Override some StringIO methods. +class _SpoofOut(StringIO): + def getvalue(self): + result = StringIO.getvalue(self) + # If anything at all was written, make sure there's a trailing + # newline. There's no way for the expected output to indicate + # that a trailing newline is missing. + if result and not result.endswith("\n"): + result += "\n" + # Prevent softspace from screwing up the next test case, in + # case they used print with a trailing comma in an example. + if hasattr(self, "softspace"): + del self.softspace + return result + + def truncate(self, size=None): + StringIO.truncate(self, size) + if hasattr(self, "softspace"): + del self.softspace + +# Worst-case linear-time ellipsis matching. +def _ellipsis_match(want, got): + if ELLIPSIS_MARKER not in want: + return want == got + + # Find "the real" strings. + ws = want.split(ELLIPSIS_MARKER) + assert len(ws) >= 2 + + # Deal with exact matches possibly needed at one or both ends. + startpos, endpos = 0, len(got) + w = ws[0] + if w: # starts with exact match + if got.startswith(w): + startpos = len(w) + del ws[0] + else: + return False + w = ws[-1] + if w: # ends with exact match + if got.endswith(w): + endpos -= len(w) + del ws[-1] + else: + return False + + if startpos > endpos: + # Exact end matches required more characters than we have, as in + # _ellipsis_match('aa...aa', 'aaa') + return False + + # For the rest, we only need to find the leftmost non-overlapping + # match for each piece. If there's no overall match that way alone, + # there's no overall match period. + for w in ws: + # w may be '' at times, if there are consecutive ellipses, or + # due to an ellipsis at the start or end of `want`. That's OK. + # Search for an empty string succeeds, and doesn't change startpos. + startpos = got.find(w, startpos, endpos) + if startpos < 0: + return False + startpos += len(w) + + return True + +def _comment_line(line): + "Return a commented form of the given line" + line = line.rstrip() + if line: + return '# '+line + else: + return '#' + +class _OutputRedirectingPdb(pdb.Pdb): + """ + A specialized version of the python debugger that redirects stdout + to a given stream when interacting with the user. Stdout is *not* + redirected when traced code is executed. + """ + def __init__(self, out): + self.__out = out + pdb.Pdb.__init__(self) + + def trace_dispatch(self, *args): + # Redirect stdout to the given stream. + save_stdout = sys.stdout + sys.stdout = self.__out + # Call Pdb's trace dispatch method. 
+ try: + return pdb.Pdb.trace_dispatch(self, *args) + finally: + sys.stdout = save_stdout + +# [XX] Normalize with respect to os.path.pardir? +def _module_relative_path(module, path): + if not inspect.ismodule(module): + raise TypeError, 'Expected a module: %r' % module + if path.startswith('/'): + raise ValueError, 'Module-relative files may not have absolute paths' + + # Find the base directory for the path. + if hasattr(module, '__file__'): + # A normal module/package + basedir = os.path.split(module.__file__)[0] + elif module.__name__ == '__main__': + # An interactive session. + if len(sys.argv)>0 and sys.argv[0] != '': + basedir = os.path.split(sys.argv[0])[0] + else: + basedir = os.curdir + else: + # A module w/o __file__ (this includes builtins) + raise ValueError("Can't resolve paths relative to the module " + + module + " (it has no __file__)") + + # Combine the base directory and the path. + return os.path.join(basedir, *(path.split('/'))) + +###################################################################### +## 2. Example & DocTest +###################################################################### +## - An "example" is a <source, want> pair, where "source" is a +## fragment of source code, and "want" is the expected output for +## "source." The Example class also includes information about +## where the example was extracted from. +## +## - A "doctest" is a collection of examples, typically extracted from +## a string (such as an object's docstring). The DocTest class also +## includes information about where the string was extracted from. + +class Example: + """ + A single doctest example, consisting of source code and expected + output. `Example` defines the following attributes: + + - source: A single Python statement, always ending with a newline. + The constructor adds a newline if needed. + + - want: The expected output from running the source code (either + from stdout, or a traceback in case of exception). `want` ends + with a newline unless it's empty, in which case it's an empty + string. The constructor adds a newline if needed. + + - exc_msg: The exception message generated by the example, if + the example is expected to generate an exception; or `None` if + it is not expected to generate an exception. This exception + message is compared against the return value of + `traceback.format_exception_only()`. `exc_msg` ends with a + newline unless it's `None`. The constructor adds a newline + if needed. + + - lineno: The line number within the DocTest string containing + this Example where the Example begins. This line number is + zero-based, with respect to the beginning of the DocTest. + + - indent: The example's indentation in the DocTest string. + I.e., the number of space characters that preceed the + example's first prompt. + + - options: A dictionary mapping from option flags to True or + False, which is used to override default options for this + example. Any option flags not contained in this dictionary + are left at their default value (as specified by the + DocTestRunner's optionflags). By default, no options are set. + """ + def __init__(self, source, want, exc_msg=None, lineno=0, indent=0, + options=None): + # Normalize inputs. + if not source.endswith('\n'): + source += '\n' + if want and not want.endswith('\n'): + want += '\n' + if exc_msg is not None and not exc_msg.endswith('\n'): + exc_msg += '\n' + # Store properties. 
+ self.source = source + self.want = want + self.lineno = lineno + self.indent = indent + if options is None: options = {} + self.options = options + self.exc_msg = exc_msg + +class DocTest: + """ + A collection of doctest examples that should be run in a single + namespace. Each `DocTest` defines the following attributes: + + - examples: the list of examples. + + - globs: The namespace (aka globals) that the examples should + be run in. + + - name: A name identifying the DocTest (typically, the name of + the object whose docstring this DocTest was extracted from). + + - filename: The name of the file that this DocTest was extracted + from, or `None` if the filename is unknown. + + - lineno: The line number within filename where this DocTest + begins, or `None` if the line number is unavailable. This + line number is zero-based, with respect to the beginning of + the file. + + - docstring: The string that the examples were extracted from, + or `None` if the string is unavailable. + """ + def __init__(self, examples, globs, name, filename, lineno, docstring): + """ + Create a new DocTest containing the given examples. The + DocTest's globals are initialized with a copy of `globs`. + """ + assert not isinstance(examples, basestring), \ + "DocTest no longer accepts str; use DocTestParser instead" + self.examples = examples + self.docstring = docstring + self.globs = globs.copy() + self.name = name + self.filename = filename + self.lineno = lineno + + def __repr__(self): + if len(self.examples) == 0: + examples = 'no examples' + elif len(self.examples) == 1: + examples = '1 example' + else: + examples = '%d examples' % len(self.examples) + return ('<DocTest %s from %s:%s (%s)>' % + (self.name, self.filename, self.lineno, examples)) + + + # This lets us sort tests by name: + def __cmp__(self, other): + if not isinstance(other, DocTest): + return -1 + return cmp((self.name, self.filename, self.lineno, id(self)), + (other.name, other.filename, other.lineno, id(other))) + +###################################################################### +## 3. DocTestParser +###################################################################### + +class DocTestParser: + """ + A class used to parse strings containing doctest examples. + """ + # This regular expression is used to find doctest examples in a + # string. It defines three groups: `source` is the source code + # (including leading indentation and prompts); `indent` is the + # indentation of the first (PS1) line of the source code; and + # `want` is the expected output (including leading indentation). + _EXAMPLE_RE = re.compile(r''' + # Source consists of a PS1 line followed by zero or more PS2 lines. + (?P<source> + (?:^(?P<indent> [ ]*) >>> .*) # PS1 line + (?:\n [ ]* \.\.\. .*)*) # PS2 lines + \n? + # Want consists of any non-blank lines that do not start with PS1. + (?P<want> (?:(?![ ]*$) # Not a blank line + (?![ ]*>>>) # Not a line starting with PS1 + .*$\n? # But any other line + )*) + ''', re.MULTILINE | re.VERBOSE) + + # A regular expression for handling `want` strings that contain + # expected exceptions. It divides `want` into three pieces: + # - the traceback header line (`hdr`) + # - the traceback stack (`stack`) + # - the exception message (`msg`), as generated by + # traceback.format_exception_only() + # `msg` may have multiple lines. We assume/require that the + # exception message is the first non-indented line starting with a word + # character following the traceback header line. 
+ _EXCEPTION_RE = re.compile(r""" + # Grab the traceback header. Different versions of Python have + # said different things on the first traceback line. + ^(?P<hdr> Traceback\ \( + (?: most\ recent\ call\ last + | innermost\ last + ) \) : + ) + \s* $ # toss trailing whitespace on the header. + (?P<stack> .*?) # don't blink: absorb stuff until... + ^ (?P<msg> \w+ .*) # a line *starts* with alphanum. + """, re.VERBOSE | re.MULTILINE | re.DOTALL) + + # A callable returning a true value iff its argument is a blank line + # or contains a single comment. + _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match + + def parse(self, string, name='<string>'): + """ + Divide the given string into examples and intervening text, + and return them as a list of alternating Examples and strings. + Line numbers for the Examples are 0-based. The optional + argument `name` is a name identifying this string, and is only + used for error messages. + """ + string = string.expandtabs() + # If all lines begin with the same indentation, then strip it. + min_indent = self._min_indent(string) + if min_indent > 0: + string = '\n'.join([l[min_indent:] for l in string.split('\n')]) + + output = [] + charno, lineno = 0, 0 + # Find all doctest examples in the string: + for m in self._EXAMPLE_RE.finditer(string): + # Add the pre-example text to `output`. + output.append(string[charno:m.start()]) + # Update lineno (lines before this example) + lineno += string.count('\n', charno, m.start()) + # Extract info from the regexp match. + (source, options, want, exc_msg) = \ + self._parse_example(m, name, lineno) + # Create an Example, and add it to the list. + if not self._IS_BLANK_OR_COMMENT(source): + output.append( Example(source, want, exc_msg, + lineno=lineno, + indent=min_indent+len(m.group('indent')), + options=options) ) + # Update lineno (lines inside this example) + lineno += string.count('\n', m.start(), m.end()) + # Update charno. + charno = m.end() + # Add any remaining post-example text to `output`. + output.append(string[charno:]) + return output + + def get_doctest(self, string, globs, name, filename, lineno): + """ + Extract all doctest examples from the given string, and + collect them into a `DocTest` object. + + `globs`, `name`, `filename`, and `lineno` are attributes for + the new `DocTest` object. See the documentation for `DocTest` + for more information. + """ + return DocTest(self.get_examples(string, name), globs, + name, filename, lineno, string) + + def get_examples(self, string, name='<string>'): + """ + Extract all doctest examples from the given string, and return + them as a list of `Example` objects. Line numbers are + 0-based, because it's most common in doctests that nothing + interesting appears on the same line as opening triple-quote, + and so the first interesting line is called \"line 1\" then. + + The optional argument `name` is a name identifying this + string, and is only used for error messages. + """ + return [x for x in self.parse(string, name) + if isinstance(x, Example)] + + def _parse_example(self, m, name, lineno): + """ + Given a regular expression match from `_EXAMPLE_RE` (`m`), + return a pair `(source, want)`, where `source` is the matched + example's source code (with prompts and indentation stripped); + and `want` is the example's expected output (with indentation + stripped). + + `name` is the string's name, and `lineno` is the line number + where the example starts; both are used for error messages. + """ + # Get the example's indentation level. 
+ indent = len(m.group('indent')) + + # Divide source into lines; check that they're properly + # indented; and then strip their indentation & prompts. + source_lines = m.group('source').split('\n') + self._check_prompt_blank(source_lines, indent, name, lineno) + self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) + source = '\n'.join([sl[indent+4:] for sl in source_lines]) + + # Divide want into lines; check that it's properly indented; and + # then strip the indentation. Spaces before the last newline should + # be preserved, so plain rstrip() isn't good enough. + want = m.group('want') + want_lines = want.split('\n') + if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): + del want_lines[-1] # forget final newline & spaces after it + self._check_prefix(want_lines, ' '*indent, name, + lineno + len(source_lines)) + want = '\n'.join([wl[indent:] for wl in want_lines]) + + # If `want` contains a traceback message, then extract it. + m = self._EXCEPTION_RE.match(want) + if m: + exc_msg = m.group('msg') + else: + exc_msg = None + + # Extract options from the source. + options = self._find_options(source, name, lineno) + + return source, options, want, exc_msg + + # This regular expression looks for option directives in the + # source code of an example. Option directives are comments + # starting with "doctest:". Warning: this may give false + # positives for string-literals that contain the string + # "#doctest:". Eliminating these false positives would require + # actually parsing the string; but we limit them by ignoring any + # line containing "#doctest:" that is *followed* by a quote mark. + _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$', + re.MULTILINE) + + def _find_options(self, source, name, lineno): + """ + Return a dictionary containing option overrides extracted from + option directives in the given source string. + + `name` is the string's name, and `lineno` is the line number + where the example starts; both are used for error messages. + """ + options = {} + # (note: with the current regexp, this will match at most once:) + for m in self._OPTION_DIRECTIVE_RE.finditer(source): + option_strings = m.group(1).replace(',', ' ').split() + for option in option_strings: + if (option[0] not in '+-' or + option[1:] not in OPTIONFLAGS_BY_NAME): + raise ValueError('line %r of the doctest for %s ' + 'has an invalid option: %r' % + (lineno+1, name, option)) + flag = OPTIONFLAGS_BY_NAME[option[1:]] + options[flag] = (option[0] == '+') + if options and self._IS_BLANK_OR_COMMENT(source): + raise ValueError('line %r of the doctest for %s has an option ' + 'directive on a line with no example: %r' % + (lineno, name, source)) + return options + + # This regular expression finds the indentation of every non-blank + # line in a string. + _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE) + + def _min_indent(self, s): + "Return the minimum indentation of any non-blank line in `s`" + indents = [len(indent) for indent in self._INDENT_RE.findall(s)] + if len(indents) > 0: + return min(indents) + else: + return 0 + + def _check_prompt_blank(self, lines, indent, name, lineno): + """ + Given the lines of a source string (including prompts and + leading indentation), check to make sure that every prompt is + followed by a space character. If any line is not followed by + a space character, then raise ValueError. 
+ """ + for i, line in enumerate(lines): + if len(line) >= indent+4 and line[indent+3] != ' ': + raise ValueError('line %r of the docstring for %s ' + 'lacks blank after %s: %r' % + (lineno+i+1, name, + line[indent:indent+3], line)) + + def _check_prefix(self, lines, prefix, name, lineno): + """ + Check that every line in the given list starts with the given + prefix; if any line does not, then raise a ValueError. + """ + for i, line in enumerate(lines): + if line and not line.startswith(prefix): + raise ValueError('line %r of the docstring for %s has ' + 'inconsistent leading whitespace: %r' % + (lineno+i+1, name, line)) + + +###################################################################### +## 4. DocTest Finder +###################################################################### + +class DocTestFinder: + """ + A class used to extract the DocTests that are relevant to a given + object, from its docstring and the docstrings of its contained + objects. Doctests can currently be extracted from the following + object types: modules, functions, classes, methods, staticmethods, + classmethods, and properties. + """ + + def __init__(self, verbose=False, parser=DocTestParser(), + recurse=True, _namefilter=None, exclude_empty=True): + """ + Create a new doctest finder. + + The optional argument `parser` specifies a class or + function that should be used to create new DocTest objects (or + objects that implement the same interface as DocTest). The + signature for this factory function should match the signature + of the DocTest constructor. + + If the optional argument `recurse` is false, then `find` will + only examine the given object, and not any contained objects. + + If the optional argument `exclude_empty` is false, then `find` + will include tests for objects with empty docstrings. + """ + self._parser = parser + self._verbose = verbose + self._recurse = recurse + self._exclude_empty = exclude_empty + # _namefilter is undocumented, and exists only for temporary backward- + # compatibility support of testmod's deprecated isprivate mess. + self._namefilter = _namefilter + + def find(self, obj, name=None, module=None, globs=None, + extraglobs=None): + """ + Return a list of the DocTests that are defined by the given + object's docstring, or by any of its contained objects' + docstrings. + + The optional parameter `module` is the module that contains + the given object. If the module is not specified or is None, then + the test finder will attempt to automatically determine the + correct module. The object's module is used: + + - As a default namespace, if `globs` is not specified. + - To prevent the DocTestFinder from extracting DocTests + from objects that are imported from other modules. + - To find the name of the file containing the object. + - To help find the line number of the object within its + file. + + Contained objects whose module does not match `module` are ignored. + + If `module` is False, no attempt to find the module will be made. + This is obscure, of use mostly in tests: if `module` is False, or + is None but cannot be found automatically, then all objects are + considered to belong to the (non-existent) module, so all contained + objects will (recursively) be searched for doctests. + + The globals for each DocTest is formed by combining `globs` + and `extraglobs` (bindings in `extraglobs` override bindings + in `globs`). A new copy of the globals dictionary is created + for each DocTest. 
If `globs` is not specified, then it + defaults to the module's `__dict__`, if specified, or {} + otherwise. If `extraglobs` is not specified, then it defaults + to {}. + + """ + # If name was not specified, then extract it from the object. + if name is None: + name = getattr(obj, '__name__', None) + if name is None: + raise ValueError("DocTestFinder.find: name must be given " + "when obj.__name__ doesn't exist: %r" % + (type(obj),)) + + # Find the module that contains the given object (if obj is + # a module, then module=obj.). Note: this may fail, in which + # case module will be None. + if module is False: + module = None + elif module is None: + module = inspect.getmodule(obj) + + # Read the module's source code. This is used by + # DocTestFinder._find_lineno to find the line number for a + # given object's docstring. + try: + file = inspect.getsourcefile(obj) or inspect.getfile(obj) + source_lines = linecache.getlines(file) + if not source_lines: + source_lines = None + except TypeError: + source_lines = None + + # Initialize globals, and merge in extraglobs. + if globs is None: + if module is None: + globs = {} + else: + globs = module.__dict__.copy() + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + + # Recursively expore `obj`, extracting DocTests. + tests = [] + self._find(tests, obj, name, module, source_lines, globs, {}) + # Sort the tests by alpha order of names, for consistency in + # verbose-mode output. This was a feature of doctest in Pythons + # <= 2.3 that got lost by accident in 2.4. It was repaired in + # 2.4.4 and 2.5. + tests.sort() + return tests + + def _filter(self, obj, prefix, base): + """ + Return true if the given object should not be examined. + """ + return (self._namefilter is not None and + self._namefilter(prefix, base)) + + def _from_module(self, module, object): + """ + Return true if the given object is defined in the given + module. + """ + if module is None: + return True + elif inspect.isfunction(object): + return module.__dict__ is object.func_globals + elif inspect.isclass(object): + # Some jython classes don't set __module__ + return module.__name__ == getattr(object, '__module__', None) + elif inspect.getmodule(object) is not None: + return module is inspect.getmodule(object) + elif hasattr(object, '__module__'): + return module.__name__ == object.__module__ + elif isinstance(object, property): + return True # [XX] no way not be sure. + else: + raise ValueError("object must be a class or function") + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to `tests`. + """ + if self._verbose: + print 'Finding tests in %s' % name + + # If we've already processed this object, then ignore it. + if id(obj) in seen: + return + seen[id(obj)] = 1 + + # Find a test for this object, and add it to the list of tests. + test = self._get_test(obj, name, module, globs, source_lines) + if test is not None: + tests.append(test) + + # Look for tests in a module's contained objects. + if inspect.ismodule(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Check if this contained object should be ignored. + if self._filter(val, name, valname): + continue + valname = '%s.%s' % (name, valname) + # Recurse to functions & classes. 
+ if ((inspect.isfunction(val) or inspect.isclass(val)) and + self._from_module(module, val)): + self._find(tests, val, valname, module, source_lines, + globs, seen) + + # Look for tests in a module's __test__ dictionary. + if inspect.ismodule(obj) and self._recurse: + for valname, val in getattr(obj, '__test__', {}).items(): + if not isinstance(valname, basestring): + raise ValueError("DocTestFinder.find: __test__ keys " + "must be strings: %r" % + (type(valname),)) + if not (inspect.isfunction(val) or inspect.isclass(val) or + inspect.ismethod(val) or inspect.ismodule(val) or + isinstance(val, basestring)): + raise ValueError("DocTestFinder.find: __test__ values " + "must be strings, functions, methods, " + "classes, or modules: %r" % + (type(val),)) + valname = '%s.__test__.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + # Look for tests in a class's contained objects. + if inspect.isclass(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Check if this contained object should be ignored. + if self._filter(val, name, valname): + continue + # Special handling for staticmethod/classmethod. + if isinstance(val, staticmethod): + val = getattr(obj, valname) + if isinstance(val, classmethod): + val = getattr(obj, valname).im_func + + # Recurse to methods, properties, and nested classes. + if ((inspect.isfunction(val) or inspect.isclass(val) or + isinstance(val, property)) and + self._from_module(module, val)): + valname = '%s.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + def _get_test(self, obj, name, module, globs, source_lines): + """ + Return a DocTest for the given object, if it defines a docstring; + otherwise, return None. + """ + # Extract the object's docstring. If it doesn't have one, + # then return None (no test for this object). + if isinstance(obj, basestring): + docstring = obj + else: + try: + if obj.__doc__ is None: + docstring = '' + else: + docstring = obj.__doc__ + if not isinstance(docstring, basestring): + docstring = str(docstring) + except (TypeError, AttributeError): + docstring = '' + + # Find the docstring's location in the file. + lineno = self._find_lineno(obj, source_lines) + + # Don't bother if the docstring is empty. + if self._exclude_empty and not docstring: + return None + + # Return a DocTest for this object. + if module is None: + filename = None + else: + filename = getattr(module, '__file__', module.__name__) + if filename[-4:] in (".pyc", ".pyo"): + filename = filename[:-1] + elif sys.platform.startswith('java') and \ + filename.endswith('$py.class'): + filename = '%s.py' % filename[:-9] + return self._parser.get_doctest(docstring, globs, name, + filename, lineno) + + def _find_lineno(self, obj, source_lines): + """ + Return a line number of the given object's docstring. Note: + this method assumes that the object has a docstring. + """ + lineno = None + + # Find the line number for modules. + if inspect.ismodule(obj): + lineno = 0 + + # Find the line number for classes. + # Note: this could be fooled if a class is defined multiple + # times in a single file. + if inspect.isclass(obj): + if source_lines is None: + return None + pat = re.compile(r'^\s*class\s*%s\b' % + getattr(obj, '__name__', '-')) + for i, line in enumerate(source_lines): + if pat.match(line): + lineno = i + break + + # Find the line number for functions & methods. 
+ if inspect.ismethod(obj): obj = obj.im_func + if inspect.isfunction(obj): obj = obj.func_code + if inspect.istraceback(obj): obj = obj.tb_frame + if inspect.isframe(obj): obj = obj.f_code + if inspect.iscode(obj): + lineno = getattr(obj, 'co_firstlineno', None)-1 + + # Find the line number where the docstring starts. Assume + # that it's the first line that begins with a quote mark. + # Note: this could be fooled by a multiline function + # signature, where a continuation line begins with a quote + # mark. + if lineno is not None: + if source_lines is None: + return lineno+1 + pat = re.compile('(^|.*:)\s*\w*("|\')') + for lineno in range(lineno, len(source_lines)): + if pat.match(source_lines[lineno]): + return lineno + + # We couldn't find the line number. + return None + +###################################################################### +## 5. DocTest Runner +###################################################################### + +class DocTestRunner: + # This divider string is used to separate failure messages, and to + # separate sections of the summary. + DIVIDER = "*" * 70 + + def __init__(self, checker=None, verbose=None, optionflags=0): + """ + Create a new test runner. + + Optional keyword arg `checker` is the `OutputChecker` that + should be used to compare the expected outputs and actual + outputs of doctest examples. + + Optional keyword arg 'verbose' prints lots of stuff if true, + only failures if false; by default, it's true iff '-v' is in + sys.argv. + + Optional argument `optionflags` can be used to control how the + test runner compares expected output to actual output, and how + it displays failures. See the documentation for `testmod` for + more information. + """ + self._checker = checker or OutputChecker() + if verbose is None: + verbose = '-v' in sys.argv + self._verbose = verbose + self.optionflags = optionflags + self.original_optionflags = optionflags + + # Keep track of the examples we've run. + self.tries = 0 + self.failures = 0 + self._name2ft = {} + + # Create a fake output target for capturing doctest output. + self._fakeout = _SpoofOut() + + #///////////////////////////////////////////////////////////////// + # Reporting methods + #///////////////////////////////////////////////////////////////// + + def report_start(self, out, test, example): + """ + Report that the test runner is about to process the given + example. (Only displays a message if verbose=True) + """ + if self._verbose: + if example.want: + out('Trying:\n' + _indent(example.source) + + 'Expecting:\n' + _indent(example.want)) + else: + out('Trying:\n' + _indent(example.source) + + 'Expecting nothing\n') + + def report_success(self, out, test, example, got): + """ + Report that the given example ran successfully. (Only + displays a message if verbose=True) + """ + if self._verbose: + out("ok\n") + + def report_failure(self, out, test, example, got): + """ + Report that the given example failed. + """ + out(self._failure_header(test, example) + + self._checker.output_difference(example, got, self.optionflags)) + + def report_unexpected_exception(self, out, test, example, exc_info): + """ + Report that the given example raised an unexpected exception. + """ + out(self._failure_header(test, example) + + 'Exception raised:\n' + _indent(_exception_traceback(exc_info))) + + def _failure_header(self, test, example): + out = [self.DIVIDER] + if test.filename: + if test.lineno is not None and example.lineno is not None: + lineno = test.lineno + example.lineno + 1 + else: + lineno = '?' 
+ out.append('File "%s", line %s, in %s' % + (test.filename, lineno, test.name)) + else: + out.append('Line %s, in %s' % (example.lineno+1, test.name)) + out.append('Failed example:') + source = example.source + out.append(_indent(source)) + return '\n'.join(out) + + #///////////////////////////////////////////////////////////////// + # DocTest Running + #///////////////////////////////////////////////////////////////// + + def __run(self, test, compileflags, out): + """ + Run the examples in `test`. Write the outcome of each example + with one of the `DocTestRunner.report_*` methods, using the + writer function `out`. `compileflags` is the set of compiler + flags that should be used to execute examples. Return a tuple + `(f, t)`, where `t` is the number of examples tried, and `f` + is the number of examples that failed. The examples are run + in the namespace `test.globs`. + """ + # Keep track of the number of failures and tries. + failures = tries = 0 + + # Save the option flags (since option directives can be used + # to modify them). + original_optionflags = self.optionflags + + SUCCESS, FAILURE, BOOM = range(3) # `outcome` state + + check = self._checker.check_output + + # Process each example. + for examplenum, example in enumerate(test.examples): + + # If REPORT_ONLY_FIRST_FAILURE is set, then supress + # reporting after the first failure. + quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and + failures > 0) + + # Merge in the example's options. + self.optionflags = original_optionflags + if example.options: + for (optionflag, val) in example.options.items(): + if val: + self.optionflags |= optionflag + else: + self.optionflags &= ~optionflag + + # Record that we started this example. + tries += 1 + if not quiet: + self.report_start(out, test, example) + + # Use a special filename for compile(), so we can retrieve + # the source code during interactive debugging (see + # __patched_linecache_getlines). + filename = '<doctest %s[%d]>' % (test.name, examplenum) + + # Run the example in the given context (globs), and record + # any exception that gets raised. (But don't intercept + # keyboard interrupts.) + try: + # Don't blink! This is where the user's code gets run. + exec compile(example.source, filename, "single", + compileflags, 1) in test.globs + self.debugger.set_continue() # ==== Example Finished ==== + exception = None + except KeyboardInterrupt: + raise + except: + exception = sys.exc_info() + self.debugger.set_continue() # ==== Example Finished ==== + + got = self._fakeout.getvalue() # the actual output + self._fakeout.truncate(0) + outcome = FAILURE # guilty until proved innocent or insane + + # If the example executed without raising any exceptions, + # verify its output. + if exception is None: + if check(example.want, got, self.optionflags): + outcome = SUCCESS + + # The example raised an exception: check if it was expected. + else: + exc_info = sys.exc_info() + exc_msg = traceback.format_exception_only(*exc_info[:2])[-1] + if not quiet: + got += _exception_traceback(exc_info) + + # If `example.exc_msg` is None, then we weren't expecting + # an exception. + if example.exc_msg is None: + outcome = BOOM + + # We expected an exception: see whether it matches. + elif check(example.exc_msg, exc_msg, self.optionflags): + outcome = SUCCESS + + # Another chance if they didn't care about the detail. 
+ elif self.optionflags & IGNORE_EXCEPTION_DETAIL: + m1 = re.match(r'[^:]*:', example.exc_msg) + m2 = re.match(r'[^:]*:', exc_msg) + if m1 and m2 and check(m1.group(0), m2.group(0), + self.optionflags): + outcome = SUCCESS + + # Report the outcome. + if outcome is SUCCESS: + if not quiet: + self.report_success(out, test, example, got) + elif outcome is FAILURE: + if not quiet: + self.report_failure(out, test, example, got) + failures += 1 + elif outcome is BOOM: + if not quiet: + self.report_unexpected_exception(out, test, example, + exc_info) + failures += 1 + else: + assert False, ("unknown outcome", outcome) + + # Restore the option flags (in case they were modified) + self.optionflags = original_optionflags + + # Record and return the number of failures and tries. + self.__record_outcome(test, failures, tries) + return failures, tries + + def __record_outcome(self, test, f, t): + """ + Record the fact that the given DocTest (`test`) generated `f` + failures out of `t` tried examples. + """ + f2, t2 = self._name2ft.get(test.name, (0,0)) + self._name2ft[test.name] = (f+f2, t+t2) + self.failures += f + self.tries += t + + __LINECACHE_FILENAME_RE = re.compile(r'<doctest ' + r'(?P<name>[\w\.]+)' + r'\[(?P<examplenum>\d+)\]>$') + def __patched_linecache_getlines(self, filename): + m = self.__LINECACHE_FILENAME_RE.match(filename) + if m and m.group('name') == self.test.name: + example = self.test.examples[int(m.group('examplenum'))] + return example.source.splitlines(True) + else: + return self.save_linecache_getlines(filename) + + def run(self, test, compileflags=None, out=None, clear_globs=True): + """ + Run the examples in `test`, and display the results using the + writer function `out`. + + The examples are run in the namespace `test.globs`. If + `clear_globs` is true (the default), then this namespace will + be cleared after the test runs, to help with garbage + collection. If you would like to examine the namespace after + the test completes, then use `clear_globs=False`. + + `compileflags` gives the set of flags that should be used by + the Python compiler when running the examples. If not + specified, then it will default to the set of future-import + flags that apply to `globs`. + + The output of each example is checked using + `DocTestRunner.check_output`, and the results are formatted by + the `DocTestRunner.report_*` methods. + """ + self.test = test + + if compileflags is None: + compileflags = _extract_future_flags(test.globs) + + save_stdout = sys.stdout + if out is None: + out = save_stdout.write + sys.stdout = self._fakeout + + # Patch pdb.set_trace to restore sys.stdout during interactive + # debugging (so it's not still redirected to self._fakeout). + # Note that the interactive output will go to *our* + # save_stdout, even if that's not the real sys.stdout; this + # allows us to write test cases for the set_trace behavior. + save_set_trace = pdb.set_trace + self.debugger = _OutputRedirectingPdb(save_stdout) + self.debugger.reset() + pdb.set_trace = self.debugger.set_trace + + # Patch linecache.getlines, so we can see the example's source + # when we're inside the debugger. 
+ self.save_linecache_getlines = linecache.getlines + linecache.getlines = self.__patched_linecache_getlines + + try: + return self.__run(test, compileflags, out) + finally: + sys.stdout = save_stdout + pdb.set_trace = save_set_trace + linecache.getlines = self.save_linecache_getlines + if clear_globs: + test.globs.clear() + + #///////////////////////////////////////////////////////////////// + # Summarization + #///////////////////////////////////////////////////////////////// + def summarize(self, verbose=None): + """ + Print a summary of all the test cases that have been run by + this DocTestRunner, and return a tuple `(f, t)`, where `f` is + the total number of failed examples, and `t` is the total + number of tried examples. + + The optional `verbose` argument controls how detailed the + summary is. If the verbosity is not specified, then the + DocTestRunner's verbosity is used. + """ + if verbose is None: + verbose = self._verbose + notests = [] + passed = [] + failed = [] + totalt = totalf = 0 + for x in self._name2ft.items(): + name, (f, t) = x + assert f <= t + totalt += t + totalf += f + if t == 0: + notests.append(name) + elif f == 0: + passed.append( (name, t) ) + else: + failed.append(x) + if verbose: + if notests: + print len(notests), "items had no tests:" + notests.sort() + for thing in notests: + print " ", thing + if passed: + print len(passed), "items passed all tests:" + passed.sort() + for thing, count in passed: + print " %3d tests in %s" % (count, thing) + if failed: + print self.DIVIDER + print len(failed), "items had failures:" + failed.sort() + for thing, (f, t) in failed: + print " %3d of %3d in %s" % (f, t, thing) + if verbose: + print totalt, "tests in", len(self._name2ft), "items." + print totalt - totalf, "passed and", totalf, "failed." + if totalf: + print "***Test Failed***", totalf, "failures." + elif verbose: + print "Test passed." + return totalf, totalt + + #///////////////////////////////////////////////////////////////// + # Backward compatibility cruft to maintain doctest.master. + #///////////////////////////////////////////////////////////////// + def merge(self, other): + d = self._name2ft + for name, (f, t) in other._name2ft.items(): + if name in d: + print "*** DocTestRunner.merge: '" + name + "' in both" \ + " testers; summing outcomes." + f2, t2 = d[name] + f = f + f2 + t = t + t2 + d[name] = f, t + +class OutputChecker: + """ + A class used to check the whether the actual output from a doctest + example matches the expected output. `OutputChecker` defines two + methods: `check_output`, which compares a given pair of outputs, + and returns true if they match; and `output_difference`, which + returns a string describing the differences between two outputs. + """ + def check_output(self, want, got, optionflags): + """ + Return True iff the actual output from an example (`got`) + matches the expected output (`want`). These strings are + always considered to match if they are identical; but + depending on what option flags the test runner is using, + several non-exact match types are also possible. See the + documentation for `TestRunner` for more information about + option flags. + """ + # Handle the common case first, for efficiency: + # if they're string-identical, always return true. + if got == want: + return True + + # The values True and False replaced 1 and 0 as the return + # value for boolean comparisons in Python 2.3. 
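+        # (Editorial aside: this keeps a pre-2.3 example such as
+        #     >>> 2 > 1
+        #     1
+        # passing on interpreters that now print 'True', unless the test
+        # author opts out with DONT_ACCEPT_TRUE_FOR_1.)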
+ if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): + if (got,want) == ("True\n", "1\n"): + return True + if (got,want) == ("False\n", "0\n"): + return True + + # <BLANKLINE> can be used as a special sequence to signify a + # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. + if not (optionflags & DONT_ACCEPT_BLANKLINE): + # Replace <BLANKLINE> in want with a blank line. + want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), + '', want) + # If a line in got contains only spaces, then remove the + # spaces. + got = re.sub('(?m)^\s*?$', '', got) + if got == want: + return True + + # This flag causes doctest to ignore any differences in the + # contents of whitespace strings. Note that this can be used + # in conjunction with the ELLIPSIS flag. + if optionflags & NORMALIZE_WHITESPACE: + got = ' '.join(got.split()) + want = ' '.join(want.split()) + if got == want: + return True + + # The ELLIPSIS flag says to let the sequence "..." in `want` + # match any substring in `got`. + if optionflags & ELLIPSIS: + if _ellipsis_match(want, got): + return True + + # We didn't find any match; return false. + return False + + # Should we do a fancy diff? + def _do_a_fancy_diff(self, want, got, optionflags): + # Not unless they asked for a fancy diff. + if not optionflags & (REPORT_UDIFF | + REPORT_CDIFF | + REPORT_NDIFF): + return False + + # If expected output uses ellipsis, a meaningful fancy diff is + # too hard ... or maybe not. In two real-life failures Tim saw, + # a diff was a major help anyway, so this is commented out. + # [todo] _ellipsis_match() knows which pieces do and don't match, + # and could be the basis for a kick-ass diff in this case. + ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want: + ## return False + + # ndiff does intraline difference marking, so can be useful even + # for 1-line differences. + if optionflags & REPORT_NDIFF: + return True + + # The other diff types need at least a few lines to be helpful. + return want.count('\n') > 2 and got.count('\n') > 2 + + def output_difference(self, example, got, optionflags): + """ + Return a string describing the differences between the + expected output for a given example (`example`) and the actual + output (`got`). `optionflags` is the set of option flags used + to compare `want` and `got`. + """ + want = example.want + # If <BLANKLINE>s are being used, then replace blank lines + # with <BLANKLINE> in the actual output string. + if not (optionflags & DONT_ACCEPT_BLANKLINE): + got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) + + # Check if we should use diff. + if self._do_a_fancy_diff(want, got, optionflags): + # Split want & got into lines. + want_lines = want.splitlines(True) # True == keep line ends + got_lines = got.splitlines(True) + # Use difflib to find their differences. + if optionflags & REPORT_UDIFF: + diff = difflib.unified_diff(want_lines, got_lines, n=2) + diff = list(diff)[2:] # strip the diff header + kind = 'unified diff with -expected +actual' + elif optionflags & REPORT_CDIFF: + diff = difflib.context_diff(want_lines, got_lines, n=2) + diff = list(diff)[2:] # strip the diff header + kind = 'context diff with expected followed by actual' + elif optionflags & REPORT_NDIFF: + engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) + diff = list(engine.compare(want_lines, got_lines)) + kind = 'ndiff with -expected +actual' + else: + assert 0, 'Bad diff option' + # Remove trailing whitespace on diff output. 
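+            # (Editorial aside: with REPORT_UDIFF, for example, the report
+            # returned below is shaped roughly like
+            #     Differences (unified diff with -expected +actual):
+            #         -line that was expected
+            #         +line that was actually produced
+            # i.e. the kind header followed by the indented diff body.)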
+ diff = [line.rstrip() + '\n' for line in diff] + return 'Differences (%s):\n' % kind + _indent(''.join(diff)) + + # If we're not using diff, then simply list the expected + # output followed by the actual output. + if want and got: + return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) + elif want: + return 'Expected:\n%sGot nothing\n' % _indent(want) + elif got: + return 'Expected nothing\nGot:\n%s' % _indent(got) + else: + return 'Expected nothing\nGot nothing\n' + +class DocTestFailure(Exception): + """A DocTest example has failed in debugging mode. + + The exception instance has variables: + + - test: the DocTest object being run + + - excample: the Example object that failed + + - got: the actual output + """ + def __init__(self, test, example, got): + self.test = test + self.example = example + self.got = got + + def __str__(self): + return str(self.test) + +class UnexpectedException(Exception): + """A DocTest example has encountered an unexpected exception + + The exception instance has variables: + + - test: the DocTest object being run + + - excample: the Example object that failed + + - exc_info: the exception info + """ + def __init__(self, test, example, exc_info): + self.test = test + self.example = example + self.exc_info = exc_info + + def __str__(self): + return str(self.test) + +class DebugRunner(DocTestRunner): + + def run(self, test, compileflags=None, out=None, clear_globs=True): + r = DocTestRunner.run(self, test, compileflags, out, False) + if clear_globs: + test.globs.clear() + return r + + def report_unexpected_exception(self, out, test, example, exc_info): + raise UnexpectedException(test, example, exc_info) + + def report_failure(self, out, test, example, got): + raise DocTestFailure(test, example, got) + +###################################################################### +## 6. Test Functions +###################################################################### +# These should be backwards compatible. + +# For backward compatibility, a global instance of a DocTestRunner +# class, updated by testmod. +master = None + +def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None, + report=True, optionflags=0, extraglobs=None, + raise_on_error=False, exclude_empty=False): + """m=None, name=None, globs=None, verbose=None, isprivate=None, + report=True, optionflags=0, extraglobs=None, raise_on_error=False, + exclude_empty=False + + Test examples in docstrings in functions and classes reachable + from module m (or the current module if m is not supplied), starting + with m.__doc__. Unless isprivate is specified, private names + are not skipped. + + Also test examples reachable from dict m.__test__ if it exists and is + not None. m.__test__ maps names to functions, classes and strings; + function and class docstrings are tested even if the name is private; + strings are tested directly, as if they were docstrings. + + Return (#failures, #tests). + + See doctest.__doc__ for an overview. + + Optional keyword arg "name" gives the name of the module; by default + use m.__name__. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use m.__dict__. A copy of this + dict is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. This is new in 2.4. 
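+
+    The most common invocation (shown here purely as an illustration) is the
+    short block placed at the bottom of a module:
+
+        if __name__ == "__main__":
+            import doctest
+            doctest.testmod()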
+ + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. This is new in 2.3. Possible values (see the + docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Deprecated in Python 2.4: + Optional keyword arg "isprivate" specifies a function used to + determine whether a name is private. The default function is + treat all functions as public. Optionally, "isprivate" can be + set to doctest.is_private to skip over functions marked as private + using the underscore naming convention; see its docs for details. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + if isprivate is not None: + warnings.warn("the isprivate argument is deprecated; " + "examine DocTestFinder.find() lists instead", + DeprecationWarning) + + # If no module was given, then use __main__. + if m is None: + # DWA - m will still be None if this wasn't invoked from the command + # line, in which case the following TypeError is about as good an error + # as we should expect + m = sys.modules.get('__main__') + + # Check that we were actually given a module. + if not inspect.ismodule(m): + raise TypeError("testmod: module required; %r" % (m,)) + + # If no name was given, then use the module's name. + if name is None: + name = m.__name__ + + # Find, parse, and run all tests in the given module. + finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty) + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + for test in finder.find(m, name, globs=globs, extraglobs=extraglobs): + runner.run(test) + + if report: + runner.summarize() + + if master is None: + master = runner + else: + master.merge(runner) + + return runner.failures, runner.tries + +def testfile(filename, module_relative=True, name=None, package=None, + globs=None, verbose=None, report=True, optionflags=0, + extraglobs=None, raise_on_error=False, parser=DocTestParser()): + """ + Test examples in the given file. Return (#failures, #tests). + + Optional keyword arg "module_relative" specifies how filenames + should be interpreted: + + - If "module_relative" is True (the default), then "filename" + specifies a module-relative path. By default, this path is + relative to the calling module's directory; but if the + "package" argument is specified, then it is relative to that + package. 
To ensure os-independence, "filename" should use + "/" characters to separate path segments, and should not + be an absolute path (i.e., it may not begin with "/"). + + - If "module_relative" is False, then "filename" specifies an + os-specific path. The path may be absolute or relative (to + the current working directory). + + Optional keyword arg "name" gives the name of the test; by default + use the file's basename. + + Optional keyword argument "package" is a Python package or the + name of a Python package whose directory should be used as the + base directory for a module relative filename. If no package is + specified, then the calling module's directory is used as the base + directory for module relative filenames. It is an error to + specify "package" if "module_relative" is False. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use {}. A copy of this dict + is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. Possible values (see the docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Optional keyword arg "parser" specifies a DocTestParser (or + subclass) that should be used to extract tests from the files. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path + if module_relative: + package = _normalize_module(package) + filename = _module_relative_path(package, filename) + + # If no name was given, then use the file's name. + if name is None: + name = os.path.basename(filename) + + # Assemble the globals. + if globs is None: + globs = {} + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + # Read the file, convert it to a test, and run it. 
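+    # (Editorial aside -- typical calls, shown only as illustrations:
+    #     doctest.testfile('example.txt')                   # module-relative
+    #     doctest.testfile('/tmp/example.txt', module_relative=False)
+    # by this point the path has already been resolved as described above.)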
+ s = open(filename).read() + test = parser.get_doctest(s, globs, name, filename, 0) + runner.run(test) + + if report: + runner.summarize() + + if master is None: + master = runner + else: + master.merge(runner) + + return runner.failures, runner.tries + +def run_docstring_examples(f, globs, verbose=False, name="NoName", + compileflags=None, optionflags=0): + """ + Test examples in the given object's docstring (`f`), using `globs` + as globals. Optional argument `name` is used in failure messages. + If the optional argument `verbose` is true, then generate output + even if there are no failures. + + `compileflags` gives the set of flags that should be used by the + Python compiler when running the examples. If not specified, then + it will default to the set of future-import flags that apply to + `globs`. + + Optional keyword arg `optionflags` specifies options for the + testing and output. See the documentation for `testmod` for more + information. + """ + # Find, parse, and run all tests in the given module. + finder = DocTestFinder(verbose=verbose, recurse=False) + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + for test in finder.find(f, name, globs=globs): + runner.run(test, compileflags=compileflags) + +###################################################################### +## 7. Tester +###################################################################### +# This is provided only for backwards compatibility. It's not +# actually used in any way. + +class Tester: + def __init__(self, mod=None, globs=None, verbose=None, + isprivate=None, optionflags=0): + + warnings.warn("class Tester is deprecated; " + "use class doctest.DocTestRunner instead", + DeprecationWarning, stacklevel=2) + if mod is None and globs is None: + raise TypeError("Tester.__init__: must specify mod or globs") + if mod is not None and not inspect.ismodule(mod): + raise TypeError("Tester.__init__: mod must be a module; %r" % + (mod,)) + if globs is None: + globs = mod.__dict__ + self.globs = globs + + self.verbose = verbose + self.isprivate = isprivate + self.optionflags = optionflags + self.testfinder = DocTestFinder(_namefilter=isprivate) + self.testrunner = DocTestRunner(verbose=verbose, + optionflags=optionflags) + + def runstring(self, s, name): + test = DocTestParser().get_doctest(s, self.globs, name, None, None) + if self.verbose: + print "Running string", name + (f,t) = self.testrunner.run(test) + if self.verbose: + print f, "of", t, "examples failed in string", name + return (f,t) + + def rundoc(self, object, name=None, module=None): + f = t = 0 + tests = self.testfinder.find(object, name, module=module, + globs=self.globs) + for test in tests: + (f2, t2) = self.testrunner.run(test) + (f,t) = (f+f2, t+t2) + return (f,t) + + def rundict(self, d, name, module=None): + import new + m = new.module(name) + m.__dict__.update(d) + if module is None: + module = False + return self.rundoc(m, name, module) + + def run__test__(self, d, name): + import new + m = new.module(name) + m.__test__ = d + return self.rundoc(m, name) + + def summarize(self, verbose=None): + return self.testrunner.summarize(verbose) + + def merge(self, other): + self.testrunner.merge(other.testrunner) + +###################################################################### +## 8. 
Unittest Support +###################################################################### + +_unittest_reportflags = 0 + +def set_unittest_reportflags(flags): + global _unittest_reportflags + + if (flags & REPORTING_FLAGS) != flags: + raise ValueError("Only reporting flags allowed", flags) + old = _unittest_reportflags + _unittest_reportflags = flags + return old + + +class DocTestCase(unittest.TestCase): + + def __init__(self, test, optionflags=0, setUp=None, tearDown=None, + checker=None): + + unittest.TestCase.__init__(self) + self._dt_optionflags = optionflags + self._dt_checker = checker + self._dt_test = test + self._dt_setUp = setUp + self._dt_tearDown = tearDown + + def setUp(self): + test = self._dt_test + + if self._dt_setUp is not None: + self._dt_setUp(test) + + def tearDown(self): + test = self._dt_test + + if self._dt_tearDown is not None: + self._dt_tearDown(test) + + test.globs.clear() + + def runTest(self): + test = self._dt_test + old = sys.stdout + new = StringIO() + optionflags = self._dt_optionflags + + if not (optionflags & REPORTING_FLAGS): + # The option flags don't include any reporting flags, + # so add the default reporting flags + optionflags |= _unittest_reportflags + + runner = DocTestRunner(optionflags=optionflags, + checker=self._dt_checker, verbose=False) + + try: + runner.DIVIDER = "-"*70 + failures, tries = runner.run( + test, out=new.write, clear_globs=False) + finally: + sys.stdout = old + + if failures: + raise self.failureException(self.format_failure(new.getvalue())) + + def format_failure(self, err): + test = self._dt_test + if test.lineno is None: + lineno = 'unknown line number' + else: + lineno = '%s' % test.lineno + lname = '.'.join(test.name.split('.')[-1:]) + return ('Failed doctest test for %s\n' + ' File "%s", line %s, in %s\n\n%s' + % (test.name, test.filename, lineno, lname, err) + ) + + def debug(self): + self.setUp() + runner = DebugRunner(optionflags=self._dt_optionflags, + checker=self._dt_checker, verbose=False) + runner.run(self._dt_test) + self.tearDown() + + def id(self): + return self._dt_test.name + + def __repr__(self): + name = self._dt_test.name.split('.') + return "%s (%s)" % (name[-1], '.'.join(name[:-1])) + + __str__ = __repr__ + + def shortDescription(self): + return "Doctest: " + self._dt_test.name + +def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, + **options): + """ + Convert doctest tests for a module to a unittest test suite. + + This converts each documentation string in a module that + contains doctest tests to a unittest test case. If any of the + tests in a doc string fail, then the test case fails. An exception + is raised showing the name of the file containing the test and a + (sometimes approximate) line number. + + The `module` argument provides the module to be tested. The argument + can be either a module or a module name. + + If no argument is given, the calling module is used. + + A number of options may be provided as keyword arguments: + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. 
+ + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. + """ + + if test_finder is None: + test_finder = DocTestFinder() + + module = _normalize_module(module) + tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) + if globs is None: + globs = module.__dict__ + if not tests: + # Why do we want to do this? Because it reveals a bug that might + # otherwise be hidden. + raise ValueError(module, "has no tests") + + tests.sort() + suite = unittest.TestSuite() + for test in tests: + if len(test.examples) == 0: + continue + if not test.filename: + filename = module.__file__ + if filename[-4:] in (".pyc", ".pyo"): + filename = filename[:-1] + elif sys.platform.startswith('java') and \ + filename.endswith('$py.class'): + filename = '%s.py' % filename[:-9] + test.filename = filename + suite.addTest(DocTestCase(test, **options)) + + return suite + +class DocFileCase(DocTestCase): + + def id(self): + return '_'.join(self._dt_test.name.split('.')) + + def __repr__(self): + return self._dt_test.filename + __str__ = __repr__ + + def format_failure(self, err): + return ('Failed doctest test for %s\n File "%s", line 0\n\n%s' + % (self._dt_test.name, self._dt_test.filename, err) + ) + +def DocFileTest(path, module_relative=True, package=None, + globs=None, parser=DocTestParser(), **options): + if globs is None: + globs = {} + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path. + if module_relative: + package = _normalize_module(package) + path = _module_relative_path(package, path) + + # Find the file and read it. + name = os.path.basename(path) + doc = open(path).read() + + # Convert it to a test, and wrap it in a DocFileCase. + test = parser.get_doctest(doc, globs, name, path, 0) + return DocFileCase(test, **options) + +def DocFileSuite(*paths, **kw): + """A unittest suite for one or more doctest files. + + The path to each doctest file is given as a string; the + interpretation of that string depends on the keyword argument + "module_relative". + + A number of options may be provided as keyword arguments: + + module_relative + If "module_relative" is True, then the given file paths are + interpreted as os-independent module-relative paths. By + default, these paths are relative to the calling module's + directory; but if the "package" argument is specified, then + they are relative to that package. To ensure os-independence, + "filename" should use "/" characters to separate path + segments, and may not be an absolute path (i.e., it may not + begin with "/"). + + If "module_relative" is False, then the given file paths are + interpreted as os-specific paths. These paths may be absolute + or relative (to the current working directory). + + package + A Python package or the name of a Python package whose directory + should be used as the base directory for module relative paths. + If "package" is not specified, then the calling module's + directory is used as the base directory for module relative + filenames. It is an error to specify "package" if + "module_relative" is False. + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. 
The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. + + parser + A DocTestParser (or subclass) that should be used to extract + tests from the files. + """ + suite = unittest.TestSuite() + + # We do this here so that _normalize_module is called at the right + # level. If it were called in DocFileTest, then this function + # would be the caller and we might guess the package incorrectly. + if kw.get('module_relative', True): + kw['package'] = _normalize_module(kw.get('package')) + + for path in paths: + suite.addTest(DocFileTest(path, **kw)) + + return suite + +###################################################################### +## 9. Debugging Support +###################################################################### + +def script_from_examples(s): + output = [] + for piece in DocTestParser().parse(s): + if isinstance(piece, Example): + # Add the example's source code (strip trailing NL) + output.append(piece.source[:-1]) + # Add the expected output: + want = piece.want + if want: + output.append('# Expected:') + output += ['## '+l for l in want.split('\n')[:-1]] + else: + # Add non-example text. + output += [_comment_line(l) + for l in piece.split('\n')[:-1]] + + # Trim junk on both ends. + while output and output[-1] == '#': + output.pop() + while output and output[0] == '#': + output.pop(0) + # Combine the output, and return it. + # Add a courtesy newline to prevent exec from choking (see bug #1172785) + return '\n'.join(output) + '\n' + +def testsource(module, name): + """Extract the test sources from a doctest docstring as a script. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the doc string with tests to be debugged. + """ + module = _normalize_module(module) + tests = DocTestFinder().find(module) + test = [t for t in tests if t.name == name] + if not test: + raise ValueError(name, "not found in tests") + test = test[0] + testsrc = script_from_examples(test.docstring) + return testsrc + +def debug_src(src, pm=False, globs=None): + """Debug a single doctest docstring, in argument `src`'""" + testsrc = script_from_examples(src) + debug_script(testsrc, pm, globs) + +def debug_script(src, pm=False, globs=None): + "Debug a test script. `src` is the script, as a string." + import pdb + + # Note that tempfile.NameTemporaryFile() cannot be used. As the + # docs say, a file so created cannot be opened by name a second time + # on modern Windows boxes, and execfile() needs to open it. + srcfilename = tempfile.mktemp(".py", "doctestdebug") + f = open(srcfilename, 'w') + f.write(src) + f.close() + + try: + if globs: + globs = globs.copy() + else: + globs = {} + + if pm: + try: + execfile(srcfilename, globs, globs) + except: + print sys.exc_info()[1] + pdb.post_mortem(sys.exc_info()[2]) + else: + # Note that %r is vital here. '%s' instead can, e.g., cause + # backslashes to get treated as metacharacters on Windows. + pdb.run("execfile(%r)" % srcfilename, globs, globs) + + finally: + os.remove(srcfilename) + +def debug(module, name, pm=False): + """Debug a single doctest docstring. 
+ + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the docstring with tests to be debugged. + """ + module = _normalize_module(module) + testsrc = testsource(module, name) + debug_script(testsrc, pm, module.__dict__) + + +__test__ = {} diff --git a/scripts/external_libs/nose-1.3.4/nose/failure.py b/scripts/external_libs/nose-1.3.4/nose/failure.py new file mode 100755 index 00000000..c5fabfda --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/failure.py @@ -0,0 +1,42 @@ +import logging +import unittest +from traceback import format_tb +from nose.pyversion import is_base_exception + +log = logging.getLogger(__name__) + + +__all__ = ['Failure'] + + +class Failure(unittest.TestCase): + """Unloadable or unexecutable test. + + A Failure case is placed in a test suite to indicate the presence of a + test that could not be loaded or executed. A common example is a test + module that fails to import. + + """ + __test__ = False # do not collect + def __init__(self, exc_class, exc_val, tb=None, address=None): + log.debug("A failure! %s %s %s", exc_class, exc_val, format_tb(tb)) + self.exc_class = exc_class + self.exc_val = exc_val + self.tb = tb + self._address = address + unittest.TestCase.__init__(self) + + def __str__(self): + return "Failure: %s (%s)" % ( + getattr(self.exc_class, '__name__', self.exc_class), self.exc_val) + + def address(self): + return self._address + + def runTest(self): + if self.tb is not None: + if is_base_exception(self.exc_val): + raise self.exc_val, None, self.tb + raise self.exc_class, self.exc_val, self.tb + else: + raise self.exc_class(self.exc_val) diff --git a/scripts/external_libs/nose-1.3.4/nose/importer.py b/scripts/external_libs/nose-1.3.4/nose/importer.py new file mode 100755 index 00000000..e677658c --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/importer.py @@ -0,0 +1,167 @@ +"""Implements an importer that looks only in specific path (ignoring +sys.path), and uses a per-path cache in addition to sys.modules. This is +necessary because test modules in different directories frequently have the +same names, which means that the first loaded would mask the rest when using +the builtin importer. +""" +import logging +import os +import sys +from nose.config import Config + +from imp import find_module, load_module, acquire_lock, release_lock + +log = logging.getLogger(__name__) + +try: + _samefile = os.path.samefile +except AttributeError: + def _samefile(src, dst): + return (os.path.normcase(os.path.realpath(src)) == + os.path.normcase(os.path.realpath(dst))) + + +class Importer(object): + """An importer class that does only path-specific imports. That + is, the given module is not searched for on sys.path, but only at + the path or in the directory specified. + """ + def __init__(self, config=None): + if config is None: + config = Config() + self.config = config + + def importFromPath(self, path, fqname): + """Import a dotted-name package whose tail is at path. In other words, + given foo.bar and path/to/foo/bar.py, import foo from path/to/foo then + bar from path/to/foo/bar, returning bar. 
+ """ + # find the base dir of the package + path_parts = os.path.normpath(os.path.abspath(path)).split(os.sep) + name_parts = fqname.split('.') + if path_parts[-1] == '__init__.py': + path_parts.pop() + path_parts = path_parts[:-(len(name_parts))] + dir_path = os.sep.join(path_parts) + # then import fqname starting from that dir + return self.importFromDir(dir_path, fqname) + + def importFromDir(self, dir, fqname): + """Import a module *only* from path, ignoring sys.path and + reloading if the version in sys.modules is not the one we want. + """ + dir = os.path.normpath(os.path.abspath(dir)) + log.debug("Import %s from %s", fqname, dir) + + # FIXME reimplement local per-dir cache? + + # special case for __main__ + if fqname == '__main__': + return sys.modules[fqname] + + if self.config.addPaths: + add_path(dir, self.config) + + path = [dir] + parts = fqname.split('.') + part_fqname = '' + mod = parent = fh = None + + for part in parts: + if part_fqname == '': + part_fqname = part + else: + part_fqname = "%s.%s" % (part_fqname, part) + try: + acquire_lock() + log.debug("find module part %s (%s) in %s", + part, part_fqname, path) + fh, filename, desc = find_module(part, path) + old = sys.modules.get(part_fqname) + if old is not None: + # test modules frequently have name overlap; make sure + # we get a fresh copy of anything we are trying to load + # from a new path + log.debug("sys.modules has %s as %s", part_fqname, old) + if (self.sameModule(old, filename) + or (self.config.firstPackageWins and + getattr(old, '__path__', None))): + mod = old + else: + del sys.modules[part_fqname] + mod = load_module(part_fqname, fh, filename, desc) + else: + mod = load_module(part_fqname, fh, filename, desc) + finally: + if fh: + fh.close() + release_lock() + if parent: + setattr(parent, part, mod) + if hasattr(mod, '__path__'): + path = mod.__path__ + parent = mod + return mod + + def _dirname_if_file(self, filename): + # We only take the dirname if we have a path to a non-dir, + # because taking the dirname of a symlink to a directory does not + # give the actual directory parent. + if os.path.isdir(filename): + return filename + else: + return os.path.dirname(filename) + + def sameModule(self, mod, filename): + mod_paths = [] + if hasattr(mod, '__path__'): + for path in mod.__path__: + mod_paths.append(self._dirname_if_file(path)) + elif hasattr(mod, '__file__'): + mod_paths.append(self._dirname_if_file(mod.__file__)) + else: + # builtin or other module-like object that + # doesn't have __file__; must be new + return False + new_path = self._dirname_if_file(filename) + for mod_path in mod_paths: + log.debug( + "module already loaded? mod: %s new: %s", + mod_path, new_path) + if _samefile(mod_path, new_path): + return True + return False + + +def add_path(path, config=None): + """Ensure that the path, or the root of the current package (if + path is in a package), is in sys.path. + """ + + # FIXME add any src-looking dirs seen too... 
need to get config for that + + log.debug('Add path %s' % path) + if not path: + return [] + added = [] + parent = os.path.dirname(path) + if (parent + and os.path.exists(os.path.join(path, '__init__.py'))): + added.extend(add_path(parent, config)) + elif not path in sys.path: + log.debug("insert %s into sys.path", path) + sys.path.insert(0, path) + added.append(path) + if config and config.srcDirs: + for dirname in config.srcDirs: + dirpath = os.path.join(path, dirname) + if os.path.isdir(dirpath): + sys.path.insert(0, dirpath) + added.append(dirpath) + return added + + +def remove_path(path): + log.debug('Remove path %s' % path) + if path in sys.path: + sys.path.remove(path) diff --git a/scripts/external_libs/nose-1.3.4/nose/inspector.py b/scripts/external_libs/nose-1.3.4/nose/inspector.py new file mode 100755 index 00000000..a6c4a3e3 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/inspector.py @@ -0,0 +1,207 @@ +"""Simple traceback introspection. Used to add additional information to +AssertionErrors in tests, so that failure messages may be more informative. +""" +import inspect +import logging +import re +import sys +import textwrap +import tokenize + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +log = logging.getLogger(__name__) + +def inspect_traceback(tb): + """Inspect a traceback and its frame, returning source for the expression + where the exception was raised, with simple variable replacement performed + and the line on which the exception was raised marked with '>>' + """ + log.debug('inspect traceback %s', tb) + + # we only want the innermost frame, where the exception was raised + while tb.tb_next: + tb = tb.tb_next + + frame = tb.tb_frame + lines, exc_line = tbsource(tb) + + # figure out the set of lines to grab. + inspect_lines, mark_line = find_inspectable_lines(lines, exc_line) + src = StringIO(textwrap.dedent(''.join(inspect_lines))) + exp = Expander(frame.f_locals, frame.f_globals) + + while inspect_lines: + try: + for tok in tokenize.generate_tokens(src.readline): + exp(*tok) + except tokenize.TokenError, e: + # this can happen if our inspectable region happens to butt up + # against the end of a construct like a docstring with the closing + # """ on separate line + log.debug("Tokenizer error: %s", e) + inspect_lines.pop(0) + mark_line -= 1 + src = StringIO(textwrap.dedent(''.join(inspect_lines))) + exp = Expander(frame.f_locals, frame.f_globals) + continue + break + padded = [] + if exp.expanded_source: + exp_lines = exp.expanded_source.split('\n') + ep = 0 + for line in exp_lines: + if ep == mark_line: + padded.append('>> ' + line) + else: + padded.append(' ' + line) + ep += 1 + return '\n'.join(padded) + + +def tbsource(tb, context=6): + """Get source from a traceback object. + + A tuple of two things is returned: a list of lines of context from + the source code, and the index of the current line within that list. + The optional second argument specifies the number of lines of context + to return, which are centered around the current line. + + .. Note :: + This is adapted from inspect.py in the python 2.4 standard library, + since a bug in the 2.3 version of inspect prevents it from correctly + locating source lines in a traceback frame. 
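+
+    For example (illustrative figures, assuming the file is long enough that
+    no clamping occurs): an exception raised on line 10 with the default
+    context of 6 returns source lines 7-12 and index 3, which points back at
+    line 10 within that slice.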
+ """ + + lineno = tb.tb_lineno + frame = tb.tb_frame + + if context > 0: + start = lineno - 1 - context//2 + log.debug("lineno: %s start: %s", lineno, start) + + try: + lines, dummy = inspect.findsource(frame) + except IOError: + lines, index = [''], 0 + else: + all_lines = lines + start = max(start, 1) + start = max(0, min(start, len(lines) - context)) + lines = lines[start:start+context] + index = lineno - 1 - start + + # python 2.5 compat: if previous line ends in a continuation, + # decrement start by 1 to match 2.4 behavior + if sys.version_info >= (2, 5) and index > 0: + while lines[index-1].strip().endswith('\\'): + start -= 1 + lines = all_lines[start:start+context] + else: + lines, index = [''], 0 + log.debug("tbsource lines '''%s''' around index %s", lines, index) + return (lines, index) + + +def find_inspectable_lines(lines, pos): + """Find lines in home that are inspectable. + + Walk back from the err line up to 3 lines, but don't walk back over + changes in indent level. + + Walk forward up to 3 lines, counting \ separated lines as 1. Don't walk + over changes in indent level (unless part of an extended line) + """ + cnt = re.compile(r'\\[\s\n]*$') + df = re.compile(r':[\s\n]*$') + ind = re.compile(r'^(\s*)') + toinspect = [] + home = lines[pos] + home_indent = ind.match(home).groups()[0] + + before = lines[max(pos-3, 0):pos] + before.reverse() + after = lines[pos+1:min(pos+4, len(lines))] + + for line in before: + if ind.match(line).groups()[0] == home_indent: + toinspect.append(line) + else: + break + toinspect.reverse() + toinspect.append(home) + home_pos = len(toinspect)-1 + continued = cnt.search(home) + for line in after: + if ((continued or ind.match(line).groups()[0] == home_indent) + and not df.search(line)): + toinspect.append(line) + continued = cnt.search(line) + else: + break + log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos) + return toinspect, home_pos + + +class Expander: + """Simple expression expander. Uses tokenize to find the names and + expands any that can be looked up in the frame. + """ + def __init__(self, locals, globals): + self.locals = locals + self.globals = globals + self.lpos = None + self.expanded_source = '' + + def __call__(self, ttype, tok, start, end, line): + # TODO + # deal with unicode properly + + # TODO + # Dealing with instance members + # always keep the last thing seen + # if the current token is a dot, + # get ready to getattr(lastthing, this thing) on the + # next call. + + if self.lpos is not None: + if start[1] >= self.lpos: + self.expanded_source += ' ' * (start[1]-self.lpos) + elif start[1] < self.lpos: + # newline, indent correctly + self.expanded_source += ' ' * start[1] + self.lpos = end[1] + + if ttype == tokenize.INDENT: + pass + elif ttype == tokenize.NAME: + # Clean this junk up + try: + val = self.locals[tok] + if callable(val): + val = tok + else: + val = repr(val) + except KeyError: + try: + val = self.globals[tok] + if callable(val): + val = tok + else: + val = repr(val) + + except KeyError: + val = tok + # FIXME... 
not sure how to handle things like funcs, classes + # FIXME this is broken for some unicode strings + self.expanded_source += val + else: + self.expanded_source += tok + # if this is the end of the line and the line ends with + # \, then tack a \ and newline onto the output + # print line[end[1]:] + if re.match(r'\s+\\\n', line[end[1]:]): + self.expanded_source += ' \\\n' diff --git a/scripts/external_libs/nose-1.3.4/nose/loader.py b/scripts/external_libs/nose-1.3.4/nose/loader.py new file mode 100755 index 00000000..966b6dc7 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/loader.py @@ -0,0 +1,619 @@ +""" +Test Loader +----------- + +nose's test loader implements the same basic functionality as its +superclass, unittest.TestLoader, but extends it by more liberal +interpretations of what may be a test and how a test may be named. +""" +from __future__ import generators + +import logging +import os +import sys +import unittest +import types +from inspect import isfunction +from nose.pyversion import unbound_method, ismethod +from nose.case import FunctionTestCase, MethodTestCase +from nose.failure import Failure +from nose.config import Config +from nose.importer import Importer, add_path, remove_path +from nose.selector import defaultSelector, TestAddress +from nose.util import func_lineno, getpackage, isclass, isgenerator, \ + ispackage, regex_last_key, resolve_name, transplant_func, \ + transplant_class, test_address +from nose.suite import ContextSuiteFactory, ContextList, LazySuite +from nose.pyversion import sort_list, cmp_to_key + + +log = logging.getLogger(__name__) +#log.setLevel(logging.DEBUG) + +# for efficiency and easier mocking +op_normpath = os.path.normpath +op_abspath = os.path.abspath +op_join = os.path.join +op_isdir = os.path.isdir +op_isfile = os.path.isfile + + +__all__ = ['TestLoader', 'defaultTestLoader'] + + +class TestLoader(unittest.TestLoader): + """Test loader that extends unittest.TestLoader to: + + * Load tests from test-like functions and classes that are not + unittest.TestCase subclasses + * Find and load test modules in a directory + * Support tests that are generators + * Support easy extensions of or changes to that behavior through plugins + """ + config = None + importer = None + workingDir = None + selector = None + suiteClass = None + + def __init__(self, config=None, importer=None, workingDir=None, + selector=None): + """Initialize a test loader. + + Parameters (all optional): + + * config: provide a `nose.config.Config`_ or other config class + instance; if not provided a `nose.config.Config`_ with + default values is used. + * importer: provide an importer instance that implements + `importFromPath`. If not provided, a + `nose.importer.Importer`_ is used. + * workingDir: the directory to which file and module names are + relative. If not provided, assumed to be the current working + directory. + * selector: a selector class or instance. If a class is + provided, it will be instantiated with one argument, the + current config. If not provided, a `nose.selector.Selector`_ + is used. 
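+
+        A bare ``TestLoader()`` should therefore work out of the box: it
+        falls back to a fresh config, an importer built from that config,
+        the config's working directory and the default selector.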
+ """ + if config is None: + config = Config() + if importer is None: + importer = Importer(config=config) + if workingDir is None: + workingDir = config.workingDir + if selector is None: + selector = defaultSelector(config) + elif isclass(selector): + selector = selector(config) + self.config = config + self.importer = importer + self.workingDir = op_normpath(op_abspath(workingDir)) + self.selector = selector + if config.addPaths: + add_path(workingDir, config) + self.suiteClass = ContextSuiteFactory(config=config) + + self._visitedPaths = set([]) + + unittest.TestLoader.__init__(self) + + def getTestCaseNames(self, testCaseClass): + """Override to select with selector, unless + config.getTestCaseNamesCompat is True + """ + if self.config.getTestCaseNamesCompat: + return unittest.TestLoader.getTestCaseNames(self, testCaseClass) + + def wanted(attr, cls=testCaseClass, sel=self.selector): + item = getattr(cls, attr, None) + if isfunction(item): + item = unbound_method(cls, item) + elif not ismethod(item): + return False + return sel.wantMethod(item) + + cases = filter(wanted, dir(testCaseClass)) + + # add runTest if nothing else picked + if not cases and hasattr(testCaseClass, 'runTest'): + cases = ['runTest'] + if self.sortTestMethodsUsing: + sort_list(cases, cmp_to_key(self.sortTestMethodsUsing)) + return cases + + def _haveVisited(self, path): + # For cases where path is None, we always pretend we haven't visited + # them. + if path is None: + return False + + return path in self._visitedPaths + + def _addVisitedPath(self, path): + if path is not None: + self._visitedPaths.add(path) + + def loadTestsFromDir(self, path): + """Load tests from the directory at path. This is a generator + -- each suite of tests from a module or other file is yielded + and is expected to be executed before the next file is + examined. + """ + log.debug("load from dir %s", path) + plugins = self.config.plugins + plugins.beforeDirectory(path) + if self.config.addPaths: + paths_added = add_path(path, self.config) + + entries = os.listdir(path) + sort_list(entries, regex_last_key(self.config.testMatch)) + for entry in entries: + # this hard-coded initial-dot test will be removed: + # http://code.google.com/p/python-nose/issues/detail?id=82 + if entry.startswith('.'): + continue + entry_path = op_abspath(op_join(path, entry)) + is_file = op_isfile(entry_path) + wanted = False + if is_file: + is_dir = False + wanted = self.selector.wantFile(entry_path) + else: + is_dir = op_isdir(entry_path) + if is_dir: + # this hard-coded initial-underscore test will be removed: + # http://code.google.com/p/python-nose/issues/detail?id=82 + if entry.startswith('_'): + continue + wanted = self.selector.wantDirectory(entry_path) + is_package = ispackage(entry_path) + + # Python 3.3 now implements PEP 420: Implicit Namespace Packages. + # As a result, it's now possible that parent paths that have a + # segment with the same basename as our package ends up + # in module.__path__. So we have to keep track of what we've + # visited, and not-revisit them again. 
+ if wanted and not self._haveVisited(entry_path): + self._addVisitedPath(entry_path) + if is_file: + plugins.beforeContext() + if entry.endswith('.py'): + yield self.loadTestsFromName( + entry_path, discovered=True) + else: + yield self.loadTestsFromFile(entry_path) + plugins.afterContext() + elif is_package: + # Load the entry as a package: given the full path, + # loadTestsFromName() will figure it out + yield self.loadTestsFromName( + entry_path, discovered=True) + else: + # Another test dir in this one: recurse lazily + yield self.suiteClass( + lambda: self.loadTestsFromDir(entry_path)) + tests = [] + for test in plugins.loadTestsFromDir(path): + tests.append(test) + # TODO: is this try/except needed? + try: + if tests: + yield self.suiteClass(tests) + except (KeyboardInterrupt, SystemExit): + raise + except: + yield self.suiteClass([Failure(*sys.exc_info())]) + + # pop paths + if self.config.addPaths: + for p in paths_added: + remove_path(p) + plugins.afterDirectory(path) + + def loadTestsFromFile(self, filename): + """Load tests from a non-module file. Default is to raise a + ValueError; plugins may implement `loadTestsFromFile` to + provide a list of tests loaded from the file. + """ + log.debug("Load from non-module file %s", filename) + try: + tests = [test for test in + self.config.plugins.loadTestsFromFile(filename)] + if tests: + # Plugins can yield False to indicate that they were + # unable to load tests from a file, but it was not an + # error -- the file just had no tests to load. + tests = filter(None, tests) + return self.suiteClass(tests) + else: + # Nothing was able to even try to load from this file + open(filename, 'r').close() # trigger os error + raise ValueError("Unable to load tests from file %s" + % filename) + except (KeyboardInterrupt, SystemExit): + raise + except: + exc = sys.exc_info() + return self.suiteClass( + [Failure(exc[0], exc[1], exc[2], + address=(filename, None, None))]) + + def loadTestsFromGenerator(self, generator, module): + """Lazy-load tests from a generator function. The generator function + may yield either: + + * a callable, or + * a function name resolvable within the same module + """ + def generate(g=generator, m=module): + try: + for test in g(): + test_func, arg = self.parseGeneratedTest(test) + if not callable(test_func): + test_func = getattr(m, test_func) + yield FunctionTestCase(test_func, arg=arg, descriptor=g) + except KeyboardInterrupt: + raise + except: + exc = sys.exc_info() + yield Failure(exc[0], exc[1], exc[2], + address=test_address(generator)) + return self.suiteClass(generate, context=generator, can_split=False) + + def loadTestsFromGeneratorMethod(self, generator, cls): + """Lazy-load tests from a generator method. 
+ + This is more complicated than loading from a generator function, + since a generator method may yield: + + * a function + * a bound or unbound method, or + * a method name + """ + # convert the unbound generator method + # into a bound method so it can be called below + if hasattr(generator, 'im_class'): + cls = generator.im_class + inst = cls() + method = generator.__name__ + generator = getattr(inst, method) + + def generate(g=generator, c=cls): + try: + for test in g(): + test_func, arg = self.parseGeneratedTest(test) + if not callable(test_func): + test_func = unbound_method(c, getattr(c, test_func)) + if ismethod(test_func): + yield MethodTestCase(test_func, arg=arg, descriptor=g) + elif callable(test_func): + # In this case we're forcing the 'MethodTestCase' + # to run the inline function as its test call, + # but using the generator method as the 'method of + # record' (so no need to pass it as the descriptor) + yield MethodTestCase(g, test=test_func, arg=arg) + else: + yield Failure( + TypeError, + "%s is not a callable or method" % test_func) + except KeyboardInterrupt: + raise + except: + exc = sys.exc_info() + yield Failure(exc[0], exc[1], exc[2], + address=test_address(generator)) + return self.suiteClass(generate, context=generator, can_split=False) + + def loadTestsFromModule(self, module, path=None, discovered=False): + """Load all tests from module and return a suite containing + them. If the module has been discovered and is not test-like, + the suite will be empty by default, though plugins may add + their own tests. + """ + log.debug("Load from module %s", module) + tests = [] + test_classes = [] + test_funcs = [] + # For *discovered* modules, we only load tests when the module looks + # testlike. For modules we've been directed to load, we always + # look for tests. (discovered is set to True by loadTestsFromDir) + if not discovered or self.selector.wantModule(module): + for item in dir(module): + test = getattr(module, item, None) + # print "Check %s (%s) in %s" % (item, test, module.__name__) + if isclass(test): + if self.selector.wantClass(test): + test_classes.append(test) + elif isfunction(test) and self.selector.wantFunction(test): + test_funcs.append(test) + sort_list(test_classes, lambda x: x.__name__) + sort_list(test_funcs, func_lineno) + tests = map(lambda t: self.makeTest(t, parent=module), + test_classes + test_funcs) + + # Now, descend into packages + # FIXME can or should this be lazy? + # is this syntax 2.2 compatible? + module_paths = getattr(module, '__path__', []) + if path: + path = os.path.realpath(path) + for module_path in module_paths: + log.debug("Load tests from module path %s?", module_path) + log.debug("path: %s os.path.realpath(%s): %s", + path, module_path, os.path.realpath(module_path)) + if (self.config.traverseNamespace or not path) or \ + os.path.realpath(module_path).startswith(path): + # Egg files can be on sys.path, so make sure the path is a + # directory before trying to load from it. + if os.path.isdir(module_path): + tests.extend(self.loadTestsFromDir(module_path)) + + for test in self.config.plugins.loadTestsFromModule(module, path): + tests.append(test) + + return self.suiteClass(ContextList(tests, context=module)) + + def loadTestsFromName(self, name, module=None, discovered=False): + """Load tests from the entity with the given name. + + The name may indicate a file, directory, module, or any object + within a module. See `nose.util.split_test_name` for details on + test name parsing. 
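+
+        Typical names (listed here only as illustrations) include
+        ``pkg.test_module``, ``pkg.test_module:TestClass.test_method`` and
+        ``/path/to/test_file.py:test_func``.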
+ """ + # FIXME refactor this method into little bites? + log.debug("load from %s (%s)", name, module) + + suite = self.suiteClass + + # give plugins first crack + plug_tests = self.config.plugins.loadTestsFromName(name, module) + if plug_tests: + return suite(plug_tests) + + addr = TestAddress(name, workingDir=self.workingDir) + if module: + # Two cases: + # name is class.foo + # The addr will be incorrect, since it thinks class.foo is + # a dotted module name. It's actually a dotted attribute + # name. In this case we want to use the full submitted + # name as the name to load from the module. + # name is module:class.foo + # The addr will be correct. The part we want is the part after + # the :, which is in addr.call. + if addr.call: + name = addr.call + parent, obj = self.resolve(name, module) + if (isclass(parent) + and getattr(parent, '__module__', None) != module.__name__ + and not isinstance(obj, Failure)): + parent = transplant_class(parent, module.__name__) + obj = getattr(parent, obj.__name__) + log.debug("parent %s obj %s module %s", parent, obj, module) + if isinstance(obj, Failure): + return suite([obj]) + else: + return suite(ContextList([self.makeTest(obj, parent)], + context=parent)) + else: + if addr.module: + try: + if addr.filename is None: + module = resolve_name(addr.module) + else: + self.config.plugins.beforeImport( + addr.filename, addr.module) + # FIXME: to support module.name names, + # do what resolve-name does and keep trying to + # import, popping tail of module into addr.call, + # until we either get an import or run out of + # module parts + try: + module = self.importer.importFromPath( + addr.filename, addr.module) + finally: + self.config.plugins.afterImport( + addr.filename, addr.module) + except (KeyboardInterrupt, SystemExit): + raise + except: + exc = sys.exc_info() + return suite([Failure(exc[0], exc[1], exc[2], + address=addr.totuple())]) + if addr.call: + return self.loadTestsFromName(addr.call, module) + else: + return self.loadTestsFromModule( + module, addr.filename, + discovered=discovered) + elif addr.filename: + path = addr.filename + if addr.call: + package = getpackage(path) + if package is None: + return suite([ + Failure(ValueError, + "Can't find callable %s in file %s: " + "file is not a python module" % + (addr.call, path), + address=addr.totuple())]) + return self.loadTestsFromName(addr.call, module=package) + else: + if op_isdir(path): + # In this case we *can* be lazy since we know + # that each module in the dir will be fully + # loaded before its tests are executed; we + # also know that we're not going to be asked + # to load from . and ./some_module.py *as part + # of this named test load* + return LazySuite( + lambda: self.loadTestsFromDir(path)) + elif op_isfile(path): + return self.loadTestsFromFile(path) + else: + return suite([ + Failure(OSError, "No such file %s" % path, + address=addr.totuple())]) + else: + # just a function? what to do? I think it can only be + # handled when module is not None + return suite([ + Failure(ValueError, "Unresolvable test name %s" % name, + address=addr.totuple())]) + + def loadTestsFromNames(self, names, module=None): + """Load tests from all names, returning a suite containing all + tests. 
+ """ + plug_res = self.config.plugins.loadTestsFromNames(names, module) + if plug_res: + suite, names = plug_res + if suite: + return self.suiteClass([ + self.suiteClass(suite), + unittest.TestLoader.loadTestsFromNames(self, names, module) + ]) + return unittest.TestLoader.loadTestsFromNames(self, names, module) + + def loadTestsFromTestCase(self, testCaseClass): + """Load tests from a unittest.TestCase subclass. + """ + cases = [] + plugins = self.config.plugins + for case in plugins.loadTestsFromTestCase(testCaseClass): + cases.append(case) + # For efficiency in the most common case, just call and return from + # super. This avoids having to extract cases and rebuild a context + # suite when there are no plugin-contributed cases. + if not cases: + return super(TestLoader, self).loadTestsFromTestCase(testCaseClass) + cases.extend( + [case for case in + super(TestLoader, self).loadTestsFromTestCase(testCaseClass)]) + return self.suiteClass(cases) + + def loadTestsFromTestClass(self, cls): + """Load tests from a test class that is *not* a unittest.TestCase + subclass. + + In this case, we can't depend on the class's `__init__` taking method + name arguments, so we have to compose a MethodTestCase for each + method in the class that looks testlike. + """ + def wanted(attr, cls=cls, sel=self.selector): + item = getattr(cls, attr, None) + if isfunction(item): + item = unbound_method(cls, item) + elif not ismethod(item): + return False + return sel.wantMethod(item) + cases = [self.makeTest(getattr(cls, case), cls) + for case in filter(wanted, dir(cls))] + for test in self.config.plugins.loadTestsFromTestClass(cls): + cases.append(test) + return self.suiteClass(ContextList(cases, context=cls)) + + def makeTest(self, obj, parent=None): + try: + return self._makeTest(obj, parent) + except (KeyboardInterrupt, SystemExit): + raise + except: + exc = sys.exc_info() + try: + addr = test_address(obj) + except KeyboardInterrupt: + raise + except: + addr = None + return Failure(exc[0], exc[1], exc[2], address=addr) + + def _makeTest(self, obj, parent=None): + """Given a test object and its parent, return a test case + or test suite. + """ + plug_tests = [] + try: + addr = test_address(obj) + except KeyboardInterrupt: + raise + except: + addr = None + for test in self.config.plugins.makeTest(obj, parent): + plug_tests.append(test) + # TODO: is this try/except needed? + try: + if plug_tests: + return self.suiteClass(plug_tests) + except (KeyboardInterrupt, SystemExit): + raise + except: + exc = sys.exc_info() + return Failure(exc[0], exc[1], exc[2], address=addr) + + if isfunction(obj) and parent and not isinstance(parent, types.ModuleType): + # This is a Python 3.x 'unbound method'. Wrap it with its + # associated class.. 
+ obj = unbound_method(parent, obj) + + if isinstance(obj, unittest.TestCase): + return obj + elif isclass(obj): + if parent and obj.__module__ != parent.__name__: + obj = transplant_class(obj, parent.__name__) + if issubclass(obj, unittest.TestCase): + return self.loadTestsFromTestCase(obj) + else: + return self.loadTestsFromTestClass(obj) + elif ismethod(obj): + if parent is None: + parent = obj.__class__ + if issubclass(parent, unittest.TestCase): + return parent(obj.__name__) + else: + if isgenerator(obj): + return self.loadTestsFromGeneratorMethod(obj, parent) + else: + return MethodTestCase(obj) + elif isfunction(obj): + if parent and obj.__module__ != parent.__name__: + obj = transplant_func(obj, parent.__name__) + if isgenerator(obj): + return self.loadTestsFromGenerator(obj, parent) + else: + return FunctionTestCase(obj) + else: + return Failure(TypeError, + "Can't make a test from %s" % obj, + address=addr) + + def resolve(self, name, module): + """Resolve name within module + """ + obj = module + parts = name.split('.') + for part in parts: + parent, obj = obj, getattr(obj, part, None) + if obj is None: + # no such test + obj = Failure(ValueError, "No such test %s" % name) + return parent, obj + + def parseGeneratedTest(self, test): + """Given the yield value of a test generator, return a func and args. + + This is used in the two loadTestsFromGenerator* methods. + + """ + if not isinstance(test, tuple): # yield test + test_func, arg = (test, tuple()) + elif len(test) == 1: # yield (test,) + test_func, arg = (test[0], tuple()) + else: # yield test, foo, bar, ... + assert len(test) > 1 # sanity check + test_func, arg = (test[0], test[1:]) + return test_func, arg + +defaultTestLoader = TestLoader + diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py b/scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py new file mode 100755 index 00000000..08ee8f32 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py @@ -0,0 +1,190 @@ +""" +Writing Plugins +--------------- + +nose supports plugins for test collection, selection, observation and +reporting. There are two basic rules for plugins: + +* Plugin classes should subclass :class:`nose.plugins.Plugin`. + +* Plugins may implement any of the methods described in the class + :doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that + this class is for documentary purposes only; plugins may not subclass + IPluginInterface. + +Hello World +=========== + +Here's a basic plugin. It doesn't do much so read on for more ideas or dive +into the :doc:`IPluginInterface <interface>` to see all available hooks. + +.. code-block:: python + + import logging + import os + + from nose.plugins import Plugin + + log = logging.getLogger('nose.plugins.helloworld') + + class HelloWorld(Plugin): + name = 'helloworld' + + def options(self, parser, env=os.environ): + super(HelloWorld, self).options(parser, env=env) + + def configure(self, options, conf): + super(HelloWorld, self).configure(options, conf) + if not self.enabled: + return + + def finalize(self, result): + log.info('Hello pluginized world!') + +Registering +=========== + +.. Note:: + Important note: the following applies only to the default + plugin manager. Other plugin managers may use different means to + locate and load plugins. + +For nose to find a plugin, it must be part of a package that uses +setuptools_, and the plugin must be included in the entry points defined +in the setup.py for the package: + +.. 
code-block:: python + + setup(name='Some plugin', + # ... + entry_points = { + 'nose.plugins.0.10': [ + 'someplugin = someplugin:SomePlugin' + ] + }, + # ... + ) + +Once the package is installed with install or develop, nose will be able +to load the plugin. + +.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools + +Registering a plugin without setuptools +======================================= + +It is currently possible to register a plugin programmatically by +creating a custom nose runner like this : + +.. code-block:: python + + import nose + from yourplugin import YourPlugin + + if __name__ == '__main__': + nose.main(addplugins=[YourPlugin()]) + +Defining options +================ + +All plugins must implement the methods ``options(self, parser, env)`` +and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin +that want the standard options should call the superclass methods. + +nose uses optparse.OptionParser from the standard library to parse +arguments. A plugin's ``options()`` method receives a parser +instance. It's good form for a plugin to use that instance only to add +additional arguments that take only long arguments (--like-this). Most +of nose's built-in arguments get their default value from an environment +variable. + +A plugin's ``configure()`` method receives the parsed ``OptionParser`` options +object, as well as the current config object. Plugins should configure their +behavior based on the user-selected settings, and may raise exceptions +if the configured behavior is nonsensical. + +Logging +======= + +nose uses the logging classes from the standard library. To enable users +to view debug messages easily, plugins should use ``logging.getLogger()`` to +acquire a logger in the ``nose.plugins`` namespace. + +Recipes +======= + +* Writing a plugin that monitors or controls test result output + + Implement any or all of ``addError``, ``addFailure``, etc., to monitor test + results. If you also want to monitor output, implement + ``setOutputStream`` and keep a reference to the output stream. If you + want to prevent the builtin ``TextTestResult`` output, implement + ``setOutputSteam`` and *return a dummy stream*. The default output will go + to the dummy stream, while you send your desired output to the real stream. + + Example: `examples/html_plugin/htmlplug.py`_ + +* Writing a plugin that handles exceptions + + Subclass :doc:`ErrorClassPlugin <errorclasses>`. + + Examples: :doc:`nose.plugins.deprecated <deprecated>`, + :doc:`nose.plugins.skip <skip>` + +* Writing a plugin that adds detail to error reports + + Implement ``formatError`` and/or ``formatFailure``. The error tuple + you return (error class, error message, traceback) will replace the + original error tuple. + + Examples: :doc:`nose.plugins.capture <capture>`, + :doc:`nose.plugins.failuredetail <failuredetail>` + +* Writing a plugin that loads tests from files other than python modules + + Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``, + return True for files that you want to examine for tests. In + ``loadTestsFromFile``, for those files, return an iterable + containing TestCases (or yield them as you find them; + ``loadTestsFromFile`` may also be a generator). + + Example: :doc:`nose.plugins.doctests <doctests>` + +* Writing a plugin that prints a report + + Implement ``begin`` if you need to perform setup before testing + begins. Implement ``report`` and output your report to the provided stream. 
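  A minimal sketch of such a plugin (the plugin name and the counting
  logic are illustrative only) might look like:

  .. code-block:: python

      from nose.plugins import Plugin

      class CountReport(Plugin):
          name = 'count-report'

          def begin(self):
              self.seen = 0

          def beforeTest(self, test):
              self.seen += 1

          def report(self, stream):
              stream.write('count-report: %d tests seen\n' % self.seen)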
+ + Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>` + +* Writing a plugin that selects or rejects tests + + Implement any or all ``want*`` methods. Return False to reject the test + candidate, True to accept it -- which means that the test candidate + will pass through the rest of the system, so you must be prepared to + load tests from it if tests can't be loaded by the core loader or + another plugin -- and None if you don't care. + + Examples: :doc:`nose.plugins.attrib <attrib>`, + :doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>` + + +More Examples +============= + +See any builtin plugin or example plugin in the examples_ directory in +the nose source distribution. There is a list of third-party plugins +`on jottit`_. + +.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py +.. _examples: http://python-nose.googlecode.com/svn/trunk/examples +.. _on jottit: http://nose-plugins.jottit.com/ + +""" +from nose.plugins.base import Plugin +from nose.plugins.manager import * +from nose.plugins.plugintest import PluginTester + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py b/scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py new file mode 100755 index 00000000..1ccd7773 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py @@ -0,0 +1,45 @@ +"""Use the AllModules plugin by passing ``--all-modules`` or setting the +NOSE_ALL_MODULES environment variable to enable collection and execution of +tests in all python modules. Normal nose behavior is to look for tests only in +modules that match testMatch. + +More information: :doc:`../doc_tests/test_allmodules/test_allmodules` + +.. warning :: + + This plugin can have surprising interactions with plugins that load tests + from what nose normally considers non-test modules, such as + the :doc:`doctest plugin <doctests>`. This is because any given + object in a module can't be loaded both by a plugin and the normal nose + :class:`test loader <nose.loader.TestLoader>`. Also, if you have functions + or classes in non-test modules that look like tests but aren't, you will + likely see errors as nose attempts to run them as tests. + +""" + +import os +from nose.plugins.base import Plugin + +class AllModules(Plugin): + """Collect tests from all python modules. + """ + def options(self, parser, env): + """Register commandline options. + """ + env_opt = 'NOSE_ALL_MODULES' + parser.add_option('--all-modules', + action="store_true", + dest=self.enableOpt, + default=env.get(env_opt), + help="Enable plugin %s: %s [%s]" % + (self.__class__.__name__, self.help(), env_opt)) + + def wantFile(self, file): + """Override to return True for all files ending with .py""" + # always want .py files + if file.endswith('.py'): + return True + + def wantModule(self, module): + """Override return True for all modules""" + return True diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py b/scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py new file mode 100755 index 00000000..3d4422a2 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py @@ -0,0 +1,286 @@ +"""Attribute selector plugin. + +Oftentimes when testing you will want to select tests based on +criteria rather then simply by filename. For example, you might want +to run all tests except for the slow ones. 
You can do this with the +Attribute selector plugin by setting attributes on your test methods. +Here is an example: + +.. code-block:: python + + def test_big_download(): + import urllib + # commence slowness... + + test_big_download.slow = 1 + +Once you've assigned an attribute ``slow = 1`` you can exclude that +test and all other tests having the slow attribute by running :: + + $ nosetests -a '!slow' + +There is also a decorator available for you that will set attributes. +Here's how to set ``slow=1`` like above with the decorator: + +.. code-block:: python + + from nose.plugins.attrib import attr + @attr('slow') + def test_big_download(): + import urllib + # commence slowness... + +And here's how to set an attribute with a specific value: + +.. code-block:: python + + from nose.plugins.attrib import attr + @attr(speed='slow') + def test_big_download(): + import urllib + # commence slowness... + +This test could be run with :: + + $ nosetests -a speed=slow + +In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes +on all its test methods at once. For example: + +.. code-block:: python + + from nose.plugins.attrib import attr + @attr(speed='slow') + class MyTestCase: + def test_long_integration(self): + pass + def test_end_to_end_something(self): + pass + +Below is a reference to the different syntaxes available. + +Simple syntax +------------- + +Examples of using the ``-a`` and ``--attr`` options: + +* ``nosetests -a status=stable`` + Only runs tests with attribute "status" having value "stable" + +* ``nosetests -a priority=2,status=stable`` + Runs tests having both attributes and values + +* ``nosetests -a priority=2 -a slow`` + Runs tests that match either attribute + +* ``nosetests -a tags=http`` + If a test's ``tags`` attribute was a list and it contained the value + ``http`` then it would be run + +* ``nosetests -a slow`` + Runs tests with the attribute ``slow`` if its value does not equal False + (False, [], "", etc...) + +* ``nosetests -a '!slow'`` + Runs tests that do NOT have the attribute ``slow`` or have a ``slow`` + attribute that is equal to False + **NOTE**: + if your shell (like bash) interprets '!' as a special character make sure to + put single quotes around it. + +Expression Evaluation +--------------------- + +Examples using the ``-A`` and ``--eval-attr`` options: + +* ``nosetests -A "not slow"`` + Evaluates the Python expression "not slow" and runs the test if True + +* ``nosetests -A "(priority > 5) and not slow"`` + Evaluates a complex Python expression and runs the test if True + +""" +import inspect +import logging +import os +import sys +from inspect import isfunction +from nose.plugins.base import Plugin +from nose.util import tolist + +log = logging.getLogger('nose.plugins.attrib') +compat_24 = sys.version_info >= (2, 4) + +def attr(*args, **kwargs): + """Decorator that adds attributes to classes or functions + for use with the Attribute (-a) plugin. + """ + def wrap_ob(ob): + for name in args: + setattr(ob, name, True) + for name, value in kwargs.iteritems(): + setattr(ob, name, value) + return ob + return wrap_ob + +def get_method_attr(method, cls, attr_name, default = False): + """Look up an attribute on a method/ function. + If the attribute isn't found there, looking it up in the + method's class, if any. 
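    For illustration (the class and attribute names are made up)::

        @attr(speed='slow')
        class DownloadTests:
            def test_small(self):
                pass
            test_small.speed = 'fast'

            def test_large(self):
                pass

        # get_method_attr(DownloadTests.test_small, DownloadTests, 'speed')
        #   -> 'fast'  (the method's own attribute wins)
        # get_method_attr(DownloadTests.test_large, DownloadTests, 'speed')
        #   -> 'slow'  (falls back to the class attribute)
        # get_method_attr(DownloadTests.test_large, DownloadTests, 'priority')
        #   -> False   (not set anywhere, so the default is returned)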
+ """ + Missing = object() + value = getattr(method, attr_name, Missing) + if value is Missing and cls is not None: + value = getattr(cls, attr_name, Missing) + if value is Missing: + return default + return value + + +class ContextHelper: + """Object that can act as context dictionary for eval and looks up + names as attributes on a method/ function and its class. + """ + def __init__(self, method, cls): + self.method = method + self.cls = cls + + def __getitem__(self, name): + return get_method_attr(self.method, self.cls, name) + + +class AttributeSelector(Plugin): + """Selects test cases to be run based on their attributes. + """ + + def __init__(self): + Plugin.__init__(self) + self.attribs = [] + + def options(self, parser, env): + """Register command line options""" + parser.add_option("-a", "--attr", + dest="attr", action="append", + default=env.get('NOSE_ATTR'), + metavar="ATTR", + help="Run only tests that have attributes " + "specified by ATTR [NOSE_ATTR]") + # disable in < 2.4: eval can't take needed args + if compat_24: + parser.add_option("-A", "--eval-attr", + dest="eval_attr", metavar="EXPR", action="append", + default=env.get('NOSE_EVAL_ATTR'), + help="Run only tests for whose attributes " + "the Python expression EXPR evaluates " + "to True [NOSE_EVAL_ATTR]") + + def configure(self, options, config): + """Configure the plugin and system, based on selected options. + + attr and eval_attr may each be lists. + + self.attribs will be a list of lists of tuples. In that list, each + list is a group of attributes, all of which must match for the rule to + match. + """ + self.attribs = [] + + # handle python eval-expression parameter + if compat_24 and options.eval_attr: + eval_attr = tolist(options.eval_attr) + for attr in eval_attr: + # "<python expression>" + # -> eval(expr) in attribute context must be True + def eval_in_context(expr, obj, cls): + return eval(expr, None, ContextHelper(obj, cls)) + self.attribs.append([(attr, eval_in_context)]) + + # attribute requirements are a comma separated list of + # 'key=value' pairs + if options.attr: + std_attr = tolist(options.attr) + for attr in std_attr: + # all attributes within an attribute group must match + attr_group = [] + for attrib in attr.strip().split(","): + # don't die on trailing comma + if not attrib: + continue + items = attrib.split("=", 1) + if len(items) > 1: + # "name=value" + # -> 'str(obj.name) == value' must be True + key, value = items + else: + key = items[0] + if key[0] == "!": + # "!name" + # 'bool(obj.name)' must be False + key = key[1:] + value = False + else: + # "name" + # -> 'bool(obj.name)' must be True + value = True + attr_group.append((key, value)) + self.attribs.append(attr_group) + if self.attribs: + self.enabled = True + + def validateAttrib(self, method, cls = None): + """Verify whether a method has the required attributes + The method is considered a match if it matches all attributes + for any attribute group. + .""" + # TODO: is there a need for case-sensitive value comparison? 
+ any = False + for group in self.attribs: + match = True + for key, value in group: + attr = get_method_attr(method, cls, key) + if callable(value): + if not value(key, method, cls): + match = False + break + elif value is True: + # value must exist and be True + if not bool(attr): + match = False + break + elif value is False: + # value must not exist or be False + if bool(attr): + match = False + break + elif type(attr) in (list, tuple): + # value must be found in the list attribute + if not str(value).lower() in [str(x).lower() + for x in attr]: + match = False + break + else: + # value must match, convert to string and compare + if (value != attr + and str(value).lower() != str(attr).lower()): + match = False + break + any = any or match + if any: + # not True because we don't want to FORCE the selection of the + # item, only say that it is acceptable + return None + return False + + def wantFunction(self, function): + """Accept the function if its attributes match. + """ + return self.validateAttrib(function) + + def wantMethod(self, method): + """Accept the method if its attributes match. + """ + try: + cls = method.im_class + except AttributeError: + return False + return self.validateAttrib(method, cls) diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/base.py b/scripts/external_libs/nose-1.3.4/nose/plugins/base.py new file mode 100755 index 00000000..f09beb69 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/base.py @@ -0,0 +1,725 @@ +import os +import textwrap +from optparse import OptionConflictError +from warnings import warn +from nose.util import tolist + +class Plugin(object): + """Base class for nose plugins. It's recommended but not *necessary* to + subclass this class to create a plugin, but all plugins *must* implement + `options(self, parser, env)` and `configure(self, options, conf)`, and + must have the attributes `enabled`, `name` and `score`. The `name` + attribute may contain hyphens ('-'). + + Plugins should not be enabled by default. + + Subclassing Plugin (and calling the superclass methods in + __init__, configure, and options, if you override them) will give + your plugin some friendly default behavior: + + * A --with-$name option will be added to the command line interface + to enable the plugin, and a corresponding environment variable + will be used as the default value. The plugin class's docstring + will be used as the help for this option. + * The plugin will not be enabled unless this option is selected by + the user. + """ + can_configure = False + enabled = False + enableOpt = None + name = None + score = 100 + + def __init__(self): + if self.name is None: + self.name = self.__class__.__name__.lower() + if self.enableOpt is None: + self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_') + + def addOptions(self, parser, env=None): + """Add command-line options for this plugin. + + The base plugin class adds --with-$name by default, used to enable the + plugin. + + .. warning :: Don't implement addOptions unless you want to override + all default option handling behavior, including + warnings for conflicting options. Implement + :meth:`options + <nose.plugins.base.IPluginInterface.options>` + instead. + """ + self.add_options(parser, env) + + def add_options(self, parser, env=None): + """Non-camel-case version of func name for backwards compatibility. + + .. warning :: + + DEPRECATED: Do not use this method, + use :meth:`options <nose.plugins.base.IPluginInterface.options>` + instead. 
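        The preferred pattern looks roughly like this (the plugin and option
        names are illustrative)::

            from nose.plugins import Plugin

            class MyPlugin(Plugin):
                name = 'my-plugin'

                def options(self, parser, env):
                    super(MyPlugin, self).options(parser, env)
                    parser.add_option('--my-plugin-verbose',
                                      action='store_true',
                                      dest='my_plugin_verbose',
                                      default=env.get('NOSE_MY_PLUGIN_VERBOSE'),
                                      help='Extra output from my-plugin '
                                           '[NOSE_MY_PLUGIN_VERBOSE]')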
+ + """ + # FIXME raise deprecation warning if wasn't called by wrapper + if env is None: + env = os.environ + try: + self.options(parser, env) + self.can_configure = True + except OptionConflictError, e: + warn("Plugin %s has conflicting option string: %s and will " + "be disabled" % (self, e), RuntimeWarning) + self.enabled = False + self.can_configure = False + + def options(self, parser, env): + """Register commandline options. + + Implement this method for normal options behavior with protection from + OptionConflictErrors. If you override this method and want the default + --with-$name option to be registered, be sure to call super(). + """ + env_opt = 'NOSE_WITH_%s' % self.name.upper() + env_opt = env_opt.replace('-', '_') + parser.add_option("--with-%s" % self.name, + action="store_true", + dest=self.enableOpt, + default=env.get(env_opt), + help="Enable plugin %s: %s [%s]" % + (self.__class__.__name__, self.help(), env_opt)) + + def configure(self, options, conf): + """Configure the plugin and system, based on selected options. + + The base plugin class sets the plugin to enabled if the enable option + for the plugin (self.enableOpt) is true. + """ + if not self.can_configure: + return + self.conf = conf + if hasattr(options, self.enableOpt): + self.enabled = getattr(options, self.enableOpt) + + def help(self): + """Return help for this plugin. This will be output as the help + section of the --with-$name option that enables the plugin. + """ + if self.__class__.__doc__: + # doc sections are often indented; compress the spaces + return textwrap.dedent(self.__class__.__doc__) + return "(no help available)" + + # Compatiblity shim + def tolist(self, val): + warn("Plugin.tolist is deprecated. Use nose.util.tolist instead", + DeprecationWarning) + return tolist(val) + + +class IPluginInterface(object): + """ + IPluginInterface describes the plugin API. Do not subclass or use this + class directly. + """ + def __new__(cls, *arg, **kw): + raise TypeError("IPluginInterface class is for documentation only") + + def addOptions(self, parser, env): + """Called to allow plugin to register command-line options with the + parser. DO NOT return a value from this method unless you want to stop + all other plugins from setting their options. + + .. warning :: + + DEPRECATED -- implement + :meth:`options <nose.plugins.base.IPluginInterface.options>` instead. + """ + pass + add_options = addOptions + add_options.deprecated = True + + def addDeprecated(self, test): + """Called when a deprecated test is seen. DO NOT return a value + unless you want to stop other plugins from seeing the deprecated + test. + + .. warning :: DEPRECATED -- check error class in addError instead + """ + pass + addDeprecated.deprecated = True + + def addError(self, test, err): + """Called when a test raises an uncaught exception. DO NOT return a + value unless you want to stop other plugins from seeing that the + test has raised an error. + + :param test: the test case + :type test: :class:`nose.case.Test` + :param err: sys.exc_info() tuple + :type err: 3-tuple + """ + pass + addError.changed = True + + def addFailure(self, test, err): + """Called when a test fails. DO NOT return a value unless you + want to stop other plugins from seeing that the test has failed. + + :param test: the test case + :type test: :class:`nose.case.Test` + :param err: 3-tuple + :type err: sys.exc_info() tuple + """ + pass + addFailure.changed = True + + def addSkip(self, test): + """Called when a test is skipped. 
DO NOT return a value unless + you want to stop other plugins from seeing the skipped test. + + .. warning:: DEPRECATED -- check error class in addError instead + """ + pass + addSkip.deprecated = True + + def addSuccess(self, test): + """Called when a test passes. DO NOT return a value unless you + want to stop other plugins from seeing the passing test. + + :param test: the test case + :type test: :class:`nose.case.Test` + """ + pass + addSuccess.changed = True + + def afterContext(self): + """Called after a context (generally a module) has been + lazy-loaded, imported, setup, had its tests loaded and + executed, and torn down. + """ + pass + afterContext._new = True + + def afterDirectory(self, path): + """Called after all tests have been loaded from directory at path + and run. + + :param path: the directory that has finished processing + :type path: string + """ + pass + afterDirectory._new = True + + def afterImport(self, filename, module): + """Called after module is imported from filename. afterImport + is called even if the import failed. + + :param filename: The file that was loaded + :type filename: string + :param module: The name of the module + :type module: string + """ + pass + afterImport._new = True + + def afterTest(self, test): + """Called after the test has been run and the result recorded + (after stopTest). + + :param test: the test case + :type test: :class:`nose.case.Test` + """ + pass + afterTest._new = True + + def beforeContext(self): + """Called before a context (generally a module) is + examined. Because the context is not yet loaded, plugins don't + get to know what the context is; so any context operations + should use a stack that is pushed in `beforeContext` and popped + in `afterContext` to ensure they operate symmetrically. + + `beforeContext` and `afterContext` are mainly useful for tracking + and restoring global state around possible changes from within a + context, whatever the context may be. If you need to operate on + contexts themselves, see `startContext` and `stopContext`, which + are passed the context in question, but are called after + it has been loaded (imported in the module case). + """ + pass + beforeContext._new = True + + def beforeDirectory(self, path): + """Called before tests are loaded from directory at path. + + :param path: the directory that is about to be processed + """ + pass + beforeDirectory._new = True + + def beforeImport(self, filename, module): + """Called before module is imported from filename. + + :param filename: The file that will be loaded + :param module: The name of the module found in file + :type module: string + """ + beforeImport._new = True + + def beforeTest(self, test): + """Called before the test is run (before startTest). + + :param test: the test case + :type test: :class:`nose.case.Test` + """ + pass + beforeTest._new = True + + def begin(self): + """Called before any tests are collected or run. Use this to + perform any setup needed before testing begins. + """ + pass + + def configure(self, options, conf): + """Called after the command line has been parsed, with the + parsed options and the config container. Here, implement any + config storage or changes to state or operation that are set + by command line options. + + DO NOT return a value from this method unless you want to + stop all other plugins from being configured. + """ + pass + + def finalize(self, result): + """Called after all report output, including output from all + plugins, has been sent to the stream. 
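        For instance (a sketch only; the plugin name and logger are made up),
        it can be paired with ``beforeTest``/``afterTest`` to summarize
        per-test timings::

            import logging
            import time
            from nose.plugins import Plugin

            log = logging.getLogger('nose.plugins.slowest')

            class Slowest(Plugin):
                name = 'slowest'

                def begin(self):
                    self._timings = []

                def beforeTest(self, test):
                    self._start = time.time()

                def afterTest(self, test):
                    self._timings.append((time.time() - self._start, str(test)))

                def finalize(self, result):
                    for took, name in sorted(self._timings, reverse=True)[:5]:
                        log.info('%.3fs %s', took, name)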
Use this to print final + test results or perform final cleanup. Return None to allow + other plugins to continue printing, or any other value to stop + them. + + :param result: test result object + + .. Note:: When tests are run under a test runner other than + :class:`nose.core.TextTestRunner`, such as + via ``python setup.py test``, this method may be called + **before** the default report output is sent. + """ + pass + + def describeTest(self, test): + """Return a test description. + + Called by :meth:`nose.case.Test.shortDescription`. + + :param test: the test case + :type test: :class:`nose.case.Test` + """ + pass + describeTest._new = True + + def formatError(self, test, err): + """Called in result.addError, before plugin.addError. If you + want to replace or modify the error tuple, return a new error + tuple, otherwise return err, the original error tuple. + + :param test: the test case + :type test: :class:`nose.case.Test` + :param err: sys.exc_info() tuple + :type err: 3-tuple + """ + pass + formatError._new = True + formatError.chainable = True + # test arg is not chainable + formatError.static_args = (True, False) + + def formatFailure(self, test, err): + """Called in result.addFailure, before plugin.addFailure. If you + want to replace or modify the error tuple, return a new error + tuple, otherwise return err, the original error tuple. + + :param test: the test case + :type test: :class:`nose.case.Test` + :param err: sys.exc_info() tuple + :type err: 3-tuple + """ + pass + formatFailure._new = True + formatFailure.chainable = True + # test arg is not chainable + formatFailure.static_args = (True, False) + + def handleError(self, test, err): + """Called on addError. To handle the error yourself and prevent normal + error processing, return a true value. + + :param test: the test case + :type test: :class:`nose.case.Test` + :param err: sys.exc_info() tuple + :type err: 3-tuple + """ + pass + handleError._new = True + + def handleFailure(self, test, err): + """Called on addFailure. To handle the failure yourself and + prevent normal failure processing, return a true value. + + :param test: the test case + :type test: :class:`nose.case.Test` + :param err: sys.exc_info() tuple + :type err: 3-tuple + """ + pass + handleFailure._new = True + + def loadTestsFromDir(self, path): + """Return iterable of tests from a directory. May be a + generator. Each item returned must be a runnable + unittest.TestCase (or subclass) instance or suite instance. + Return None if your plugin cannot collect any tests from + directory. + + :param path: The path to the directory. + """ + pass + loadTestsFromDir.generative = True + loadTestsFromDir._new = True + + def loadTestsFromModule(self, module, path=None): + """Return iterable of tests in a module. May be a + generator. Each item returned must be a runnable + unittest.TestCase (or subclass) instance. + Return None if your plugin cannot + collect any tests from module. + + :param module: The module object + :type module: python module + :param path: the path of the module to search, to distinguish from + namespace package modules + + .. note:: + + NEW. The ``path`` parameter will only be passed by nose 0.11 + or above. + """ + pass + loadTestsFromModule.generative = True + + def loadTestsFromName(self, name, module=None, importPath=None): + """Return tests in this file or module. Return None if you are not able + to load any tests, or an iterable if you are. May be a + generator. + + :param name: The test name. 
May be a file or module name plus a test + callable. Use split_test_name to split into parts. Or it might + be some crazy name of your own devising, in which case, do + whatever you want. + :param module: Module from which the name is to be loaded + :param importPath: Path from which file (must be a python module) was + found + + .. warning:: DEPRECATED: this argument will NOT be passed. + """ + pass + loadTestsFromName.generative = True + + def loadTestsFromNames(self, names, module=None): + """Return a tuple of (tests loaded, remaining names). Return + None if you are not able to load any tests. Multiple plugins + may implement loadTestsFromNames; the remaining name list from + each will be passed to the next as input. + + :param names: List of test names. + :type names: iterable + :param module: Module from which the names are to be loaded + """ + pass + loadTestsFromNames._new = True + loadTestsFromNames.chainable = True + + def loadTestsFromFile(self, filename): + """Return tests in this file. Return None if you are not + interested in loading any tests, or an iterable if you are and + can load some. May be a generator. *If you are interested in + loading tests from the file and encounter no errors, but find + no tests, yield False or return [False].* + + .. Note:: This method replaces loadTestsFromPath from the 0.9 + API. + + :param filename: The full path to the file or directory. + """ + pass + loadTestsFromFile.generative = True + loadTestsFromFile._new = True + + def loadTestsFromPath(self, path): + """ + .. warning:: DEPRECATED -- use loadTestsFromFile instead + """ + pass + loadTestsFromPath.deprecated = True + + def loadTestsFromTestCase(self, cls): + """Return tests in this test case class. Return None if you are + not able to load any tests, or an iterable if you are. May be a + generator. + + :param cls: The test case class. Must be subclass of + :class:`unittest.TestCase`. + """ + pass + loadTestsFromTestCase.generative = True + + def loadTestsFromTestClass(self, cls): + """Return tests in this test class. Class will *not* be a + unittest.TestCase subclass. Return None if you are not able to + load any tests, an iterable if you are. May be a generator. + + :param cls: The test case class. Must be **not** be subclass of + :class:`unittest.TestCase`. + """ + pass + loadTestsFromTestClass._new = True + loadTestsFromTestClass.generative = True + + def makeTest(self, obj, parent): + """Given an object and its parent, return or yield one or more + test cases. Each test must be a unittest.TestCase (or subclass) + instance. This is called before default test loading to allow + plugins to load an alternate test case or cases for an + object. May be a generator. + + :param obj: The object to be made into a test + :param parent: The parent of obj (eg, for a method, the class) + """ + pass + makeTest._new = True + makeTest.generative = True + + def options(self, parser, env): + """Called to allow plugin to register command line + options with the parser. + + DO NOT return a value from this method unless you want to stop + all other plugins from setting their options. + + :param parser: options parser instance + :type parser: :class:`ConfigParser.ConfigParser` + :param env: environment, default is os.environ + """ + pass + options._new = True + + def prepareTest(self, test): + """Called before the test is run by the test runner. 
Please + note the article *the* in the previous sentence: prepareTest + is called *only once*, and is passed the test case or test + suite that the test runner will execute. It is *not* called + for each individual test case. If you return a non-None value, + that return value will be run as the test. Use this hook to + wrap or decorate the test with another function. If you need + to modify or wrap individual test cases, use `prepareTestCase` + instead. + + :param test: the test case + :type test: :class:`nose.case.Test` + """ + pass + + def prepareTestCase(self, test): + """Prepare or wrap an individual test case. Called before + execution of the test. The test passed here is a + nose.case.Test instance; the case to be executed is in the + test attribute of the passed case. To modify the test to be + run, you should return a callable that takes one argument (the + test result object) -- it is recommended that you *do not* + side-effect the nose.case.Test instance you have been passed. + + Keep in mind that when you replace the test callable you are + replacing the run() method of the test case -- including the + exception handling and result calls, etc. + + :param test: the test case + :type test: :class:`nose.case.Test` + """ + pass + prepareTestCase._new = True + + def prepareTestLoader(self, loader): + """Called before tests are loaded. To replace the test loader, + return a test loader. To allow other plugins to process the + test loader, return None. Only one plugin may replace the test + loader. Only valid when using nose.TestProgram. + + :param loader: :class:`nose.loader.TestLoader` + (or other loader) instance + """ + pass + prepareTestLoader._new = True + + def prepareTestResult(self, result): + """Called before the first test is run. To use a different + test result handler for all tests than the given result, + return a test result handler. NOTE however that this handler + will only be seen by tests, that is, inside of the result + proxy system. The TestRunner and TestProgram -- whether nose's + or other -- will continue to see the original result + handler. For this reason, it is usually better to monkeypatch + the result (for instance, if you want to handle some + exceptions in a unique way). Only one plugin may replace the + result, but many may monkeypatch it. If you want to + monkeypatch and stop other plugins from doing so, monkeypatch + and return the patched result. + + :param result: :class:`nose.result.TextTestResult` + (or other result) instance + """ + pass + prepareTestResult._new = True + + def prepareTestRunner(self, runner): + """Called before tests are run. To replace the test runner, + return a test runner. To allow other plugins to process the + test runner, return None. Only valid when using nose.TestProgram. + + :param runner: :class:`nose.core.TextTestRunner` + (or other runner) instance + """ + pass + prepareTestRunner._new = True + + def report(self, stream): + """Called after all error output has been printed. Print your + plugin's report to the provided stream. Return None to allow + other plugins to print reports, any other value to stop them. + + :param stream: stream object; send your output here + :type stream: file-like object + """ + pass + + def setOutputStream(self, stream): + """Called before test output begins. To direct test output to a + new stream, return a stream object, which must implement a + `write(msg)` method. If you only want to note the stream, not + capture or redirect it, then return None. 
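        A rough sketch of the redirect case (the plugin and dummy class are
        illustrative)::

            from nose.plugins import Plugin

            class Quiet(Plugin):
                name = 'quiet'

                def setOutputStream(self, stream):
                    self.stream = stream      # keep the real stream for report()

                    class Dummy:
                        def write(self, *arg):
                            pass
                        def writeln(self, *arg):
                            pass
                        def flush(self):
                            pass
                    return Dummy()            # default output goes to the dummy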
+ + :param stream: stream object; send your output here + :type stream: file-like object + """ + + def startContext(self, context): + """Called before context setup and the running of tests in the + context. Note that tests have already been *loaded* from the + context before this call. + + :param context: the context about to be setup. May be a module or + class, or any other object that contains tests. + """ + pass + startContext._new = True + + def startTest(self, test): + """Called before each test is run. DO NOT return a value unless + you want to stop other plugins from seeing the test start. + + :param test: the test case + :type test: :class:`nose.case.Test` + """ + pass + + def stopContext(self, context): + """Called after the tests in a context have run and the + context has been torn down. + + :param context: the context that has been torn down. May be a module or + class, or any other object that contains tests. + """ + pass + stopContext._new = True + + def stopTest(self, test): + """Called after each test is run. DO NOT return a value unless + you want to stop other plugins from seeing that the test has stopped. + + :param test: the test case + :type test: :class:`nose.case.Test` + """ + pass + + def testName(self, test): + """Return a short test name. Called by `nose.case.Test.__str__`. + + :param test: the test case + :type test: :class:`nose.case.Test` + """ + pass + testName._new = True + + def wantClass(self, cls): + """Return true if you want the main test selector to collect + tests from this class, false if you don't, and None if you don't + care. + + :param cls: The class being examined by the selector + """ + pass + + def wantDirectory(self, dirname): + """Return true if you want test collection to descend into this + directory, false if you do not, and None if you don't care. + + :param dirname: Full path to directory being examined by the selector + """ + pass + + def wantFile(self, file): + """Return true if you want to collect tests from this file, + false if you do not and None if you don't care. + + Change from 0.9: The optional package parameter is no longer passed. + + :param file: Full path to file being examined by the selector + """ + pass + + def wantFunction(self, function): + """Return true to collect this function as a test, false to + prevent it from being collected, and None if you don't care. + + :param function: The function object being examined by the selector + """ + pass + + def wantMethod(self, method): + """Return true to collect this method as a test, false to + prevent it from being collected, and None if you don't care. + + :param method: The method object being examined by the selector + :type method: unbound method + """ + pass + + def wantModule(self, module): + """Return true if you want to collection to descend into this + module, false to prevent the collector from descending into the + module, and None if you don't care. + + :param module: The module object being examined by the selector + :type module: python module + """ + pass + + def wantModuleTests(self, module): + """ + .. warning:: DEPRECATED -- this method will not be called, it has + been folded into wantModule. + """ + pass + wantModuleTests.deprecated = True + diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py b/scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py new file mode 100755 index 00000000..4fcc0018 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py @@ -0,0 +1,34 @@ +""" +Lists builtin plugins. 
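For illustration, each class that imports cleanly is exposed both in the
``plugins`` list and as an attribute of this module::

    from nose.plugins import builtin

    for plug in builtin.plugins:
        print(plug.__name__)    # e.g. AttributeSelector, Capture, ...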
+""" +plugins = [] +builtins = ( + ('nose.plugins.attrib', 'AttributeSelector'), + ('nose.plugins.capture', 'Capture'), + ('nose.plugins.logcapture', 'LogCapture'), + ('nose.plugins.cover', 'Coverage'), + ('nose.plugins.debug', 'Pdb'), + ('nose.plugins.deprecated', 'Deprecated'), + ('nose.plugins.doctests', 'Doctest'), + ('nose.plugins.isolate', 'IsolationPlugin'), + ('nose.plugins.failuredetail', 'FailureDetail'), + ('nose.plugins.prof', 'Profile'), + ('nose.plugins.skip', 'Skip'), + ('nose.plugins.testid', 'TestId'), + ('nose.plugins.multiprocess', 'MultiProcess'), + ('nose.plugins.xunit', 'Xunit'), + ('nose.plugins.allmodules', 'AllModules'), + ('nose.plugins.collect', 'CollectOnly'), + ) + +for module, cls in builtins: + try: + plugmod = __import__(module, globals(), locals(), [cls]) + except KeyboardInterrupt: + raise + except: + continue + plug = getattr(plugmod, cls) + plugins.append(plug) + globals()[cls] = plug + diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/capture.py b/scripts/external_libs/nose-1.3.4/nose/plugins/capture.py new file mode 100755 index 00000000..fa4e5dca --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/capture.py @@ -0,0 +1,115 @@ +""" +This plugin captures stdout during test execution. If the test fails +or raises an error, the captured output will be appended to the error +or failure output. It is enabled by default but can be disabled with +the options ``-s`` or ``--nocapture``. + +:Options: + ``--nocapture`` + Don't capture stdout (any stdout output will be printed immediately) + +""" +import logging +import os +import sys +from nose.plugins.base import Plugin +from nose.pyversion import exc_to_unicode, force_unicode +from nose.util import ln +from StringIO import StringIO + + +log = logging.getLogger(__name__) + +class Capture(Plugin): + """ + Output capture plugin. Enabled by default. Disable with ``-s`` or + ``--nocapture``. This plugin captures stdout during test execution, + appending any output captured to the error or failure output, + should the test fail or raise an error. + """ + enabled = True + env_opt = 'NOSE_NOCAPTURE' + name = 'capture' + score = 1600 + + def __init__(self): + self.stdout = [] + self._buf = None + + def options(self, parser, env): + """Register commandline options + """ + parser.add_option( + "-s", "--nocapture", action="store_false", + default=not env.get(self.env_opt), dest="capture", + help="Don't capture stdout (any stdout output " + "will be printed immediately) [NOSE_NOCAPTURE]") + + def configure(self, options, conf): + """Configure plugin. Plugin is enabled by default. + """ + self.conf = conf + if not options.capture: + self.enabled = False + + def afterTest(self, test): + """Clear capture buffer. + """ + self.end() + self._buf = None + + def begin(self): + """Replace sys.stdout with capture buffer. + """ + self.start() # get an early handle on sys.stdout + + def beforeTest(self, test): + """Flush capture buffer. + """ + self.start() + + def formatError(self, test, err): + """Add captured output to error report. + """ + test.capturedOutput = output = self.buffer + self._buf = None + if not output: + # Don't return None as that will prevent other + # formatters from formatting and remove earlier formatters + # formats, instead return the err we got + return err + ec, ev, tb = err + return (ec, self.addCaptureToErr(ev, output), tb) + + def formatFailure(self, test, err): + """Add captured output to failure report. 
+ """ + return self.formatError(test, err) + + def addCaptureToErr(self, ev, output): + ev = exc_to_unicode(ev) + output = force_unicode(output) + return u'\n'.join([ev, ln(u'>> begin captured stdout <<'), + output, ln(u'>> end captured stdout <<')]) + + def start(self): + self.stdout.append(sys.stdout) + self._buf = StringIO() + sys.stdout = self._buf + + def end(self): + if self.stdout: + sys.stdout = self.stdout.pop() + + def finalize(self, result): + """Restore stdout. + """ + while self.stdout: + self.end() + + def _get_buffer(self): + if self._buf is not None: + return self._buf.getvalue() + + buffer = property(_get_buffer, None, None, + """Captured stdout output.""") diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/collect.py b/scripts/external_libs/nose-1.3.4/nose/plugins/collect.py new file mode 100755 index 00000000..6f9f0faa --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/collect.py @@ -0,0 +1,94 @@ +""" +This plugin bypasses the actual execution of tests, and instead just collects +test names. Fixtures are also bypassed, so running nosetests with the +collection plugin enabled should be very quick. + +This plugin is useful in combination with the testid plugin (``--with-id``). +Run both together to get an indexed list of all tests, which will enable you to +run individual tests by index number. + +This plugin is also useful for counting tests in a test suite, and making +people watching your demo think all of your tests pass. +""" +from nose.plugins.base import Plugin +from nose.case import Test +import logging +import unittest + +log = logging.getLogger(__name__) + + +class CollectOnly(Plugin): + """ + Collect and output test names only, don't run any tests. + """ + name = "collect-only" + enableOpt = 'collect_only' + + def options(self, parser, env): + """Register commandline options. + """ + parser.add_option('--collect-only', + action='store_true', + dest=self.enableOpt, + default=env.get('NOSE_COLLECT_ONLY'), + help="Enable collect-only: %s [COLLECT_ONLY]" % + (self.help())) + + def prepareTestLoader(self, loader): + """Install collect-only suite class in TestLoader. + """ + # Disable context awareness + log.debug("Preparing test loader") + loader.suiteClass = TestSuiteFactory(self.conf) + + def prepareTestCase(self, test): + """Replace actual test with dummy that always passes. + """ + # Return something that always passes + log.debug("Preparing test case %s", test) + if not isinstance(test, Test): + return + def run(result): + # We need to make these plugin calls because there won't be + # a result proxy, due to using a stripped-down test suite + self.conf.plugins.startTest(test) + result.startTest(test) + self.conf.plugins.addSuccess(test) + result.addSuccess(test) + self.conf.plugins.stopTest(test) + result.stopTest(test) + return run + + +class TestSuiteFactory: + """ + Factory for producing configured test suites. + """ + def __init__(self, conf): + self.conf = conf + + def __call__(self, tests=(), **kw): + return TestSuite(tests, conf=self.conf) + + +class TestSuite(unittest.TestSuite): + """ + Basic test suite that bypasses most proxy and plugin calls, but does + wrap tests in a nose.case.Test so prepareTestCase will be called. 
+ """ + def __init__(self, tests=(), conf=None): + self.conf = conf + # Exec lazy suites: makes discovery depth-first + if callable(tests): + tests = tests() + log.debug("TestSuite(%r)", tests) + unittest.TestSuite.__init__(self, tests) + + def addTest(self, test): + log.debug("Add test %s", test) + if isinstance(test, unittest.TestSuite): + self._tests.append(test) + else: + self._tests.append(Test(test, config=self.conf)) + diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/cover.py b/scripts/external_libs/nose-1.3.4/nose/plugins/cover.py new file mode 100755 index 00000000..551f3320 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/cover.py @@ -0,0 +1,253 @@ +"""If you have Ned Batchelder's coverage_ module installed, you may activate a +coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE +environment variable. The coverage report will cover any python source module +imported after the start of the test run, excluding modules that match +testMatch. If you want to include those modules too, use the ``--cover-tests`` +switch, or set the NOSE_COVER_TESTS environment variable to a true value. To +restrict the coverage report to modules from a particular package or packages, +use the ``--cover-package`` switch or the NOSE_COVER_PACKAGE environment +variable. + +.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html +""" +import logging +import re +import sys +import StringIO +from nose.plugins.base import Plugin +from nose.util import src, tolist + +log = logging.getLogger(__name__) + + +class Coverage(Plugin): + """ + Activate a coverage report using Ned Batchelder's coverage module. + """ + coverTests = False + coverPackages = None + coverInstance = None + coverErase = False + coverMinPercentage = None + score = 200 + status = {} + + def options(self, parser, env): + """ + Add options to command line. + """ + super(Coverage, self).options(parser, env) + parser.add_option("--cover-package", action="append", + default=env.get('NOSE_COVER_PACKAGE'), + metavar="PACKAGE", + dest="cover_packages", + help="Restrict coverage output to selected packages " + "[NOSE_COVER_PACKAGE]") + parser.add_option("--cover-erase", action="store_true", + default=env.get('NOSE_COVER_ERASE'), + dest="cover_erase", + help="Erase previously collected coverage " + "statistics before run") + parser.add_option("--cover-tests", action="store_true", + dest="cover_tests", + default=env.get('NOSE_COVER_TESTS'), + help="Include test modules in coverage report " + "[NOSE_COVER_TESTS]") + parser.add_option("--cover-min-percentage", action="store", + dest="cover_min_percentage", + default=env.get('NOSE_COVER_MIN_PERCENTAGE'), + help="Minimum percentage of coverage for tests " + "to pass [NOSE_COVER_MIN_PERCENTAGE]") + parser.add_option("--cover-inclusive", action="store_true", + dest="cover_inclusive", + default=env.get('NOSE_COVER_INCLUSIVE'), + help="Include all python files under working " + "directory in coverage report. Useful for " + "discovering holes in test coverage if not all " + "files are imported by the test suite. 
" + "[NOSE_COVER_INCLUSIVE]") + parser.add_option("--cover-html", action="store_true", + default=env.get('NOSE_COVER_HTML'), + dest='cover_html', + help="Produce HTML coverage information") + parser.add_option('--cover-html-dir', action='store', + default=env.get('NOSE_COVER_HTML_DIR', 'cover'), + dest='cover_html_dir', + metavar='DIR', + help='Produce HTML coverage information in dir') + parser.add_option("--cover-branches", action="store_true", + default=env.get('NOSE_COVER_BRANCHES'), + dest="cover_branches", + help="Include branch coverage in coverage report " + "[NOSE_COVER_BRANCHES]") + parser.add_option("--cover-xml", action="store_true", + default=env.get('NOSE_COVER_XML'), + dest="cover_xml", + help="Produce XML coverage information") + parser.add_option("--cover-xml-file", action="store", + default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'), + dest="cover_xml_file", + metavar="FILE", + help="Produce XML coverage information in file") + + def configure(self, options, conf): + """ + Configure plugin. + """ + try: + self.status.pop('active') + except KeyError: + pass + super(Coverage, self).configure(options, conf) + if conf.worker: + return + if self.enabled: + try: + import coverage + if not hasattr(coverage, 'coverage'): + raise ImportError("Unable to import coverage module") + except ImportError: + log.error("Coverage not available: " + "unable to import coverage module") + self.enabled = False + return + self.conf = conf + self.coverErase = options.cover_erase + self.coverTests = options.cover_tests + self.coverPackages = [] + if options.cover_packages: + if isinstance(options.cover_packages, (list, tuple)): + cover_packages = options.cover_packages + else: + cover_packages = [options.cover_packages] + for pkgs in [tolist(x) for x in cover_packages]: + self.coverPackages.extend(pkgs) + self.coverInclusive = options.cover_inclusive + if self.coverPackages: + log.info("Coverage report will include only packages: %s", + self.coverPackages) + self.coverHtmlDir = None + if options.cover_html: + self.coverHtmlDir = options.cover_html_dir + log.debug('Will put HTML coverage report in %s', self.coverHtmlDir) + self.coverBranches = options.cover_branches + self.coverXmlFile = None + if options.cover_min_percentage: + self.coverMinPercentage = int(options.cover_min_percentage.rstrip('%')) + if options.cover_xml: + self.coverXmlFile = options.cover_xml_file + log.debug('Will put XML coverage report in %s', self.coverXmlFile) + if self.enabled: + self.status['active'] = True + self.coverInstance = coverage.coverage(auto_data=False, + branch=self.coverBranches, data_suffix=None, + source=self.coverPackages) + + def begin(self): + """ + Begin recording coverage information. + """ + log.debug("Coverage begin") + self.skipModules = sys.modules.keys()[:] + if self.coverErase: + log.debug("Clearing previously collected coverage statistics") + self.coverInstance.combine() + self.coverInstance.erase() + self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]') + self.coverInstance.load() + self.coverInstance.start() + + def report(self, stream): + """ + Output code coverage report. 
+ """ + log.debug("Coverage report") + self.coverInstance.stop() + self.coverInstance.combine() + self.coverInstance.save() + modules = [module + for name, module in sys.modules.items() + if self.wantModuleCoverage(name, module)] + log.debug("Coverage report will cover modules: %s", modules) + self.coverInstance.report(modules, file=stream) + + import coverage + if self.coverHtmlDir: + log.debug("Generating HTML coverage report") + try: + self.coverInstance.html_report(modules, self.coverHtmlDir) + except coverage.misc.CoverageException, e: + log.warning("Failed to generate HTML report: %s" % str(e)) + + if self.coverXmlFile: + log.debug("Generating XML coverage report") + try: + self.coverInstance.xml_report(modules, self.coverXmlFile) + except coverage.misc.CoverageException, e: + log.warning("Failed to generate XML report: %s" % str(e)) + + # make sure we have minimum required coverage + if self.coverMinPercentage: + f = StringIO.StringIO() + self.coverInstance.report(modules, file=f) + + multiPackageRe = (r'-------\s\w+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?' + r'\s+(\d+)%\s+\d*\s{0,1}$') + singlePackageRe = (r'-------\s[\w./]+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?' + r'\s+(\d+)%(?:\s+[-\d, ]+)\s{0,1}$') + + m = re.search(multiPackageRe, f.getvalue()) + if m is None: + m = re.search(singlePackageRe, f.getvalue()) + + if m: + percentage = int(m.groups()[0]) + if percentage < self.coverMinPercentage: + log.error('TOTAL Coverage did not reach minimum ' + 'required: %d%%' % self.coverMinPercentage) + sys.exit(1) + else: + log.error("No total percentage was found in coverage output, " + "something went wrong.") + + + def wantModuleCoverage(self, name, module): + if not hasattr(module, '__file__'): + log.debug("no coverage of %s: no __file__", name) + return False + module_file = src(module.__file__) + if not module_file or not module_file.endswith('.py'): + log.debug("no coverage of %s: not a python file", name) + return False + if self.coverPackages: + for package in self.coverPackages: + if (re.findall(r'^%s\b' % re.escape(package), name) + and (self.coverTests + or not self.conf.testMatch.search(name))): + log.debug("coverage for %s", name) + return True + if name in self.skipModules: + log.debug("no coverage for %s: loaded before coverage start", + name) + return False + if self.conf.testMatch.search(name) and not self.coverTests: + log.debug("no coverage for %s: is a test", name) + return False + # accept any package that passed the previous tests, unless + # coverPackages is on -- in that case, if we wanted this + # module, we would have already returned True + return not self.coverPackages + + def wantFile(self, file, package=None): + """If inclusive coverage enabled, return true for all source files + in wanted packages. + """ + if self.coverInclusive: + if file.endswith(".py"): + if package and self.coverPackages: + for want in self.coverPackages: + if package.startswith(want): + return True + else: + return True + return None diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/debug.py b/scripts/external_libs/nose-1.3.4/nose/plugins/debug.py new file mode 100755 index 00000000..78243e60 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/debug.py @@ -0,0 +1,67 @@ +""" +This plugin provides ``--pdb`` and ``--pdb-failures`` options. The ``--pdb`` +option will drop the test runner into pdb when it encounters an error. To +drop into pdb on failure, use ``--pdb-failures``. 
+""" + +import pdb +from nose.plugins.base import Plugin + +class Pdb(Plugin): + """ + Provides --pdb and --pdb-failures options that cause the test runner to + drop into pdb if it encounters an error or failure, respectively. + """ + enabled_for_errors = False + enabled_for_failures = False + score = 5 # run last, among builtins + + def options(self, parser, env): + """Register commandline options. + """ + parser.add_option( + "--pdb", action="store_true", dest="debugBoth", + default=env.get('NOSE_PDB', False), + help="Drop into debugger on failures or errors") + parser.add_option( + "--pdb-failures", action="store_true", + dest="debugFailures", + default=env.get('NOSE_PDB_FAILURES', False), + help="Drop into debugger on failures") + parser.add_option( + "--pdb-errors", action="store_true", + dest="debugErrors", + default=env.get('NOSE_PDB_ERRORS', False), + help="Drop into debugger on errors") + + def configure(self, options, conf): + """Configure which kinds of exceptions trigger plugin. + """ + self.conf = conf + self.enabled_for_errors = options.debugErrors or options.debugBoth + self.enabled_for_failures = options.debugFailures or options.debugBoth + self.enabled = self.enabled_for_failures or self.enabled_for_errors + + def addError(self, test, err): + """Enter pdb if configured to debug errors. + """ + if not self.enabled_for_errors: + return + self.debug(err) + + def addFailure(self, test, err): + """Enter pdb if configured to debug failures. + """ + if not self.enabled_for_failures: + return + self.debug(err) + + def debug(self, err): + import sys # FIXME why is this import here? + ec, ev, tb = err + stdout = sys.stdout + sys.stdout = sys.__stdout__ + try: + pdb.post_mortem(tb) + finally: + sys.stdout = stdout diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py b/scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py new file mode 100755 index 00000000..461a26be --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py @@ -0,0 +1,45 @@ +""" +This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest` +exception. When :class:`DeprecatedTest` is raised, the exception will be logged +in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose) +will be output, and the exception will not be counted as an error or failure. +It is enabled by default, but can be turned off by using ``--no-deprecated``. +""" + +from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin + + +class DeprecatedTest(Exception): + """Raise this exception to mark a test as deprecated. + """ + pass + + +class Deprecated(ErrorClassPlugin): + """ + Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled + by default. + """ + enabled = True + deprecated = ErrorClass(DeprecatedTest, + label='DEPRECATED', + isfailure=False) + + def options(self, parser, env): + """Register commandline options. + """ + env_opt = 'NOSE_WITHOUT_DEPRECATED' + parser.add_option('--no-deprecated', action='store_true', + dest='noDeprecated', default=env.get(env_opt, False), + help="Disable special handling of DeprecatedTest " + "exceptions.") + + def configure(self, options, conf): + """Configure plugin. 
+ """ + if not self.can_configure: + return + self.conf = conf + disable = getattr(options, 'noDeprecated', False) + if disable: + self.enabled = False diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py b/scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py new file mode 100755 index 00000000..5ef65799 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py @@ -0,0 +1,455 @@ +"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST +environment variable to enable collection and execution of :mod:`doctests +<doctest>`. Because doctests are usually included in the tested package +(instead of being grouped into packages or modules of their own), nose only +looks for them in the non-test packages it discovers in the working directory. + +Doctests may also be placed into files other than python modules, in which +case they can be collected and executed by using the ``--doctest-extension`` +switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file +extension(s) to load. + +When loading doctests from non-module files, use the ``--doctest-fixtures`` +switch to specify how to find modules containing fixtures for the tests. A +module name will be produced by appending the value of that switch to the base +name of each doctest file loaded. For example, a doctest file "widgets.rst" +with the switch ``--doctest_fixtures=_fixt`` will load fixtures from the module +``widgets_fixt.py``. + +A fixtures module may define any or all of the following functions: + +* setup([module]) or setup_module([module]) + + Called before the test runs. You may raise SkipTest to skip all tests. + +* teardown([module]) or teardown_module([module]) + + Called after the test runs, if setup/setup_module did not raise an + unhandled exception. + +* setup_test(test) + + Called before the test. NOTE: the argument passed is a + doctest.DocTest instance, *not* a unittest.TestCase. + +* teardown_test(test) + + Called after the test, if setup_test did not raise an exception. NOTE: the + argument passed is a doctest.DocTest instance, *not* a unittest.TestCase. + +Doctests are run like any other test, with the exception that output +capture does not work; doctest does its own output capture while running a +test. + +.. note :: + + See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for + additional documentation and examples. + +""" +from __future__ import generators + +import logging +import os +import sys +import unittest +from inspect import getmodule +from nose.plugins.base import Plugin +from nose.suite import ContextList +from nose.util import anyp, getpackage, test_address, resolve_name, \ + src, tolist, isproperty +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO +import sys +import __builtin__ as builtin_mod + +log = logging.getLogger(__name__) + +try: + import doctest + doctest.DocTestCase + # system version of doctest is acceptable, but needs a monkeypatch +except (ImportError, AttributeError): + # system version is too old + import nose.ext.dtcompat as doctest + + +# +# Doctest and coverage don't get along, so we need to create +# a monkeypatch that will replace the part of doctest that +# interferes with coverage reports. 
+# +# The monkeypatch is based on this zope patch: +# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705 +# +_orp = doctest._OutputRedirectingPdb + +class NoseOutputRedirectingPdb(_orp): + def __init__(self, out): + self.__debugger_used = False + _orp.__init__(self, out) + + def set_trace(self): + self.__debugger_used = True + _orp.set_trace(self, sys._getframe().f_back) + + def set_continue(self): + # Calling set_continue unconditionally would break unit test + # coverage reporting, as Bdb.set_continue calls sys.settrace(None). + if self.__debugger_used: + _orp.set_continue(self) +doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb + + +class DoctestSuite(unittest.TestSuite): + """ + Doctest suites are parallelizable at the module or file level only, + since they may be attached to objects that are not individually + addressable (like properties). This suite subclass is used when + loading doctests from a module to ensure that behavior. + + This class is used only if the plugin is not fully prepared; + in normal use, the loader's suiteClass is used. + + """ + can_split = False + + def __init__(self, tests=(), context=None, can_split=False): + self.context = context + self.can_split = can_split + unittest.TestSuite.__init__(self, tests=tests) + + def address(self): + return test_address(self.context) + + def __iter__(self): + # 2.3 compat + return iter(self._tests) + + def __str__(self): + return str(self._tests) + + +class Doctest(Plugin): + """ + Activate doctest plugin to find and run doctests in non-test modules. + """ + extension = None + suiteClass = DoctestSuite + + def options(self, parser, env): + """Register commmandline options. + """ + Plugin.options(self, parser, env) + parser.add_option('--doctest-tests', action='store_true', + dest='doctest_tests', + default=env.get('NOSE_DOCTEST_TESTS'), + help="Also look for doctests in test modules. " + "Note that classes, methods and functions should " + "have either doctests or non-doctest tests, " + "not both. [NOSE_DOCTEST_TESTS]") + parser.add_option('--doctest-extension', action="append", + dest="doctestExtension", + metavar="EXT", + help="Also look for doctests in files with " + "this extension [NOSE_DOCTEST_EXTENSION]") + parser.add_option('--doctest-result-variable', + dest='doctest_result_var', + default=env.get('NOSE_DOCTEST_RESULT_VAR'), + metavar="VAR", + help="Change the variable name set to the result of " + "the last interpreter command from the default '_'. " + "Can be used to avoid conflicts with the _() " + "function used for text translation. " + "[NOSE_DOCTEST_RESULT_VAR]") + parser.add_option('--doctest-fixtures', action="store", + dest="doctestFixtures", + metavar="SUFFIX", + help="Find fixtures for a doctest file in module " + "with this name appended to the base name " + "of the doctest file") + parser.add_option('--doctest-options', action="append", + dest="doctestOptions", + metavar="OPTIONS", + help="Specify options to pass to doctest. " + + "Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'") + # Set the default as a list, if given in env; otherwise + # an additional value set on the command line will cause + # an error. + env_setting = env.get('NOSE_DOCTEST_EXTENSION') + if env_setting is not None: + parser.set_defaults(doctestExtension=tolist(env_setting)) + + def configure(self, options, config): + """Configure plugin. 
+ """ + Plugin.configure(self, options, config) + self.doctest_result_var = options.doctest_result_var + self.doctest_tests = options.doctest_tests + self.extension = tolist(options.doctestExtension) + self.fixtures = options.doctestFixtures + self.finder = doctest.DocTestFinder() + self.optionflags = 0 + if options.doctestOptions: + flags = ",".join(options.doctestOptions).split(',') + for flag in flags: + if not flag or flag[0] not in '+-': + raise ValueError( + "Must specify doctest options with starting " + + "'+' or '-'. Got %s" % (flag,)) + mode, option_name = flag[0], flag[1:] + option_flag = doctest.OPTIONFLAGS_BY_NAME.get(option_name) + if not option_flag: + raise ValueError("Unknown doctest option %s" % + (option_name,)) + if mode == '+': + self.optionflags |= option_flag + elif mode == '-': + self.optionflags &= ~option_flag + + def prepareTestLoader(self, loader): + """Capture loader's suiteClass. + + This is used to create test suites from doctest files. + + """ + self.suiteClass = loader.suiteClass + + def loadTestsFromModule(self, module): + """Load doctests from the module. + """ + log.debug("loading from %s", module) + if not self.matches(module.__name__): + log.debug("Doctest doesn't want module %s", module) + return + try: + tests = self.finder.find(module) + except AttributeError: + log.exception("Attribute error loading from %s", module) + # nose allows module.__test__ = False; doctest does not and throws + # AttributeError + return + if not tests: + log.debug("No tests found in %s", module) + return + tests.sort() + module_file = src(module.__file__) + # FIXME this breaks the id plugin somehow (tests probably don't + # get wrapped in result proxy or something) + cases = [] + for test in tests: + if not test.examples: + continue + if not test.filename: + test.filename = module_file + cases.append(DocTestCase(test, + optionflags=self.optionflags, + result_var=self.doctest_result_var)) + if cases: + yield self.suiteClass(cases, context=module, can_split=False) + + def loadTestsFromFile(self, filename): + """Load doctests from the file. + + Tests are loaded only if filename's extension matches + configured doctest extension. + + """ + if self.extension and anyp(filename.endswith, self.extension): + name = os.path.basename(filename) + dh = open(filename) + try: + doc = dh.read() + finally: + dh.close() + + fixture_context = None + globs = {'__file__': filename} + if self.fixtures: + base, ext = os.path.splitext(name) + dirname = os.path.dirname(filename) + sys.path.append(dirname) + fixt_mod = base + self.fixtures + try: + fixture_context = __import__( + fixt_mod, globals(), locals(), ["nop"]) + except ImportError, e: + log.debug( + "Could not import %s: %s (%s)", fixt_mod, e, sys.path) + log.debug("Fixture module %s resolved to %s", + fixt_mod, fixture_context) + if hasattr(fixture_context, 'globs'): + globs = fixture_context.globs(globs) + parser = doctest.DocTestParser() + test = parser.get_doctest( + doc, globs=globs, name=name, + filename=filename, lineno=0) + if test.examples: + case = DocFileCase( + test, + optionflags=self.optionflags, + setUp=getattr(fixture_context, 'setup_test', None), + tearDown=getattr(fixture_context, 'teardown_test', None), + result_var=self.doctest_result_var) + if fixture_context: + yield ContextList((case,), context=fixture_context) + else: + yield case + else: + yield False # no tests to load + + def makeTest(self, obj, parent): + """Look for doctests in the given object, which will be a + function, method or class. 
+ """ + name = getattr(obj, '__name__', 'Unnammed %s' % type(obj)) + doctests = self.finder.find(obj, module=getmodule(parent), name=name) + if doctests: + for test in doctests: + if len(test.examples) == 0: + continue + yield DocTestCase(test, obj=obj, optionflags=self.optionflags, + result_var=self.doctest_result_var) + + def matches(self, name): + # FIXME this seems wrong -- nothing is ever going to + # fail this test, since we're given a module NAME not FILE + if name == '__init__.py': + return False + # FIXME don't think we need include/exclude checks here? + return ((self.doctest_tests or not self.conf.testMatch.search(name) + or (self.conf.include + and filter(None, + [inc.search(name) + for inc in self.conf.include]))) + and (not self.conf.exclude + or not filter(None, + [exc.search(name) + for exc in self.conf.exclude]))) + + def wantFile(self, file): + """Override to select all modules and any file ending with + configured doctest extension. + """ + # always want .py files + if file.endswith('.py'): + return True + # also want files that match my extension + if (self.extension + and anyp(file.endswith, self.extension) + and (not self.conf.exclude + or not filter(None, + [exc.search(file) + for exc in self.conf.exclude]))): + return True + return None + + +class DocTestCase(doctest.DocTestCase): + """Overrides DocTestCase to + provide an address() method that returns the correct address for + the doctest case. To provide hints for address(), an obj may also + be passed -- this will be used as the test object for purposes of + determining the test address, if it is provided. + """ + def __init__(self, test, optionflags=0, setUp=None, tearDown=None, + checker=None, obj=None, result_var='_'): + self._result_var = result_var + self._nose_obj = obj + super(DocTestCase, self).__init__( + test, optionflags=optionflags, setUp=setUp, tearDown=tearDown, + checker=checker) + + def address(self): + if self._nose_obj is not None: + return test_address(self._nose_obj) + obj = resolve_name(self._dt_test.name) + + if isproperty(obj): + # properties have no connection to the class they are in + # so we can't just look 'em up, we have to first look up + # the class, then stick the prop on the end + parts = self._dt_test.name.split('.') + class_name = '.'.join(parts[:-1]) + cls = resolve_name(class_name) + base_addr = test_address(cls) + return (base_addr[0], base_addr[1], + '.'.join([base_addr[2], parts[-1]])) + else: + return test_address(obj) + + # doctests loaded via find(obj) omit the module name + # so we need to override id, __repr__ and shortDescription + # bonus: this will squash a 2.3 vs 2.4 incompatiblity + def id(self): + name = self._dt_test.name + filename = self._dt_test.filename + if filename is not None: + pk = getpackage(filename) + if pk is None: + return name + if not name.startswith(pk): + name = "%s.%s" % (pk, name) + return name + + def __repr__(self): + name = self.id() + name = name.split('.') + return "%s (%s)" % (name[-1], '.'.join(name[:-1])) + __str__ = __repr__ + + def shortDescription(self): + return 'Doctest: %s' % self.id() + + def setUp(self): + if self._result_var is not None: + self._old_displayhook = sys.displayhook + sys.displayhook = self._displayhook + super(DocTestCase, self).setUp() + + def _displayhook(self, value): + if value is None: + return + setattr(builtin_mod, self._result_var, value) + print repr(value) + + def tearDown(self): + super(DocTestCase, self).tearDown() + if self._result_var is not None: + sys.displayhook = self._old_displayhook + 
delattr(builtin_mod, self._result_var) + + +class DocFileCase(doctest.DocFileCase): + """Overrides to provide address() method that returns the correct + address for the doc file case. + """ + def __init__(self, test, optionflags=0, setUp=None, tearDown=None, + checker=None, result_var='_'): + self._result_var = result_var + super(DocFileCase, self).__init__( + test, optionflags=optionflags, setUp=setUp, tearDown=tearDown, + checker=None) + + def address(self): + return (self._dt_test.filename, None, None) + + def setUp(self): + if self._result_var is not None: + self._old_displayhook = sys.displayhook + sys.displayhook = self._displayhook + super(DocFileCase, self).setUp() + + def _displayhook(self, value): + if value is None: + return + setattr(builtin_mod, self._result_var, value) + print repr(value) + + def tearDown(self): + super(DocFileCase, self).tearDown() + if self._result_var is not None: + sys.displayhook = self._old_displayhook + delattr(builtin_mod, self._result_var) diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py b/scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py new file mode 100755 index 00000000..d1540e00 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py @@ -0,0 +1,210 @@ +""" +ErrorClass Plugins +------------------ + +ErrorClass plugins provide an easy way to add support for custom +handling of particular classes of exceptions. + +An ErrorClass plugin defines one or more ErrorClasses and how each is +handled and reported on. Each error class is stored in a different +attribute on the result, and reported separately. Each error class must +indicate the exceptions that fall under that class, the label to use +for reporting, and whether exceptions of the class should be +considered as failures for the whole test run. + +ErrorClasses use a declarative syntax. Assign an ErrorClass to the +attribute you wish to add to the result object, defining the +exceptions, label and isfailure attributes. For example, to declare an +ErrorClassPlugin that defines TodoErrors (and subclasses of TodoError) +as an error class with the label 'TODO' that is considered a failure, +do this: + + >>> class Todo(Exception): + ... pass + >>> class TodoError(ErrorClassPlugin): + ... todo = ErrorClass(Todo, label='TODO', isfailure=True) + +The MetaErrorClass metaclass translates the ErrorClass declarations +into the tuples used by the error handling and reporting functions in +the result. This is an internal format and subject to change; you +should always use the declarative syntax for attaching ErrorClasses to +an ErrorClass plugin. + + >>> TodoError.errorClasses # doctest: +ELLIPSIS + ((<class ...Todo...>, ('todo', 'TODO', True)),) + +Let's see the plugin in action. First some boilerplate. + + >>> import sys + >>> import unittest + >>> try: + ... # 2.7+ + ... from unittest.runner import _WritelnDecorator + ... except ImportError: + ... from unittest import _WritelnDecorator + ... + >>> buf = _WritelnDecorator(sys.stdout) + +Now define a test case that raises a Todo. + + >>> class TestTodo(unittest.TestCase): + ... def runTest(self): + ... raise Todo("I need to test something") + >>> case = TestTodo() + +Prepare the result using our plugin. Normally this happens during the +course of test execution within nose -- you won't be doing this +yourself. For the purposes of this testing document, I'm stepping +through the internal process of nose so you can see what happens at +each step. 
+ + >>> plugin = TodoError() + >>> from nose.result import _TextTestResult + >>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2) + >>> plugin.prepareTestResult(result) + +Now run the test. TODO is printed. + + >>> _ = case(result) # doctest: +ELLIPSIS + runTest (....TestTodo) ... TODO: I need to test something + +Errors and failures are empty, but todo has our test: + + >>> result.errors + [] + >>> result.failures + [] + >>> result.todo # doctest: +ELLIPSIS + [(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')] + >>> result.printErrors() # doctest: +ELLIPSIS + <BLANKLINE> + ====================================================================== + TODO: runTest (....TestTodo) + ---------------------------------------------------------------------- + Traceback (most recent call last): + ... + ...Todo: I need to test something + <BLANKLINE> + +Since we defined a Todo as a failure, the run was not successful. + + >>> result.wasSuccessful() + False +""" + +from nose.pyversion import make_instancemethod +from nose.plugins.base import Plugin +from nose.result import TextTestResult +from nose.util import isclass + +class MetaErrorClass(type): + """Metaclass for ErrorClassPlugins that allows error classes to be + set up in a declarative manner. + """ + def __init__(self, name, bases, attr): + errorClasses = [] + for name, detail in attr.items(): + if isinstance(detail, ErrorClass): + attr.pop(name) + for cls in detail: + errorClasses.append( + (cls, (name, detail.label, detail.isfailure))) + super(MetaErrorClass, self).__init__(name, bases, attr) + self.errorClasses = tuple(errorClasses) + + +class ErrorClass(object): + def __init__(self, *errorClasses, **kw): + self.errorClasses = errorClasses + try: + for key in ('label', 'isfailure'): + setattr(self, key, kw.pop(key)) + except KeyError: + raise TypeError("%r is a required named argument for ErrorClass" + % key) + + def __iter__(self): + return iter(self.errorClasses) + + +class ErrorClassPlugin(Plugin): + """ + Base class for ErrorClass plugins. Subclass this class and declare the + exceptions that you wish to handle as attributes of the subclass. + """ + __metaclass__ = MetaErrorClass + score = 1000 + errorClasses = () + + def addError(self, test, err): + err_cls, a, b = err + if not isclass(err_cls): + return + classes = [e[0] for e in self.errorClasses] + if filter(lambda c: issubclass(err_cls, c), classes): + return True + + def prepareTestResult(self, result): + if not hasattr(result, 'errorClasses'): + self.patchResult(result) + for cls, (storage_attr, label, isfail) in self.errorClasses: + if cls not in result.errorClasses: + storage = getattr(result, storage_attr, []) + setattr(result, storage_attr, storage) + result.errorClasses[cls] = (storage, label, isfail) + + def patchResult(self, result): + result.printLabel = print_label_patch(result) + result._orig_addError, result.addError = \ + result.addError, add_error_patch(result) + result._orig_wasSuccessful, result.wasSuccessful = \ + result.wasSuccessful, wassuccessful_patch(result) + if hasattr(result, 'printErrors'): + result._orig_printErrors, result.printErrors = \ + result.printErrors, print_errors_patch(result) + if hasattr(result, 'addSkip'): + result._orig_addSkip, result.addSkip = \ + result.addSkip, add_skip_patch(result) + result.errorClasses = {} + + +def add_error_patch(result): + """Create a new addError method to patch into a result instance + that recognizes the errorClasses attribute and deals with + errorclasses correctly. 
+ """ + return make_instancemethod(TextTestResult.addError, result) + + +def print_errors_patch(result): + """Create a new printErrors method that prints errorClasses items + as well. + """ + return make_instancemethod(TextTestResult.printErrors, result) + + +def print_label_patch(result): + """Create a new printLabel method that prints errorClasses items + as well. + """ + return make_instancemethod(TextTestResult.printLabel, result) + + +def wassuccessful_patch(result): + """Create a new wasSuccessful method that checks errorClasses for + exceptions that were put into other slots than error or failure + but that still count as not success. + """ + return make_instancemethod(TextTestResult.wasSuccessful, result) + + +def add_skip_patch(result): + """Create a new addSkip method to patch into a result instance + that delegates to addError. + """ + return make_instancemethod(TextTestResult.addSkip, result) + + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py b/scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py new file mode 100755 index 00000000..6462865d --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py @@ -0,0 +1,49 @@ +""" +This plugin provides assert introspection. When the plugin is enabled +and a test failure occurs, the traceback is displayed with extra context +around the line in which the exception was raised. Simple variable +substitution is also performed in the context output to provide more +debugging information. +""" + +from nose.plugins import Plugin +from nose.pyversion import exc_to_unicode, force_unicode +from nose.inspector import inspect_traceback + +class FailureDetail(Plugin): + """ + Plugin that provides extra information in tracebacks of test failures. + """ + score = 1600 # before capture + + def options(self, parser, env): + """Register commmandline options. + """ + parser.add_option( + "-d", "--detailed-errors", "--failure-detail", + action="store_true", + default=env.get('NOSE_DETAILED_ERRORS'), + dest="detailedErrors", help="Add detail to error" + " output by attempting to evaluate failed" + " asserts [NOSE_DETAILED_ERRORS]") + + def configure(self, options, conf): + """Configure plugin. + """ + if not self.can_configure: + return + self.enabled = options.detailedErrors + self.conf = conf + + def formatFailure(self, test, err): + """Add detail from traceback inspection to error message of a failure. + """ + ec, ev, tb = err + tbinfo, str_ev = None, exc_to_unicode(ev) + + if tb: + tbinfo = force_unicode(inspect_traceback(tb)) + str_ev = '\n'.join([str_ev, tbinfo]) + test.tbinfo = tbinfo + return (ec, str_ev, tb) + diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py b/scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py new file mode 100755 index 00000000..13235dfb --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py @@ -0,0 +1,103 @@ +"""The isolation plugin resets the contents of sys.modules after running +each test module or package. Use it by setting ``--with-isolation`` or the +NOSE_WITH_ISOLATION environment variable. 
+ +The effects are similar to wrapping the following functions around the +import and execution of each test module:: + + def setup(module): + module._mods = sys.modules.copy() + + def teardown(module): + to_del = [ m for m in sys.modules.keys() if m not in + module._mods ] + for mod in to_del: + del sys.modules[mod] + sys.modules.update(module._mods) + +Isolation works only during lazy loading. In normal use, this is only +during discovery of modules within a directory, where the process of +importing, loading tests and running tests from each module is +encapsulated in a single loadTestsFromName call. This plugin +implements loadTestsFromNames to force the same lazy-loading there, +which allows isolation to work in directed mode as well as discovery, +at the cost of some efficiency: lazy-loading names forces full context +setup and teardown to run for each name, defeating the grouping that +is normally used to ensure that context setup and teardown are run the +fewest possible times for a given set of names. + +.. warning :: + + This plugin should not be used in conjunction with other plugins + that assume that modules, once imported, will stay imported; for + instance, it may cause very odd results when used with the coverage + plugin. + +""" + +import logging +import sys + +from nose.plugins import Plugin + + +log = logging.getLogger('nose.plugins.isolation') + +class IsolationPlugin(Plugin): + """ + Activate the isolation plugin to isolate changes to external + modules to a single test module or package. The isolation plugin + resets the contents of sys.modules after each test module or + package runs to its state before the test. PLEASE NOTE that this + plugin should not be used with the coverage plugin, or in any other case + where module reloading may produce undesirable side-effects. + """ + score = 10 # I want to be last + name = 'isolation' + + def configure(self, options, conf): + """Configure plugin. + """ + Plugin.configure(self, options, conf) + self._mod_stack = [] + + def beforeContext(self): + """Copy sys.modules onto my mod stack + """ + mods = sys.modules.copy() + self._mod_stack.append(mods) + + def afterContext(self): + """Pop my mod stack and restore sys.modules to the state + it was in when mod stack was pushed. + """ + mods = self._mod_stack.pop() + to_del = [ m for m in sys.modules.keys() if m not in mods ] + if to_del: + log.debug('removing sys modules entries: %s', to_del) + for mod in to_del: + del sys.modules[mod] + sys.modules.update(mods) + + def loadTestsFromNames(self, names, module=None): + """Create a lazy suite that calls beforeContext and afterContext + around each name. The side-effect of this is that full context + fixtures will be set up and torn down around each test named. + """ + # Fast path for when we don't care + if not names or len(names) == 1: + return + loader = self.loader + plugins = self.conf.plugins + def lazy(): + for name in names: + plugins.beforeContext() + yield loader.loadTestsFromName(name, module=module) + plugins.afterContext() + return (loader.suiteClass(lazy), []) + + def prepareTestLoader(self, loader): + """Get handle on test loader so we can use it in loadTestsFromNames. 
+ """ + self.loader = loader + diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py b/scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py new file mode 100755 index 00000000..4c9a79f6 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py @@ -0,0 +1,245 @@ +""" +This plugin captures logging statements issued during test execution. When an +error or failure occurs, the captured log messages are attached to the running +test in the test.capturedLogging attribute, and displayed with the error failure +output. It is enabled by default but can be turned off with the option +``--nologcapture``. + +You can filter captured logging statements with the ``--logging-filter`` option. +If set, it specifies which logger(s) will be captured; loggers that do not match +will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp`` +will ensure that only statements logged via sqlalchemy.engine, myapp +or myapp.foo.bar logger will be logged. + +You can remove other installed logging handlers with the +``--logging-clear-handlers`` option. +""" + +import logging +from logging import Handler +import threading + +from nose.plugins.base import Plugin +from nose.util import anyp, ln, safe_str + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +log = logging.getLogger(__name__) + +class FilterSet(object): + def __init__(self, filter_components): + self.inclusive, self.exclusive = self._partition(filter_components) + + # @staticmethod + def _partition(components): + inclusive, exclusive = [], [] + for component in components: + if component.startswith('-'): + exclusive.append(component[1:]) + else: + inclusive.append(component) + return inclusive, exclusive + _partition = staticmethod(_partition) + + def allow(self, record): + """returns whether this record should be printed""" + if not self: + # nothing to filter + return True + return self._allow(record) and not self._deny(record) + + # @staticmethod + def _any_match(matchers, record): + """return the bool of whether `record` starts with + any item in `matchers`""" + def record_matches_key(key): + return record == key or record.startswith(key + '.') + return anyp(bool, map(record_matches_key, matchers)) + _any_match = staticmethod(_any_match) + + def _allow(self, record): + if not self.inclusive: + return True + return self._any_match(self.inclusive, record) + + def _deny(self, record): + if not self.exclusive: + return False + return self._any_match(self.exclusive, record) + + +class MyMemoryHandler(Handler): + def __init__(self, logformat, logdatefmt, filters): + Handler.__init__(self) + fmt = logging.Formatter(logformat, logdatefmt) + self.setFormatter(fmt) + self.filterset = FilterSet(filters) + self.buffer = [] + def emit(self, record): + self.buffer.append(self.format(record)) + def flush(self): + pass # do nothing + def truncate(self): + self.buffer = [] + def filter(self, record): + if self.filterset.allow(record.name): + return Handler.filter(self, record) + def __getstate__(self): + state = self.__dict__.copy() + del state['lock'] + return state + def __setstate__(self, state): + self.__dict__.update(state) + self.lock = threading.RLock() + + +class LogCapture(Plugin): + """ + Log capture plugin. Enabled by default. Disable with --nologcapture. + This plugin captures logging statements issued during test execution, + appending any output captured to the error or failure output, + should the test fail or raise an error. 
+ """ + enabled = True + env_opt = 'NOSE_NOLOGCAPTURE' + name = 'logcapture' + score = 500 + logformat = '%(name)s: %(levelname)s: %(message)s' + logdatefmt = None + clear = False + filters = ['-nose'] + + def options(self, parser, env): + """Register commandline options. + """ + parser.add_option( + "--nologcapture", action="store_false", + default=not env.get(self.env_opt), dest="logcapture", + help="Disable logging capture plugin. " + "Logging configuration will be left intact." + " [NOSE_NOLOGCAPTURE]") + parser.add_option( + "--logging-format", action="store", dest="logcapture_format", + default=env.get('NOSE_LOGFORMAT') or self.logformat, + metavar="FORMAT", + help="Specify custom format to print statements. " + "Uses the same format as used by standard logging handlers." + " [NOSE_LOGFORMAT]") + parser.add_option( + "--logging-datefmt", action="store", dest="logcapture_datefmt", + default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt, + metavar="FORMAT", + help="Specify custom date/time format to print statements. " + "Uses the same format as used by standard logging handlers." + " [NOSE_LOGDATEFMT]") + parser.add_option( + "--logging-filter", action="store", dest="logcapture_filters", + default=env.get('NOSE_LOGFILTER'), + metavar="FILTER", + help="Specify which statements to filter in/out. " + "By default, everything is captured. If the output is too" + " verbose,\nuse this option to filter out needless output.\n" + "Example: filter=foo will capture statements issued ONLY to\n" + " foo or foo.what.ever.sub but not foobar or other logger.\n" + "Specify multiple loggers with comma: filter=foo,bar,baz.\n" + "If any logger name is prefixed with a minus, eg filter=-foo,\n" + "it will be excluded rather than included. Default: " + "exclude logging messages from nose itself (-nose)." + " [NOSE_LOGFILTER]\n") + parser.add_option( + "--logging-clear-handlers", action="store_true", + default=False, dest="logcapture_clear", + help="Clear all other logging handlers") + parser.add_option( + "--logging-level", action="store", + default='NOTSET', dest="logcapture_level", + help="Set the log level to capture") + + def configure(self, options, conf): + """Configure plugin. 
+ """ + self.conf = conf + # Disable if explicitly disabled, or if logging is + # configured via logging config file + if not options.logcapture or conf.loggingConfig: + self.enabled = False + self.logformat = options.logcapture_format + self.logdatefmt = options.logcapture_datefmt + self.clear = options.logcapture_clear + self.loglevel = options.logcapture_level + if options.logcapture_filters: + self.filters = options.logcapture_filters.split(',') + + def setupLoghandler(self): + # setup our handler with root logger + root_logger = logging.getLogger() + if self.clear: + if hasattr(root_logger, "handlers"): + for handler in root_logger.handlers: + root_logger.removeHandler(handler) + for logger in logging.Logger.manager.loggerDict.values(): + if hasattr(logger, "handlers"): + for handler in logger.handlers: + logger.removeHandler(handler) + # make sure there isn't one already + # you can't simply use "if self.handler not in root_logger.handlers" + # since at least in unit tests this doesn't work -- + # LogCapture() is instantiated for each test case while root_logger + # is module global + # so we always add new MyMemoryHandler instance + for handler in root_logger.handlers[:]: + if isinstance(handler, MyMemoryHandler): + root_logger.handlers.remove(handler) + root_logger.addHandler(self.handler) + # to make sure everything gets captured + loglevel = getattr(self, "loglevel", "NOTSET") + root_logger.setLevel(getattr(logging, loglevel)) + + def begin(self): + """Set up logging handler before test run begins. + """ + self.start() + + def start(self): + self.handler = MyMemoryHandler(self.logformat, self.logdatefmt, + self.filters) + self.setupLoghandler() + + def end(self): + pass + + def beforeTest(self, test): + """Clear buffers and handlers before test. + """ + self.setupLoghandler() + + def afterTest(self, test): + """Clear buffers after test. + """ + self.handler.truncate() + + def formatFailure(self, test, err): + """Add captured log messages to failure output. + """ + return self.formatError(test, err) + + def formatError(self, test, err): + """Add captured log messages to error output. + """ + # logic flow copied from Capture.formatError + test.capturedLogging = records = self.formatLogRecords() + if not records: + return err + ec, ev, tb = err + return (ec, self.addCaptureToErr(ev, records), tb) + + def formatLogRecords(self): + return map(safe_str, self.handler.buffer) + + def addCaptureToErr(self, ev, records): + return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \ + records + \ + [ln('>> end captured logging <<')]) diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/manager.py b/scripts/external_libs/nose-1.3.4/nose/plugins/manager.py new file mode 100755 index 00000000..4d2ed22b --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/manager.py @@ -0,0 +1,460 @@ +""" +Plugin Manager +-------------- + +A plugin manager class is used to load plugins, manage the list of +loaded plugins, and proxy calls to those plugins. + +The plugin managers provided with nose are: + +:class:`PluginManager` + This manager doesn't implement loadPlugins, so it can only work + with a static list of plugins. + +:class:`BuiltinPluginManager` + This manager loads plugins referenced in ``nose.plugins.builtin``. + +:class:`EntryPointPluginManager` + This manager uses setuptools entrypoints to load plugins. + +:class:`ExtraPluginsPluginManager` + This manager loads extra plugins specified with the keyword + `addplugins`. 
+ +:class:`DefaultPluginMananger` + This is the manager class that will be used by default. If + setuptools is installed, it is a subclass of + :class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`; + otherwise, an alias to :class:`BuiltinPluginManager`. + +:class:`RestrictedPluginManager` + This manager is for use in test runs where some plugin calls are + not available, such as runs started with ``python setup.py test``, + where the test runner is the default unittest :class:`TextTestRunner`. It + is a subclass of :class:`DefaultPluginManager`. + +Writing a plugin manager +======================== + +If you want to load plugins via some other means, you can write a +plugin manager and pass an instance of your plugin manager class when +instantiating the :class:`nose.config.Config` instance that you pass to +:class:`TestProgram` (or :func:`main` or :func:`run`). + +To implement your plugin loading scheme, implement ``loadPlugins()``, +and in that method, call ``addPlugin()`` with an instance of each plugin +you wish to make available. Make sure to call +``super(self).loadPlugins()`` as well if have subclassed a manager +other than ``PluginManager``. + +""" +import inspect +import logging +import os +import sys +from itertools import chain as iterchain +from warnings import warn +import nose.config +from nose.failure import Failure +from nose.plugins.base import IPluginInterface +from nose.pyversion import sort_list + +try: + import cPickle as pickle +except: + import pickle +try: + from cStringIO import StringIO +except: + from StringIO import StringIO + + +__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager', + 'BuiltinPluginManager', 'RestrictedPluginManager'] + +log = logging.getLogger(__name__) + + +class PluginProxy(object): + """Proxy for plugin calls. Essentially a closure bound to the + given call and plugin list. + + The plugin proxy also must be bound to a particular plugin + interface specification, so that it knows what calls are available + and any special handling that is required for each call. + """ + interface = IPluginInterface + def __init__(self, call, plugins): + try: + self.method = getattr(self.interface, call) + except AttributeError: + raise AttributeError("%s is not a valid %s method" + % (call, self.interface.__name__)) + self.call = self.makeCall(call) + self.plugins = [] + for p in plugins: + self.addPlugin(p, call) + + def __call__(self, *arg, **kw): + return self.call(*arg, **kw) + + def addPlugin(self, plugin, call): + """Add plugin to my list of plugins to call, if it has the attribute + I'm bound to. + """ + meth = getattr(plugin, call, None) + if meth is not None: + if call == 'loadTestsFromModule' and \ + len(inspect.getargspec(meth)[0]) == 2: + orig_meth = meth + meth = lambda module, path, **kwargs: orig_meth(module) + self.plugins.append((plugin, meth)) + + def makeCall(self, call): + if call == 'loadTestsFromNames': + # special case -- load tests from names behaves somewhat differently + # from other chainable calls, because plugins return a tuple, only + # part of which can be chained to the next plugin. 
+ return self._loadTestsFromNames + + meth = self.method + if getattr(meth, 'generative', False): + # call all plugins and yield a flattened iterator of their results + return lambda *arg, **kw: list(self.generate(*arg, **kw)) + elif getattr(meth, 'chainable', False): + return self.chain + else: + # return a value from the first plugin that returns non-None + return self.simple + + def chain(self, *arg, **kw): + """Call plugins in a chain, where the result of each plugin call is + sent to the next plugin as input. The final output result is returned. + """ + result = None + # extract the static arguments (if any) from arg so they can + # be passed to each plugin call in the chain + static = [a for (static, a) + in zip(getattr(self.method, 'static_args', []), arg) + if static] + for p, meth in self.plugins: + result = meth(*arg, **kw) + arg = static[:] + arg.append(result) + return result + + def generate(self, *arg, **kw): + """Call all plugins, yielding each item in each non-None result. + """ + for p, meth in self.plugins: + result = None + try: + result = meth(*arg, **kw) + if result is not None: + for r in result: + yield r + except (KeyboardInterrupt, SystemExit): + raise + except: + exc = sys.exc_info() + yield Failure(*exc) + continue + + def simple(self, *arg, **kw): + """Call all plugins, returning the first non-None result. + """ + for p, meth in self.plugins: + result = meth(*arg, **kw) + if result is not None: + return result + + def _loadTestsFromNames(self, names, module=None): + """Chainable but not quite normal. Plugins return a tuple of + (tests, names) after processing the names. The tests are added + to a suite that is accumulated throughout the full call, while + names are input for the next plugin in the chain. + """ + suite = [] + for p, meth in self.plugins: + result = meth(names, module=module) + if result is not None: + suite_part, names = result + if suite_part: + suite.extend(suite_part) + return suite, names + + +class NoPlugins(object): + """Null Plugin manager that has no plugins.""" + interface = IPluginInterface + def __init__(self): + self._plugins = self.plugins = () + + def __iter__(self): + return () + + def _doNothing(self, *args, **kwds): + pass + + def _emptyIterator(self, *args, **kwds): + return () + + def __getattr__(self, call): + method = getattr(self.interface, call) + if getattr(method, "generative", False): + return self._emptyIterator + else: + return self._doNothing + + def addPlugin(self, plug): + raise NotImplementedError() + + def addPlugins(self, plugins): + raise NotImplementedError() + + def configure(self, options, config): + pass + + def loadPlugins(self): + pass + + def sort(self): + pass + + +class PluginManager(object): + """Base class for plugin managers. PluginManager is intended to be + used only with a static list of plugins. The loadPlugins() implementation + only reloads plugins from _extraplugins to prevent those from being + overridden by a subclass. + + The basic functionality of a plugin manager is to proxy all unknown + attributes through a ``PluginProxy`` to a list of plugins. + + Note that the list of plugins *may not* be changed after the first plugin + call. 
+ """ + proxyClass = PluginProxy + + def __init__(self, plugins=(), proxyClass=None): + self._plugins = [] + self._extraplugins = () + self._proxies = {} + if plugins: + self.addPlugins(plugins) + if proxyClass is not None: + self.proxyClass = proxyClass + + def __getattr__(self, call): + try: + return self._proxies[call] + except KeyError: + proxy = self.proxyClass(call, self._plugins) + self._proxies[call] = proxy + return proxy + + def __iter__(self): + return iter(self.plugins) + + def addPlugin(self, plug): + # allow, for instance, plugins loaded via entry points to + # supplant builtin plugins. + new_name = getattr(plug, 'name', object()) + self._plugins[:] = [p for p in self._plugins + if getattr(p, 'name', None) != new_name] + self._plugins.append(plug) + + def addPlugins(self, plugins=(), extraplugins=()): + """extraplugins are maintained in a separate list and + re-added by loadPlugins() to prevent their being overwritten + by plugins added by a subclass of PluginManager + """ + self._extraplugins = extraplugins + for plug in iterchain(plugins, extraplugins): + self.addPlugin(plug) + + def configure(self, options, config): + """Configure the set of plugins with the given options + and config instance. After configuration, disabled plugins + are removed from the plugins list. + """ + log.debug("Configuring plugins") + self.config = config + cfg = PluginProxy('configure', self._plugins) + cfg(options, config) + enabled = [plug for plug in self._plugins if plug.enabled] + self.plugins = enabled + self.sort() + log.debug("Plugins enabled: %s", enabled) + + def loadPlugins(self): + for plug in self._extraplugins: + self.addPlugin(plug) + + def sort(self): + return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True) + + def _get_plugins(self): + return self._plugins + + def _set_plugins(self, plugins): + self._plugins = [] + self.addPlugins(plugins) + + plugins = property(_get_plugins, _set_plugins, None, + """Access the list of plugins managed by + this plugin manager""") + + +class ZeroNinePlugin: + """Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard. 
+ """ + def __init__(self, plugin): + self.plugin = plugin + + def options(self, parser, env=os.environ): + self.plugin.add_options(parser, env) + + def addError(self, test, err): + if not hasattr(self.plugin, 'addError'): + return + # switch off to addSkip, addDeprecated if those types + from nose.exc import SkipTest, DeprecatedTest + ec, ev, tb = err + if issubclass(ec, SkipTest): + if not hasattr(self.plugin, 'addSkip'): + return + return self.plugin.addSkip(test.test) + elif issubclass(ec, DeprecatedTest): + if not hasattr(self.plugin, 'addDeprecated'): + return + return self.plugin.addDeprecated(test.test) + # add capt + capt = test.capturedOutput + return self.plugin.addError(test.test, err, capt) + + def loadTestsFromFile(self, filename): + if hasattr(self.plugin, 'loadTestsFromPath'): + return self.plugin.loadTestsFromPath(filename) + + def addFailure(self, test, err): + if not hasattr(self.plugin, 'addFailure'): + return + # add capt and tbinfo + capt = test.capturedOutput + tbinfo = test.tbinfo + return self.plugin.addFailure(test.test, err, capt, tbinfo) + + def addSuccess(self, test): + if not hasattr(self.plugin, 'addSuccess'): + return + capt = test.capturedOutput + self.plugin.addSuccess(test.test, capt) + + def startTest(self, test): + if not hasattr(self.plugin, 'startTest'): + return + return self.plugin.startTest(test.test) + + def stopTest(self, test): + if not hasattr(self.plugin, 'stopTest'): + return + return self.plugin.stopTest(test.test) + + def __getattr__(self, val): + return getattr(self.plugin, val) + + +class EntryPointPluginManager(PluginManager): + """Plugin manager that loads plugins from the `nose.plugins` and + `nose.plugins.0.10` entry points. + """ + entry_points = (('nose.plugins.0.10', None), + ('nose.plugins', ZeroNinePlugin)) + + def loadPlugins(self): + """Load plugins by iterating the `nose.plugins` entry point. + """ + from pkg_resources import iter_entry_points + loaded = {} + for entry_point, adapt in self.entry_points: + for ep in iter_entry_points(entry_point): + if ep.name in loaded: + continue + loaded[ep.name] = True + log.debug('%s load plugin %s', self.__class__.__name__, ep) + try: + plugcls = ep.load() + except KeyboardInterrupt: + raise + except Exception, e: + # never want a plugin load to kill the test run + # but we can't log here because the logger is not yet + # configured + warn("Unable to load plugin %s: %s" % (ep, e), + RuntimeWarning) + continue + if adapt: + plug = adapt(plugcls()) + else: + plug = plugcls() + self.addPlugin(plug) + super(EntryPointPluginManager, self).loadPlugins() + + +class BuiltinPluginManager(PluginManager): + """Plugin manager that loads plugins from the list in + `nose.plugins.builtin`. + """ + def loadPlugins(self): + """Load plugins in nose.plugins.builtin + """ + from nose.plugins import builtin + for plug in builtin.plugins: + self.addPlugin(plug()) + super(BuiltinPluginManager, self).loadPlugins() + +try: + import pkg_resources + class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager): + pass + +except ImportError: + class DefaultPluginManager(BuiltinPluginManager): + pass + +class RestrictedPluginManager(DefaultPluginManager): + """Plugin manager that restricts the plugin list to those not + excluded by a list of exclude methods. Any plugin that implements + an excluded method will be removed from the manager's plugin list + after plugins are loaded. 
+ """ + def __init__(self, plugins=(), exclude=(), load=True): + DefaultPluginManager.__init__(self, plugins) + self.load = load + self.exclude = exclude + self.excluded = [] + self._excludedOpts = None + + def excludedOption(self, name): + if self._excludedOpts is None: + from optparse import OptionParser + self._excludedOpts = OptionParser(add_help_option=False) + for plugin in self.excluded: + plugin.options(self._excludedOpts, env={}) + return self._excludedOpts.get_option('--' + name) + + def loadPlugins(self): + if self.load: + DefaultPluginManager.loadPlugins(self) + allow = [] + for plugin in self.plugins: + ok = True + for method in self.exclude: + if hasattr(plugin, method): + ok = False + self.excluded.append(plugin) + break + if ok: + allow.append(plugin) + self.plugins = allow diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py b/scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py new file mode 100755 index 00000000..2cae744a --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py @@ -0,0 +1,835 @@ +""" +Overview +======== + +The multiprocess plugin enables you to distribute your test run among a set of +worker processes that run tests in parallel. This can speed up CPU-bound test +runs (as long as the number of work processeses is around the number of +processors or cores available), but is mainly useful for IO-bound tests that +spend most of their time waiting for data to arrive from someplace else. + +.. note :: + + See :doc:`../doc_tests/test_multiprocess/multiprocess` for + additional documentation and examples. Use of this plugin on python + 2.5 or earlier requires the multiprocessing_ module, also available + from PyPI. + +.. _multiprocessing : http://code.google.com/p/python-multiprocessing/ + +How tests are distributed +========================= + +The ideal case would be to dispatch each test to a worker process +separately. This ideal is not attainable in all cases, however, because many +test suites depend on context (class, module or package) fixtures. + +The plugin can't know (unless you tell it -- see below!) if a context fixture +can be called many times concurrently (is re-entrant), or if it can be shared +among tests running in different processes. Therefore, if a context has +fixtures, the default behavior is to dispatch the entire suite to a worker as +a unit. + +Controlling distribution +^^^^^^^^^^^^^^^^^^^^^^^^ + +There are two context-level variables that you can use to control this default +behavior. + +If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True`` +in the context, and the plugin will dispatch tests in suites bound to that +context as if the context had no fixtures. This means that the fixtures will +execute concurrently and multiple times, typically once per test. + +If a context's fixtures can be shared by tests running in different processes +-- such as a package-level fixture that starts an external http server or +initializes a shared database -- then set ``_multiprocess_shared_ = True`` in +the context. These fixtures will then execute in the primary nose process, and +tests in those contexts will be individually dispatched to run in parallel. + +How results are collected and reported +====================================== + +As each test or suite executes in a worker process, results (failures, errors, +and specially handled exceptions like SkipTest) are collected in that +process. 
When the worker process finishes, it returns results to the main +nose process. There, any progress output is printed (dots!), and the +results from the test run are combined into a consolidated result +set. When results have been received for all dispatched tests, or all +workers have died, the result summary is output as normal. + +Beware! +======= + +Not all test suites will benefit from, or even operate correctly using, this +plugin. For example, CPU-bound tests will run more slowly if you don't have +multiple processors. There are also some differences in plugin +interactions and behaviors due to the way in which tests are dispatched and +loaded. In general, test loading under this plugin operates as if it were +always in directed mode instead of discovered mode. For instance, doctests +in test modules will always be found when using this plugin with the doctest +plugin. + +But the biggest issue you will face is probably concurrency. Unless you +have kept your tests as religiously pure unit tests, with no side-effects, no +ordering issues, and no external dependencies, chances are you will experience +odd, intermittent and unexplainable failures and errors when using this +plugin. This doesn't necessarily mean the plugin is broken; it may mean that +your test suite is not safe for concurrency. + +New Features in 1.1.0 +===================== + +* functions generated by test generators are now added to the worker queue + making them multi-threaded. +* fixed timeout functionality, now functions will be terminated with a + TimedOutException exception when they exceed their execution time. The + worker processes are not terminated. +* added ``--process-restartworker`` option to restart workers once they are + done, this helps control memory usage. Sometimes memory leaks can accumulate + making long runs very difficult. +* added global _instantiate_plugins to configure which plugins are started + on the worker processes. + +""" + +import logging +import os +import sys +import time +import traceback +import unittest +import pickle +import signal +import nose.case +from nose.core import TextTestRunner +from nose import failure +from nose import loader +from nose.plugins.base import Plugin +from nose.pyversion import bytes_ +from nose.result import TextTestResult +from nose.suite import ContextSuite +from nose.util import test_address +try: + # 2.7+ + from unittest.runner import _WritelnDecorator +except ImportError: + from unittest import _WritelnDecorator +from Queue import Empty +from warnings import warn +try: + from cStringIO import StringIO +except ImportError: + import StringIO + +# this is a list of plugin classes that will be checked for and created inside +# each worker process +_instantiate_plugins = None + +log = logging.getLogger(__name__) + +Process = Queue = Pool = Event = Value = Array = None + +# have to inherit KeyboardInterrupt to it will interrupt process properly +class TimedOutException(KeyboardInterrupt): + def __init__(self, value = "Timed Out"): + self.value = value + def __str__(self): + return repr(self.value) + +def _import_mp(): + global Process, Queue, Pool, Event, Value, Array + try: + from multiprocessing import Manager, Process + #prevent the server process created in the manager which holds Python + #objects and allows other processes to manipulate them using proxies + #to interrupt on SIGINT (keyboardinterrupt) so that the communication + #channel between subprocesses and main process is still usable after + #ctrl+C is received in the main process. 
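+        #SIGINT is ignored only while the Manager (and the server process it
+        #spawns) is being created; the previous handler is restored right after.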
+ old=signal.signal(signal.SIGINT, signal.SIG_IGN) + m = Manager() + #reset it back so main process will receive a KeyboardInterrupt + #exception on ctrl+c + signal.signal(signal.SIGINT, old) + Queue, Pool, Event, Value, Array = ( + m.Queue, m.Pool, m.Event, m.Value, m.Array + ) + except ImportError: + warn("multiprocessing module is not available, multiprocess plugin " + "cannot be used", RuntimeWarning) + + +class TestLet: + def __init__(self, case): + try: + self._id = case.id() + except AttributeError: + pass + self._short_description = case.shortDescription() + self._str = str(case) + + def id(self): + return self._id + + def shortDescription(self): + return self._short_description + + def __str__(self): + return self._str + +class MultiProcess(Plugin): + """ + Run tests in multiple processes. Requires processing module. + """ + score = 1000 + status = {} + + def options(self, parser, env): + """ + Register command-line options. + """ + parser.add_option("--processes", action="store", + default=env.get('NOSE_PROCESSES', 0), + dest="multiprocess_workers", + metavar="NUM", + help="Spread test run among this many processes. " + "Set a number equal to the number of processors " + "or cores in your machine for best results. " + "Pass a negative number to have the number of " + "processes automatically set to the number of " + "cores. Passing 0 means to disable parallel " + "testing. Default is 0 unless NOSE_PROCESSES is " + "set. " + "[NOSE_PROCESSES]") + parser.add_option("--process-timeout", action="store", + default=env.get('NOSE_PROCESS_TIMEOUT', 10), + dest="multiprocess_timeout", + metavar="SECONDS", + help="Set timeout for return of results from each " + "test runner process. Default is 10. " + "[NOSE_PROCESS_TIMEOUT]") + parser.add_option("--process-restartworker", action="store_true", + default=env.get('NOSE_PROCESS_RESTARTWORKER', False), + dest="multiprocess_restartworker", + help="If set, will restart each worker process once" + " their tests are done, this helps control memory " + "leaks from killing the system. " + "[NOSE_PROCESS_RESTARTWORKER]") + + def configure(self, options, config): + """ + Configure plugin. + """ + try: + self.status.pop('active') + except KeyError: + pass + if not hasattr(options, 'multiprocess_workers'): + self.enabled = False + return + # don't start inside of a worker process + if config.worker: + return + self.config = config + try: + workers = int(options.multiprocess_workers) + except (TypeError, ValueError): + workers = 0 + if workers: + _import_mp() + if Process is None: + self.enabled = False + return + # Negative number of workers will cause multiprocessing to hang. + # Set the number of workers to the CPU count to avoid this. + if workers < 0: + try: + import multiprocessing + workers = multiprocessing.cpu_count() + except NotImplementedError: + self.enabled = False + return + self.enabled = True + self.config.multiprocess_workers = workers + t = float(options.multiprocess_timeout) + self.config.multiprocess_timeout = t + r = int(options.multiprocess_restartworker) + self.config.multiprocess_restartworker = r + self.status['active'] = True + + def prepareTestLoader(self, loader): + """Remember loader class so MultiProcessTestRunner can instantiate + the right loader. + """ + self.loaderClass = loader.__class__ + + def prepareTestRunner(self, runner): + """Replace test runner with MultiProcessTestRunner. 
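+
+        The replacement reuses the original runner's stream, the configured
+        verbosity, and the loader class remembered by ``prepareTestLoader``.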
+ """ + # replace with our runner class + return MultiProcessTestRunner(stream=runner.stream, + verbosity=self.config.verbosity, + config=self.config, + loaderClass=self.loaderClass) + +def signalhandler(sig, frame): + raise TimedOutException() + +class MultiProcessTestRunner(TextTestRunner): + waitkilltime = 5.0 # max time to wait to terminate a process that does not + # respond to SIGILL + def __init__(self, **kw): + self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader) + super(MultiProcessTestRunner, self).__init__(**kw) + + def collect(self, test, testQueue, tasks, to_teardown, result): + # dispatch and collect results + # put indexes only on queue because tests aren't picklable + for case in self.nextBatch(test): + log.debug("Next batch %s (%s)", case, type(case)) + if (isinstance(case, nose.case.Test) and + isinstance(case.test, failure.Failure)): + log.debug("Case is a Failure") + case(result) # run here to capture the failure + continue + # handle shared fixtures + if isinstance(case, ContextSuite) and case.context is failure.Failure: + log.debug("Case is a Failure") + case(result) # run here to capture the failure + continue + elif isinstance(case, ContextSuite) and self.sharedFixtures(case): + log.debug("%s has shared fixtures", case) + try: + case.setUp() + except (KeyboardInterrupt, SystemExit): + raise + except: + log.debug("%s setup failed", sys.exc_info()) + result.addError(case, sys.exc_info()) + else: + to_teardown.append(case) + if case.factory: + ancestors=case.factory.context.get(case, []) + for an in ancestors[:2]: + #log.debug('reset ancestor %s', an) + if getattr(an, '_multiprocess_shared_', False): + an._multiprocess_can_split_=True + #an._multiprocess_shared_=False + self.collect(case, testQueue, tasks, to_teardown, result) + + else: + test_addr = self.addtask(testQueue,tasks,case) + log.debug("Queued test %s (%s) to %s", + len(tasks), test_addr, testQueue) + + def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result): + currentaddr = Value('c',bytes_('')) + currentstart = Value('d',time.time()) + keyboardCaught = Event() + p = Process(target=runner, + args=(iworker, testQueue, + resultQueue, + currentaddr, + currentstart, + keyboardCaught, + shouldStop, + self.loaderClass, + result.__class__, + pickle.dumps(self.config))) + p.currentaddr = currentaddr + p.currentstart = currentstart + p.keyboardCaught = keyboardCaught + old = signal.signal(signal.SIGILL, signalhandler) + p.start() + signal.signal(signal.SIGILL, old) + return p + + def run(self, test): + """ + Execute the test (which may be a test suite). If the test is a suite, + distribute it out among as many processes as have been configured, at + as fine a level as is possible given the context fixtures defined in + the suite or any sub-suites. 
+ + """ + log.debug("%s.run(%s) (%s)", self, test, os.getpid()) + wrapper = self.config.plugins.prepareTest(test) + if wrapper is not None: + test = wrapper + + # plugins can decorate or capture the output stream + wrapped = self.config.plugins.setOutputStream(self.stream) + if wrapped is not None: + self.stream = wrapped + + testQueue = Queue() + resultQueue = Queue() + tasks = [] + completed = [] + workers = [] + to_teardown = [] + shouldStop = Event() + + result = self._makeResult() + start = time.time() + + self.collect(test, testQueue, tasks, to_teardown, result) + + log.debug("Starting %s workers", self.config.multiprocess_workers) + for i in range(self.config.multiprocess_workers): + p = self.startProcess(i, testQueue, resultQueue, shouldStop, result) + workers.append(p) + log.debug("Started worker process %s", i+1) + + total_tasks = len(tasks) + # need to keep track of the next time to check for timeouts in case + # more than one process times out at the same time. + nexttimeout=self.config.multiprocess_timeout + thrownError = None + + try: + while tasks: + log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs", + len(completed), total_tasks,nexttimeout) + try: + iworker, addr, newtask_addrs, batch_result = resultQueue.get( + timeout=nexttimeout) + log.debug('Results received for worker %d, %s, new tasks: %d', + iworker,addr,len(newtask_addrs)) + try: + try: + tasks.remove(addr) + except ValueError: + log.warn('worker %s failed to remove from tasks: %s', + iworker,addr) + total_tasks += len(newtask_addrs) + tasks.extend(newtask_addrs) + except KeyError: + log.debug("Got result for unknown task? %s", addr) + log.debug("current: %s",str(list(tasks)[0])) + else: + completed.append([addr,batch_result]) + self.consolidate(result, batch_result) + if (self.config.stopOnError + and not result.wasSuccessful()): + # set the stop condition + shouldStop.set() + break + if self.config.multiprocess_restartworker: + log.debug('joining worker %s',iworker) + # wait for working, but not that important if worker + # cannot be joined in fact, for workers that add to + # testQueue, they will not terminate until all their + # items are read + workers[iworker].join(timeout=1) + if not shouldStop.is_set() and not testQueue.empty(): + log.debug('starting new process on worker %s',iworker) + workers[iworker] = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result) + except Empty: + log.debug("Timed out with %s tasks pending " + "(empty testQueue=%r): %s", + len(tasks),testQueue.empty(),str(tasks)) + any_alive = False + for iworker, w in enumerate(workers): + if w.is_alive(): + worker_addr = bytes_(w.currentaddr.value,'ascii') + timeprocessing = time.time() - w.currentstart.value + if ( len(worker_addr) == 0 + and timeprocessing > self.config.multiprocess_timeout-0.1): + log.debug('worker %d has finished its work item, ' + 'but is not exiting? do we wait for it?', + iworker) + else: + any_alive = True + if (len(worker_addr) > 0 + and timeprocessing > self.config.multiprocess_timeout-0.1): + log.debug('timed out worker %s: %s', + iworker,worker_addr) + w.currentaddr.value = bytes_('') + # If the process is in C++ code, sending a SIGILL + # might not send a python KeybordInterrupt exception + # therefore, send multiple signals until an + # exception is caught. 
If this takes too long, then + # terminate the process + w.keyboardCaught.clear() + startkilltime = time.time() + while not w.keyboardCaught.is_set() and w.is_alive(): + if time.time()-startkilltime > self.waitkilltime: + # have to terminate... + log.error("terminating worker %s",iworker) + w.terminate() + # there is a small probability that the + # terminated process might send a result, + # which has to be specially handled or + # else processes might get orphaned. + workers[iworker] = w = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result) + break + os.kill(w.pid, signal.SIGILL) + time.sleep(0.1) + if not any_alive and testQueue.empty(): + log.debug("All workers dead") + break + nexttimeout=self.config.multiprocess_timeout + for w in workers: + if w.is_alive() and len(w.currentaddr.value) > 0: + timeprocessing = time.time()-w.currentstart.value + if timeprocessing <= self.config.multiprocess_timeout: + nexttimeout = min(nexttimeout, + self.config.multiprocess_timeout-timeprocessing) + log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks)) + + except (KeyboardInterrupt, SystemExit), e: + log.info('parent received ctrl-c when waiting for test results') + thrownError = e + #resultQueue.get(False) + + result.addError(test, sys.exc_info()) + + try: + for case in to_teardown: + log.debug("Tearing down shared fixtures for %s", case) + try: + case.tearDown() + except (KeyboardInterrupt, SystemExit): + raise + except: + result.addError(case, sys.exc_info()) + + stop = time.time() + + # first write since can freeze on shutting down processes + result.printErrors() + result.printSummary(start, stop) + self.config.plugins.finalize(result) + + if thrownError is None: + log.debug("Tell all workers to stop") + for w in workers: + if w.is_alive(): + testQueue.put('STOP', block=False) + + # wait for the workers to end + for iworker,worker in enumerate(workers): + if worker.is_alive(): + log.debug('joining worker %s',iworker) + worker.join() + if worker.is_alive(): + log.debug('failed to join worker %s',iworker) + except (KeyboardInterrupt, SystemExit): + log.info('parent received ctrl-c when shutting down: stop all processes') + for worker in workers: + if worker.is_alive(): + worker.terminate() + + if thrownError: raise thrownError + else: raise + + return result + + def addtask(testQueue,tasks,case): + arg = None + if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'): + # this removes the top level descriptor and allows real function + # name to be returned + case.test.descriptor = None + arg = case.test.arg + test_addr = MultiProcessTestRunner.address(case) + testQueue.put((test_addr,arg), block=False) + if arg is not None: + test_addr += str(arg) + if tasks is not None: + tasks.append(test_addr) + return test_addr + addtask = staticmethod(addtask) + + def address(case): + if hasattr(case, 'address'): + file, mod, call = case.address() + elif hasattr(case, 'context'): + file, mod, call = test_address(case.context) + else: + raise Exception("Unable to convert %s to address" % case) + parts = [] + if file is None: + if mod is None: + raise Exception("Unaddressable case %s" % case) + else: + parts.append(mod) + else: + # strip __init__.py(c) from end of file part + # if present, having it there confuses loader + dirname, basename = os.path.split(file) + if basename.startswith('__init__'): + file = dirname + parts.append(file) + if call is not None: + parts.append(call) + return ':'.join(map(str, parts)) + address = staticmethod(address) + + def 
nextBatch(self, test): + # allows tests or suites to mark themselves as not safe + # for multiprocess execution + if hasattr(test, 'context'): + if not getattr(test.context, '_multiprocess_', True): + return + + if ((isinstance(test, ContextSuite) + and test.hasFixtures(self.checkCanSplit)) + or not getattr(test, 'can_split', True) + or not isinstance(test, unittest.TestSuite)): + # regular test case, or a suite with context fixtures + + # special case: when run like nosetests path/to/module.py + # the top-level suite has only one item, and it shares + # the same context as that item. In that case, we want the + # item, not the top-level suite + if isinstance(test, ContextSuite): + contained = list(test) + if (len(contained) == 1 + and getattr(contained[0], + 'context', None) == test.context): + test = contained[0] + yield test + else: + # Suite is without fixtures at this level; but it may have + # fixtures at any deeper level, so we need to examine it all + # the way down to the case level + for case in test: + for batch in self.nextBatch(case): + yield batch + + def checkCanSplit(context, fixt): + """ + Callback that we use to check whether the fixtures found in a + context or ancestor are ones we care about. + + Contexts can tell us that their fixtures are reentrant by setting + _multiprocess_can_split_. So if we see that, we return False to + disregard those fixtures. + """ + if not fixt: + return False + if getattr(context, '_multiprocess_can_split_', False): + return False + return True + checkCanSplit = staticmethod(checkCanSplit) + + def sharedFixtures(self, case): + context = getattr(case, 'context', None) + if not context: + return False + return getattr(context, '_multiprocess_shared_', False) + + def consolidate(self, result, batch_result): + log.debug("batch result is %s" , batch_result) + try: + output, testsRun, failures, errors, errorClasses = batch_result + except ValueError: + log.debug("result in unexpected format %s", batch_result) + failure.Failure(*sys.exc_info())(result) + return + self.stream.write(output) + result.testsRun += testsRun + result.failures.extend(failures) + result.errors.extend(errors) + for key, (storage, label, isfail) in errorClasses.items(): + if key not in result.errorClasses: + # Ordinarily storage is result attribute + # but it's only processed through the errorClasses + # dict, so it's ok to fake it here + result.errorClasses[key] = ([], label, isfail) + mystorage, _junk, _junk = result.errorClasses[key] + mystorage.extend(storage) + log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun) + + +def runner(ix, testQueue, resultQueue, currentaddr, currentstart, + keyboardCaught, shouldStop, loaderClass, resultClass, config): + try: + try: + return __runner(ix, testQueue, resultQueue, currentaddr, currentstart, + keyboardCaught, shouldStop, loaderClass, resultClass, config) + except KeyboardInterrupt: + log.debug('Worker %s keyboard interrupt, stopping',ix) + except Empty: + log.debug("Worker %s timed out waiting for tasks", ix) + +def __runner(ix, testQueue, resultQueue, currentaddr, currentstart, + keyboardCaught, shouldStop, loaderClass, resultClass, config): + + config = pickle.loads(config) + dummy_parser = config.parserClass() + if _instantiate_plugins is not None: + for pluginclass in _instantiate_plugins: + plugin = pluginclass() + plugin.addOptions(dummy_parser,{}) + config.plugins.addPlugin(plugin) + config.plugins.configure(config.options,config) + config.plugins.begin() + log.debug("Worker %s executing, pid=%d", 
ix,os.getpid()) + loader = loaderClass(config=config) + loader.suiteClass.suiteClass = NoSharedFixtureContextSuite + + def get(): + return testQueue.get(timeout=config.multiprocess_timeout) + + def makeResult(): + stream = _WritelnDecorator(StringIO()) + result = resultClass(stream, descriptions=1, + verbosity=config.verbosity, + config=config) + plug_result = config.plugins.prepareTestResult(result) + if plug_result: + return plug_result + return result + + def batch(result): + failures = [(TestLet(c), err) for c, err in result.failures] + errors = [(TestLet(c), err) for c, err in result.errors] + errorClasses = {} + for key, (storage, label, isfail) in result.errorClasses.items(): + errorClasses[key] = ([(TestLet(c), err) for c, err in storage], + label, isfail) + return ( + result.stream.getvalue(), + result.testsRun, + failures, + errors, + errorClasses) + for test_addr, arg in iter(get, 'STOP'): + if shouldStop.is_set(): + log.exception('Worker %d STOPPED',ix) + break + result = makeResult() + test = loader.loadTestsFromNames([test_addr]) + test.testQueue = testQueue + test.tasks = [] + test.arg = arg + log.debug("Worker %s Test is %s (%s)", ix, test_addr, test) + try: + if arg is not None: + test_addr = test_addr + str(arg) + currentaddr.value = bytes_(test_addr) + currentstart.value = time.time() + test(result) + currentaddr.value = bytes_('') + resultQueue.put((ix, test_addr, test.tasks, batch(result))) + except KeyboardInterrupt, e: #TimedOutException: + timeout = isinstance(e, TimedOutException) + if timeout: + keyboardCaught.set() + if len(currentaddr.value): + if timeout: + msg = 'Worker %s timed out, failing current test %s' + else: + msg = 'Worker %s keyboard interrupt, failing current test %s' + log.exception(msg,ix,test_addr) + currentaddr.value = bytes_('') + failure.Failure(*sys.exc_info())(result) + resultQueue.put((ix, test_addr, test.tasks, batch(result))) + else: + if timeout: + msg = 'Worker %s test %s timed out' + else: + msg = 'Worker %s test %s keyboard interrupt' + log.debug(msg,ix,test_addr) + resultQueue.put((ix, test_addr, test.tasks, batch(result))) + if not timeout: + raise + except SystemExit: + currentaddr.value = bytes_('') + log.exception('Worker %s system exit',ix) + raise + except: + currentaddr.value = bytes_('') + log.exception("Worker %s error running test or returning " + "results",ix) + failure.Failure(*sys.exc_info())(result) + resultQueue.put((ix, test_addr, test.tasks, batch(result))) + if config.multiprocess_restartworker: + break + log.debug("Worker %s ending", ix) + + +class NoSharedFixtureContextSuite(ContextSuite): + """ + Context suite that never fires shared fixtures. + + When a context sets _multiprocess_shared_, fixtures in that context + are executed by the main process. Using this suite class prevents them + from executing in the runner process as well. + + """ + testQueue = None + tasks = None + arg = None + def setupContext(self, context): + if getattr(context, '_multiprocess_shared_', False): + return + super(NoSharedFixtureContextSuite, self).setupContext(context) + + def teardownContext(self, context): + if getattr(context, '_multiprocess_shared_', False): + return + super(NoSharedFixtureContextSuite, self).teardownContext(context) + def run(self, result): + """Run tests in suite inside of suite fixtures. 
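+
+        The suite's ``arg``, ``testQueue`` and ``tasks`` attributes are handed
+        to each contained test before it runs.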
+ """ + # proxy the result for myself + log.debug("suite %s (%s) run called, tests: %s", + id(self), self, self._tests) + if self.resultProxy: + result, orig = self.resultProxy(result, self), result + else: + result, orig = result, result + try: + #log.debug('setUp for %s', id(self)); + self.setUp() + except KeyboardInterrupt: + raise + except: + self.error_context = 'setup' + result.addError(self, self._exc_info()) + return + try: + for test in self._tests: + if (isinstance(test,nose.case.Test) + and self.arg is not None): + test.test.arg = self.arg + else: + test.arg = self.arg + test.testQueue = self.testQueue + test.tasks = self.tasks + if result.shouldStop: + log.debug("stopping") + break + # each nose.case.Test will create its own result proxy + # so the cases need the original result, to avoid proxy + # chains + #log.debug('running test %s in suite %s', test, self); + try: + test(orig) + except KeyboardInterrupt, e: + timeout = isinstance(e, TimedOutException) + if timeout: + msg = 'Timeout when running test %s in suite %s' + else: + msg = 'KeyboardInterrupt when running test %s in suite %s' + log.debug(msg, test, self) + err = (TimedOutException,TimedOutException(str(test)), + sys.exc_info()[2]) + test.config.plugins.addError(test,err) + orig.addError(test,err) + if not timeout: + raise + finally: + self.has_run = True + try: + #log.debug('tearDown for %s', id(self)); + self.tearDown() + except KeyboardInterrupt: + raise + except: + self.error_context = 'teardown' + result.addError(self, self._exc_info()) diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py b/scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py new file mode 100755 index 00000000..76d0d2c4 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py @@ -0,0 +1,416 @@ +""" +Testing Plugins +=============== + +The plugin interface is well-tested enough to safely unit test your +use of its hooks with some level of confidence. However, there is also +a mixin for unittest.TestCase called PluginTester that's designed to +test plugins in their native runtime environment. + +Here's a simple example with a do-nothing plugin and a composed suite. + + >>> import unittest + >>> from nose.plugins import Plugin, PluginTester + >>> class FooPlugin(Plugin): + ... pass + >>> class TestPluginFoo(PluginTester, unittest.TestCase): + ... activate = '--with-foo' + ... plugins = [FooPlugin()] + ... def test_foo(self): + ... for line in self.output: + ... # i.e. check for patterns + ... pass + ... + ... # or check for a line containing ... + ... assert "ValueError" in self.output + ... def makeSuite(self): + ... class TC(unittest.TestCase): + ... def runTest(self): + ... raise ValueError("I hate foo") + ... return [TC('runTest')] + ... + >>> res = unittest.TestResult() + >>> case = TestPluginFoo('test_foo') + >>> _ = case(res) + >>> res.errors + [] + >>> res.failures + [] + >>> res.wasSuccessful() + True + >>> res.testsRun + 1 + +And here is a more complex example of testing a plugin that has extra +arguments and reads environment variables. + + >>> import unittest, os + >>> from nose.plugins import Plugin, PluginTester + >>> class FancyOutputter(Plugin): + ... name = "fancy" + ... def configure(self, options, conf): + ... Plugin.configure(self, options, conf) + ... if not self.enabled: + ... return + ... self.fanciness = 1 + ... if options.more_fancy: + ... self.fanciness = 2 + ... if 'EVEN_FANCIER' in self.env: + ... self.fanciness = 3 + ... + ... 
def options(self, parser, env=os.environ): + ... self.env = env + ... parser.add_option('--more-fancy', action='store_true') + ... Plugin.options(self, parser, env=env) + ... + ... def report(self, stream): + ... stream.write("FANCY " * self.fanciness) + ... + >>> class TestFancyOutputter(PluginTester, unittest.TestCase): + ... activate = '--with-fancy' # enables the plugin + ... plugins = [FancyOutputter()] + ... args = ['--more-fancy'] + ... env = {'EVEN_FANCIER': '1'} + ... + ... def test_fancy_output(self): + ... assert "FANCY FANCY FANCY" in self.output, ( + ... "got: %s" % self.output) + ... def makeSuite(self): + ... class TC(unittest.TestCase): + ... def runTest(self): + ... raise ValueError("I hate fancy stuff") + ... return [TC('runTest')] + ... + >>> res = unittest.TestResult() + >>> case = TestFancyOutputter('test_fancy_output') + >>> _ = case(res) + >>> res.errors + [] + >>> res.failures + [] + >>> res.wasSuccessful() + True + >>> res.testsRun + 1 + +""" + +import re +import sys +from warnings import warn + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +__all__ = ['PluginTester', 'run'] + +from os import getpid +class MultiProcessFile(object): + """ + helper for testing multiprocessing + + multiprocessing poses a problem for doctests, since the strategy + of replacing sys.stdout/stderr with file-like objects then + inspecting the results won't work: the child processes will + write to the objects, but the data will not be reflected + in the parent doctest-ing process. + + The solution is to create file-like objects which will interact with + multiprocessing in a more desirable way. + + All processes can write to this object, but only the creator can read. + This allows the testing system to see a unified picture of I/O. + """ + def __init__(self): + # per advice at: + # http://docs.python.org/library/multiprocessing.html#all-platforms + self.__master = getpid() + self.__queue = Manager().Queue() + self.__buffer = StringIO() + self.softspace = 0 + + def buffer(self): + if getpid() != self.__master: + return + + from Queue import Empty + from collections import defaultdict + cache = defaultdict(str) + while True: + try: + pid, data = self.__queue.get_nowait() + except Empty: + break + if pid == (): + #show parent output after children + #this is what users see, usually + pid = ( 1e100, ) # googol! + cache[pid] += data + for pid in sorted(cache): + #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG + self.__buffer.write( cache[pid] ) + def write(self, data): + # note that these pids are in the form of current_process()._identity + # rather than OS pids + from multiprocessing import current_process + pid = current_process()._identity + self.__queue.put((pid, data)) + def __iter__(self): + "getattr doesn't work for iter()" + self.buffer() + return self.__buffer + def seek(self, offset, whence=0): + self.buffer() + return self.__buffer.seek(offset, whence) + def getvalue(self): + self.buffer() + return self.__buffer.getvalue() + def __getattr__(self, attr): + return getattr(self.__buffer, attr) + +try: + from multiprocessing import Manager + Buffer = MultiProcessFile +except ImportError: + Buffer = StringIO + +class PluginTester(object): + """A mixin for testing nose plugins in their runtime environment. + + Subclass this and mix in unittest.TestCase to run integration/functional + tests on your plugin. 
When setUp() is called, the stub test suite is + executed with your plugin so that during an actual test you can inspect the + artifacts of how your plugin interacted with the stub test suite. + + - activate + + - the argument to send nosetests to activate the plugin + + - suitepath + + - if set, this is the path of the suite to test. Otherwise, you + will need to use the hook, makeSuite() + + - plugins + + - the list of plugins to make available during the run. Note + that this does not mean these plugins will be *enabled* during + the run -- only the plugins enabled by the activate argument + or other settings in argv or env will be enabled. + + - args + + - a list of arguments to add to the nosetests command, in addition to + the activate argument + + - env + + - optional dict of environment variables to send nosetests + + """ + activate = None + suitepath = None + args = None + env = {} + argv = None + plugins = [] + ignoreFiles = None + + def makeSuite(self): + """returns a suite object of tests to run (unittest.TestSuite()) + + If self.suitepath is None, this must be implemented. The returned suite + object will be executed with all plugins activated. It may return + None. + + Here is an example of a basic suite object you can return :: + + >>> import unittest + >>> class SomeTest(unittest.TestCase): + ... def runTest(self): + ... raise ValueError("Now do something, plugin!") + ... + >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS + <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]> + + """ + raise NotImplementedError + + def _execPlugin(self): + """execute the plugin on the internal test suite. + """ + from nose.config import Config + from nose.core import TestProgram + from nose.plugins.manager import PluginManager + + suite = None + stream = Buffer() + conf = Config(env=self.env, + stream=stream, + plugins=PluginManager(plugins=self.plugins)) + if self.ignoreFiles is not None: + conf.ignoreFiles = self.ignoreFiles + if not self.suitepath: + suite = self.makeSuite() + + self.nose = TestProgram(argv=self.argv, config=conf, suite=suite, + exit=False) + self.output = AccessDecorator(stream) + + def setUp(self): + """runs nosetests with the specified test suite, all plugins + activated. + """ + self.argv = ['nosetests', self.activate] + if self.args: + self.argv.extend(self.args) + if self.suitepath: + self.argv.append(self.suitepath) + + self._execPlugin() + + +class AccessDecorator(object): + stream = None + _buf = None + def __init__(self, stream): + self.stream = stream + stream.seek(0) + self._buf = stream.read() + stream.seek(0) + def __contains__(self, val): + return val in self._buf + def __iter__(self): + return iter(self.stream) + def __str__(self): + return self._buf + + +def blankline_separated_blocks(text): + "a bunch of === characters is also considered a blank line" + block = [] + for line in text.splitlines(True): + block.append(line) + line = line.strip() + if not line or line.startswith('===') and not line.strip('='): + yield "".join(block) + block = [] + if block: + yield "".join(block) + + +def remove_stack_traces(out): + # this regexp taken from Python 2.5's doctest + traceback_re = re.compile(r""" + # Grab the traceback header. Different versions of Python have + # said different things on the first traceback line. + ^(?P<hdr> Traceback\ \( + (?: most\ recent\ call\ last + | innermost\ last + ) \) : + ) + \s* $ # toss trailing whitespace on the header. + (?P<stack> .*?) # don't blink: absorb stuff until... 
+ ^(?=\w) # a line *starts* with alphanum. + .*?(?P<exception> \w+ ) # exception name + (?P<msg> [:\n] .*) # the rest + """, re.VERBOSE | re.MULTILINE | re.DOTALL) + blocks = [] + for block in blankline_separated_blocks(out): + blocks.append(traceback_re.sub(r"\g<hdr>\n...\n\g<exception>\g<msg>", block)) + return "".join(blocks) + + +def simplify_warnings(out): + warn_re = re.compile(r""" + # Cut the file and line no, up to the warning name + ^.*:\d+:\s + (?P<category>\w+): \s+ # warning category + (?P<detail>.+) $ \n? # warning message + ^ .* $ # stack frame + """, re.VERBOSE | re.MULTILINE) + return warn_re.sub(r"\g<category>: \g<detail>", out) + + +def remove_timings(out): + return re.sub( + r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out) + + +def munge_nose_output_for_doctest(out): + """Modify nose output to make it easy to use in doctests.""" + out = remove_stack_traces(out) + out = simplify_warnings(out) + out = remove_timings(out) + return out.strip() + + +def run(*arg, **kw): + """ + Specialized version of nose.run for use inside of doctests that + test test runs. + + This version of run() prints the result output to stdout. Before + printing, the output is processed by replacing the timing + information with an ellipsis (...), removing traceback stacks, and + removing trailing whitespace. + + Use this version of run wherever you are writing a doctest that + tests nose (or unittest) test result output. + + Note: do not use doctest: +ELLIPSIS when testing nose output, + since ellipses ("test_foo ... ok") in your expected test runner + output may match multiple lines of output, causing spurious test + passes! + """ + from nose import run + from nose.config import Config + from nose.plugins.manager import PluginManager + + buffer = Buffer() + if 'config' not in kw: + plugins = kw.pop('plugins', []) + if isinstance(plugins, list): + plugins = PluginManager(plugins=plugins) + env = kw.pop('env', {}) + kw['config'] = Config(env=env, plugins=plugins) + if 'argv' not in kw: + kw['argv'] = ['nosetests', '-v'] + kw['config'].stream = buffer + + # Set up buffering so that all output goes to our buffer, + # or warn user if deprecated behavior is active. If this is not + # done, prints and warnings will either be out of place or + # disappear. + stderr = sys.stderr + stdout = sys.stdout + if kw.pop('buffer_all', False): + sys.stdout = sys.stderr = buffer + restore = True + else: + restore = False + warn("The behavior of nose.plugins.plugintest.run() will change in " + "the next release of nose. The current behavior does not " + "correctly account for output to stdout and stderr. To enable " + "correct behavior, use run_buffered() instead, or pass " + "the keyword argument buffer_all=True to run().", + DeprecationWarning, stacklevel=2) + try: + run(*arg, **kw) + finally: + if restore: + sys.stderr = stderr + sys.stdout = stdout + out = buffer.getvalue() + print munge_nose_output_for_doctest(out) + + +def run_buffered(*arg, **kw): + kw['buffer_all'] = True + run(*arg, **kw) + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/prof.py b/scripts/external_libs/nose-1.3.4/nose/plugins/prof.py new file mode 100755 index 00000000..4d304a93 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/prof.py @@ -0,0 +1,154 @@ +"""This plugin will run tests using the hotshot profiler, which is part +of the standard library. 
To turn it on, use the ``--with-profile`` option +or set the NOSE_WITH_PROFILE environment variable. Profiler output can be +controlled with the ``--profile-sort`` and ``--profile-restrict`` options, +and the profiler output file may be changed with ``--profile-stats-file``. + +See the `hotshot documentation`_ in the standard library documentation for +more details on the various output options. + +.. _hotshot documentation: http://docs.python.org/library/hotshot.html +""" + +try: + import hotshot + from hotshot import stats +except ImportError: + hotshot, stats = None, None +import logging +import os +import sys +import tempfile +from nose.plugins.base import Plugin +from nose.util import tolist + +log = logging.getLogger('nose.plugins') + +class Profile(Plugin): + """ + Use this plugin to run tests using the hotshot profiler. + """ + pfile = None + clean_stats_file = False + def options(self, parser, env): + """Register commandline options. + """ + if not self.available(): + return + Plugin.options(self, parser, env) + parser.add_option('--profile-sort', action='store', dest='profile_sort', + default=env.get('NOSE_PROFILE_SORT', 'cumulative'), + metavar="SORT", + help="Set sort order for profiler output") + parser.add_option('--profile-stats-file', action='store', + dest='profile_stats_file', + metavar="FILE", + default=env.get('NOSE_PROFILE_STATS_FILE'), + help='Profiler stats file; default is a new ' + 'temp file on each run') + parser.add_option('--profile-restrict', action='append', + dest='profile_restrict', + metavar="RESTRICT", + default=env.get('NOSE_PROFILE_RESTRICT'), + help="Restrict profiler output. See help for " + "pstats.Stats for details") + + def available(cls): + return hotshot is not None + available = classmethod(available) + + def begin(self): + """Create profile stats file and load profiler. + """ + if not self.available(): + return + self._create_pfile() + self.prof = hotshot.Profile(self.pfile) + + def configure(self, options, conf): + """Configure plugin. + """ + if not self.available(): + self.enabled = False + return + Plugin.configure(self, options, conf) + self.conf = conf + if options.profile_stats_file: + self.pfile = options.profile_stats_file + self.clean_stats_file = False + else: + self.pfile = None + self.clean_stats_file = True + self.fileno = None + self.sort = options.profile_sort + self.restrict = tolist(options.profile_restrict) + + def prepareTest(self, test): + """Wrap entire test run in :func:`prof.runcall`. + """ + if not self.available(): + return + log.debug('preparing test %s' % test) + def run_and_profile(result, prof=self.prof, test=test): + self._create_pfile() + prof.runcall(test, result) + return run_and_profile + + def report(self, stream): + """Output profiler report. + """ + log.debug('printing profiler report') + self.prof.close() + prof_stats = stats.load(self.pfile) + prof_stats.sort_stats(self.sort) + + # 2.5 has completely different stream handling from 2.4 and earlier. + # Before 2.5, stats objects have no stream attribute; in 2.5 and later + # a reference sys.stdout is stored before we can tweak it. 
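+        # On 2.5+ the stats object's stream attribute is swapped for the report
+        # stream; on older versions sys.stdout is redirected instead. Either
+        # way, the original is restored in the finally block below.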
+ compat_25 = hasattr(prof_stats, 'stream') + if compat_25: + tmp = prof_stats.stream + prof_stats.stream = stream + else: + tmp = sys.stdout + sys.stdout = stream + try: + if self.restrict: + log.debug('setting profiler restriction to %s', self.restrict) + prof_stats.print_stats(*self.restrict) + else: + prof_stats.print_stats() + finally: + if compat_25: + prof_stats.stream = tmp + else: + sys.stdout = tmp + + def finalize(self, result): + """Clean up stats file, if configured to do so. + """ + if not self.available(): + return + try: + self.prof.close() + except AttributeError: + # TODO: is this trying to catch just the case where not + # hasattr(self.prof, "close")? If so, the function call should be + # moved out of the try: suite. + pass + if self.clean_stats_file: + if self.fileno: + try: + os.close(self.fileno) + except OSError: + pass + try: + os.unlink(self.pfile) + except OSError: + pass + return None + + def _create_pfile(self): + if not self.pfile: + self.fileno, self.pfile = tempfile.mkstemp() + self.clean_stats_file = True diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/skip.py b/scripts/external_libs/nose-1.3.4/nose/plugins/skip.py new file mode 100755 index 00000000..9d1ac8f6 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/skip.py @@ -0,0 +1,63 @@ +""" +This plugin installs a SKIP error class for the SkipTest exception. +When SkipTest is raised, the exception will be logged in the skipped +attribute of the result, 'S' or 'SKIP' (verbose) will be output, and +the exception will not be counted as an error or failure. This plugin +is enabled by default but may be disabled with the ``--no-skip`` option. +""" + +from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin + + +# on SkipTest: +# - unittest SkipTest is first preference, but it's only available +# for >= 2.7 +# - unittest2 SkipTest is second preference for older pythons. This +# mirrors logic for choosing SkipTest exception in testtools +# - if none of the above, provide custom class +try: + from unittest.case import SkipTest +except ImportError: + try: + from unittest2.case import SkipTest + except ImportError: + class SkipTest(Exception): + """Raise this exception to mark a test as skipped. + """ + pass + + +class Skip(ErrorClassPlugin): + """ + Plugin that installs a SKIP error class for the SkipTest + exception. When SkipTest is raised, the exception will be logged + in the skipped attribute of the result, 'S' or 'SKIP' (verbose) + will be output, and the exception will not be counted as an error + or failure. + """ + enabled = True + skipped = ErrorClass(SkipTest, + label='SKIP', + isfailure=False) + + def options(self, parser, env): + """ + Add my options to command line. + """ + env_opt = 'NOSE_WITHOUT_SKIP' + parser.add_option('--no-skip', action='store_true', + dest='noSkip', default=env.get(env_opt, False), + help="Disable special handling of SkipTest " + "exceptions.") + + def configure(self, options, conf): + """ + Configure plugin. Skip plugin is enabled by default. + """ + if not self.can_configure: + return + self.conf = conf + disable = getattr(options, 'noSkip', False) + if disable: + self.enabled = False + diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/testid.py b/scripts/external_libs/nose-1.3.4/nose/plugins/testid.py new file mode 100755 index 00000000..49fff9b1 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/testid.py @@ -0,0 +1,306 @@ +""" +This plugin adds a test id (like #1) to each test name output. 
After +you've run once to generate test ids, you can re-run individual +tests by activating the plugin and passing the ids (with or +without the # prefix) instead of test names. + +For example, if your normal test run looks like:: + + % nosetests -v + tests.test_a ... ok + tests.test_b ... ok + tests.test_c ... ok + +When adding ``--with-id`` you'll see:: + + % nosetests -v --with-id + #1 tests.test_a ... ok + #2 tests.test_b ... ok + #3 tests.test_c ... ok + +Then you can re-run individual tests by supplying just an id number:: + + % nosetests -v --with-id 2 + #2 tests.test_b ... ok + +You can also pass multiple id numbers:: + + % nosetests -v --with-id 2 3 + #2 tests.test_b ... ok + #3 tests.test_c ... ok + +Since most shells consider '#' a special character, you can leave it out when +specifying a test id. + +Note that when run without the -v switch, no special output is displayed, but +the ids file is still written. + +Looping over failed tests +------------------------- + +This plugin also adds a mode that will direct the test runner to record +failed tests. Subsequent test runs will then run only the tests that failed +last time. Activate this mode with the ``--failed`` switch:: + + % nosetests -v --failed + #1 test.test_a ... ok + #2 test.test_b ... ERROR + #3 test.test_c ... FAILED + #4 test.test_d ... ok + +On the second run, only tests #2 and #3 will run:: + + % nosetests -v --failed + #2 test.test_b ... ERROR + #3 test.test_c ... FAILED + +As you correct errors and tests pass, they'll drop out of subsequent runs. + +First:: + + % nosetests -v --failed + #2 test.test_b ... ok + #3 test.test_c ... FAILED + +Second:: + + % nosetests -v --failed + #3 test.test_c ... FAILED + +When all tests pass, the full set will run on the next invocation. + +First:: + + % nosetests -v --failed + #3 test.test_c ... ok + +Second:: + + % nosetests -v --failed + #1 test.test_a ... ok + #2 test.test_b ... ok + #3 test.test_c ... ok + #4 test.test_d ... ok + +.. note :: + + If you expect to use ``--failed`` regularly, it's a good idea to always run + using the ``--with-id`` option. This will ensure that an id file is always + created, allowing you to add ``--failed`` to the command line as soon as + you have failing tests. Otherwise, your first run using ``--failed`` will + (perhaps surprisingly) run *all* tests, because there won't be an id file + containing the record of failed tests from your previous run. + +""" +__test__ = False + +import logging +import os +from nose.plugins import Plugin +from nose.util import src, set + +try: + from cPickle import dump, load +except ImportError: + from pickle import dump, load + +log = logging.getLogger(__name__) + + +class TestId(Plugin): + """ + Activate to add a test id (like #1) to each test name output. Activate + with --failed to rerun failing tests only. + """ + name = 'id' + idfile = None + collecting = True + loopOnFailed = False + + def options(self, parser, env): + """Register commandline options. + """ + Plugin.options(self, parser, env) + parser.add_option('--id-file', action='store', dest='testIdFile', + default='.noseids', metavar="FILE", + help="Store test ids found in test runs in this " + "file. Default is the file .noseids in the " + "working directory.") + parser.add_option('--failed', action='store_true', + dest='failed', default=False, + help="Run the tests that failed in the last " + "test run.") + + def configure(self, options, conf): + """Configure plugin. 
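+
+        Passing --failed also enables the plugin and switches it into
+        loop-on-failed mode. A relative --id-file path is resolved against
+        the working directory.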
+ """ + Plugin.configure(self, options, conf) + if options.failed: + self.enabled = True + self.loopOnFailed = True + log.debug("Looping on failed tests") + self.idfile = os.path.expanduser(options.testIdFile) + if not os.path.isabs(self.idfile): + self.idfile = os.path.join(conf.workingDir, self.idfile) + self.id = 1 + # Ids and tests are mirror images: ids are {id: test address} and + # tests are {test address: id} + self.ids = {} + self.tests = {} + self.failed = [] + self.source_names = [] + # used to track ids seen when tests is filled from + # loaded ids file + self._seen = {} + self._write_hashes = conf.verbosity >= 2 + + def finalize(self, result): + """Save new ids file, if needed. + """ + if result.wasSuccessful(): + self.failed = [] + if self.collecting: + ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys())))) + else: + ids = self.ids + fh = open(self.idfile, 'wb') + dump({'ids': ids, + 'failed': self.failed, + 'source_names': self.source_names}, fh) + fh.close() + log.debug('Saved test ids: %s, failed %s to %s', + ids, self.failed, self.idfile) + + def loadTestsFromNames(self, names, module=None): + """Translate ids in the list of requested names into their + test addresses, if they are found in my dict of tests. + """ + log.debug('ltfn %s %s', names, module) + try: + fh = open(self.idfile, 'rb') + data = load(fh) + if 'ids' in data: + self.ids = data['ids'] + self.failed = data['failed'] + self.source_names = data['source_names'] + else: + # old ids field + self.ids = data + self.failed = [] + self.source_names = names + if self.ids: + self.id = max(self.ids) + 1 + self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys())))) + else: + self.id = 1 + log.debug( + 'Loaded test ids %s tests %s failed %s sources %s from %s', + self.ids, self.tests, self.failed, self.source_names, + self.idfile) + fh.close() + except IOError: + log.debug('IO error reading %s', self.idfile) + + if self.loopOnFailed and self.failed: + self.collecting = False + names = self.failed + self.failed = [] + # I don't load any tests myself, only translate names like '#2' + # into the associated test addresses + translated = [] + new_source = [] + really_new = [] + for name in names: + trans = self.tr(name) + if trans != name: + translated.append(trans) + else: + new_source.append(name) + # names that are not ids and that are not in the current + # list of source names go into the list for next time + if new_source: + new_set = set(new_source) + old_set = set(self.source_names) + log.debug("old: %s new: %s", old_set, new_set) + really_new = [s for s in new_source + if not s in old_set] + if really_new: + # remember new sources + self.source_names.extend(really_new) + if not translated: + # new set of source names, no translations + # means "run the requested tests" + names = new_source + else: + # no new names to translate and add to id set + self.collecting = False + log.debug("translated: %s new sources %s names %s", + translated, really_new, names) + return (None, translated + really_new or names) + + def makeName(self, addr): + log.debug("Make name %s", addr) + filename, module, call = addr + if filename is not None: + head = src(filename) + else: + head = module + if call is not None: + return "%s:%s" % (head, call) + return head + + def setOutputStream(self, stream): + """Get handle on output stream so the plugin can print id #s + """ + self.stream = stream + + def startTest(self, test): + """Maybe output an id # before the test name. 
+ + Example output:: + + #1 test.test ... ok + #2 test.test_two ... ok + + """ + adr = test.address() + log.debug('start test %s (%s)', adr, adr in self.tests) + if adr in self.tests: + if adr in self._seen: + self.write(' ') + else: + self.write('#%s ' % self.tests[adr]) + self._seen[adr] = 1 + return + self.tests[adr] = self.id + self.write('#%s ' % self.id) + self.id += 1 + + def afterTest(self, test): + # None means test never ran, False means failed/err + if test.passed is False: + try: + key = str(self.tests[test.address()]) + except KeyError: + # never saw this test -- startTest didn't run + pass + else: + if key not in self.failed: + self.failed.append(key) + + def tr(self, name): + log.debug("tr '%s'", name) + try: + key = int(name.replace('#', '')) + except ValueError: + return name + log.debug("Got key %s", key) + # I'm running tests mapped from the ids file, + # not collecting new ones + if key in self.ids: + return self.makeName(self.ids[key]) + return name + + def write(self, output): + if self._write_hashes: + self.stream.write(output) diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py b/scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py new file mode 100755 index 00000000..e1ec0e1d --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py @@ -0,0 +1,329 @@ +"""This plugin provides test results in the standard XUnit XML format. + +It's designed for the `Jenkins`_ (previously Hudson) continuous build +system, but will probably work for anything else that understands an +XUnit-formatted XML representation of test results. + +Add this shell command to your builder :: + + nosetests --with-xunit + +And by default a file named nosetests.xml will be written to the +working directory. + +In a Jenkins builder, tick the box named "Publish JUnit test result report" +under the Post-build Actions and enter this value for Test report XMLs:: + + **/nosetests.xml + +If you need to change the name or location of the file, you can set the +``--xunit-file`` option. + +Here is an abbreviated version of what an XML test report might look like:: + + <?xml version="1.0" encoding="UTF-8"?> + <testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0"> + <testcase classname="path_to_test_suite.TestSomething" + name="test_it" time="0"> + <error type="exceptions.TypeError" message="oops, wrong type"> + Traceback (most recent call last): + ... + TypeError: oops, wrong type + </error> + </testcase> + </testsuite> + +.. 
_Jenkins: http://jenkins-ci.org/ + +""" +import codecs +import doctest +import os +import sys +import traceback +import re +import inspect +from StringIO import StringIO +from time import time +from xml.sax import saxutils + +from nose.plugins.base import Plugin +from nose.exc import SkipTest +from nose.pyversion import force_unicode, format_exception + +# Invalid XML characters, control characters 0-31 sans \t, \n and \r +CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]") + +TEST_ID = re.compile(r'^(.*?)(\(.*\))$') + +def xml_safe(value): + """Replaces invalid XML characters with '?'.""" + return CONTROL_CHARACTERS.sub('?', value) + +def escape_cdata(cdata): + """Escape a string for an XML CDATA section.""" + return xml_safe(cdata).replace(']]>', ']]>]]><![CDATA[') + +def id_split(idval): + m = TEST_ID.match(idval) + if m: + name, fargs = m.groups() + head, tail = name.rsplit(".", 1) + return [head, tail+fargs] + else: + return idval.rsplit(".", 1) + +def nice_classname(obj): + """Returns a nice name for class object or class instance. + + >>> nice_classname(Exception()) # doctest: +ELLIPSIS + '...Exception' + >>> nice_classname(Exception) # doctest: +ELLIPSIS + '...Exception' + + """ + if inspect.isclass(obj): + cls_name = obj.__name__ + else: + cls_name = obj.__class__.__name__ + mod = inspect.getmodule(obj) + if mod: + name = mod.__name__ + # jython + if name.startswith('org.python.core.'): + name = name[len('org.python.core.'):] + return "%s.%s" % (name, cls_name) + else: + return cls_name + +def exc_message(exc_info): + """Return the exception's message.""" + exc = exc_info[1] + if exc is None: + # str exception + result = exc_info[0] + else: + try: + result = str(exc) + except UnicodeEncodeError: + try: + result = unicode(exc) + except UnicodeError: + # Fallback to args as neither str nor + # unicode(Exception(u'\xe6')) work in Python < 2.6 + result = exc.args[0] + result = force_unicode(result, 'UTF-8') + return xml_safe(result) + +class Tee(object): + def __init__(self, encoding, *args): + self._encoding = encoding + self._streams = args + + def write(self, data): + data = force_unicode(data, self._encoding) + for s in self._streams: + s.write(data) + + def writelines(self, lines): + for line in lines: + self.write(line) + + def flush(self): + for s in self._streams: + s.flush() + + def isatty(self): + return False + + +class Xunit(Plugin): + """This plugin provides test results in the standard XUnit XML format.""" + name = 'xunit' + score = 1500 + encoding = 'UTF-8' + error_report_file = None + + def __init__(self): + super(Xunit, self).__init__() + self._capture_stack = [] + self._currentStdout = None + self._currentStderr = None + + def _timeTaken(self): + if hasattr(self, '_timer'): + taken = time() - self._timer + else: + # test died before it ran (probably error in setup()) + # or success/failure added before test started probably + # due to custom TestResult munging + taken = 0.0 + return taken + + def _quoteattr(self, attr): + """Escape an XML attribute. Value can be unicode.""" + attr = xml_safe(attr) + return saxutils.quoteattr(attr) + + def options(self, parser, env): + """Sets additional command line options.""" + Plugin.options(self, parser, env) + parser.add_option( + '--xunit-file', action='store', + dest='xunit_file', metavar="FILE", + default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'), + help=("Path to xml file to store the xunit report in. 
" + "Default is nosetests.xml in the working directory " + "[NOSE_XUNIT_FILE]")) + + def configure(self, options, config): + """Configures the xunit plugin.""" + Plugin.configure(self, options, config) + self.config = config + if self.enabled: + self.stats = {'errors': 0, + 'failures': 0, + 'passes': 0, + 'skipped': 0 + } + self.errorlist = [] + self.error_report_file_name = os.path.realpath(options.xunit_file) + + def report(self, stream): + """Writes an Xunit-formatted XML file + + The file includes a report of test errors and failures. + + """ + self.error_report_file = codecs.open(self.error_report_file_name, 'w', + self.encoding, 'replace') + self.stats['encoding'] = self.encoding + self.stats['total'] = (self.stats['errors'] + self.stats['failures'] + + self.stats['passes'] + self.stats['skipped']) + self.error_report_file.write( + u'<?xml version="1.0" encoding="%(encoding)s"?>' + u'<testsuite name="nosetests" tests="%(total)d" ' + u'errors="%(errors)d" failures="%(failures)d" ' + u'skip="%(skipped)d">' % self.stats) + self.error_report_file.write(u''.join([force_unicode(e, self.encoding) + for e in self.errorlist])) + self.error_report_file.write(u'</testsuite>') + self.error_report_file.close() + if self.config.verbosity > 1: + stream.writeln("-" * 70) + stream.writeln("XML: %s" % self.error_report_file.name) + + def _startCapture(self): + self._capture_stack.append((sys.stdout, sys.stderr)) + self._currentStdout = StringIO() + self._currentStderr = StringIO() + sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout) + sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr) + + def startContext(self, context): + self._startCapture() + + def stopContext(self, context): + self._endCapture() + + def beforeTest(self, test): + """Initializes a timer before starting a test.""" + self._timer = time() + self._startCapture() + + def _endCapture(self): + if self._capture_stack: + sys.stdout, sys.stderr = self._capture_stack.pop() + + def afterTest(self, test): + self._endCapture() + self._currentStdout = None + self._currentStderr = None + + def finalize(self, test): + while self._capture_stack: + self._endCapture() + + def _getCapturedStdout(self): + if self._currentStdout: + value = self._currentStdout.getvalue() + if value: + return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata( + value) + return '' + + def _getCapturedStderr(self): + if self._currentStderr: + value = self._currentStderr.getvalue() + if value: + return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata( + value) + return '' + + def addError(self, test, err, capt=None): + """Add error output to Xunit report. 
+ """ + taken = self._timeTaken() + + if issubclass(err[0], SkipTest): + type = 'skipped' + self.stats['skipped'] += 1 + else: + type = 'error' + self.stats['errors'] += 1 + + tb = format_exception(err, self.encoding) + id = test.id() + + self.errorlist.append( + u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">' + u'<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>' + u'</%(type)s>%(systemout)s%(systemerr)s</testcase>' % + {'cls': self._quoteattr(id_split(id)[0]), + 'name': self._quoteattr(id_split(id)[-1]), + 'taken': taken, + 'type': type, + 'errtype': self._quoteattr(nice_classname(err[0])), + 'message': self._quoteattr(exc_message(err)), + 'tb': escape_cdata(tb), + 'systemout': self._getCapturedStdout(), + 'systemerr': self._getCapturedStderr(), + }) + + def addFailure(self, test, err, capt=None, tb_info=None): + """Add failure output to Xunit report. + """ + taken = self._timeTaken() + tb = format_exception(err, self.encoding) + self.stats['failures'] += 1 + id = test.id() + + self.errorlist.append( + u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">' + u'<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>' + u'</failure>%(systemout)s%(systemerr)s</testcase>' % + {'cls': self._quoteattr(id_split(id)[0]), + 'name': self._quoteattr(id_split(id)[-1]), + 'taken': taken, + 'errtype': self._quoteattr(nice_classname(err[0])), + 'message': self._quoteattr(exc_message(err)), + 'tb': escape_cdata(tb), + 'systemout': self._getCapturedStdout(), + 'systemerr': self._getCapturedStderr(), + }) + + def addSuccess(self, test, capt=None): + """Add success output to Xunit report. + """ + taken = self._timeTaken() + self.stats['passes'] += 1 + id = test.id() + self.errorlist.append( + '<testcase classname=%(cls)s name=%(name)s ' + 'time="%(taken).3f">%(systemout)s%(systemerr)s</testcase>' % + {'cls': self._quoteattr(id_split(id)[0]), + 'name': self._quoteattr(id_split(id)[-1]), + 'taken': taken, + 'systemout': self._getCapturedStdout(), + 'systemerr': self._getCapturedStderr(), + }) diff --git a/scripts/external_libs/nose-1.3.4/nose/proxy.py b/scripts/external_libs/nose-1.3.4/nose/proxy.py new file mode 100755 index 00000000..c2676cb1 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/proxy.py @@ -0,0 +1,188 @@ +""" +Result Proxy +------------ + +The result proxy wraps the result instance given to each test. It +performs two functions: enabling extended error/failure reporting +and calling plugins. + +As each result event is fired, plugins are called with the same event; +however, plugins are called with the nose.case.Test instance that +wraps the actual test. So when a test fails and calls +result.addFailure(self, err), the result proxy calls +addFailure(self.test, err) for each plugin. This allows plugins to +have a single stable interface for all test types, and also to +manipulate the test object itself by setting the `test` attribute of +the nose.case.Test that they receive. +""" +import logging +from nose.config import Config + + +log = logging.getLogger(__name__) + + +def proxied_attribute(local_attr, proxied_attr, doc): + """Create a property that proxies attribute ``proxied_attr`` through + the local attribute ``local_attr``. 
+ """ + def fget(self): + return getattr(getattr(self, local_attr), proxied_attr) + def fset(self, value): + setattr(getattr(self, local_attr), proxied_attr, value) + def fdel(self): + delattr(getattr(self, local_attr), proxied_attr) + return property(fget, fset, fdel, doc) + + +class ResultProxyFactory(object): + """Factory for result proxies. Generates a ResultProxy bound to each test + and the result passed to the test. + """ + def __init__(self, config=None): + if config is None: + config = Config() + self.config = config + self.__prepared = False + self.__result = None + + def __call__(self, result, test): + """Return a ResultProxy for the current test. + + On first call, plugins are given a chance to replace the + result used for the remaining tests. If a plugin returns a + value from prepareTestResult, that object will be used as the + result for all tests. + """ + if not self.__prepared: + self.__prepared = True + plug_result = self.config.plugins.prepareTestResult(result) + if plug_result is not None: + self.__result = result = plug_result + if self.__result is not None: + result = self.__result + return ResultProxy(result, test, config=self.config) + + +class ResultProxy(object): + """Proxy to TestResults (or other results handler). + + One ResultProxy is created for each nose.case.Test. The result + proxy calls plugins with the nose.case.Test instance (instead of + the wrapped test case) as each result call is made. Finally, the + real result method is called, also with the nose.case.Test + instance as the test parameter. + + """ + def __init__(self, result, test, config=None): + if config is None: + config = Config() + self.config = config + self.plugins = config.plugins + self.result = result + self.test = test + + def __repr__(self): + return repr(self.result) + + def _prepareErr(self, err): + if not isinstance(err[1], Exception) and isinstance(err[0], type): + # Turn value back into an Exception (required in Python 3.x). + # Plugins do all sorts of crazy things with exception values. + # Convert it to a custom subclass of Exception with the same + # name as the actual exception to make it print correctly. + value = type(err[0].__name__, (Exception,), {})(err[1]) + err = (err[0], value, err[2]) + return err + + def assertMyTest(self, test): + # The test I was called with must be my .test or my + # .test's .test. 
or my .test.test's .case + + case = getattr(self.test, 'test', None) + assert (test is self.test + or test is case + or test is getattr(case, '_nose_case', None)), ( + "ResultProxy for %r (%s) was called with test %r (%s)" + % (self.test, id(self.test), test, id(test))) + + def afterTest(self, test): + self.assertMyTest(test) + self.plugins.afterTest(self.test) + if hasattr(self.result, "afterTest"): + self.result.afterTest(self.test) + + def beforeTest(self, test): + self.assertMyTest(test) + self.plugins.beforeTest(self.test) + if hasattr(self.result, "beforeTest"): + self.result.beforeTest(self.test) + + def addError(self, test, err): + self.assertMyTest(test) + plugins = self.plugins + plugin_handled = plugins.handleError(self.test, err) + if plugin_handled: + return + # test.passed is set in result, to account for error classes + formatted = plugins.formatError(self.test, err) + if formatted is not None: + err = formatted + plugins.addError(self.test, err) + self.result.addError(self.test, self._prepareErr(err)) + if not self.result.wasSuccessful() and self.config.stopOnError: + self.shouldStop = True + + def addFailure(self, test, err): + self.assertMyTest(test) + plugins = self.plugins + plugin_handled = plugins.handleFailure(self.test, err) + if plugin_handled: + return + self.test.passed = False + formatted = plugins.formatFailure(self.test, err) + if formatted is not None: + err = formatted + plugins.addFailure(self.test, err) + self.result.addFailure(self.test, self._prepareErr(err)) + if self.config.stopOnError: + self.shouldStop = True + + def addSkip(self, test, reason): + # 2.7 compat shim + from nose.plugins.skip import SkipTest + self.assertMyTest(test) + plugins = self.plugins + if not isinstance(reason, Exception): + # for Python 3.2+ + reason = Exception(reason) + plugins.addError(self.test, (SkipTest, reason, None)) + self.result.addSkip(self.test, reason) + + def addSuccess(self, test): + self.assertMyTest(test) + self.plugins.addSuccess(self.test) + self.result.addSuccess(self.test) + + def startTest(self, test): + self.assertMyTest(test) + self.plugins.startTest(self.test) + self.result.startTest(self.test) + + def stop(self): + self.result.stop() + + def stopTest(self, test): + self.assertMyTest(test) + self.plugins.stopTest(self.test) + self.result.stopTest(self.test) + + # proxied attributes + shouldStop = proxied_attribute('result', 'shouldStop', + """Should the test run stop?""") + errors = proxied_attribute('result', 'errors', + """Tests that raised an exception""") + failures = proxied_attribute('result', 'failures', + """Tests that failed""") + testsRun = proxied_attribute('result', 'testsRun', + """Number of tests run""") diff --git a/scripts/external_libs/nose-1.3.4/nose/pyversion.py b/scripts/external_libs/nose-1.3.4/nose/pyversion.py new file mode 100755 index 00000000..8b566141 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/pyversion.py @@ -0,0 +1,214 @@ +""" +This module contains fixups for using nose under different versions of Python. 
+""" +import sys +import os +import traceback +import types +import inspect +import nose.util + +__all__ = ['make_instancemethod', 'cmp_to_key', 'sort_list', 'ClassType', + 'TypeType', 'UNICODE_STRINGS', 'unbound_method', 'ismethod', + 'bytes_', 'is_base_exception', 'force_unicode', 'exc_to_unicode', + 'format_exception'] + +# In Python 3.x, all strings are unicode (the call to 'unicode()' in the 2.x +# source will be replaced with 'str()' when running 2to3, so this test will +# then become true) +UNICODE_STRINGS = (type(unicode()) == type(str())) + +if sys.version_info[:2] < (3, 0): + def force_unicode(s, encoding='UTF-8'): + try: + s = unicode(s) + except UnicodeDecodeError: + s = str(s).decode(encoding, 'replace') + + return s +else: + def force_unicode(s, encoding='UTF-8'): + return str(s) + +# new.instancemethod() is obsolete for new-style classes (Python 3.x) +# We need to use descriptor methods instead. +try: + import new + def make_instancemethod(function, instance): + return new.instancemethod(function.im_func, instance, + instance.__class__) +except ImportError: + def make_instancemethod(function, instance): + return function.__get__(instance, instance.__class__) + +# To be forward-compatible, we do all list sorts using keys instead of cmp +# functions. However, part of the unittest.TestLoader API involves a +# user-provideable cmp function, so we need some way to convert that. +def cmp_to_key(mycmp): + 'Convert a cmp= function into a key= function' + class Key(object): + def __init__(self, obj): + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) < 0 + def __gt__(self, other): + return mycmp(self.obj, other.obj) > 0 + def __eq__(self, other): + return mycmp(self.obj, other.obj) == 0 + return Key + +# Python 2.3 also does not support list-sorting by key, so we need to convert +# keys to cmp functions if we're running on old Python.. +if sys.version_info < (2, 4): + def sort_list(l, key, reverse=False): + if reverse: + return l.sort(lambda a, b: cmp(key(b), key(a))) + else: + return l.sort(lambda a, b: cmp(key(a), key(b))) +else: + def sort_list(l, key, reverse=False): + return l.sort(key=key, reverse=reverse) + +# In Python 3.x, all objects are "new style" objects descended from 'type', and +# thus types.ClassType and types.TypeType don't exist anymore. For +# compatibility, we make sure they still work. +if hasattr(types, 'ClassType'): + ClassType = types.ClassType + TypeType = types.TypeType +else: + ClassType = type + TypeType = type + +# The following emulates the behavior (we need) of an 'unbound method' under +# Python 3.x (namely, the ability to have a class associated with a function +# definition so that things can do stuff based on its associated class) +class UnboundMethod: + def __init__(self, cls, func): + # Make sure we have all the same attributes as the original function, + # so that the AttributeSelector plugin will work correctly... 
+ self.__dict__ = func.__dict__.copy() + self._func = func + self.__self__ = UnboundSelf(cls) + if sys.version_info < (3, 0): + self.im_class = cls + + def address(self): + cls = self.__self__.cls + modname = cls.__module__ + module = sys.modules[modname] + filename = getattr(module, '__file__', None) + if filename is not None: + filename = os.path.abspath(filename) + return (nose.util.src(filename), modname, "%s.%s" % (cls.__name__, + self._func.__name__)) + + def __call__(self, *args, **kwargs): + return self._func(*args, **kwargs) + + def __getattr__(self, attr): + return getattr(self._func, attr) + + def __repr__(self): + return '<unbound method %s.%s>' % (self.__self__.cls.__name__, + self._func.__name__) + +class UnboundSelf: + def __init__(self, cls): + self.cls = cls + + # We have to do this hackery because Python won't let us override the + # __class__ attribute... + def __getattribute__(self, attr): + if attr == '__class__': + return self.cls + else: + return object.__getattribute__(self, attr) + +def unbound_method(cls, func): + if inspect.ismethod(func): + return func + if not inspect.isfunction(func): + raise TypeError('%s is not a function' % (repr(func),)) + return UnboundMethod(cls, func) + +def ismethod(obj): + return inspect.ismethod(obj) or isinstance(obj, UnboundMethod) + + +# Make a pseudo-bytes function that can be called without the encoding arg: +if sys.version_info >= (3, 0): + def bytes_(s, encoding='utf8'): + if isinstance(s, bytes): + return s + return bytes(s, encoding) +else: + def bytes_(s, encoding=None): + return str(s) + + +if sys.version_info[:2] >= (2, 6): + def isgenerator(o): + if isinstance(o, UnboundMethod): + o = o._func + return inspect.isgeneratorfunction(o) or inspect.isgenerator(o) +else: + try: + from compiler.consts import CO_GENERATOR + except ImportError: + # IronPython doesn't have a complier module + CO_GENERATOR=0x20 + + def isgenerator(func): + try: + return func.func_code.co_flags & CO_GENERATOR != 0 + except AttributeError: + return False + +# Make a function to help check if an exception is derived from BaseException. +# In Python 2.4, we just use Exception instead. +if sys.version_info[:2] < (2, 5): + def is_base_exception(exc): + return isinstance(exc, Exception) +else: + def is_base_exception(exc): + return isinstance(exc, BaseException) + +if sys.version_info[:2] < (3, 0): + def exc_to_unicode(ev, encoding='utf-8'): + if is_base_exception(ev): + if not hasattr(ev, '__unicode__'): + # 2.5- + if not hasattr(ev, 'message'): + # 2.4 + msg = len(ev.args) and ev.args[0] or '' + else: + msg = ev.message + msg = force_unicode(msg, encoding=encoding) + clsname = force_unicode(ev.__class__.__name__, + encoding=encoding) + ev = u'%s: %s' % (clsname, msg) + elif not isinstance(ev, unicode): + ev = repr(ev) + + return force_unicode(ev, encoding=encoding) +else: + def exc_to_unicode(ev, encoding='utf-8'): + return str(ev) + +def format_exception(exc_info, encoding='UTF-8'): + ec, ev, tb = exc_info + + # Our exception object may have been turned into a string, and Python 3's + # traceback.format_exception() doesn't take kindly to that (it expects an + # actual exception object). So we work around it, by doing the work + # ourselves if ev is not an exception object. 
+ if not is_base_exception(ev): + tb_data = force_unicode( + ''.join(traceback.format_tb(tb)), + encoding) + ev = exc_to_unicode(ev) + return tb_data + ev + else: + return force_unicode( + ''.join(traceback.format_exception(*exc_info)), + encoding) diff --git a/scripts/external_libs/nose-1.3.4/nose/result.py b/scripts/external_libs/nose-1.3.4/nose/result.py new file mode 100755 index 00000000..f974a14a --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/result.py @@ -0,0 +1,200 @@ +""" +Test Result +----------- + +Provides a TextTestResult that extends unittest's _TextTestResult to +provide support for error classes (such as the builtin skip and +deprecated classes), and hooks for plugins to take over or extend +reporting. +""" + +import logging +try: + # 2.7+ + from unittest.runner import _TextTestResult +except ImportError: + from unittest import _TextTestResult +from nose.config import Config +from nose.util import isclass, ln as _ln # backwards compat + +log = logging.getLogger('nose.result') + + +def _exception_detail(exc): + # this is what stdlib module traceback does + try: + return str(exc) + except: + return '<unprintable %s object>' % type(exc).__name__ + + +class TextTestResult(_TextTestResult): + """Text test result that extends unittest's default test result + support for a configurable set of errorClasses (eg, Skip, + Deprecated, TODO) that extend the errors/failures/success triad. + """ + def __init__(self, stream, descriptions, verbosity, config=None, + errorClasses=None): + if errorClasses is None: + errorClasses = {} + self.errorClasses = errorClasses + if config is None: + config = Config() + self.config = config + _TextTestResult.__init__(self, stream, descriptions, verbosity) + + def addSkip(self, test, reason): + # 2.7 skip compat + from nose.plugins.skip import SkipTest + if SkipTest in self.errorClasses: + storage, label, isfail = self.errorClasses[SkipTest] + storage.append((test, reason)) + self.printLabel(label, (SkipTest, reason, None)) + + def addError(self, test, err): + """Overrides normal addError to add support for + errorClasses. If the exception is a registered class, the + error will be added to the list for that class, not errors. + """ + ec, ev, tb = err + try: + exc_info = self._exc_info_to_string(err, test) + except TypeError: + # 2.3 compat + exc_info = self._exc_info_to_string(err) + for cls, (storage, label, isfail) in self.errorClasses.items(): + #if 'Skip' in cls.__name__ or 'Skip' in ec.__name__: + # from nose.tools import set_trace + # set_trace() + if isclass(ec) and issubclass(ec, cls): + if isfail: + test.passed = False + storage.append((test, exc_info)) + self.printLabel(label, err) + return + self.errors.append((test, exc_info)) + test.passed = False + self.printLabel('ERROR') + + # override to bypass changes in 2.7 + def getDescription(self, test): + if self.descriptions: + return test.shortDescription() or str(test) + else: + return str(test) + + def printLabel(self, label, err=None): + # Might get patched into a streamless result + stream = getattr(self, 'stream', None) + if stream is not None: + if self.showAll: + message = [label] + if err: + detail = _exception_detail(err[1]) + if detail: + message.append(detail) + stream.writeln(": ".join(message)) + elif self.dots: + stream.write(label[:1]) + + def printErrors(self): + """Overrides to print all errorClasses errors as well. 
+ """ + _TextTestResult.printErrors(self) + for cls in self.errorClasses.keys(): + storage, label, isfail = self.errorClasses[cls] + if isfail: + self.printErrorList(label, storage) + # Might get patched into a result with no config + if hasattr(self, 'config'): + self.config.plugins.report(self.stream) + + def printSummary(self, start, stop): + """Called by the test runner to print the final summary of test + run results. + """ + write = self.stream.write + writeln = self.stream.writeln + taken = float(stop - start) + run = self.testsRun + plural = run != 1 and "s" or "" + + writeln(self.separator2) + writeln("Ran %s test%s in %.3fs" % (run, plural, taken)) + writeln() + + summary = {} + eckeys = self.errorClasses.keys() + for cls in eckeys: + storage, label, isfail = self.errorClasses[cls] + count = len(storage) + if not count: + continue + summary[label] = count + if len(self.failures): + summary['failures'] = len(self.failures) + if len(self.errors): + summary['errors'] = len(self.errors) + + if not self.wasSuccessful(): + write("FAILED") + else: + write("OK") + items = summary.items() + if items: + items.sort() + write(" (") + write(", ".join(["%s=%s" % (label, count) for + label, count in items])) + writeln(")") + else: + writeln() + + def wasSuccessful(self): + """Overrides to check that there are no errors in errorClasses + lists that are marked as errors and should cause a run to + fail. + """ + if self.errors or self.failures: + return False + for cls in self.errorClasses.keys(): + storage, label, isfail = self.errorClasses[cls] + if not isfail: + continue + if storage: + return False + return True + + def _addError(self, test, err): + try: + exc_info = self._exc_info_to_string(err, test) + except TypeError: + # 2.3: does not take test arg + exc_info = self._exc_info_to_string(err) + self.errors.append((test, exc_info)) + if self.showAll: + self.stream.write('ERROR') + elif self.dots: + self.stream.write('E') + + def _exc_info_to_string(self, err, test=None): + # 2.7 skip compat + from nose.plugins.skip import SkipTest + if isclass(err[0]) and issubclass(err[0], SkipTest): + return str(err[1]) + # 2.3/2.4 -- 2.4 passes test, 2.3 does not + try: + return _TextTestResult._exc_info_to_string(self, err, test) + except TypeError: + # 2.3: does not take test arg + return _TextTestResult._exc_info_to_string(self, err) + + +def ln(*arg, **kw): + from warnings import warn + warn("ln() has moved to nose.util from nose.result and will be removed " + "from nose.result in a future release. Please update your imports ", + DeprecationWarning) + return _ln(*arg, **kw) + + diff --git a/scripts/external_libs/nose-1.3.4/nose/selector.py b/scripts/external_libs/nose-1.3.4/nose/selector.py new file mode 100755 index 00000000..c4a006a8 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/selector.py @@ -0,0 +1,251 @@ +""" +Test Selection +-------------- + +Test selection is handled by a Selector. The test loader calls the +appropriate selector method for each object it encounters that it +thinks may be a test. 
+""" +import logging +import os +import unittest +from nose.config import Config +from nose.util import split_test_name, src, getfilename, getpackage, ispackage + +log = logging.getLogger(__name__) + +__all__ = ['Selector', 'defaultSelector', 'TestAddress'] + + +# for efficiency and easier mocking +op_join = os.path.join +op_basename = os.path.basename +op_exists = os.path.exists +op_splitext = os.path.splitext +op_isabs = os.path.isabs +op_abspath = os.path.abspath + + +class Selector(object): + """Core test selector. Examines test candidates and determines whether, + given the specified configuration, the test candidate should be selected + as a test. + """ + def __init__(self, config): + if config is None: + config = Config() + self.configure(config) + + def configure(self, config): + self.config = config + self.exclude = config.exclude + self.ignoreFiles = config.ignoreFiles + self.include = config.include + self.plugins = config.plugins + self.match = config.testMatch + + def matches(self, name): + """Does the name match my requirements? + + To match, a name must match config.testMatch OR config.include + and it must not match config.exclude + """ + return ((self.match.search(name) + or (self.include and + filter(None, + [inc.search(name) for inc in self.include]))) + and ((not self.exclude) + or not filter(None, + [exc.search(name) for exc in self.exclude]) + )) + + def wantClass(self, cls): + """Is the class a wanted test class? + + A class must be a unittest.TestCase subclass, or match test name + requirements. Classes that start with _ are always excluded. + """ + declared = getattr(cls, '__test__', None) + if declared is not None: + wanted = declared + else: + wanted = (not cls.__name__.startswith('_') + and (issubclass(cls, unittest.TestCase) + or self.matches(cls.__name__))) + + plug_wants = self.plugins.wantClass(cls) + if plug_wants is not None: + log.debug("Plugin setting selection of %s to %s", cls, plug_wants) + wanted = plug_wants + log.debug("wantClass %s? %s", cls, wanted) + return wanted + + def wantDirectory(self, dirname): + """Is the directory a wanted test directory? + + All package directories match, so long as they do not match exclude. + All other directories must match test requirements. + """ + tail = op_basename(dirname) + if ispackage(dirname): + wanted = (not self.exclude + or not filter(None, + [exc.search(tail) for exc in self.exclude] + )) + else: + wanted = (self.matches(tail) + or (self.config.srcDirs + and tail in self.config.srcDirs)) + plug_wants = self.plugins.wantDirectory(dirname) + if plug_wants is not None: + log.debug("Plugin setting selection of %s to %s", + dirname, plug_wants) + wanted = plug_wants + log.debug("wantDirectory %s? %s", dirname, wanted) + return wanted + + def wantFile(self, file): + """Is the file a wanted test file? + + The file must be a python source file and match testMatch or + include, and not match exclude. Files that match ignore are *never* + wanted, regardless of plugin, testMatch, include or exclude settings. 
+ """ + # never, ever load files that match anything in ignore + # (.* _* and *setup*.py by default) + base = op_basename(file) + ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles + if ignore_this.search(base) ] + if ignore_matches: + log.debug('%s matches ignoreFiles pattern; skipped', + base) + return False + if not self.config.includeExe and os.access(file, os.X_OK): + log.info('%s is executable; skipped', file) + return False + dummy, ext = op_splitext(base) + pysrc = ext == '.py' + + wanted = pysrc and self.matches(base) + plug_wants = self.plugins.wantFile(file) + if plug_wants is not None: + log.debug("plugin setting want %s to %s", file, plug_wants) + wanted = plug_wants + log.debug("wantFile %s? %s", file, wanted) + return wanted + + def wantFunction(self, function): + """Is the function a test function? + """ + try: + if hasattr(function, 'compat_func_name'): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + return False + declared = getattr(function, '__test__', None) + if declared is not None: + wanted = declared + else: + wanted = not funcname.startswith('_') and self.matches(funcname) + plug_wants = self.plugins.wantFunction(function) + if plug_wants is not None: + wanted = plug_wants + log.debug("wantFunction %s? %s", function, wanted) + return wanted + + def wantMethod(self, method): + """Is the method a test method? + """ + try: + method_name = method.__name__ + except AttributeError: + # not a method + return False + if method_name.startswith('_'): + # never collect 'private' methods + return False + declared = getattr(method, '__test__', None) + if declared is not None: + wanted = declared + else: + wanted = self.matches(method_name) + plug_wants = self.plugins.wantMethod(method) + if plug_wants is not None: + wanted = plug_wants + log.debug("wantMethod %s? %s", method, wanted) + return wanted + + def wantModule(self, module): + """Is the module a test module? + + The tail of the module name must match test requirements. One exception: + we always want __main__. + """ + declared = getattr(module, '__test__', None) + if declared is not None: + wanted = declared + else: + wanted = self.matches(module.__name__.split('.')[-1]) \ + or module.__name__ == '__main__' + plug_wants = self.plugins.wantModule(module) + if plug_wants is not None: + wanted = plug_wants + log.debug("wantModule %s? %s", module, wanted) + return wanted + +defaultSelector = Selector + + +class TestAddress(object): + """A test address represents a user's request to run a particular + test. The user may specify a filename or module (or neither), + and/or a callable (a class, function, or method). The naming + format for test addresses is: + + filename_or_module:callable + + Filenames that are not absolute will be made absolute relative to + the working dir. + + The filename or module part will be considered a module name if it + doesn't look like a file, that is, if it doesn't exist on the file + system and it doesn't contain any directory separators and it + doesn't end in .py. + + Callables may be a class name, function name, method name, or + class.method specification. 
+ """ + def __init__(self, name, workingDir=None): + if workingDir is None: + workingDir = os.getcwd() + self.name = name + self.workingDir = workingDir + self.filename, self.module, self.call = split_test_name(name) + log.debug('Test name %s resolved to file %s, module %s, call %s', + name, self.filename, self.module, self.call) + if self.filename is None: + if self.module is not None: + self.filename = getfilename(self.module, self.workingDir) + if self.filename: + self.filename = src(self.filename) + if not op_isabs(self.filename): + self.filename = op_abspath(op_join(workingDir, + self.filename)) + if self.module is None: + self.module = getpackage(self.filename) + log.debug( + 'Final resolution of test name %s: file %s module %s call %s', + name, self.filename, self.module, self.call) + + def totuple(self): + return (self.filename, self.module, self.call) + + def __str__(self): + return self.name + + def __repr__(self): + return "%s: (%s, %s, %s)" % (self.name, self.filename, + self.module, self.call) diff --git a/scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py b/scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py new file mode 100755 index 00000000..2ae28399 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py @@ -0,0 +1 @@ +pass diff --git a/scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py b/scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py new file mode 100755 index 00000000..d2b284ab --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py @@ -0,0 +1,189 @@ +""" +Adds a sphinx directive that can be used to automatically document a plugin. + +this:: + + .. autoplugin :: nose.plugins.foo + :plugin: Pluggy + +produces:: + + .. automodule :: nose.plugins.foo + + Options + ------- + + .. cmdoption :: --foo=BAR, --fooble=BAR + + Do the foo thing to the new thing. + + Plugin + ------ + + .. autoclass :: nose.plugins.foo.Pluggy + :members: + + Source + ------ + + .. include :: path/to/nose/plugins/foo.py + :literal: + +""" +import os +try: + from docutils import nodes, utils + from docutils.statemachine import ViewList + from docutils.parsers.rst import directives +except ImportError: + pass # won't run anyway + +from nose.util import resolve_name +from nose.plugins.base import Plugin +from nose.plugins.manager import BuiltinPluginManager +from nose.config import Config +from nose.core import TestProgram +from inspect import isclass + + +def autoplugin_directive(dirname, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + mod_name = arguments[0] + mod = resolve_name(mod_name) + plug_name = options.get('plugin', None) + if plug_name: + obj = getattr(mod, plug_name) + else: + for entry in dir(mod): + obj = getattr(mod, entry) + if isclass(obj) and issubclass(obj, Plugin) and obj is not Plugin: + plug_name = '%s.%s' % (mod_name, entry) + break + + # mod docstring + rst = ViewList() + rst.append('.. automodule :: %s\n' % mod_name, '<autodoc>') + rst.append('', '<autodoc>') + + # options + rst.append('Options', '<autodoc>') + rst.append('-------', '<autodoc>') + rst.append('', '<autodoc>') + + plug = obj() + opts = OptBucket() + plug.options(opts, {}) + for opt in opts: + rst.append(opt.options(), '<autodoc>') + rst.append(' \n', '<autodoc>') + rst.append(' ' + opt.help + '\n', '<autodoc>') + rst.append('\n', '<autodoc>') + + # plugin class + rst.append('Plugin', '<autodoc>') + rst.append('------', '<autodoc>') + rst.append('', '<autodoc>') + + rst.append('.. 
autoclass :: %s\n' % plug_name, '<autodoc>') + rst.append(' :members:\n', '<autodoc>') + rst.append(' :show-inheritance:\n', '<autodoc>') + rst.append('', '<autodoc>') + + # source + rst.append('Source', '<autodoc>') + rst.append('------', '<autodoc>') + rst.append( + '.. include :: %s\n' % utils.relative_path( + state_machine.document['source'], + os.path.abspath(mod.__file__.replace('.pyc', '.py'))), + '<autodoc>') + rst.append(' :literal:\n', '<autodoc>') + rst.append('', '<autodoc>') + + node = nodes.section() + node.document = state.document + surrounding_title_styles = state.memo.title_styles + surrounding_section_level = state.memo.section_level + state.memo.title_styles = [] + state.memo.section_level = 0 + state.nested_parse(rst, 0, node, match_titles=1) + state.memo.title_styles = surrounding_title_styles + state.memo.section_level = surrounding_section_level + + return node.children + + +def autohelp_directive(dirname, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + """produces rst from nose help""" + config = Config(parserClass=OptBucket, + plugins=BuiltinPluginManager()) + parser = config.getParser(TestProgram.usage()) + rst = ViewList() + for line in parser.format_help().split('\n'): + rst.append(line, '<autodoc>') + + rst.append('Options', '<autodoc>') + rst.append('-------', '<autodoc>') + rst.append('', '<autodoc>') + for opt in parser: + rst.append(opt.options(), '<autodoc>') + rst.append(' \n', '<autodoc>') + rst.append(' ' + opt.help + '\n', '<autodoc>') + rst.append('\n', '<autodoc>') + node = nodes.section() + node.document = state.document + surrounding_title_styles = state.memo.title_styles + surrounding_section_level = state.memo.section_level + state.memo.title_styles = [] + state.memo.section_level = 0 + state.nested_parse(rst, 0, node, match_titles=1) + state.memo.title_styles = surrounding_title_styles + state.memo.section_level = surrounding_section_level + + return node.children + + +class OptBucket(object): + def __init__(self, doc=None, prog='nosetests'): + self.opts = [] + self.doc = doc + self.prog = prog + + def __iter__(self): + return iter(self.opts) + + def format_help(self): + return self.doc.replace('%prog', self.prog).replace(':\n', '::\n') + + def add_option(self, *arg, **kw): + self.opts.append(Opt(*arg, **kw)) + + +class Opt(object): + def __init__(self, *arg, **kw): + self.opts = arg + self.action = kw.pop('action', None) + self.default = kw.pop('default', None) + self.metavar = kw.pop('metavar', None) + self.help = kw.pop('help', None) + + def options(self): + buf = [] + for optstring in self.opts: + desc = optstring + if self.action not in ('store_true', 'store_false'): + desc += '=%s' % self.meta(optstring) + buf.append(desc) + return '.. cmdoption :: ' + ', '.join(buf) + + def meta(self, optstring): + # FIXME optparser default metavar? 
+ return self.metavar or 'DEFAULT' + + +def setup(app): + app.add_directive('autoplugin', + autoplugin_directive, 1, (1, 0, 1), + plugin=directives.unchanged) + app.add_directive('autohelp', autohelp_directive, 0, (0, 0, 1)) diff --git a/scripts/external_libs/nose-1.3.4/nose/suite.py b/scripts/external_libs/nose-1.3.4/nose/suite.py new file mode 100755 index 00000000..a831105e --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/suite.py @@ -0,0 +1,609 @@ +""" +Test Suites +----------- + +Provides a LazySuite, which is a suite whose test list is a generator +function, and ContextSuite,which can run fixtures (setup/teardown +functions or methods) for the context that contains its tests. + +""" +from __future__ import generators + +import logging +import sys +import unittest +from nose.case import Test +from nose.config import Config +from nose.proxy import ResultProxyFactory +from nose.util import isclass, resolve_name, try_run + +if sys.platform == 'cli': + if sys.version_info[:2] < (2, 6): + import clr + clr.AddReference("IronPython") + from IronPython.Runtime.Exceptions import StringException + else: + class StringException(Exception): + pass + +log = logging.getLogger(__name__) +#log.setLevel(logging.DEBUG) + +# Singleton for default value -- see ContextSuite.__init__ below +_def = object() + + +def _strclass(cls): + return "%s.%s" % (cls.__module__, cls.__name__) + +class MixedContextError(Exception): + """Error raised when a context suite sees tests from more than + one context. + """ + pass + + +class LazySuite(unittest.TestSuite): + """A suite that may use a generator as its list of tests + """ + def __init__(self, tests=()): + """Initialize the suite. tests may be an iterable or a generator + """ + super(LazySuite, self).__init__() + self._set_tests(tests) + + def __iter__(self): + return iter(self._tests) + + def __repr__(self): + return "<%s tests=generator (%s)>" % ( + _strclass(self.__class__), id(self)) + + def __hash__(self): + return object.__hash__(self) + + __str__ = __repr__ + + def addTest(self, test): + self._precache.append(test) + + # added to bypass run changes in 2.7's unittest + def run(self, result): + for test in self._tests: + if result.shouldStop: + break + test(result) + return result + + def __nonzero__(self): + log.debug("tests in %s?", id(self)) + if self._precache: + return True + if self.test_generator is None: + return False + try: + test = self.test_generator.next() + if test is not None: + self._precache.append(test) + return True + except StopIteration: + pass + return False + + def _get_tests(self): + log.debug("precache is %s", self._precache) + for test in self._precache: + yield test + if self.test_generator is None: + return + for test in self.test_generator: + yield test + + def _set_tests(self, tests): + self._precache = [] + is_suite = isinstance(tests, unittest.TestSuite) + if callable(tests) and not is_suite: + self.test_generator = tests() + elif is_suite: + # Suites need special treatment: they must be called like + # tests for their setup/teardown to run (if any) + self.addTests([tests]) + self.test_generator = None + else: + self.addTests(tests) + self.test_generator = None + + _tests = property(_get_tests, _set_tests, None, + "Access the tests in this suite. Access is through a " + "generator, so iteration may not be repeatable.") + + +class ContextSuite(LazySuite): + """A suite with context. + + A ContextSuite executes fixtures (setup and teardown functions or + methods) for the context containing its tests. 
+ + The context may be explicitly passed. If it is not, a context (or + nested set of contexts) will be constructed by examining the tests + in the suite. + """ + failureException = unittest.TestCase.failureException + was_setup = False + was_torndown = False + classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll', + 'setUpClass', 'setUpAll') + classTeardown = ('teardown_class', 'teardown_all', 'teardownClass', + 'teardownAll', 'tearDownClass', 'tearDownAll') + moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup', + 'setUp') + moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule', + 'teardown', 'tearDown') + packageSetup = ('setup_package', 'setupPackage', 'setUpPackage') + packageTeardown = ('teardown_package', 'teardownPackage', + 'tearDownPackage') + + def __init__(self, tests=(), context=None, factory=None, + config=None, resultProxy=None, can_split=True): + log.debug("Context suite for %s (%s) (%s)", tests, context, id(self)) + self.context = context + self.factory = factory + if config is None: + config = Config() + self.config = config + self.resultProxy = resultProxy + self.has_run = False + self.can_split = can_split + self.error_context = None + super(ContextSuite, self).__init__(tests) + + def __repr__(self): + return "<%s context=%s>" % ( + _strclass(self.__class__), + getattr(self.context, '__name__', self.context)) + __str__ = __repr__ + + def id(self): + if self.error_context: + return '%s:%s' % (repr(self), self.error_context) + else: + return repr(self) + + def __hash__(self): + return object.__hash__(self) + + # 2.3 compat -- force 2.4 call sequence + def __call__(self, *arg, **kw): + return self.run(*arg, **kw) + + def exc_info(self): + """Hook for replacing error tuple output + """ + return sys.exc_info() + + def _exc_info(self): + """Bottleneck to fix up IronPython string exceptions + """ + e = self.exc_info() + if sys.platform == 'cli': + if isinstance(e[0], StringException): + # IronPython throws these StringExceptions, but + # traceback checks type(etype) == str. Make a real + # string here. + e = (str(e[0]), e[1], e[2]) + + return e + + def run(self, result): + """Run tests in suite inside of suite fixtures. 
+ """ + # proxy the result for myself + log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests) + #import pdb + #pdb.set_trace() + if self.resultProxy: + result, orig = self.resultProxy(result, self), result + else: + result, orig = result, result + try: + self.setUp() + except KeyboardInterrupt: + raise + except: + self.error_context = 'setup' + result.addError(self, self._exc_info()) + return + try: + for test in self._tests: + if result.shouldStop: + log.debug("stopping") + break + # each nose.case.Test will create its own result proxy + # so the cases need the original result, to avoid proxy + # chains + test(orig) + finally: + self.has_run = True + try: + self.tearDown() + except KeyboardInterrupt: + raise + except: + self.error_context = 'teardown' + result.addError(self, self._exc_info()) + + def hasFixtures(self, ctx_callback=None): + context = self.context + if context is None: + return False + if self.implementsAnyFixture(context, ctx_callback=ctx_callback): + return True + # My context doesn't have any, but its ancestors might + factory = self.factory + if factory: + ancestors = factory.context.get(self, []) + for ancestor in ancestors: + if self.implementsAnyFixture( + ancestor, ctx_callback=ctx_callback): + return True + return False + + def implementsAnyFixture(self, context, ctx_callback): + if isclass(context): + names = self.classSetup + self.classTeardown + else: + names = self.moduleSetup + self.moduleTeardown + if hasattr(context, '__path__'): + names += self.packageSetup + self.packageTeardown + # If my context has any fixture attribute, I have fixtures + fixt = False + for m in names: + if hasattr(context, m): + fixt = True + break + if ctx_callback is None: + return fixt + return ctx_callback(context, fixt) + + def setUp(self): + log.debug("suite %s setUp called, tests: %s", id(self), self._tests) + if not self: + # I have no tests + log.debug("suite %s has no tests", id(self)) + return + if self.was_setup: + log.debug("suite %s already set up", id(self)) + return + context = self.context + if context is None: + return + # before running my own context's setup, I need to + # ask the factory if my context's contexts' setups have been run + factory = self.factory + if factory: + # get a copy, since we'll be destroying it as we go + ancestors = factory.context.get(self, [])[:] + while ancestors: + ancestor = ancestors.pop() + log.debug("ancestor %s may need setup", ancestor) + if ancestor in factory.was_setup: + continue + log.debug("ancestor %s does need setup", ancestor) + self.setupContext(ancestor) + if not context in factory.was_setup: + self.setupContext(context) + else: + self.setupContext(context) + self.was_setup = True + log.debug("completed suite setup") + + def setupContext(self, context): + self.config.plugins.startContext(context) + log.debug("%s setup context %s", self, context) + if self.factory: + if context in self.factory.was_setup: + return + # note that I ran the setup for this context, so that I'll run + # the teardown in my teardown + self.factory.was_setup[context] = self + if isclass(context): + names = self.classSetup + else: + names = self.moduleSetup + if hasattr(context, '__path__'): + names = self.packageSetup + names + try_run(context, names) + + def shortDescription(self): + if self.context is None: + return "test suite" + return "test suite for %s" % self.context + + def tearDown(self): + log.debug('context teardown') + if not self.was_setup or self.was_torndown: + log.debug( + "No reason to teardown (was_setup? 
%s was_torndown? %s)" + % (self.was_setup, self.was_torndown)) + return + self.was_torndown = True + context = self.context + if context is None: + log.debug("No context to tear down") + return + + # for each ancestor... if the ancestor was setup + # and I did the setup, I can do teardown + factory = self.factory + if factory: + ancestors = factory.context.get(self, []) + [context] + for ancestor in ancestors: + log.debug('ancestor %s may need teardown', ancestor) + if not ancestor in factory.was_setup: + log.debug('ancestor %s was not setup', ancestor) + continue + if ancestor in factory.was_torndown: + log.debug('ancestor %s already torn down', ancestor) + continue + setup = factory.was_setup[ancestor] + log.debug("%s setup ancestor %s", setup, ancestor) + if setup is self: + self.teardownContext(ancestor) + else: + self.teardownContext(context) + + def teardownContext(self, context): + log.debug("%s teardown context %s", self, context) + if self.factory: + if context in self.factory.was_torndown: + return + self.factory.was_torndown[context] = self + if isclass(context): + names = self.classTeardown + else: + names = self.moduleTeardown + if hasattr(context, '__path__'): + names = self.packageTeardown + names + try_run(context, names) + self.config.plugins.stopContext(context) + + # FIXME the wrapping has to move to the factory? + def _get_wrapped_tests(self): + for test in self._get_tests(): + if isinstance(test, Test) or isinstance(test, unittest.TestSuite): + yield test + else: + yield Test(test, + config=self.config, + resultProxy=self.resultProxy) + + _tests = property(_get_wrapped_tests, LazySuite._set_tests, None, + "Access the tests in this suite. Tests are returned " + "inside of a context wrapper.") + + +class ContextSuiteFactory(object): + """Factory for ContextSuites. Called with a collection of tests, + the factory decides on a hierarchy of contexts by introspecting + the collection or the tests themselves to find the objects + containing the test objects. It always returns one suite, but that + suite may consist of a hierarchy of nested suites. + """ + suiteClass = ContextSuite + def __init__(self, config=None, suiteClass=None, resultProxy=_def): + if config is None: + config = Config() + self.config = config + if suiteClass is not None: + self.suiteClass = suiteClass + # Using a singleton to represent default instead of None allows + # passing resultProxy=None to turn proxying off. + if resultProxy is _def: + resultProxy = ResultProxyFactory(config=config) + self.resultProxy = resultProxy + self.suites = {} + self.context = {} + self.was_setup = {} + self.was_torndown = {} + + def __call__(self, tests, **kw): + """Return ``ContextSuite`` for tests. ``tests`` may either + be a callable (in which case the resulting ContextSuite will + have no parent context and be evaluated lazily) or an + iterable. In that case the tests will wrapped in + nose.case.Test, be examined and the context of each found and a + suite of suites returned, organized into a stack with the + outermost suites belonging to the outermost contexts. 
+ """ + log.debug("Create suite for %s", tests) + context = kw.pop('context', getattr(tests, 'context', None)) + log.debug("tests %s context %s", tests, context) + if context is None: + tests = self.wrapTests(tests) + try: + context = self.findContext(tests) + except MixedContextError: + return self.makeSuite(self.mixedSuites(tests), None, **kw) + return self.makeSuite(tests, context, **kw) + + def ancestry(self, context): + """Return the ancestry of the context (that is, all of the + packages and modules containing the context), in order of + descent with the outermost ancestor last. + This method is a generator. + """ + log.debug("get ancestry %s", context) + if context is None: + return + # Methods include reference to module they are defined in, we + # don't want that, instead want the module the class is in now + # (classes are re-ancestored elsewhere). + if hasattr(context, 'im_class'): + context = context.im_class + elif hasattr(context, '__self__'): + context = context.__self__.__class__ + if hasattr(context, '__module__'): + ancestors = context.__module__.split('.') + elif hasattr(context, '__name__'): + ancestors = context.__name__.split('.')[:-1] + else: + raise TypeError("%s has no ancestors?" % context) + while ancestors: + log.debug(" %s ancestors %s", context, ancestors) + yield resolve_name('.'.join(ancestors)) + ancestors.pop() + + def findContext(self, tests): + if callable(tests) or isinstance(tests, unittest.TestSuite): + return None + context = None + for test in tests: + # Don't look at suites for contexts, only tests + ctx = getattr(test, 'context', None) + if ctx is None: + continue + if context is None: + context = ctx + elif context != ctx: + raise MixedContextError( + "Tests with different contexts in same suite! %s != %s" + % (context, ctx)) + return context + + def makeSuite(self, tests, context, **kw): + suite = self.suiteClass( + tests, context=context, config=self.config, factory=self, + resultProxy=self.resultProxy, **kw) + if context is not None: + self.suites.setdefault(context, []).append(suite) + self.context.setdefault(suite, []).append(context) + log.debug("suite %s has context %s", suite, + getattr(context, '__name__', None)) + for ancestor in self.ancestry(context): + self.suites.setdefault(ancestor, []).append(suite) + self.context[suite].append(ancestor) + log.debug("suite %s has ancestor %s", suite, ancestor.__name__) + return suite + + def mixedSuites(self, tests): + """The complex case where there are tests that don't all share + the same context. Groups tests into suites with common ancestors, + according to the following (essentially tail-recursive) procedure: + + Starting with the context of the first test, if it is not + None, look for tests in the remaining tests that share that + ancestor. If any are found, group into a suite with that + ancestor as the context, and replace the current suite with + that suite. Continue this process for each ancestor of the + first test, until all ancestors have been processed. At this + point if any tests remain, recurse with those tests as the + input, returning a list of the common suite (which may be the + suite or test we started with, if no common tests were found) + plus the results of recursion. 
+ """ + if not tests: + return [] + head = tests.pop(0) + if not tests: + return [head] # short circuit when none are left to combine + suite = head # the common ancestry suite, so far + tail = tests[:] + context = getattr(head, 'context', None) + if context is not None: + ancestors = [context] + [a for a in self.ancestry(context)] + for ancestor in ancestors: + common = [suite] # tests with ancestor in common, so far + remain = [] # tests that remain to be processed + for test in tail: + found_common = False + test_ctx = getattr(test, 'context', None) + if test_ctx is None: + remain.append(test) + continue + if test_ctx is ancestor: + common.append(test) + continue + for test_ancestor in self.ancestry(test_ctx): + if test_ancestor is ancestor: + common.append(test) + found_common = True + break + if not found_common: + remain.append(test) + if common: + suite = self.makeSuite(common, ancestor) + tail = self.mixedSuites(remain) + return [suite] + tail + + def wrapTests(self, tests): + log.debug("wrap %s", tests) + if callable(tests) or isinstance(tests, unittest.TestSuite): + log.debug("I won't wrap") + return tests + wrapped = [] + for test in tests: + log.debug("wrapping %s", test) + if isinstance(test, Test) or isinstance(test, unittest.TestSuite): + wrapped.append(test) + elif isinstance(test, ContextList): + wrapped.append(self.makeSuite(test, context=test.context)) + else: + wrapped.append( + Test(test, config=self.config, resultProxy=self.resultProxy) + ) + return wrapped + + +class ContextList(object): + """Not quite a suite -- a group of tests in a context. This is used + to hint the ContextSuiteFactory about what context the tests + belong to, in cases where it may be ambiguous or missing. + """ + def __init__(self, tests, context=None): + self.tests = tests + self.context = context + + def __iter__(self): + return iter(self.tests) + + +class FinalizingSuiteWrapper(unittest.TestSuite): + """Wraps suite and calls final function after suite has + executed. Used to call final functions in cases (like running in + the standard test runner) where test running is not under nose's + control. + """ + def __init__(self, suite, finalize): + super(FinalizingSuiteWrapper, self).__init__() + self.suite = suite + self.finalize = finalize + + def __call__(self, *arg, **kw): + return self.run(*arg, **kw) + + # 2.7 compat + def __iter__(self): + return iter(self.suite) + + def run(self, *arg, **kw): + try: + return self.suite(*arg, **kw) + finally: + self.finalize(*arg, **kw) + + +# backwards compat -- sort of +class TestDir: + def __init__(*arg, **kw): + raise NotImplementedError( + "TestDir is not usable with nose 0.10. The class is present " + "in nose.suite for backwards compatibility purposes but it " + "may not be used.") + + +class TestModule: + def __init__(*arg, **kw): + raise NotImplementedError( + "TestModule is not usable with nose 0.10. The class is present " + "in nose.suite for backwards compatibility purposes but it " + "may not be used.") diff --git a/scripts/external_libs/nose-1.3.4/nose/tools/__init__.py b/scripts/external_libs/nose-1.3.4/nose/tools/__init__.py new file mode 100755 index 00000000..74dab16a --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/tools/__init__.py @@ -0,0 +1,15 @@ +""" +Tools for testing +----------------- + +nose.tools provides a few convenience functions to make writing tests +easier. You don't have to use them; nothing in the rest of nose depends +on any of these methods. 
+ +""" +from nose.tools.nontrivial import * +from nose.tools.nontrivial import __all__ as nontrivial_all +from nose.tools.trivial import * +from nose.tools.trivial import __all__ as trivial_all + +__all__ = trivial_all + nontrivial_all diff --git a/scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py b/scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py new file mode 100755 index 00000000..28397324 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py @@ -0,0 +1,151 @@ +"""Tools not exempt from being descended into in tracebacks""" + +import time + + +__all__ = ['make_decorator', 'raises', 'set_trace', 'timed', 'with_setup', + 'TimeExpired', 'istest', 'nottest'] + + +class TimeExpired(AssertionError): + pass + + +def make_decorator(func): + """ + Wraps a test decorator so as to properly replicate metadata + of the decorated function, including nose's additional stuff + (namely, setup and teardown). + """ + def decorate(newfunc): + if hasattr(func, 'compat_func_name'): + name = func.compat_func_name + else: + name = func.__name__ + newfunc.__dict__ = func.__dict__ + newfunc.__doc__ = func.__doc__ + newfunc.__module__ = func.__module__ + if not hasattr(newfunc, 'compat_co_firstlineno'): + newfunc.compat_co_firstlineno = func.func_code.co_firstlineno + try: + newfunc.__name__ = name + except TypeError: + # can't set func name in 2.3 + newfunc.compat_func_name = name + return newfunc + return decorate + + +def raises(*exceptions): + """Test must raise one of expected exceptions to pass. + + Example use:: + + @raises(TypeError, ValueError) + def test_raises_type_error(): + raise TypeError("This test passes") + + @raises(Exception) + def test_that_fails_by_passing(): + pass + + If you want to test many assertions about exceptions in a single test, + you may want to use `assert_raises` instead. + """ + valid = ' or '.join([e.__name__ for e in exceptions]) + def decorate(func): + name = func.__name__ + def newfunc(*arg, **kw): + try: + func(*arg, **kw) + except exceptions: + pass + except: + raise + else: + message = "%s() did not raise %s" % (name, valid) + raise AssertionError(message) + newfunc = make_decorator(func)(newfunc) + return newfunc + return decorate + + +def set_trace(): + """Call pdb.set_trace in the calling frame, first restoring + sys.stdout to the real output stream. Note that sys.stdout is NOT + reset to whatever it was before the call once pdb is done! + """ + import pdb + import sys + stdout = sys.stdout + sys.stdout = sys.__stdout__ + pdb.Pdb().set_trace(sys._getframe().f_back) + + +def timed(limit): + """Test must finish within specified time limit to pass. + + Example use:: + + @timed(.1) + def test_that_fails(): + time.sleep(.2) + """ + def decorate(func): + def newfunc(*arg, **kw): + start = time.time() + result = func(*arg, **kw) + end = time.time() + if end - start > limit: + raise TimeExpired("Time limit (%s) exceeded" % limit) + return result + newfunc = make_decorator(func)(newfunc) + return newfunc + return decorate + + +def with_setup(setup=None, teardown=None): + """Decorator to add setup and/or teardown methods to a test function:: + + @with_setup(setup, teardown) + def test_something(): + " ... " + + Note that `with_setup` is useful *only* for test functions, not for test + methods or inside of TestCase subclasses. 
+ """ + def decorate(func, setup=setup, teardown=teardown): + if setup: + if hasattr(func, 'setup'): + _old_s = func.setup + def _s(): + setup() + _old_s() + func.setup = _s + else: + func.setup = setup + if teardown: + if hasattr(func, 'teardown'): + _old_t = func.teardown + def _t(): + _old_t() + teardown() + func.teardown = _t + else: + func.teardown = teardown + return func + return decorate + + +def istest(func): + """Decorator to mark a function or method as a test + """ + func.__test__ = True + return func + + +def nottest(func): + """Decorator to mark a function or method as *not* a test + """ + func.__test__ = False + return func diff --git a/scripts/external_libs/nose-1.3.4/nose/tools/trivial.py b/scripts/external_libs/nose-1.3.4/nose/tools/trivial.py new file mode 100755 index 00000000..cf83efed --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/tools/trivial.py @@ -0,0 +1,54 @@ +"""Tools so trivial that tracebacks should not descend into them + +We define the ``__unittest`` symbol in their module namespace so unittest will +skip them when printing tracebacks, just as it does for their corresponding +methods in ``unittest`` proper. + +""" +import re +import unittest + + +__all__ = ['ok_', 'eq_'] + +# Use the same flag as unittest itself to prevent descent into these functions: +__unittest = 1 + + +def ok_(expr, msg=None): + """Shorthand for assert. Saves 3 whole characters! + """ + if not expr: + raise AssertionError(msg) + + +def eq_(a, b, msg=None): + """Shorthand for 'assert a == b, "%r != %r" % (a, b) + """ + if not a == b: + raise AssertionError(msg or "%r != %r" % (a, b)) + + +# +# Expose assert* from unittest.TestCase +# - give them pep8 style names +# +caps = re.compile('([A-Z])') + +def pep8(name): + return caps.sub(lambda m: '_' + m.groups()[0].lower(), name) + +class Dummy(unittest.TestCase): + def nop(): + pass +_t = Dummy('nop') + +for at in [ at for at in dir(_t) + if at.startswith('assert') and not '_' in at ]: + pepd = pep8(at) + vars()[pepd] = getattr(_t, at) + __all__.append(pepd) + +del Dummy +del _t +del pep8 diff --git a/scripts/external_libs/nose-1.3.4/nose/twistedtools.py b/scripts/external_libs/nose-1.3.4/nose/twistedtools.py new file mode 100755 index 00000000..8d9c6ffe --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/twistedtools.py @@ -0,0 +1,173 @@ +""" +Twisted integration +------------------- + +This module provides a very simple way to integrate your tests with the +Twisted_ event loop. + +You must import this module *before* importing anything from Twisted itself! + +Example:: + + from nose.twistedtools import reactor, deferred + + @deferred() + def test_resolve(): + return reactor.resolve("www.python.org") + +Or, more realistically:: + + @deferred(timeout=5.0) + def test_resolve(): + d = reactor.resolve("www.python.org") + def check_ip(ip): + assert ip == "67.15.36.43" + d.addCallback(check_ip) + return d + +.. _Twisted: http://twistedmatrix.com/trac/ +""" + +import sys +from Queue import Queue, Empty +from nose.tools import make_decorator, TimeExpired + +__all__ = [ + 'threaded_reactor', 'reactor', 'deferred', 'TimeExpired', + 'stop_reactor' +] + +_twisted_thread = None + +def threaded_reactor(): + """ + Start the Twisted reactor in a separate thread, if not already done. + Returns the reactor. + The thread will automatically be destroyed when all the tests are done. 
+ """ + global _twisted_thread + try: + from twisted.internet import reactor + except ImportError: + return None, None + if not _twisted_thread: + from twisted.python import threadable + from threading import Thread + _twisted_thread = Thread(target=lambda: reactor.run( \ + installSignalHandlers=False)) + _twisted_thread.setDaemon(True) + _twisted_thread.start() + return reactor, _twisted_thread + +# Export global reactor variable, as Twisted does +reactor, reactor_thread = threaded_reactor() + + +def stop_reactor(): + """Stop the reactor and join the reactor thread until it stops. + Call this function in teardown at the module or package level to + reset the twisted system after your tests. You *must* do this if + you mix tests using these tools and tests using twisted.trial. + """ + global _twisted_thread + + def stop_reactor(): + '''Helper for calling stop from withing the thread.''' + reactor.stop() + + reactor.callFromThread(stop_reactor) + reactor_thread.join() + for p in reactor.getDelayedCalls(): + if p.active(): + p.cancel() + _twisted_thread = None + + +def deferred(timeout=None): + """ + By wrapping a test function with this decorator, you can return a + twisted Deferred and the test will wait for the deferred to be triggered. + The whole test function will run inside the Twisted event loop. + + The optional timeout parameter specifies the maximum duration of the test. + The difference with timed() is that timed() will still wait for the test + to end, while deferred() will stop the test when its timeout has expired. + The latter is more desireable when dealing with network tests, because + the result may actually never arrive. + + If the callback is triggered, the test has passed. + If the errback is triggered or the timeout expires, the test has failed. + + Example:: + + @deferred(timeout=5.0) + def test_resolve(): + return reactor.resolve("www.python.org") + + Attention! If you combine this decorator with other decorators (like + "raises"), deferred() must be called *first*! 
+ + In other words, this is good:: + + @raises(DNSLookupError) + @deferred() + def test_error(): + return reactor.resolve("xxxjhjhj.biz") + + and this is bad:: + + @deferred() + @raises(DNSLookupError) + def test_error(): + return reactor.resolve("xxxjhjhj.biz") + """ + reactor, reactor_thread = threaded_reactor() + if reactor is None: + raise ImportError("twisted is not available or could not be imported") + # Check for common syntax mistake + # (otherwise, tests can be silently ignored + # if one writes "@deferred" instead of "@deferred()") + try: + timeout is None or timeout + 0 + except TypeError: + raise TypeError("'timeout' argument must be a number or None") + + def decorate(func): + def wrapper(*args, **kargs): + q = Queue() + def callback(value): + q.put(None) + def errback(failure): + # Retrieve and save full exception info + try: + failure.raiseException() + except: + q.put(sys.exc_info()) + def g(): + try: + d = func(*args, **kargs) + try: + d.addCallbacks(callback, errback) + # Check for a common mistake and display a nice error + # message + except AttributeError: + raise TypeError("you must return a twisted Deferred " + "from your test case!") + # Catch exceptions raised in the test body (from the + # Twisted thread) + except: + q.put(sys.exc_info()) + reactor.callFromThread(g) + try: + error = q.get(timeout=timeout) + except Empty: + raise TimeExpired("timeout expired before end of test (%f s.)" + % timeout) + # Re-raise all exceptions + if error is not None: + exc_type, exc_value, tb = error + raise exc_type, exc_value, tb + wrapper = make_decorator(func)(wrapper) + return wrapper + return decorate + diff --git a/scripts/external_libs/nose-1.3.4/nose/usage.txt b/scripts/external_libs/nose-1.3.4/nose/usage.txt new file mode 100755 index 00000000..bc96894a --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/usage.txt @@ -0,0 +1,115 @@ +nose collects tests automatically from python source files, +directories and packages found in its working directory (which +defaults to the current working directory). Any python source file, +directory or package that matches the testMatch regular expression +(by default: `(?:^|[\b_\.-])[Tt]est)` will be collected as a test (or +source for collection of tests). In addition, all other packages +found in the working directory will be examined for python source files +or directories that match testMatch. Package discovery descends all +the way down the tree, so package.tests and package.sub.tests and +package.sub.sub2.tests will all be collected. + +Within a test directory or package, any python source file matching +testMatch will be examined for test cases. Within a test module, +functions and classes whose names match testMatch and TestCase +subclasses with any name will be loaded and executed as tests. Tests +may use the assert keyword or raise AssertionErrors to indicate test +failure. TestCase subclasses may do the same or use the various +TestCase methods available. + +**It is important to note that the default behavior of nose is to +not include tests from files which are executable.** To include +tests from such files, remove their executable bit or use +the --exe flag (see 'Options' section below). + +Selecting Tests +--------------- + +To specify which tests to run, pass test names on the command line: + + %prog only_test_this.py + +Test names specified may be file or module names, and may optionally +indicate the test case to run by separating the module or file name +from the test case name with a colon. 
Filenames may be relative or +absolute. Examples: + + %prog test.module + %prog another.test:TestCase.test_method + %prog a.test:TestCase + %prog /path/to/test/file.py:test_function + +You may also change the working directory where nose looks for tests +by using the -w switch: + + %prog -w /path/to/tests + +Note, however, that support for multiple -w arguments is now deprecated +and will be removed in a future release. As of nose 0.10, you can get +the same behavior by specifying the target directories *without* +the -w switch: + + %prog /path/to/tests /another/path/to/tests + +Further customization of test selection and loading is possible +through the use of plugins. + +Test result output is identical to that of unittest, except for +the additional features (error classes, and plugin-supplied +features such as output capture and assert introspection) detailed +in the options below. + +Configuration +------------- + +In addition to passing command-line options, you may also put +configuration options in your project's *setup.cfg* file, or a .noserc +or nose.cfg file in your home directory. In any of these standard +ini-style config files, you put your nosetests configuration in a +``[nosetests]`` section. Options are the same as on the command line, +with the -- prefix removed. For options that are simple switches, you +must supply a value: + + [nosetests] + verbosity=3 + with-doctest=1 + +All configuration files that are found will be loaded and their +options combined. You can override the standard config file loading +with the ``-c`` option. + +Using Plugins +------------- + +There are numerous nose plugins available via easy_install and +elsewhere. To use a plugin, just install it. The plugin will add +command line options to nosetests. To verify that the plugin is installed, +run: + + nosetests --plugins + +You can add -v or -vv to that command to show more information +about each plugin. + +If you are running nose.main() or nose.run() from a script, you +can specify a list of plugins to use by passing a list of plugins +with the plugins keyword argument. + +0.9 plugins +----------- + +nose 1.0 can use SOME plugins that were written for nose 0.9. The +default plugin manager inserts a compatibility wrapper around 0.9 +plugins that adapts the changed plugin api calls. However, plugins +that access nose internals are likely to fail, especially if they +attempt to access test case or test suite classes. For example, +plugins that try to determine if a test passed to startTest is an +individual test or a suite will fail, partly because suites are no +longer passed to startTest and partly because it's likely that the +plugin is trying to find out if the test is an instance of a class +that no longer exists. + +0.10 and 0.11 plugins +--------------------- + +All plugins written for nose 0.10 and 0.11 should work with nose 1.0. diff --git a/scripts/external_libs/nose-1.3.4/nose/util.py b/scripts/external_libs/nose-1.3.4/nose/util.py new file mode 100755 index 00000000..e6f735e0 --- /dev/null +++ b/scripts/external_libs/nose-1.3.4/nose/util.py @@ -0,0 +1,660 @@ +"""Utility functions and classes used by nose internally. 
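# usage.txt above notes that nose.main() / nose.run() accept a list of plugin
# instances through the `plugins` keyword argument. A sketch of such a driver
# script (RedNose is the plugin vendored later in this commit; argv is
# illustrative):
import nose
from rednose import RedNose

if __name__ == '__main__':
    nose.main(plugins=[RedNose()], argv=['nosetests', '--rednose', '-v'])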
+""" +import inspect +import itertools +import logging +import os +import re +import sys +import types +import unittest +from nose.pyversion import ClassType, TypeType, isgenerator, ismethod + + +log = logging.getLogger('nose') + +ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$') +class_types = (ClassType, TypeType) +skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)" + +try: + set() + set = set # make from nose.util import set happy +except NameError: + try: + from sets import Set as set + except ImportError: + pass + + +def ls_tree(dir_path="", + skip_pattern=skip_pattern, + indent="|-- ", branch_indent="| ", + last_indent="`-- ", last_branch_indent=" "): + # TODO: empty directories look like non-directory files + return "\n".join(_ls_tree_lines(dir_path, skip_pattern, + indent, branch_indent, + last_indent, last_branch_indent)) + + +def _ls_tree_lines(dir_path, skip_pattern, + indent, branch_indent, last_indent, last_branch_indent): + if dir_path == "": + dir_path = os.getcwd() + + lines = [] + + names = os.listdir(dir_path) + names.sort() + dirs, nondirs = [], [] + for name in names: + if re.match(skip_pattern, name): + continue + if os.path.isdir(os.path.join(dir_path, name)): + dirs.append(name) + else: + nondirs.append(name) + + # list non-directories first + entries = list(itertools.chain([(name, False) for name in nondirs], + [(name, True) for name in dirs])) + def ls_entry(name, is_dir, ind, branch_ind): + if not is_dir: + yield ind + name + else: + path = os.path.join(dir_path, name) + if not os.path.islink(path): + yield ind + name + subtree = _ls_tree_lines(path, skip_pattern, + indent, branch_indent, + last_indent, last_branch_indent) + for x in subtree: + yield branch_ind + x + for name, is_dir in entries[:-1]: + for line in ls_entry(name, is_dir, indent, branch_indent): + yield line + if entries: + name, is_dir = entries[-1] + for line in ls_entry(name, is_dir, last_indent, last_branch_indent): + yield line + + +def absdir(path): + """Return absolute, normalized path to directory, if it exists; None + otherwise. + """ + if not os.path.isabs(path): + path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(), + path))) + if path is None or not os.path.isdir(path): + return None + return path + + +def absfile(path, where=None): + """Return absolute, normalized path to file (optionally in directory + where), or None if the file can't be found either in where or the current + working directory. 
+ """ + orig = path + if where is None: + where = os.getcwd() + if isinstance(where, list) or isinstance(where, tuple): + for maybe_path in where: + maybe_abs = absfile(path, maybe_path) + if maybe_abs is not None: + return maybe_abs + return None + if not os.path.isabs(path): + path = os.path.normpath(os.path.abspath(os.path.join(where, path))) + if path is None or not os.path.exists(path): + if where != os.getcwd(): + # try the cwd instead + path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(), + orig))) + if path is None or not os.path.exists(path): + return None + if os.path.isdir(path): + # might want an __init__.py from pacakge + init = os.path.join(path,'__init__.py') + if os.path.isfile(init): + return init + elif os.path.isfile(path): + return path + return None + + +def anyp(predicate, iterable): + for item in iterable: + if predicate(item): + return True + return False + + +def file_like(name): + """A name is file-like if it is a path that exists, or it has a + directory part, or it ends in .py, or it isn't a legal python + identifier. + """ + return (os.path.exists(name) + or os.path.dirname(name) + or name.endswith('.py') + or not ident_re.match(os.path.splitext(name)[0])) + + +def func_lineno(func): + """Get the line number of a function. First looks for + compat_co_firstlineno, then func_code.co_first_lineno. + """ + try: + return func.compat_co_firstlineno + except AttributeError: + try: + return func.func_code.co_firstlineno + except AttributeError: + return -1 + + +def isclass(obj): + """Is obj a class? Inspect's isclass is too liberal and returns True + for objects that can't be subclasses of anything. + """ + obj_type = type(obj) + return obj_type in class_types or issubclass(obj_type, type) + + +# backwards compat (issue #64) +is_generator = isgenerator + + +def ispackage(path): + """ + Is this path a package directory? + + >>> ispackage('nose') + True + >>> ispackage('unit_tests') + False + >>> ispackage('nose/plugins') + True + >>> ispackage('nose/loader.py') + False + """ + if os.path.isdir(path): + # at least the end of the path must be a legal python identifier + # and __init__.py[co] must exist + end = os.path.basename(path) + if ident_re.match(end): + for init in ('__init__.py', '__init__.pyc', '__init__.pyo'): + if os.path.isfile(os.path.join(path, init)): + return True + if sys.platform.startswith('java') and \ + os.path.isfile(os.path.join(path, '__init__$py.class')): + return True + return False + + +def isproperty(obj): + """ + Is this a property? + + >>> class Foo: + ... def got(self): + ... return 2 + ... def get(self): + ... return 1 + ... get = property(get) + + >>> isproperty(Foo.got) + False + >>> isproperty(Foo.get) + True + """ + return type(obj) == property + + +def getfilename(package, relativeTo=None): + """Find the python source file for a package, relative to a + particular directory (defaults to current working directory if not + given). + """ + if relativeTo is None: + relativeTo = os.getcwd() + path = os.path.join(relativeTo, os.sep.join(package.split('.'))) + suffixes = ('/__init__.py', '.py') + for suffix in suffixes: + filename = path + suffix + if os.path.exists(filename): + return filename + return None + + +def getpackage(filename): + """ + Find the full dotted package name for a given python source file + name. Returns None if the file is not a python source file. + + >>> getpackage('foo.py') + 'foo' + >>> getpackage('biff/baf.py') + 'baf' + >>> getpackage('nose/util.py') + 'nose.util' + + Works for directories too. 
+ + >>> getpackage('nose') + 'nose' + >>> getpackage('nose/plugins') + 'nose.plugins' + + And __init__ files stuck onto directories + + >>> getpackage('nose/plugins/__init__.py') + 'nose.plugins' + + Absolute paths also work. + + >>> path = os.path.abspath(os.path.join('nose', 'plugins')) + >>> getpackage(path) + 'nose.plugins' + """ + src_file = src(filename) + if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file): + return None + base, ext = os.path.splitext(os.path.basename(src_file)) + if base == '__init__': + mod_parts = [] + else: + mod_parts = [base] + path, part = os.path.split(os.path.split(src_file)[0]) + while part: + if ispackage(os.path.join(path, part)): + mod_parts.append(part) + else: + break + path, part = os.path.split(path) + mod_parts.reverse() + return '.'.join(mod_parts) + + +def ln(label): + """Draw a 70-char-wide divider, with label in the middle. + + >>> ln('hello there') + '---------------------------- hello there -----------------------------' + """ + label_len = len(label) + 2 + chunk = (70 - label_len) // 2 + out = '%s %s %s' % ('-' * chunk, label, '-' * chunk) + pad = 70 - len(out) + if pad > 0: + out = out + ('-' * pad) + return out + + +def resolve_name(name, module=None): + """Resolve a dotted name to a module and its parts. This is stolen + wholesale from unittest.TestLoader.loadTestByName. + + >>> resolve_name('nose.util') #doctest: +ELLIPSIS + <module 'nose.util' from...> + >>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS + <function resolve_name at...> + """ + parts = name.split('.') + parts_copy = parts[:] + if module is None: + while parts_copy: + try: + log.debug("__import__ %s", name) + module = __import__('.'.join(parts_copy)) + break + except ImportError: + del parts_copy[-1] + if not parts_copy: + raise + parts = parts[1:] + obj = module + log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module) + for part in parts: + obj = getattr(obj, part) + return obj + + +def split_test_name(test): + """Split a test name into a 3-tuple containing file, module, and callable + names, any of which (but not all) may be blank. + + Test names are in the form: + + file_or_module:callable + + Either side of the : may be dotted. To change the splitting behavior, you + can alter nose.util.split_test_re. + """ + norm = os.path.normpath + file_or_mod = test + fn = None + if not ':' in test: + # only a file or mod part + if file_like(test): + return (norm(test), None, None) + else: + return (None, test, None) + + # could be path|mod:callable, or a : in the file path someplace + head, tail = os.path.split(test) + if not head: + # this is a case like 'foo:bar' -- generally a module + # name followed by a callable, but also may be a windows + # drive letter followed by a path + try: + file_or_mod, fn = test.split(':') + if file_like(fn): + # must be a funny path + file_or_mod, fn = test, None + except ValueError: + # more than one : in the test + # this is a case like c:\some\path.py:a_test + parts = test.split(':') + if len(parts[0]) == 1: + file_or_mod, fn = ':'.join(parts[:-1]), parts[-1] + else: + # nonsense like foo:bar:baz + raise ValueError("Test name '%s' could not be parsed. Please " + "format test names as path:callable or " + "module:callable." 
% (test,)) + elif not tail: + # this is a case like 'foo:bar/' + # : must be part of the file path, so ignore it + file_or_mod = test + else: + if ':' in tail: + file_part, fn = tail.split(':') + else: + file_part = tail + file_or_mod = os.sep.join([head, file_part]) + if file_or_mod: + if file_like(file_or_mod): + return (norm(file_or_mod), None, fn) + else: + return (None, file_or_mod, fn) + else: + return (None, None, fn) +split_test_name.__test__ = False # do not collect + + +def test_address(test): + """Find the test address for a test, which may be a module, filename, + class, method or function. + """ + if hasattr(test, "address"): + return test.address() + # type-based polymorphism sucks in general, but I believe is + # appropriate here + t = type(test) + file = module = call = None + if t == types.ModuleType: + file = getattr(test, '__file__', None) + module = getattr(test, '__name__', None) + return (src(file), module, call) + if t == types.FunctionType or issubclass(t, type) or t == types.ClassType: + module = getattr(test, '__module__', None) + if module is not None: + m = sys.modules[module] + file = getattr(m, '__file__', None) + if file is not None: + file = os.path.abspath(file) + call = getattr(test, '__name__', None) + return (src(file), module, call) + if t == types.MethodType: + cls_adr = test_address(test.im_class) + return (src(cls_adr[0]), cls_adr[1], + "%s.%s" % (cls_adr[2], test.__name__)) + # handle unittest.TestCase instances + if isinstance(test, unittest.TestCase): + if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7 + or hasattr(test, '_testFunc')): # 2.7 + # unittest FunctionTestCase + try: + return test_address(test._FunctionTestCase__testFunc) + except AttributeError: + return test_address(test._testFunc) + # regular unittest.TestCase + cls_adr = test_address(test.__class__) + # 2.5 compat: __testMethodName changed to _testMethodName + try: + method_name = test._TestCase__testMethodName + except AttributeError: + method_name = test._testMethodName + return (src(cls_adr[0]), cls_adr[1], + "%s.%s" % (cls_adr[2], method_name)) + if (hasattr(test, '__class__') and + test.__class__.__module__ not in ('__builtin__', 'builtins')): + return test_address(test.__class__) + raise TypeError("I don't know what %s is (%s)" % (test, t)) +test_address.__test__ = False # do not collect + + +def try_run(obj, names): + """Given a list of possible method names, try to run them with the + provided object. Keep going until something works. Used to run + setup/teardown methods for module, package, and function tests. + """ + for name in names: + func = getattr(obj, name, None) + if func is not None: + if type(obj) == types.ModuleType: + # py.test compatibility + if isinstance(func, types.FunctionType): + args, varargs, varkw, defaults = \ + inspect.getargspec(func) + else: + # Not a function. If it's callable, call it anyway + if hasattr(func, '__call__') and not inspect.ismethod(func): + func = func.__call__ + try: + args, varargs, varkw, defaults = \ + inspect.getargspec(func) + args.pop(0) # pop the self off + except TypeError: + raise TypeError("Attribute %s of %r is not a python " + "function. Only functions or callables" + " may be used as fixtures." % + (name, obj)) + if len(args): + log.debug("call fixture %s.%s(%s)", obj, name, obj) + return func(obj) + log.debug("call fixture %s.%s", obj, name) + return func() + + +def src(filename): + """Find the python source file for a .pyc, .pyo or $py.class file on + jython. 
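# A sketch of the name-splitting behaviour implemented above. The inputs are
# illustrative and the results assume a POSIX path separator and that the dotted
# names do not also exist as files in the working directory:
from nose.util import split_test_name

split_test_name('tests/test_util.py:test_tolist')
# -> ('tests/test_util.py', None, 'test_tolist')
split_test_name('nose.util:tolist')
# -> (None, 'nose.util', 'tolist')
split_test_name('nose.util')
# -> (None, 'nose.util', None)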
Returns the filename provided if it is not a python source + file. + """ + if filename is None: + return filename + if sys.platform.startswith('java') and filename.endswith('$py.class'): + return '.'.join((filename[:-9], 'py')) + base, ext = os.path.splitext(filename) + if ext in ('.pyc', '.pyo', '.py'): + return '.'.join((base, 'py')) + return filename + + +def regex_last_key(regex): + """Sort key function factory that puts items that match a + regular expression last. + + >>> from nose.config import Config + >>> from nose.pyversion import sort_list + >>> c = Config() + >>> regex = c.testMatch + >>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py'] + >>> sort_list(entries, regex_last_key(regex)) + >>> entries + ['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test'] + """ + def k(obj): + if regex.search(obj): + return (1, obj) + return (0, obj) + return k + + +def tolist(val): + """Convert a value that may be a list or a (possibly comma-separated) + string into a list. The exception: None is returned as None, not [None]. + + >>> tolist(["one", "two"]) + ['one', 'two'] + >>> tolist("hello") + ['hello'] + >>> tolist("separate,values, with, commas, spaces , are ,ok") + ['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok'] + """ + if val is None: + return None + try: + # might already be a list + val.extend([]) + return val + except AttributeError: + pass + # might be a string + try: + return re.split(r'\s*,\s*', val) + except TypeError: + # who knows... + return list(val) + + +class odict(dict): + """Simple ordered dict implementation, based on: + + http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747 + """ + def __init__(self, *arg, **kw): + self._keys = [] + super(odict, self).__init__(*arg, **kw) + + def __delitem__(self, key): + super(odict, self).__delitem__(key) + self._keys.remove(key) + + def __setitem__(self, key, item): + super(odict, self).__setitem__(key, item) + if key not in self._keys: + self._keys.append(key) + + def __str__(self): + return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()]) + + def clear(self): + super(odict, self).clear() + self._keys = [] + + def copy(self): + d = super(odict, self).copy() + d._keys = self._keys[:] + return d + + def items(self): + return zip(self._keys, self.values()) + + def keys(self): + return self._keys[:] + + def setdefault(self, key, failobj=None): + item = super(odict, self).setdefault(key, failobj) + if key not in self._keys: + self._keys.append(key) + return item + + def update(self, dict): + super(odict, self).update(dict) + for key in dict.keys(): + if key not in self._keys: + self._keys.append(key) + + def values(self): + return map(self.get, self._keys) + + +def transplant_func(func, module): + """ + Make a function imported from module A appear as if it is located + in module B. + + >>> from pprint import pprint + >>> pprint.__module__ + 'pprint' + >>> pp = transplant_func(pprint, __name__) + >>> pp.__module__ + 'nose.util' + + The original function is not modified. + + >>> pprint.__module__ + 'pprint' + + Calling the transplanted function calls the original. 
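# A brief sketch of the odict helper defined above (keys and values are
# illustrative); unlike a plain Python 2 dict it preserves insertion order:
d = odict()
d['b'] = 1
d['a'] = 2
d.keys()    # ['b', 'a']
d.items()   # [('b', 1), ('a', 2)]
del d['b']
d.keys()    # ['a']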
+ + >>> pp([1, 2]) + [1, 2] + >>> pprint([1,2]) + [1, 2] + + """ + from nose.tools import make_decorator + if isgenerator(func): + def newfunc(*arg, **kw): + for v in func(*arg, **kw): + yield v + else: + def newfunc(*arg, **kw): + return func(*arg, **kw) + + newfunc = make_decorator(func)(newfunc) + newfunc.__module__ = module + return newfunc + + +def transplant_class(cls, module): + """ + Make a class appear to reside in `module`, rather than the module in which + it is actually defined. + + >>> from nose.failure import Failure + >>> Failure.__module__ + 'nose.failure' + >>> Nf = transplant_class(Failure, __name__) + >>> Nf.__module__ + 'nose.util' + >>> Nf.__name__ + 'Failure' + + """ + class C(cls): + pass + C.__module__ = module + C.__name__ = cls.__name__ + return C + + +def safe_str(val, encoding='utf-8'): + try: + return str(val) + except UnicodeEncodeError: + if isinstance(val, Exception): + return ' '.join([safe_str(arg, encoding) + for arg in val]) + return unicode(val).encode(encoding) + + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/scripts/external_libs/progressbar-2.2/LICENSE b/scripts/external_libs/progressbar-2.2/LICENSE new file mode 100755 index 00000000..ffce87a8 --- /dev/null +++ b/scripts/external_libs/progressbar-2.2/LICENSE @@ -0,0 +1,16 @@ +progressbar - Text progressbar library for python. +Copyright (C) 2005 Nilton Volpato + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA diff --git a/scripts/external_libs/progressbar-2.2/MANIFEST b/scripts/external_libs/progressbar-2.2/MANIFEST new file mode 100755 index 00000000..6e64f9f1 --- /dev/null +++ b/scripts/external_libs/progressbar-2.2/MANIFEST @@ -0,0 +1,6 @@ +LICENSE +MANIFEST +MANIFEST.in +README +progressbar.py +setup.py diff --git a/scripts/external_libs/progressbar-2.2/MANIFEST.in b/scripts/external_libs/progressbar-2.2/MANIFEST.in new file mode 100755 index 00000000..31ed35dd --- /dev/null +++ b/scripts/external_libs/progressbar-2.2/MANIFEST.in @@ -0,0 +1,3 @@ +include README MANIFEST MANIFEST.in LICENSE +include setup.py +include progressbar.py diff --git a/scripts/external_libs/progressbar-2.2/PKG-INFO b/scripts/external_libs/progressbar-2.2/PKG-INFO new file mode 100755 index 00000000..c8d81b37 --- /dev/null +++ b/scripts/external_libs/progressbar-2.2/PKG-INFO @@ -0,0 +1,38 @@ +Metadata-Version: 1.0 +Name: progressbar +Version: 2.2 +Summary: Text progressbar library for python. +Home-page: http://qubit.ic.unicamp.br/~nilton +Author: Nilton Volpato +Author-email: first-name dot last-name @ gmail.com +License: UNKNOWN +Description: Text progressbar library for python. + + This library provides a text mode progressbar. This is tipically used + to display the progress of a long running operation, providing a + visual clue that processing is underway. 
+ + The ProgressBar class manages the progress, and the format of the line + is given by a number of widgets. A widget is an object that may + display diferently depending on the state of the progress. There are + three types of widget: + - a string, which always shows itself; + - a ProgressBarWidget, which may return a diferent value every time + it's update method is called; and + - a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it + expands to fill the remaining width of the line. + + The progressbar module is very easy to use, yet very powerful. And + automatically supports features like auto-resizing when available. + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: User Interfaces +Classifier: Topic :: Terminals diff --git a/scripts/external_libs/progressbar-2.2/README b/scripts/external_libs/progressbar-2.2/README new file mode 100755 index 00000000..a5e861bf --- /dev/null +++ b/scripts/external_libs/progressbar-2.2/README @@ -0,0 +1,18 @@ +Text progressbar library for python. + +This library provides a text mode progressbar. This is tipically used +to display the progress of a long running operation, providing a +visual clue that processing is underway. + +The ProgressBar class manages the progress, and the format of the line +is given by a number of widgets. A widget is an object that may +display diferently depending on the state of the progress. There are +three types of widget: +- a string, which always shows itself; +- a ProgressBarWidget, which may return a diferent value every time +it's update method is called; and +- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it +expands to fill the remaining width of the line. + +The progressbar module is very easy to use, yet very powerful. And +automatically supports features like auto-resizing when available. diff --git a/scripts/external_libs/progressbar-2.2/progressbar.py b/scripts/external_libs/progressbar-2.2/progressbar.py new file mode 100755 index 00000000..07981a34 --- /dev/null +++ b/scripts/external_libs/progressbar-2.2/progressbar.py @@ -0,0 +1,381 @@ +#!/usr/bin/python +# -*- coding: iso-8859-1 -*- +# +# progressbar - Text progressbar library for python. +# Copyright (c) 2005 Nilton Volpato +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +"""Text progressbar library for python. + +This library provides a text mode progressbar. 
This is tipically used +to display the progress of a long running operation, providing a +visual clue that processing is underway. + +The ProgressBar class manages the progress, and the format of the line +is given by a number of widgets. A widget is an object that may +display diferently depending on the state of the progress. There are +three types of widget: +- a string, which always shows itself; +- a ProgressBarWidget, which may return a diferent value every time +it's update method is called; and +- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it +expands to fill the remaining width of the line. + +The progressbar module is very easy to use, yet very powerful. And +automatically supports features like auto-resizing when available. +""" + +__author__ = "Nilton Volpato" +__author_email__ = "first-name dot last-name @ gmail.com" +__date__ = "2006-05-07" +__version__ = "2.2" + +# Changelog +# +# 2006-05-07: v2.2 fixed bug in windows +# 2005-12-04: v2.1 autodetect terminal width, added start method +# 2005-12-04: v2.0 everything is now a widget (wow!) +# 2005-12-03: v1.0 rewrite using widgets +# 2005-06-02: v0.5 rewrite +# 2004-??-??: v0.1 first version + + +import sys, time +from array import array +try: + from fcntl import ioctl + import termios +except ImportError: + pass +import signal + +class ProgressBarWidget(object): + """This is an element of ProgressBar formatting. + + The ProgressBar object will call it's update value when an update + is needed. It's size may change between call, but the results will + not be good if the size changes drastically and repeatedly. + """ + def update(self, pbar): + """Returns the string representing the widget. + + The parameter pbar is a reference to the calling ProgressBar, + where one can access attributes of the class for knowing how + the update must be made. + + At least this function must be overriden.""" + pass + +class ProgressBarWidgetHFill(object): + """This is a variable width element of ProgressBar formatting. + + The ProgressBar object will call it's update value, informing the + width this object must the made. This is like TeX \\hfill, it will + expand to fill the line. You can use more than one in the same + line, and they will all have the same width, and together will + fill the line. + """ + def update(self, pbar, width): + """Returns the string representing the widget. + + The parameter pbar is a reference to the calling ProgressBar, + where one can access attributes of the class for knowing how + the update must be made. The parameter width is the total + horizontal width the widget must have. + + At least this function must be overriden.""" + pass + + +class ETA(ProgressBarWidget): + "Widget for the Estimated Time of Arrival" + def format_time(self, seconds): + return time.strftime('%H:%M:%S', time.gmtime(seconds)) + def update(self, pbar): + if pbar.currval == 0: + return 'ETA: --:--:--' + elif pbar.finished: + return 'Time: %s' % self.format_time(pbar.seconds_elapsed) + else: + elapsed = pbar.seconds_elapsed + eta = elapsed * pbar.maxval / pbar.currval - elapsed + return 'ETA: %s' % self.format_time(eta) + +class FileTransferSpeed(ProgressBarWidget): + "Widget for showing the transfer speed (useful for file transfers)." 
+ def __init__(self): + self.fmt = '%6.2f %s' + self.units = ['B','K','M','G','T','P'] + def update(self, pbar): + if pbar.seconds_elapsed < 2e-6:#== 0: + bps = 0.0 + else: + bps = float(pbar.currval) / pbar.seconds_elapsed + spd = bps + for u in self.units: + if spd < 1000: + break + spd /= 1000 + return self.fmt % (spd, u+'/s') + +class RotatingMarker(ProgressBarWidget): + "A rotating marker for filling the bar of progress." + def __init__(self, markers='|/-\\'): + self.markers = markers + self.curmark = -1 + def update(self, pbar): + if pbar.finished: + return self.markers[0] + self.curmark = (self.curmark + 1)%len(self.markers) + return self.markers[self.curmark] + +class Percentage(ProgressBarWidget): + "Just the percentage done." + def update(self, pbar): + return '%3d%%' % pbar.percentage() + +class Bar(ProgressBarWidgetHFill): + "The bar of progress. It will strech to fill the line." + def __init__(self, marker='#', left='|', right='|'): + self.marker = marker + self.left = left + self.right = right + def _format_marker(self, pbar): + if isinstance(self.marker, (str, unicode)): + return self.marker + else: + return self.marker.update(pbar) + def update(self, pbar, width): + percent = pbar.percentage() + cwidth = width - len(self.left) - len(self.right) + marked_width = int(percent * cwidth / 100) + m = self._format_marker(pbar) + bar = (self.left + (m*marked_width).ljust(cwidth) + self.right) + return bar + +class ReverseBar(Bar): + "The reverse bar of progress, or bar of regress. :)" + def update(self, pbar, width): + percent = pbar.percentage() + cwidth = width - len(self.left) - len(self.right) + marked_width = int(percent * cwidth / 100) + m = self._format_marker(pbar) + bar = (self.left + (m*marked_width).rjust(cwidth) + self.right) + return bar + +default_widgets = [Percentage(), ' ', Bar()] +class ProgressBar(object): + """This is the ProgressBar class, it updates and prints the bar. + + The term_width parameter may be an integer. Or None, in which case + it will try to guess it, if it fails it will default to 80 columns. + + The simple use is like this: + >>> pbar = ProgressBar().start() + >>> for i in xrange(100): + ... # do something + ... pbar.update(i+1) + ... + >>> pbar.finish() + + But anything you want to do is possible (well, almost anything). + You can supply different widgets of any type in any order. And you + can even write your own widgets! There are many widgets already + shipped and you should experiment with them. + + When implementing a widget update method you may access any + attribute or function of the ProgressBar object calling the + widget's update method. 
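# A sketch of the "write your own widgets" idea mentioned above (this Count
# widget is illustrative, not one of the shipped widgets): implement update()
# and return the string to display for the current state of the bar.
class Count(ProgressBarWidget):
    "Shows the raw progress as 'current/maximum'."
    def update(self, pbar):
        return '%d/%d' % (pbar.currval, pbar.maxval)

# which can then be combined with the built-in widgets, e.g.
# ProgressBar(widgets=[Count(), ' ', Bar(), ' ', ETA()], maxval=200)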
The most important attributes you would + like to access are: + - currval: current value of the progress, 0 <= currval <= maxval + - maxval: maximum (and final) value of the progress + - finished: True if the bar is have finished (reached 100%), False o/w + - start_time: first time update() method of ProgressBar was called + - seconds_elapsed: seconds elapsed since start_time + - percentage(): percentage of the progress (this is a method) + """ + def __init__(self, maxval=100, widgets=default_widgets, term_width=None, + fd=sys.stderr): + assert maxval > 0 + self.maxval = maxval + self.widgets = widgets + self.fd = fd + self.signal_set = False + if term_width is None: + try: + self.handle_resize(None,None) + signal.signal(signal.SIGWINCH, self.handle_resize) + self.signal_set = True + except: + self.term_width = 79 + else: + self.term_width = term_width + + self.currval = 0 + self.finished = False + self.prev_percentage = -1 + self.start_time = None + self.seconds_elapsed = 0 + + def handle_resize(self, signum, frame): + h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2] + self.term_width = w + + def percentage(self): + "Returns the percentage of the progress." + return self.currval*100.0 / self.maxval + + def _format_widgets(self): + r = [] + hfill_inds = [] + num_hfill = 0 + currwidth = 0 + for i, w in enumerate(self.widgets): + if isinstance(w, ProgressBarWidgetHFill): + r.append(w) + hfill_inds.append(i) + num_hfill += 1 + elif isinstance(w, (str, unicode)): + r.append(w) + currwidth += len(w) + else: + weval = w.update(self) + currwidth += len(weval) + r.append(weval) + for iw in hfill_inds: + r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill) + return r + + def _format_line(self): + return ''.join(self._format_widgets()).ljust(self.term_width) + + def _need_update(self): + return int(self.percentage()) != int(self.prev_percentage) + + def update(self, value): + "Updates the progress bar to a new value." + assert 0 <= value <= self.maxval + self.currval = value + if not self._need_update() or self.finished: + return + if not self.start_time: + self.start_time = time.time() + self.seconds_elapsed = time.time() - self.start_time + self.prev_percentage = self.percentage() + if value != self.maxval: + self.fd.write(self._format_line() + '\r') + else: + self.finished = True + self.fd.write(self._format_line() + '\n') + + def start(self): + """Start measuring time, and prints the bar at 0%. + + It returns self so you can use it like this: + >>> pbar = ProgressBar().start() + >>> for i in xrange(100): + ... # do something + ... pbar.update(i+1) + ... 
+ >>> pbar.finish() + """ + self.update(0) + return self + + def finish(self): + """Used to tell the progress is finished.""" + self.update(self.maxval) + if self.signal_set: + signal.signal(signal.SIGWINCH, signal.SIG_DFL) + + + + + + +if __name__=='__main__': + import os + + def example1(): + widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()), + ' ', ETA(), ' ', FileTransferSpeed()] + pbar = ProgressBar(widgets=widgets, maxval=10000000).start() + for i in range(1000000): + # do something + pbar.update(10*i+1) + pbar.finish() + print + + def example2(): + class CrazyFileTransferSpeed(FileTransferSpeed): + "It's bigger between 45 and 80 percent" + def update(self, pbar): + if 45 < pbar.percentage() < 80: + return 'Bigger Now ' + FileTransferSpeed.update(self,pbar) + else: + return FileTransferSpeed.update(self,pbar) + + widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()] + pbar = ProgressBar(widgets=widgets, maxval=10000000) + # maybe do something + pbar.start() + for i in range(2000000): + # do something + pbar.update(5*i+1) + pbar.finish() + print + + def example3(): + widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')] + pbar = ProgressBar(widgets=widgets, maxval=10000000).start() + for i in range(1000000): + # do something + pbar.update(10*i+1) + pbar.finish() + print + + def example4(): + widgets = ['Test: ', Percentage(), ' ', + Bar(marker='0',left='[',right=']'), + ' ', ETA(), ' ', FileTransferSpeed()] + pbar = ProgressBar(widgets=widgets, maxval=500) + pbar.start() + for i in range(100,500+1,50): + time.sleep(0.2) + pbar.update(i) + pbar.finish() + print + + + example1() + example2() + example3() + example4() + + # def timedProgressBar(time_in_secs): + # widgets = ['Running T-Rex: ', Percentage(), ' ', + # Bar(marker='>',left='[',right=']'), + # ' ', ETA()] + # pbar = ProgressBar(widgets=widgets, maxval=time_in_secs*2) + # pbar.start() + # for i in range(0, time_in_secs*2 + 1): + # time.sleep(0.5) + # pbar.update(i) + # pbar.finish() + # print + + # timedProgressBar(20) diff --git a/scripts/external_libs/progressbar-2.2/setup.py b/scripts/external_libs/progressbar-2.2/setup.py new file mode 100755 index 00000000..97e29e15 --- /dev/null +++ b/scripts/external_libs/progressbar-2.2/setup.py @@ -0,0 +1,31 @@ +#!/usr/bin/python + +import os +from distutils.core import setup +import progressbar + +if os.stat('progressbar.py').st_mtime > os.stat('README').st_mtime: + file('README','w').write(progressbar.__doc__) + +setup( + name = 'progressbar', + version = progressbar.__version__, + description = progressbar.__doc__.splitlines()[0], + long_description = progressbar.__doc__, + maintainer = progressbar.__author__, + maintainer_email = progressbar.__author_email__, + url = 'http://qubit.ic.unicamp.br/~nilton', + py_modules = ['progressbar'], + classifiers = [ + 'Development Status :: 5 - Production/Stable', + 'Environment :: Console', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)', + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: User Interfaces', + 'Topic :: Terminals', + ], +) diff --git a/scripts/external_libs/rednose-0.4.1/rednose.py b/scripts/external_libs/rednose-0.4.1/rednose.py new file mode 100755 index 00000000..1ff892ad --- /dev/null +++ b/scripts/external_libs/rednose-0.4.1/rednose.py @@ -0,0 +1,387 
@@ +# Copyright (c) 2009, Tim Cuthbertson # All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the organisation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import print_function +import os +import sys +import linecache +import re +import time + +import nose + +import termstyle + +failure = 'FAILED' +error = 'ERROR' +success = 'passed' +skip = 'skipped' +line_length = 77 + +PY3 = sys.version_info[0] >= 3 +if PY3: + to_unicode = str +else: + def to_unicode(s): + try: + return unicode(s) + except UnicodeDecodeError: + return unicode(repr(str(s))) + +BLACKLISTED_WRITERS = [ + 'nose[\\/]result\\.pyc?$', + 'unittest[\\/]runner\\.pyc?$' +] +REDNOSE_DEBUG = False + + +class RedNose(nose.plugins.Plugin): + env_opt = 'NOSE_REDNOSE' + env_opt_color = 'NOSE_REDNOSE_COLOR' + score = 199 # just under the `coverage` module + + def __init__(self, *args): + super(RedNose, self).__init__(*args) + self.reports = [] + self.error = self.success = self.failure = self.skip = 0 + self.total = 0 + self.stream = None + self.verbose = False + self.enabled = False + self.tree = False + + def options(self, parser, env=os.environ): + global REDNOSE_DEBUG + rednose_on = bool(env.get(self.env_opt, False)) + rednose_color = env.get(self.env_opt_color, 'auto') + REDNOSE_DEBUG = bool(env.get('REDNOSE_DEBUG', False)) + + parser.add_option( + "--rednose", + action="store_true", + default=rednose_on, + dest="rednose", + help="enable colour output (alternatively, set $%s=1)" % (self.env_opt,) + ) + parser.add_option( + "--no-color", + action="store_false", + dest="rednose", + help="disable colour output" + ) + parser.add_option( + "--force-color", + action="store_const", + dest='rednose_color', + default=rednose_color, + const='force', + help="force colour output when not using a TTY (alternatively, set $%s=force)" % (self.env_opt_color,) + ) + parser.add_option( + "--immediate", + action="store_true", + default=False, + help="print errors and failures as they happen, as well as at the end" + ) + + def configure(self, options, conf): + if options.rednose: + self.enabled = True + termstyle_init = { + 'force': termstyle.enable, + 
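# Typical invocations for the options registered above (a sketch; the flag and
# environment-variable names come from the add_option calls in options()):
#
#   nosetests --rednose                  # coloured output, TTY auto-detected
#   nosetests --rednose --force-color    # keep colour even when piping to a file
#   nosetests --rednose --immediate      # also print failures as they happen
#   NOSE_REDNOSE=1 nosetests             # enable via the environment instead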
'off': termstyle.disable + }.get(options.rednose_color, termstyle.auto) + termstyle_init() + + self.immediate = options.immediate + self.verbose = options.verbosity >= 2 + + def begin(self): + self.start_time = time.time() + self._in_test = False + + def _format_test_name(self, test): + return test.shortDescription() or to_unicode(test) + + def prepareTestResult(self, result): + result.stream = FilteringStream(self.stream, BLACKLISTED_WRITERS) + + def beforeTest(self, test): + self._in_test = True + if self.verbose: + self._out(self._format_test_name(test) + ' ... ') + + def afterTest(self, test): + if self._in_test: + self.addSkip() + + def _print_test(self, type_, color): + self.total += 1 + if self.verbose: + self._outln(color(type_)) + else: + if type_ == failure: + short_ = 'F' + elif type_ == error: + short_ = 'X' + elif type_ == skip: + short_ = '-' + else: + short_ = '.' + self._out(color(short_)) + if self.total % line_length == 0: + self._outln() + self._in_test = False + + def _add_report(self, report): + failure_type, test, err = report + self.reports.append(report) + if self.immediate: + self._outln() + self._report_test(len(self.reports), *report) + + def addFailure(self, test, err): + self.failure += 1 + self._add_report((failure, test, err)) + self._print_test(failure, termstyle.red) + + def addError(self, test, err): + if err[0].__name__ == 'SkipTest': + self.addSkip(test, err) + return + self.error += 1 + self._add_report((error, test, err)) + self._print_test(error, termstyle.yellow) + + def addSuccess(self, test): + self.success += 1 + self._print_test(success, termstyle.green) + + def addSkip(self, test=None, err=None): + self.skip += 1 + self._print_test(skip, termstyle.blue) + + def setOutputStream(self, stream): + self.stream = stream + + def report(self, stream): + """report on all registered failures and errors""" + self._outln() + if self.immediate: + for x in range(0, 5): + self._outln() + report_num = 0 + if len(self.reports) > 0: + for report_num, report in enumerate(self.reports): + self._report_test(report_num + 1, *report) + self._outln() + + self._summarize() + + def _summarize(self): + """summarize all tests - the number of failures, errors and successes""" + self._line(termstyle.black) + self._out("%s test%s run in %0.1f seconds" % ( + self.total, + self._plural(self.total), + time.time() - self.start_time)) + if self.total > self.success: + self._outln(". 
") + additionals = [] + if self.failure > 0: + additionals.append(termstyle.red("%s FAILED" % ( + self.failure,))) + if self.error > 0: + additionals.append(termstyle.yellow("%s error%s" % ( + self.error, + self._plural(self.error) ))) + if self.skip > 0: + additionals.append(termstyle.blue("%s skipped" % ( + self.skip))) + self._out(', '.join(additionals)) + + self._out(termstyle.green(" (%s test%s passed)" % ( + self.success, + self._plural(self.success) ))) + self._outln() + + def _report_test(self, report_num, type_, test, err): + """report the results of a single (failing or errored) test""" + self._line(termstyle.black) + self._out("%s) " % (report_num)) + if type_ == failure: + color = termstyle.red + self._outln(color('FAIL: %s' % (self._format_test_name(test),))) + else: + color = termstyle.yellow + self._outln(color('ERROR: %s' % (self._format_test_name(test),))) + + exc_type, exc_instance, exc_trace = err + + self._outln() + self._outln(self._fmt_traceback(exc_trace)) + self._out(color(' ', termstyle.bold(color(exc_type.__name__)), ": ")) + self._outln(self._fmt_message(exc_instance, color)) + self._outln() + + def _relative_path(self, path): + """ + If path is a child of the current working directory, the relative + path is returned surrounded by bold xterm escape sequences. + If path is not a child of the working directory, path is returned + """ + try: + here = os.path.abspath(os.path.realpath(os.getcwd())) + fullpath = os.path.abspath(os.path.realpath(path)) + except OSError: + return path + if fullpath.startswith(here): + return termstyle.bold(fullpath[len(here)+1:]) + return path + + def _file_line(self, tb): + """formats the file / lineno / function line of a traceback element""" + prefix = "file://" + prefix = "" + + f = tb.tb_frame + if '__unittest' in f.f_globals: + # this is the magical flag that prevents unittest internal + # code from junking up the stacktrace + return None + + filename = f.f_code.co_filename + lineno = tb.tb_lineno + linecache.checkcache(filename) + function_name = f.f_code.co_name + + line_contents = linecache.getline(filename, lineno, f.f_globals).strip() + + return " %s line %s in %s\n %s" % ( + termstyle.blue(prefix, self._relative_path(filename)), + lineno, + termstyle.cyan(function_name), + line_contents) + + def _fmt_traceback(self, trace): + """format a traceback""" + ret = [] + ret.append(termstyle.default(" Traceback (most recent call last):")) + current_trace = trace + while current_trace is not None: + line = self._file_line(current_trace) + if line is not None: + ret.append(line) + current_trace = current_trace.tb_next + return '\n'.join(ret) + + def _fmt_message(self, exception, color): + orig_message_lines = to_unicode(exception).splitlines() + + if len(orig_message_lines) == 0: + return '' + message_lines = [color(orig_message_lines[0])] + for line in orig_message_lines[1:]: + match = re.match('^---.* begin captured stdout.*----$', line) + if match: + color = None + message_lines.append('') + line = ' ' + line + message_lines.append(color(line) if color is not None else line) + return '\n'.join(message_lines) + + def _out(self, msg='', newline=False): + self.stream.write(msg) + if newline: + self.stream.write('\n') + + def _outln(self, msg=''): + self._out(msg, True) + + def _plural(self, num): + return '' if num == 1 else 's' + + def _line(self, color=termstyle.reset, char='-'): + """ + print a line of separator characters (default '-') + in the given colour (default black) + """ + self._outln(color(char * line_length)) + + +import 
traceback +import sys + + +class FilteringStream(object): + """ + A wrapper for a stream that will filter + calls to `write` and `writeln` to ignore calls + from blacklisted callers + (implemented as a regex on their filename, according + to traceback.extract_stack()) + + It's super hacky, but there seems to be no other way + to suppress nose's default output + """ + def __init__(self, stream, excludes): + self.__stream = stream + self.__excludes = list(map(re.compile, excludes)) + + def __should_filter(self): + try: + stack = traceback.extract_stack(limit=3)[0] + filename = stack[0] + pattern_matches_filename = lambda pattern: pattern.search(filename) + should_filter = any(map(pattern_matches_filename, self.__excludes)) + if REDNOSE_DEBUG: + print >> sys.stderr, "REDNOSE_DEBUG: got write call from %s, should_filter = %s" % ( + filename, should_filter) + return should_filter + except StandardError as e: + if REDNOSE_DEBUG: + print("\nError in rednose filtering: %s" % (e,), file=sys.stderr) + traceback.print_exc(sys.stderr) + return False + + def write(self, *a): + if self.__should_filter(): + return + return self.__stream.write(*a) + + def writeln(self, *a): + if self.__should_filter(): + return + return self.__stream.writeln(*a) + + # pass non-known methods through to self.__stream + def __getattr__(self, name): + if REDNOSE_DEBUG: + print("REDNOSE_DEBUG: getting attr %s" % (name,), file=sys.stderr) + return getattr(self.__stream, name) diff --git a/scripts/external_libs/rednose-0.4.1/setup.py b/scripts/external_libs/rednose-0.4.1/setup.py new file mode 100755 index 00000000..34cded4b --- /dev/null +++ b/scripts/external_libs/rednose-0.4.1/setup.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +## NOTE: ## +## this setup.py was generated by zero2pypi: +## http://gfxmonk.net/dist/0install/zero2pypi.xml + +from setuptools import * +setup( + packages = find_packages(exclude=['test', 'test.*']), + description='coloured output for nosetests', + entry_points={'nose.plugins.0.10': ['NOSETESTS_PLUGINS = rednose:RedNose']}, + install_requires=['setuptools', 'python-termstyle >=0.1.7'], + long_description="\n**Note**: This package has been built automatically by\n`zero2pypi <http://gfxmonk.net/dist/0install/zero2pypi.xml>`_.\nIf possible, you should use the zero-install feed instead:\nhttp://gfxmonk.net/dist/0install/rednose.xml\n\n----------------\n\n=========\nrednose\n=========\n\nrednose is a `nosetests`_\nplugin for adding colour (and readability) to nosetest console results.\n\nInstallation:\n-------------\n::\n\n\teasy_install rednose\n\t\nor from the source::\n\n\t./setup.py develop\n\nUsage:\n------\n::\n\n\tnosetests --rednose\n\nor::\n\n\texport NOSE_REDNOSE=1\n\tnosetests\n\nRednose by default uses auto-colouring, which will only use\ncolour if you're running it on a terminal (i.e not piping it\nto a file). To control colouring, use one of::\n\n\tnosetests --rednose --force-color\n\tnosetests --no-color\n\n(you can also control this by setting the environment variable NOSE_REDNOSE_COLOR to 'force' or 'no')\n\n.. 
_nosetests: http://somethingaboutorange.com/mrl/projects/nose/\n", + name='rednose', + py_modules=['rednose'], + url='http://gfxmonk.net/dist/0install/rednose.xml', + version='0.4.1', +classifiers=[ + "License :: OSI Approved :: BSD License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: Software Development :: Testing", + ], + keywords='test nosetests nose nosetest output colour console', + license='BSD', +) diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/__init__.py b/scripts/external_libs/yaml-3.11/yaml/__init__.py index c30973a3..76e19e13 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/__init__.py +++ b/scripts/external_libs/yaml-3.11/yaml/__init__.py @@ -8,21 +8,35 @@ from nodes import * from loader import * from dumper import * +__version__ = '3.11' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + def scan(stream, Loader=Loader): """ Scan a YAML stream and produce scanning tokens. """ loader = Loader(stream) - while loader.check_token(): - yield loader.get_token() + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() def parse(stream, Loader=Loader): """ Parse a YAML stream and produce parsing events. """ loader = Loader(stream) - while loader.check_event(): - yield loader.get_event() + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() def compose(stream, Loader=Loader): """ @@ -30,26 +44,22 @@ def compose(stream, Loader=Loader): and produce the corresponding representation tree. """ loader = Loader(stream) - if loader.check_node(): - return loader.get_node() + try: + return loader.get_single_node() + finally: + loader.dispose() def compose_all(stream, Loader=Loader): """ Parse all YAML documents in a stream - and produce corresponsing representation trees. - """ - loader = Loader(stream) - while loader.check_node(): - yield loader.get_node() - -def load_all(stream, Loader=Loader): - """ - Parse all YAML documents in a stream - and produce corresponding Python objects. + and produce corresponding representation trees. """ loader = Loader(stream) - while loader.check_data(): - yield loader.get_data() + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() def load(stream, Loader=Loader): """ @@ -57,16 +67,22 @@ def load(stream, Loader=Loader): and produce the corresponding Python object. """ loader = Loader(stream) - if loader.check_data(): - return loader.get_data() + try: + return loader.get_single_data() + finally: + loader.dispose() -def safe_load_all(stream): +def load_all(stream, Loader=Loader): """ Parse all YAML documents in a stream and produce corresponding Python objects. - Resolve only basic YAML tags. """ - return load_all(stream, SafeLoader) + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() def safe_load(stream): """ @@ -76,6 +92,14 @@ def safe_load(stream): """ return load(stream, SafeLoader) +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. 
+ """ + return load_all(stream, SafeLoader) + def emit(events, stream=None, Dumper=Dumper, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None): @@ -85,16 +109,16 @@ def emit(events, stream=None, Dumper=Dumper, """ getvalue = None if stream is None: - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO + from StringIO import StringIO stream = StringIO() getvalue = stream.getvalue dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) - for event in events: - dumper.emit(event) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() if getvalue: return getvalue() @@ -109,20 +133,23 @@ def serialize_all(nodes, stream=None, Dumper=Dumper, """ getvalue = None if stream is None: - try: - from cStringIO import StringIO - except ImportError: + if encoding is None: from StringIO import StringIO + else: + from cStringIO import StringIO stream = StringIO() getvalue = stream.getvalue dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break, encoding=encoding, version=version, tags=tags, explicit_start=explicit_start, explicit_end=explicit_end) - dumper.open() - for node in nodes: - dumper.serialize(node) - dumper.close() + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() if getvalue: return getvalue() @@ -145,10 +172,10 @@ def dump_all(documents, stream=None, Dumper=Dumper, """ getvalue = None if stream is None: - try: - from cStringIO import StringIO - except ImportError: + if encoding is None: from StringIO import StringIO + else: + from cStringIO import StringIO stream = StringIO() getvalue = stream.getvalue dumper = Dumper(stream, default_style=default_style, @@ -157,10 +184,13 @@ def dump_all(documents, stream=None, Dumper=Dumper, allow_unicode=allow_unicode, line_break=line_break, encoding=encoding, version=version, tags=tags, explicit_start=explicit_start, explicit_end=explicit_end) - dumper.open() - for data in documents: - dumper.represent(data) - dumper.close() + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() if getvalue: return getvalue() @@ -260,6 +290,7 @@ class YAMLObject(object): """ __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses yaml_loader = Loader yaml_dumper = Dumper diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/composer.py b/scripts/external_libs/yaml-3.11/yaml/composer.py index d256b054..06e5ac78 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/composer.py +++ b/scripts/external_libs/yaml-3.11/yaml/composer.py @@ -8,12 +8,16 @@ from nodes import * class ComposerError(MarkedYAMLError): pass -class Composer: +class Composer(object): def __init__(self): self.anchors = {} def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + # If there are more documents available? return not self.check_event(StreamEndEvent) @@ -22,17 +26,28 @@ class Composer: if not self.check_event(StreamEndEvent): return self.compose_document() - def __iter__(self): - # Iterator protocol. - while not self.check_event(StreamEndEvent): - yield self.compose_document() + def get_single_node(self): + # Drop the STREAM-START event. 
+ self.get_event() - def compose_document(self): + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() - # Drop the STREAM-START event. - if self.check_event(StreamStartEvent): - self.get_event() + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): # Drop the DOCUMENT-START event. self.get_event() @@ -42,7 +57,7 @@ class Composer: # Drop the DOCUMENT-END event. self.get_event() - self.complete_anchors = {} + self.anchors = {} return node def compose_node(self, parent, index): @@ -104,19 +119,20 @@ class Composer: tag = start_event.tag if tag is None or tag == u'!': tag = self.resolve(MappingNode, None, start_event.implicit) - node = MappingNode(tag, {}, + node = MappingNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style) if anchor is not None: self.anchors[anchor] = node while not self.check_event(MappingEndEvent): - key_event = self.peek_event() + #key_event = self.peek_event() item_key = self.compose_node(node, None) - if item_key in node.value: - raise ComposerError("while composing a mapping", start_event.start_mark, - "found duplicate key", key_event.start_mark) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) item_value = self.compose_node(node, item_key) - node.value[item_key] = item_value + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) end_event = self.get_event() node.end_mark = end_event.end_mark return node diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/constructor.py b/scripts/external_libs/yaml-3.11/yaml/constructor.py index 57ad53d1..635faac3 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/constructor.py +++ b/scripts/external_libs/yaml-3.11/yaml/constructor.py @@ -4,25 +4,15 @@ __all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', from error import * from nodes import * -from composer import * -try: - import datetime - datetime_available = True -except ImportError: - datetime_available = False +import datetime -try: - set -except NameError: - from sets import Set as set - -import binascii, re, sys +import binascii, re, sys, types class ConstructorError(MarkedYAMLError): pass -class BaseConstructor(Composer): +class BaseConstructor(object): yaml_constructors = {} yaml_multi_constructors = {} @@ -30,6 +20,8 @@ class BaseConstructor(Composer): def __init__(self): self.constructed_objects = {} self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False def check_data(self): # If there are more documents available? @@ -40,135 +32,117 @@ class BaseConstructor(Composer): if self.check_node(): return self.construct_document(self.get_node()) - def __iter__(self): - # Iterator protocol. - while self.check_node(): - yield self.construct_document(self.get_node()) + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. 
+ node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None def construct_document(self, node): data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass self.constructed_objects = {} self.recursive_objects = {} + self.deep_construct = False return data - def construct_object(self, node): + def construct_object(self, node, deep=False): if node in self.constructed_objects: return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True if node in self.recursive_objects: raise ConstructorError(None, None, - "found recursive node", node.start_mark) + "found unconstructable recursive node", node.start_mark) self.recursive_objects[node] = None constructor = None + tag_suffix = None if node.tag in self.yaml_constructors: - constructor = lambda node: self.yaml_constructors[node.tag](self, node) + constructor = self.yaml_constructors[node.tag] else: for tag_prefix in self.yaml_multi_constructors: if node.tag.startswith(tag_prefix): tag_suffix = node.tag[len(tag_prefix):] - constructor = lambda node: \ - self.yaml_multi_constructors[tag_prefix](self, tag_suffix, node) + constructor = self.yaml_multi_constructors[tag_prefix] break else: if None in self.yaml_multi_constructors: - constructor = lambda node: \ - self.yaml_multi_constructors[None](self, node.tag, node) + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] elif None in self.yaml_constructors: - constructor = lambda node: \ - self.yaml_constructors[None](self, node) + constructor = self.yaml_constructors[None] elif isinstance(node, ScalarNode): - constructor = self.construct_scalar + constructor = self.__class__.construct_scalar elif isinstance(node, SequenceNode): - constructor = self.construct_sequence + constructor = self.__class__.construct_sequence elif isinstance(node, MappingNode): - constructor = self.construct_mapping - else: - print node.tag - data = constructor(node) + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) self.constructed_objects[node] = data del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep return data def construct_scalar(self, node): if not isinstance(node, ScalarNode): - if isinstance(node, MappingNode): - for key_node in node.value: - if key_node.tag == u'tag:yaml.org,2002:value': - return self.construct_scalar(node.value[key_node]) raise ConstructorError(None, None, "expected a scalar node, but found %s" % node.id, node.start_mark) return node.value - def construct_sequence(self, node): + def construct_sequence(self, node, deep=False): if not isinstance(node, SequenceNode): raise ConstructorError(None, None, "expected a sequence node, but found %s" % node.id, node.start_mark) - return [self.construct_object(child) for child in node.value] + return [self.construct_object(child, deep=deep) + for child in node.value] - def construct_mapping(self, node): + def construct_mapping(self, node, deep=False): if not isinstance(node, MappingNode): raise ConstructorError(None, None, "expected a mapping node, but found %s" % node.id, 
node.start_mark) mapping = {} - merge = None - for key_node in node.value: - if key_node.tag == u'tag:yaml.org,2002:merge': - if merge is not None: - raise ConstructorError("while constructing a mapping", node.start_mark, - "found duplicate merge key", key_node.start_mark) - value_node = node.value[key_node] - if isinstance(value_node, MappingNode): - merge = [self.construct_mapping(value_node)] - elif isinstance(value_node, SequenceNode): - merge = [] - for subnode in value_node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing a mapping", - node.start_mark, - "expected a mapping for merging, but found %s" - % subnode.id, subnode.start_mark) - merge.append(self.construct_mapping(subnode)) - merge.reverse() - else: - raise ConstructorError("while constructing a mapping", node.start_mark, - "expected a mapping or list of mappings for merging, but found %s" - % value_node.id, value_node.start_mark) - elif key_node.tag == u'tag:yaml.org,2002:value': - if '=' in mapping: - raise ConstructorError("while construction a mapping", node.start_mark, - "found duplicate value key", key_node.start_mark) - value = self.construct_object(node.value[key_node]) - mapping['='] = value - else: - key = self.construct_object(key_node) - try: - duplicate_key = key in mapping - except TypeError, exc: - raise ConstructorError("while constructing a mapping", node.start_mark, - "found unacceptable key (%s)" % exc, key_node.start_mark) - if duplicate_key: - raise ConstructorError("while constructing a mapping", node.start_mark, - "found duplicate key", key_node.start_mark) - value = self.construct_object(node.value[key_node]) - mapping[key] = value - if merge is not None: - merge.append(mapping) - mapping = {} - for submapping in merge: - mapping.update(submapping) + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value return mapping - def construct_pairs(self, node): + def construct_pairs(self, node, deep=False): if not isinstance(node, MappingNode): raise ConstructorError(None, None, "expected a mapping node, but found %s" % node.id, node.start_mark) pairs = [] - for key_node in node.value: - key = self.construct_object(key_node) - value = self.construct_object(node.value[key_node]) + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) pairs.append((key, value)) return pairs @@ -186,6 +160,53 @@ class BaseConstructor(Composer): class SafeConstructor(BaseConstructor): + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise 
ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + def construct_yaml_null(self, node): self.construct_scalar(node) return None @@ -231,20 +252,22 @@ class SafeConstructor(BaseConstructor): else: return sign*int(value) - inf_value = 1e300000 - nan_value = inf_value/inf_value + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). def construct_yaml_float(self, node): value = str(self.construct_scalar(node)) - value = value.replace('_', '') + value = value.replace('_', '').lower() sign = +1 if value[0] == '-': sign = -1 if value[0] in '+-': value = value[1:] - if value.lower() == '.inf': + if value == '.inf': return sign*self.inf_value - elif value.lower() == '.nan': + elif value == '.nan': return self.nan_value elif ':' in value: digits = [float(part) for part in value.split(':')] @@ -256,7 +279,7 @@ class SafeConstructor(BaseConstructor): base *= 60 return sign*value else: - return float(value) + return sign*float(value) def construct_yaml_binary(self, node): value = self.construct_scalar(node) @@ -275,35 +298,47 @@ class SafeConstructor(BaseConstructor): :(?P<minute>[0-9][0-9]) :(?P<second>[0-9][0-9]) (?:\.(?P<fraction>[0-9]*))? - (?:[ \t]*(?:Z|(?P<tz_hour>[-+][0-9][0-9]?) - (?::(?P<tz_minute>[0-9][0-9])?)?))?)?$''', re.X) + (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?) 
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X) def construct_yaml_timestamp(self, node): value = self.construct_scalar(node) match = self.timestamp_regexp.match(node.value) values = match.groupdict() - for key in values: - if values[key]: - values[key] = int(values[key]) - else: - values[key] = 0 - fraction = values['fraction'] - if fraction: - while 10*fraction < 1000000: - fraction *= 10 - values['fraction'] = fraction - stamp = datetime.datetime(values['year'], values['month'], values['day'], - values['hour'], values['minute'], values['second'], values['fraction']) - diff = datetime.timedelta(hours=values['tz_hour'], minutes=values['tz_minute']) - return stamp-diff + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data def construct_yaml_omap(self, node): # Note: we do not check for duplicate keys, because it's too # CPU-expensive. + omap = [] + yield omap if not isinstance(node, SequenceNode): raise ConstructorError("while constructing an ordered map", node.start_mark, "expected a sequence, but found %s" % node.id, node.start_mark) - omap = [] for subnode in node.value: if not isinstance(subnode, MappingNode): raise ConstructorError("while constructing an ordered map", node.start_mark, @@ -313,18 +348,18 @@ class SafeConstructor(BaseConstructor): raise ConstructorError("while constructing an ordered map", node.start_mark, "expected a single mapping item, but found %d items" % len(subnode.value), subnode.start_mark) - key_node = subnode.value.keys()[0] + key_node, value_node = subnode.value[0] key = self.construct_object(key_node) - value = self.construct_object(subnode.value[key_node]) + value = self.construct_object(value_node) omap.append((key, value)) - return omap def construct_yaml_pairs(self, node): # Note: the same code as `construct_yaml_omap`. 
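
Two behavioural changes in the new SafeConstructor hunks above are worth illustrating: flatten_mapping gives '<<' merge keys their standard meaning (merged entries are prepended, so explicit keys win), and construct_yaml_timestamp now yields datetime.date / datetime.datetime values with any timezone offset folded in. A rough usage sketch against the public safe_load API follows; the document contents and expected values are illustrative only.

    import yaml

    doc = '''
    defaults: &defaults
      retries: 3
      timeout: 10
    job:
      <<: *defaults
      timeout: 30
      started: 2001-12-14 21:59:43.10 -5
    '''
    data = yaml.safe_load(doc)
    # Expected: data['job']['retries'] == 3   (merged in from the anchor)
    #           data['job']['timeout'] == 30  (explicit key overrides the merge)
    #           data['job']['started'] is a datetime.datetime with the
    #           -5h offset already applied (naive, UTC-normalised)
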
+ pairs = [] + yield pairs if not isinstance(node, SequenceNode): raise ConstructorError("while constructing pairs", node.start_mark, "expected a sequence, but found %s" % node.id, node.start_mark) - pairs = [] for subnode in node.value: if not isinstance(subnode, MappingNode): raise ConstructorError("while constructing pairs", node.start_mark, @@ -334,37 +369,44 @@ class SafeConstructor(BaseConstructor): raise ConstructorError("while constructing pairs", node.start_mark, "expected a single mapping item, but found %d items" % len(subnode.value), subnode.start_mark) - key_node = subnode.value.keys()[0] + key_node, value_node = subnode.value[0] key = self.construct_object(key_node) - value = self.construct_object(subnode.value[key_node]) + value = self.construct_object(value_node) pairs.append((key, value)) - return pairs def construct_yaml_set(self, node): + data = set() + yield data value = self.construct_mapping(node) - return set(value) + data.update(value) def construct_yaml_str(self, node): value = self.construct_scalar(node) try: - return str(value) + return value.encode('ascii') except UnicodeEncodeError: return value def construct_yaml_seq(self, node): - return self.construct_sequence(node) + data = [] + yield data + data.extend(self.construct_sequence(node)) def construct_yaml_map(self, node): - return self.construct_mapping(node) + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) def construct_yaml_object(self, node, cls): - state = self.construct_mapping(node) data = cls.__new__(cls) + yield data if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) data.__setstate__(state) else: + state = self.construct_mapping(node) data.__dict__.update(state) - return data def construct_undefined(self, node): raise ConstructorError(None, None, @@ -391,10 +433,9 @@ SafeConstructor.add_constructor( u'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary) -if datetime_available: - SafeConstructor.add_constructor( - u'tag:yaml.org,2002:timestamp', - SafeConstructor.construct_yaml_timestamp) +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) SafeConstructor.add_constructor( u'tag:yaml.org,2002:omap', @@ -438,7 +479,7 @@ class Constructor(SafeConstructor): return complex(self.construct_scalar(node)) def construct_python_tuple(self, node): - return tuple(self.construct_yaml_seq(node)) + return tuple(self.construct_sequence(node)) def find_python_module(self, name, mark): if not name: @@ -456,11 +497,7 @@ class Constructor(SafeConstructor): raise ConstructorError("while constructing a Python object", mark, "expected non-empty name appended to the tag", mark) if u'.' in name: - # Python 2.4 only - #module_name, object_name = name.rsplit('.', 1) - items = name.split('.') - object_name = items.pop() - module_name = '.'.join(items) + module_name, object_name = name.rsplit('.', 1) else: module_name = '__builtin__' object_name = name @@ -529,9 +566,10 @@ class Constructor(SafeConstructor): # Format: # !!python/object:module.name { ... state ... 
} instance = self.make_python_instance(suffix, node, newobj=True) - state = self.construct_mapping(node) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) self.set_python_instance_state(instance, state) - return instance def construct_python_object_apply(self, suffix, node, newobj=False): # Format: @@ -546,13 +584,13 @@ class Constructor(SafeConstructor): # The difference between !!python/object/apply and !!python/object/new # is how an object is created, check make_python_instance for details. if isinstance(node, SequenceNode): - args = self.construct_sequence(node) + args = self.construct_sequence(node, deep=True) kwds = {} state = {} listitems = [] dictitems = {} else: - value = self.construct_mapping(node) + value = self.construct_mapping(node, deep=True) args = value.get('args', []) kwds = value.get('kwds', {}) state = value.get('state', {}) @@ -571,7 +609,6 @@ class Constructor(SafeConstructor): def construct_python_object_new(self, suffix, node): return self.construct_python_object_apply(suffix, node, newobj=True) - Constructor.add_constructor( u'tag:yaml.org,2002:python/none', Constructor.construct_yaml_null) diff --git a/scripts/external_libs/yaml-3.11/yaml/cyaml.py b/scripts/external_libs/yaml-3.11/yaml/cyaml.py new file mode 100755 index 00000000..68dcd751 --- /dev/null +++ b/scripts/external_libs/yaml-3.11/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + 
default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/dumper.py b/scripts/external_libs/yaml-3.11/yaml/dumper.py index 355c1e2f..f811d2c9 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/dumper.py +++ b/scripts/external_libs/yaml-3.11/yaml/dumper.py @@ -16,7 +16,7 @@ class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): version=None, tags=None): Emitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, - allow_uncode=allow_unicode, line_break=line_break) + allow_unicode=allow_unicode, line_break=line_break) Serializer.__init__(self, encoding=encoding, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/emitter.py b/scripts/external_libs/yaml-3.11/yaml/emitter.py index a34c4526..e5bcdccc 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/emitter.py +++ b/scripts/external_libs/yaml-3.11/yaml/emitter.py @@ -11,12 +11,10 @@ __all__ = ['Emitter', 'EmitterError'] from error import YAMLError from events import * -import re - class EmitterError(YAMLError): pass -class ScalarAnalysis: +class ScalarAnalysis(object): def __init__(self, scalar, empty, multiline, allow_flow_plain, allow_block_plain, allow_single_quoted, allow_double_quoted, @@ -30,7 +28,7 @@ class ScalarAnalysis: self.allow_double_quoted = allow_double_quoted self.allow_block = allow_block -class Emitter: +class Emitter(object): DEFAULT_TAG_PREFIXES = { u'!' : u'!', @@ -78,6 +76,9 @@ class Emitter: self.whitespace = True self.indention = True + # Whether the document requires an explicit document indicator + self.open_ended = False + # Formatting details. 
self.canonical = canonical self.allow_unicode = allow_unicode @@ -102,6 +103,11 @@ class Emitter: self.analysis = None self.style = None + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + def emit(self, event): self.events.append(event) while not self.need_more_events(): @@ -153,7 +159,7 @@ class Emitter: def expect_stream_start(self): if isinstance(self.event, StreamStartEvent): - if self.event.encoding: + if self.event.encoding and not getattr(self.stream, 'encoding', None): self.encoding = self.event.encoding self.write_stream_start() self.state = self.expect_first_document_start @@ -171,6 +177,9 @@ class Emitter: def expect_document_start(self, first=False): if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() if self.event.version: version_text = self.prepare_version(self.event.version) self.write_version_directive(version_text) @@ -194,6 +203,9 @@ class Emitter: self.write_indent() self.state = self.expect_document_root elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() self.write_stream_end() self.state = self.expect_nothing else: @@ -492,7 +504,8 @@ class Emitter: or (not self.flow_level and self.analysis.allow_block_plain))): return '' if self.event.style and self.event.style in '|>': - if not self.flow_level and self.analysis.allow_block: + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): return self.event.style if not self.event.style or self.event.style == '\'': if (self.analysis.allow_single_quoted and @@ -537,7 +550,7 @@ class Emitter: raise EmitterError("tag handle must start and end with '!': %r" % (handle.encode('utf-8'))) for ch in handle[1:-1]: - if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-_'): raise EmitterError("invalid character %r in the tag handle: %r" % (ch.encode('utf-8'), handle.encode('utf-8'))) @@ -552,7 +565,7 @@ class Emitter: end = 1 while end < len(prefix): ch = prefix[end] - if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-;/?!:@&=+$,_.~*\'()[]': end += 1 else: @@ -573,7 +586,9 @@ class Emitter: return tag handle = None suffix = tag - for prefix in self.tag_prefixes: + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: if tag.startswith(prefix) \ and (prefix == u'!' or len(prefix) < len(tag)): handle = self.tag_prefixes[prefix] @@ -582,7 +597,7 @@ class Emitter: start = end = 0 while end < len(suffix): ch = suffix[end] - if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-;/?:@&=+$,_.~*\'()[]' \ or (ch == u'!' and handle != u'!'): end += 1 @@ -605,7 +620,7 @@ class Emitter: if not anchor: raise EmitterError("anchor must not be empty") for ch in anchor: - if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-_'): raise EmitterError("invalid character %r in the anchor: %r" % (ch.encode('utf-8'), anchor.encode('utf-8'))) @@ -626,15 +641,13 @@ class Emitter: line_breaks = False special_characters = False - # Whitespaces. 
- inline_spaces = False # non-space space+ non-space - inline_breaks = False # non-space break+ non-space - leading_spaces = False # ^ space+ (non-space | $) - leading_breaks = False # ^ break+ (non-space | $) - trailing_spaces = False # (^ | non-space) space+ $ - trailing_breaks = False # (^ | non-space) break+ $ - inline_breaks_spaces = False # non-space break+ space+ non-space - mixed_breaks_spaces = False # anything else + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False # Check document indicators. if scalar.startswith(u'---') or scalar.startswith(u'...'): @@ -642,42 +655,33 @@ class Emitter: flow_indicators = True # First character or preceded by a whitespace. - preceeded_by_space = True + preceeded_by_whitespace = True # Last character or followed by a whitespace. - followed_by_space = (len(scalar) == 1 or + followed_by_whitespace = (len(scalar) == 1 or scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') - # The current series of whitespaces contain plain spaces. - spaces = False - - # The current series of whitespaces contain line breaks. - breaks = False + # The previous character is a space. + previous_space = False - # The current series of whitespaces contain a space followed by a - # break. - mixed = False - - # The current series of whitespaces start at the beginning of the - # scalar. - leading = False + # The previous character is a break. + previous_break = False index = 0 while index < len(scalar): ch = scalar[index] # Check for indicators. - if index == 0: # Leading indicators are special characters. - if ch in u'#,[]{}#&*!|>\'\"%@`': + if ch in u'#,[]{}&*!|>\'\"%@`': flow_indicators = True block_indicators = True if ch in u'?:': flow_indicators = True - if followed_by_space: + if followed_by_whitespace: block_indicators = True - if ch == u'-' and followed_by_space: + if ch == u'-' and followed_by_whitespace: flow_indicators = True block_indicators = True else: @@ -686,14 +690,13 @@ class Emitter: flow_indicators = True if ch == u':': flow_indicators = True - if followed_by_space: + if followed_by_whitespace: block_indicators = True - if ch == u'#' and preceeded_by_space: + if ch == u'#' and preceeded_by_whitespace: flow_indicators = True block_indicators = True # Check for line breaks, special, and unicode characters. - if ch in u'\n\x85\u2028\u2029': line_breaks = True if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): @@ -705,65 +708,33 @@ class Emitter: else: special_characters = True - # Spaces, line breaks, and how they are mixed. State machine. - - # Start or continue series of whitespaces. - if ch in u' \n\x85\u2028\u2029': - if spaces and breaks: - if ch != u' ': # break+ (space+ break+) => mixed - mixed = True - elif spaces: - if ch != u' ': # (space+ break+) => mixed - breaks = True - mixed = True - elif breaks: - if ch == u' ': # break+ space+ - spaces = True - else: - leading = (index == 0) - if ch == u' ': # space+ - spaces = True - else: # break+ - breaks = True - - # Series of whitespaces ended with a non-space. 
- elif spaces or breaks: - if leading: - if spaces and breaks: - mixed_breaks_spaces = True - elif spaces: - leading_spaces = True - elif breaks: - leading_breaks = True - else: - if mixed: - mixed_breaks_spaces = True - elif spaces and breaks: - inline_breaks_spaces = True - elif spaces: - inline_spaces = True - elif breaks: - inline_breaks = True - spaces = breaks = mixed = leading = False - - # Series of whitespaces reach the end. - if (spaces or breaks) and (index == len(scalar)-1): - if spaces and breaks: - mixed_breaks_spaces = True - elif spaces: - trailing_spaces = True - if leading: - leading_spaces = True - elif breaks: - trailing_breaks = True - if leading: - leading_breaks = True - spaces = breaks = mixed = leading = False + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False # Prepare for the next character. index += 1 - preceeded_by_space = (ch in u'\0 \t\r\n\x85\u2028\u2029') - followed_by_space = (index+1 >= len(scalar) or + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') # Let's decide what styles are allowed. @@ -773,28 +744,28 @@ class Emitter: allow_double_quoted = True allow_block = True - # Leading and trailing whitespace are bad for plain scalars. We also - # do not want to mess with leading whitespaces for block scalars. - if leading_spaces or leading_breaks or trailing_spaces: - allow_flow_plain = allow_block_plain = allow_block = False - - # Trailing breaks are fine for block scalars, but unacceptable for - # plain scalars. - if trailing_breaks: + # Leading and trailing whitespaces are bad for plain scalars. + if (leading_space or leading_break + or trailing_space or trailing_break): allow_flow_plain = allow_block_plain = False - # The combination of (space+ break+) is only acceptable for block + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block # scalars. - if inline_breaks_spaces: + if break_space: allow_flow_plain = allow_block_plain = allow_single_quoted = False - # Mixed spaces and breaks, as well as special character are only + # Spaces followed by breaks, as well as special character are only # allowed for double quoted scalars. - if mixed_breaks_spaces or special_characters: + if space_break or special_characters: allow_flow_plain = allow_block_plain = \ allow_single_quoted = allow_block = False - # We don't emit multiline plain scalars. + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. if line_breaks: allow_flow_plain = allow_block_plain = False @@ -823,7 +794,7 @@ class Emitter: def write_stream_start(self): # Write BOM if needed. 
if self.encoding and self.encoding.startswith('utf-16'): - self.stream.write(u'\xFF\xFE'.encode(self.encoding)) + self.stream.write(u'\uFEFF'.encode(self.encoding)) def write_stream_end(self): self.flush_stream() @@ -837,6 +808,7 @@ class Emitter: self.whitespace = whitespace self.indention = self.indention and indention self.column += len(data) + self.open_ended = False if self.encoding: data = data.encode(self.encoding) self.stream.write(data) @@ -922,13 +894,13 @@ class Emitter: data = data.encode(self.encoding) self.stream.write(data) start = end - if ch == u'\'': - data = u'\'\'' - self.column += 2 - if self.encoding: - data = data.encode(self.encoding) - self.stream.write(data) - start = end + 1 + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 if ch is not None: spaces = (ch == u' ') breaks = (ch in u'\n\x85\u2028\u2029') @@ -1007,25 +979,26 @@ class Emitter: end += 1 self.write_indicator(u'"', False) - def determine_chomp(self, text): - tail = text[-2:] - while len(tail) < 2: - tail = u' '+tail - if tail[-1] in u'\n\x85\u2028\u2029': - if tail[-2] in u'\n\x85\u2028\u2029': - return u'+' - else: - return u'' - else: - return u'-' + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints def write_folded(self, text): - chomp = self.determine_chomp(text) - self.write_indicator(u'>'+chomp, True) - self.write_indent() - leading_space = False + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True spaces = False - breaks = False + breaks = True start = end = 0 while end <= len(text): ch = None @@ -1059,6 +1032,7 @@ class Emitter: else: if ch is None or ch in u' \n\x85\u2028\u2029': data = text[start:end] + self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) @@ -1071,10 +1045,12 @@ class Emitter: end += 1 def write_literal(self, text): - chomp = self.determine_chomp(text) - self.write_indicator(u'|'+chomp, True) - self.write_indent() - breaks = False + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True start = end = 0 while end <= len(text): ch = None @@ -1104,6 +1080,8 @@ class Emitter: end += 1 def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True if not text: return if not self.whitespace: @@ -1112,7 +1090,7 @@ class Emitter: if self.encoding: data = data.encode(self.encoding) self.stream.write(data) - self.writespace = False + self.whitespace = False self.indention = False spaces = False breaks = False @@ -1125,7 +1103,7 @@ class Emitter: if ch != u' ': if start+1 == end and self.column > self.best_width and split: self.write_indent() - self.writespace = False + self.whitespace = False self.indention = False else: data = text[start:end] diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/error.py b/scripts/external_libs/yaml-3.11/yaml/error.py index 8fa916b2..577686db 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/error.py +++ b/scripts/external_libs/yaml-3.11/yaml/error.py @@ -1,7 +1,7 @@ __all__ = 
['Mark', 'YAMLError', 'MarkedYAMLError'] -class Mark: +class Mark(object): def __init__(self, name, index, line, column, buffer, pointer): self.name = name diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/events.py b/scripts/external_libs/yaml-3.11/yaml/events.py index 3f244fa0..f79ad389 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/events.py +++ b/scripts/external_libs/yaml-3.11/yaml/events.py @@ -1,7 +1,7 @@ # Abstract classes. -class Event: +class Event(object): def __init__(self, start_mark=None, end_mark=None): self.start_mark = start_mark self.end_mark = end_mark diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/loader.py b/scripts/external_libs/yaml-3.11/yaml/loader.py index 293ff467..293ff467 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/loader.py +++ b/scripts/external_libs/yaml-3.11/yaml/loader.py diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/nodes.py b/scripts/external_libs/yaml-3.11/yaml/nodes.py index cb8c1cba..c4f070c4 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/nodes.py +++ b/scripts/external_libs/yaml-3.11/yaml/nodes.py @@ -1,5 +1,5 @@ -class Node: +class Node(object): def __init__(self, tag, value, start_mark, end_mark): self.tag = tag self.value = value diff --git a/scripts/external_libs/yaml-3.11/yaml/parser.py b/scripts/external_libs/yaml-3.11/yaml/parser.py new file mode 100755 index 00000000..f9e3057f --- /dev/null +++ b/scripts/external_libs/yaml-3.11/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. + self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. 
+ self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '<document start>', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. + self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + 
elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. + event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected <block end>, but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY block_node_or_indentless_sequence?)? 
+ # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected <block end>, but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
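For the "inline mapping (set syntax)" note above, the behaviour is easy to spot-check from the public API; a minimal sketch, assuming the package is importable as `yaml`:

    import yaml

    # a bare 'KEY: VALUE' entry inside a flow sequence becomes a one-pair mapping,
    # exactly like the explicitly braced form next to it
    print(yaml.safe_load("[a, b: 1, {c: 2}]"))
    # -> ['a', {'b': 1}, {'c': 2}]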
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
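The flow-mapping states below, like the rest of the parser, only emit events; nothing is constructed or tag-resolved at this stage. A quick way to see the event stream these methods produce (a sketch, assuming PyYAML is importable as `yaml`):

    import yaml

    for event in yaml.parse("{a: 1, b: [2, 3]}"):
        print(event)
    # MappingStartEvent, ScalarEvent pairs for 'a'/'1' and 'b', a nested
    # SequenceStartEvent for [2, 3], then the matching End events. Scalar
    # values are still plain strings here -- tags are assigned later by the
    # resolver and Python objects built by the constructor.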
+ + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/reader.py b/scripts/external_libs/yaml-3.11/yaml/reader.py index beb76d0a..3249e6b9 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/reader.py +++ b/scripts/external_libs/yaml-3.11/yaml/reader.py @@ -21,41 +21,6 @@ from error import YAMLError, Mark import codecs, re -# Unfortunately, codec functions in Python 2.3 does not support the `finish` -# arguments, so we have to write our own wrappers. - -try: - codecs.utf_8_decode('', 'strict', False) - from codecs import utf_8_decode, utf_16_le_decode, utf_16_be_decode - -except TypeError: - - def utf_16_le_decode(data, errors, finish=False): - if not finish and len(data) % 2 == 1: - data = data[:-1] - return codecs.utf_16_le_decode(data, errors) - - def utf_16_be_decode(data, errors, finish=False): - if not finish and len(data) % 2 == 1: - data = data[:-1] - return codecs.utf_16_be_decode(data, errors) - - def utf_8_decode(data, errors, finish=False): - if not finish: - # We are trying to remove a possible incomplete multibyte character - # from the suffix of the data. - # The first byte of a multi-byte sequence is in the range 0xc0 to 0xfd. - # All further bytes are in the range 0x80 to 0xbf. - # UTF-8 encoded UCS characters may be up to six bytes long. 
- count = 0 - while count < 5 and count < len(data) \ - and '\x80' <= data[-count-1] <= '\xBF': - count -= 1 - if count < 5 and count < len(data) \ - and '\xC0' <= data[-count-1] <= '\xFD': - data = data[:-count-1] - return codecs.utf_8_decode(data, errors) - class ReaderError(YAMLError): def __init__(self, name, position, character, encoding, reason): @@ -74,10 +39,10 @@ class ReaderError(YAMLError): else: return "unacceptable character #x%04x: %s\n" \ " in \"%s\", position %d" \ - % (ord(self.character), self.reason, + % (self.character, self.reason, self.name, self.position) -class Reader: +class Reader(object): # Reader: # - determines the data encoding and converts it to unicode, # - checks if characters are in allowed range, @@ -120,9 +85,11 @@ class Reader: self.determine_encoding() def peek(self, index=0): - if self.pointer+index+1 >= len(self.buffer): + try: + return self.buffer[self.pointer+index] + except IndexError: self.update(index+1) - return self.buffer[self.pointer+index] + return self.buffer[self.pointer+index] def prefix(self, length=1): if self.pointer+length >= len(self.buffer): @@ -132,16 +99,17 @@ class Reader: def forward(self, length=1): if self.pointer+length+1 >= len(self.buffer): self.update(length+1) - for k in range(length): + while length: ch = self.buffer[self.pointer] self.pointer += 1 self.index += 1 if ch in u'\n\x85\u2028\u2029' \ - or (ch == u'\r' and self.buffer[self.pointer+1] != u'\n'): + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): self.line += 1 self.column = 0 elif ch != u'\uFEFF': self.column += 1 + length -= 1 def get_mark(self): if self.stream is None: @@ -156,13 +124,13 @@ class Reader: self.update_raw() if not isinstance(self.raw_buffer, unicode): if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): - self.raw_decode = utf_16_le_decode + self.raw_decode = codecs.utf_16_le_decode self.encoding = 'utf-16-le' elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): - self.raw_decode = utf_16_be_decode + self.raw_decode = codecs.utf_16_be_decode self.encoding = 'utf-16-be' else: - self.raw_decode = utf_8_decode + self.raw_decode = codecs.utf_8_decode self.encoding = 'utf-8' self.update(1) @@ -172,7 +140,7 @@ class Reader: if match: character = match.group() position = self.index+(len(self.buffer)-self.pointer)+match.start() - raise ReaderError(self.name, position, character, + raise ReaderError(self.name, position, ord(character), 'unicode', "special characters are not allowed") def update(self, length): diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/representer.py b/scripts/external_libs/yaml-3.11/yaml/representer.py index cb37169d..5f4fc70d 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/representer.py +++ b/scripts/external_libs/yaml-3.11/yaml/representer.py @@ -5,23 +5,14 @@ __all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', from error import * from nodes import * -try: - import datetime - datetime_available = True -except ImportError: - datetime_available = False +import datetime -try: - set -except NameError: - from sets import Set as set - -import sys, copy_reg +import sys, copy_reg, types class RepresenterError(YAMLError): pass -class BaseRepresenter: +class BaseRepresenter(object): yaml_representers = {} yaml_multi_representers = {} @@ -30,21 +21,15 @@ class BaseRepresenter: self.default_style = default_style self.default_flow_style = default_flow_style self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None def represent(self, data): node = self.represent_data(data) 
self.serialize(node) self.represented_objects = {} - - class C: pass - c = C() - def f(): pass - classobj_type = type(C) - instance_type = type(c) - function_type = type(f) - builtin_function_type = type(abs) - module_type = type(sys) - del C, c, f + self.object_keeper = [] + self.alias_key = None def get_classobj_bases(self, cls): bases = [cls] @@ -54,18 +39,19 @@ class BaseRepresenter: def represent_data(self, data): if self.ignore_aliases(data): - alias_key = None + self.alias_key = None else: - alias_key = id(data) - if alias_key is not None: - if alias_key in self.represented_objects: - node = self.represented_objects[alias_key] - if node is None: - raise RepresenterError("recursive objects are not allowed: %r" % data) + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) return node - self.represented_objects[alias_key] = None + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) data_types = type(data).__mro__ - if type(data) is self.instance_type: + if type(data) is types.InstanceType: data_types = self.get_classobj_bases(data.__class__)+list(data_types) if data_types[0] in self.yaml_representers: node = self.yaml_representers[data_types[0]](self, data) @@ -81,8 +67,8 @@ class BaseRepresenter: node = self.yaml_representers[None](self, data) else: node = ScalarNode(None, unicode(data)) - if alias_key is not None: - self.represented_objects[alias_key] = node + #if alias_key is not None: + # self.represented_objects[alias_key] = node return node def add_representer(cls, data_type, representer): @@ -100,50 +86,52 @@ class BaseRepresenter: def represent_scalar(self, tag, value, style=None): if style is None: style = self.default_style - return ScalarNode(tag, value, style=style) + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node def represent_sequence(self, tag, sequence, flow_style=None): - best_style = True value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True for item in sequence: node_item = self.represent_data(item) if not (isinstance(node_item, ScalarNode) and not node_item.style): best_style = False - value.append(self.represent_data(item)) + value.append(node_item) if flow_style is None: - flow_style = self.default_flow_style - if flow_style is None: - flow_style = best_style - return SequenceNode(tag, value, flow_style=flow_style) + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node best_style = True - if hasattr(mapping, 'keys'): - value = {} - for item_key in mapping.keys(): - item_value = mapping[item_key] - node_key = self.represent_data(item_key) - node_value = self.represent_data(item_value) - if not (isinstance(node_key, ScalarNode) and not node_key.style): - best_style = False - if not (isinstance(node_value, ScalarNode) and not node_value.style): - best_style = False - value[node_key] = node_value - else: - value = [] - for item_key, item_value in 
mapping: - node_key = self.represent_data(item_key) - node_value = self.represent_data(item_value) - if not (isinstance(node_key, ScalarNode) and not node_key.style): - best_style = False - if not (isinstance(node_value, ScalarNode) and not node_value.style): - best_style = False - value.append((node_key, node_value)) - if flow_style is None: - flow_style = self.default_flow_style + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) if flow_style is None: - flow_style = best_style - return MappingNode(tag, value, flow_style=flow_style) + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node def ignore_aliases(self, data): return False @@ -192,36 +180,44 @@ class SafeRepresenter(BaseRepresenter): def represent_long(self, data): return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) - repr_pos_inf = repr(1e300000) - repr_neg_inf = repr(-1e300000) - repr_nan = repr(1e300000/1e300000) + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value def represent_float(self, data): - repr_data = repr(data) - if repr_data == self.repr_pos_inf: + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: value = u'.inf' - elif repr_data == self.repr_neg_inf: + elif data == -self.inf_value: value = u'-.inf' - elif repr_data == self.repr_nan: - value = u'.nan' else: - value = unicode(repr_data) + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) return self.represent_scalar(u'tag:yaml.org,2002:float', value) def represent_list(self, data): - pairs = (len(data) > 0 and isinstance(data, list)) - if pairs: - for item in data: - if not isinstance(item, tuple) or len(item) != 2: - pairs = False - break - if not pairs: + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: return self.represent_sequence(u'tag:yaml.org,2002:seq', data) - value = [] - for item_key, item_value in data: - value.append(self.represent_mapping(u'tag:yaml.org,2002:map', - [(item_key, item_value)])) - return SequenceNode(u'tag:yaml.org,2002:pairs', value) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) def represent_dict(self, data): return self.represent_mapping(u'tag:yaml.org,2002:map', data) @@ -233,17 +229,11 @@ class SafeRepresenter(BaseRepresenter): return self.represent_mapping(u'tag:yaml.org,2002:set', value) def represent_date(self, data): - value = u'%04d-%02d-%02d' % (data.year, data.month, data.day) + value = unicode(data.isoformat()) return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) def represent_datetime(self, data): - value = u'%04d-%02d-%02d %02d:%02d:%02d' \ - % (data.year, data.month, data.day, - data.hour, data.minute, data.second) - if data.microsecond: - value += u'.' + unicode(data.microsecond/1000000.0).split(u'.')[1] - if data.utcoffset(): - value += unicode(data.utcoffset()) + value = unicode(data.isoformat(' ')) return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) def represent_yaml_object(self, tag, data, cls, flow_style=None): @@ -251,9 +241,6 @@ class SafeRepresenter(BaseRepresenter): state = data.__getstate__() else: state = data.__dict__.copy() - if isinstance(state, dict): - state = state.items() - state.sort() return self.represent_mapping(tag, state, flow_style=flow_style) def represent_undefined(self, data): @@ -292,11 +279,11 @@ SafeRepresenter.add_representer(dict, SafeRepresenter.add_representer(set, SafeRepresenter.represent_set) -if datetime_available: - SafeRepresenter.add_representer(datetime.date, - SafeRepresenter.represent_date) - SafeRepresenter.add_representer(datetime.datetime, - SafeRepresenter.represent_datetime) +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined) @@ -385,8 +372,6 @@ class Representer(SafeRepresenter): else: state = data.__dict__ if args is None and isinstance(state, dict): - state = state.items() - state.sort() return self.represent_mapping( u'tag:yaml.org,2002:python/object:'+class_name, state) if isinstance(state, dict) and not state: @@ -418,7 +403,7 @@ class Representer(SafeRepresenter): cls = type(data) if cls in copy_reg.dispatch_table: - reduce = copy_reg.dispatch_table[cls] + reduce = copy_reg.dispatch_table[cls](data) elif hasattr(data, '__reduce_ex__'): reduce = data.__reduce_ex__(2) elif hasattr(data, '__reduce__'): @@ -445,8 +430,6 @@ class Representer(SafeRepresenter): function_name = u'%s.%s' % (function.__module__, function.__name__) if not args and not listitems and not dictitems \ and 
isinstance(state, dict) and newobj: - state = state.items() - state.sort() return self.represent_mapping( u'tag:yaml.org,2002:python/object:'+function_name, state) if not listitems and not dictitems \ @@ -481,19 +464,19 @@ Representer.add_representer(tuple, Representer.add_representer(type, Representer.represent_name) -Representer.add_representer(Representer.classobj_type, +Representer.add_representer(types.ClassType, Representer.represent_name) -Representer.add_representer(Representer.function_type, +Representer.add_representer(types.FunctionType, Representer.represent_name) -Representer.add_representer(Representer.builtin_function_type, +Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name) -Representer.add_representer(Representer.module_type, +Representer.add_representer(types.ModuleType, Representer.represent_module) -Representer.add_multi_representer(Representer.instance_type, +Representer.add_multi_representer(types.InstanceType, Representer.represent_instance) Representer.add_multi_representer(object, diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/resolver.py b/scripts/external_libs/yaml-3.11/yaml/resolver.py index 7e580e98..6b5ab875 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/resolver.py +++ b/scripts/external_libs/yaml-3.11/yaml/resolver.py @@ -9,7 +9,7 @@ import re class ResolverError(YAMLError): pass -class BaseResolver: +class BaseResolver(object): DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' @@ -32,6 +32,18 @@ class BaseResolver: add_implicit_resolver = classmethod(add_implicit_resolver) def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
if not 'yaml_path_resolvers' in cls.__dict__: cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() new_path = [] @@ -51,7 +63,7 @@ class BaseResolver: node_check = ScalarNode elif node_check is list: node_check = SequenceNode - elif node_check is map: + elif node_check is dict: node_check = MappingNode elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ and not isinstance(node_check, basestring) \ @@ -65,7 +77,7 @@ class BaseResolver: kind = ScalarNode elif kind is list: kind = SequenceNode - elif kind is map: + elif kind is dict: kind = MappingNode elif kind not in [ScalarNode, SequenceNode, MappingNode] \ and kind is not None: @@ -74,6 +86,8 @@ class BaseResolver: add_path_resolver = classmethod(add_path_resolver) def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return exact_paths = {} prefix_paths = [] if current_node: @@ -95,6 +109,8 @@ class BaseResolver: self.resolver_prefix_paths.append(prefix_paths) def ascend_resolver(self): + if not self.yaml_path_resolvers: + return self.resolver_exact_paths.pop() self.resolver_prefix_paths.pop() @@ -109,13 +125,14 @@ class BaseResolver: return if index_check is True and current_index is not None: return - if index_check in [False, None] and current_index is None: + if (index_check is False or index_check is None) \ + and current_index is None: return if isinstance(index_check, basestring): if not (isinstance(current_index, ScalarNode) and index_check == current_index.value): return - elif isinstance(index_check, int): + elif isinstance(index_check, int) and not isinstance(index_check, bool): if index_check != current_index: return return True @@ -131,11 +148,12 @@ class BaseResolver: if regexp.match(value): return tag implicit = implicit[1] - exact_paths = self.resolver_exact_paths[-1] - if kind in exact_paths: - return exact_paths[kind] - if None in exact_paths: - return exact_paths[None] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] if kind is ScalarNode: return self.DEFAULT_SCALAR_TAG elif kind is SequenceNode: @@ -148,14 +166,15 @@ class Resolver(BaseResolver): Resolver.add_implicit_resolver( u'tag:yaml.org,2002:bool', - re.compile(ur'''^(?:yes|Yes|YES|n|N|no|No|NO + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO |true|True|TRUE|false|False|FALSE |on|On|ON|off|Off|OFF)$''', re.X), list(u'yYnNtTfFoO')) Resolver.add_implicit_resolver( u'tag:yaml.org,2002:float', - re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)?\.[0-9_]*(?:[eE][-+][0-9]+)? + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* |[-+]?\.(?:inf|Inf|INF) |\.(?:nan|NaN|NAN))$''', re.X), @@ -173,7 +192,7 @@ Resolver.add_implicit_resolver( Resolver.add_implicit_resolver( u'tag:yaml.org,2002:merge', re.compile(ur'^(?:<<)$'), - ['<']) + [u'<']) Resolver.add_implicit_resolver( u'tag:yaml.org,2002:null', @@ -194,7 +213,7 @@ Resolver.add_implicit_resolver( Resolver.add_implicit_resolver( u'tag:yaml.org,2002:value', re.compile(ur'^(?:=)$'), - ['=']) + [u'=']) # The following resolver is only for documentation purposes. It cannot work # because plain scalars cannot start with '!', '&', or '*'. 
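The tightened implicit resolvers above (together with the `!!float` representer fix earlier in this changeset) are visible directly from the round-trip API; a small sketch, assuming the upgraded package is importable as `yaml`:

    import yaml

    # untagged plain scalars are typed by the implicit resolvers
    print(yaml.safe_load("a: 123"))      # {'a': 123}
    print(yaml.safe_load("b: 1.5e+3"))   # {'b': 1500.0} (the exponent sign is required by this regexp)
    print(yaml.safe_load("c: off"))      # {'c': False}
    print(yaml.safe_load("d: hello"))    # {'d': 'hello'} -- falls back to !!str

    # on output, exponent-only floats now gain a '.0' so they stay valid !!float scalars
    print(yaml.dump({'x': 1e17}))        # {x: 1.0e+17}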
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/scanner.py b/scripts/external_libs/yaml-3.11/yaml/scanner.py index cf2478f9..5228fad6 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/scanner.py +++ b/scripts/external_libs/yaml-3.11/yaml/scanner.py @@ -19,7 +19,7 @@ # ALIAS(value) # ANCHOR(value) # TAG(value) -# SCALAR(value, plain) +# SCALAR(value, plain, style) # # Read comments in the Scanner code for more details. # @@ -32,7 +32,7 @@ from tokens import * class ScannerError(MarkedYAMLError): pass -class SimpleKey: +class SimpleKey(object): # See below simple keys treatment. def __init__(self, token_number, required, index, line, column, mark): @@ -43,7 +43,7 @@ class SimpleKey: self.column = column self.mark = mark -class Scanner: +class Scanner(object): def __init__(self): """Initialize the scanner.""" @@ -137,16 +137,6 @@ class Scanner: self.tokens_taken += 1 return self.tokens.pop(0) - def __iter__(self): - # Iterator protocol. - while self.need_more_tokens(): - self.fetch_more_tokens() - while self.tokens: - self.tokens_taken += 1 - yield self.tokens.pop(0) - while self.need_more_tokens(): - self.fetch_more_tokens() - # Private methods. def need_more_tokens(self): @@ -214,11 +204,11 @@ class Scanner: return self.fetch_flow_mapping_end() # Is it the flow entry indicator? - if ch in u',': + if ch == u',': return self.fetch_flow_entry() # Is it the block entry indicator? - if ch in u'-' and self.check_block_entry(): + if ch == u'-' and self.check_block_entry(): return self.fetch_block_entry() # Is it the key indicator? @@ -325,11 +315,11 @@ class Scanner: if self.flow_level in self.possible_simple_keys: key = self.possible_simple_keys[self.flow_level] - # I don't think it's possible, but I could be wrong. - assert not key.required - #if key.required: - # raise ScannerError("while scanning a simple key", key.mark, - # "could not found expected ':'", self.get_mark()) + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not found expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] # Indentation functions. @@ -384,7 +374,8 @@ class Scanner: # Set the current intendation to -1. self.unwind_indent(-1) - # Reset everything (not really needed). + # Reset simple keys. + self.remove_possible_simple_key() self.allow_simple_key = False self.possible_simple_keys = {} @@ -588,6 +579,14 @@ class Scanner: "mapping values are not allowed here", self.get_mark()) + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + # Simple keys are allowed after ':' in the block context. self.allow_simple_key = not self.flow_level @@ -809,7 +808,7 @@ class Scanner: # See the specification for details. length = 0 ch = self.peek(length) - while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-_': length += 1 ch = self.peek(length) @@ -848,7 +847,7 @@ class Scanner: def scan_yaml_directive_number(self, start_mark): # See the specification for details. 
ch = self.peek() - if not (u'0' <= ch <= '9'): + if not (u'0' <= ch <= u'9'): raise ScannerError("while scanning a directive", start_mark, "expected a digit, but found %r" % ch.encode('utf-8'), self.get_mark()) @@ -914,14 +913,14 @@ class Scanner: # Therefore we restrict aliases to numbers and ASCII letters. start_mark = self.get_mark() indicator = self.peek() - if indicator == '*': + if indicator == u'*': name = 'alias' else: name = 'anchor' self.forward() length = 0 ch = self.peek(length) - while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-_': length += 1 ch = self.peek(length) @@ -1297,13 +1296,13 @@ class Scanner: ch = self.peek(length) if ch in u'\0 \t\r\n\x85\u2028\u2029' \ or (not self.flow_level and ch == u':' and - self.peek(length+1) in u'\0 \t\r\n\x28\u2028\u2029') \ + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ or (self.flow_level and ch in u',:?[]{}'): break length += 1 # It's not clear what we should do with ':' in the flow context. if (self.flow_level and ch == u':' - and self.peek(length+1) not in u'\0 \t\r\n\x28\u2028\u2029,[]{}'): + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): self.forward(length) raise ScannerError("while scanning a plain scalar", start_mark, "found unexpected ':'", self.get_mark(), @@ -1370,7 +1369,7 @@ class Scanner: length = 1 ch = self.peek(length) if ch != u' ': - while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-_': length += 1 ch = self.peek(length) @@ -1390,7 +1389,7 @@ class Scanner: chunks = [] length = 0 ch = self.peek(length) - while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-;/?:@&=+$,_.!~*\'()[]%': if ch == u'%': chunks.append(self.prefix(length)) diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/serializer.py b/scripts/external_libs/yaml-3.11/yaml/serializer.py index 937be9a9..0bf1e96d 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/serializer.py +++ b/scripts/external_libs/yaml-3.11/yaml/serializer.py @@ -8,7 +8,7 @@ from nodes import * class SerializerError(YAMLError): pass -class Serializer: +class Serializer(object): ANCHOR_TEMPLATE = u'id%03d' @@ -55,7 +55,7 @@ class Serializer: self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) self.serialized_nodes = {} self.anchors = {} - self.last_alias_id = 0 + self.last_anchor_id = 0 def anchor_node(self, node): if node in self.anchors: @@ -67,14 +67,9 @@ class Serializer: for item in node.value: self.anchor_node(item) elif isinstance(node, MappingNode): - if hasattr(node.value, 'keys'): - for key in node.value.keys(): - self.anchor_node(key) - self.anchor_node(node.value[key]) - else: - for key, value in node.value: - self.anchor_node(key) - self.anchor_node(value) + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) def generate_anchor(self, node): self.last_anchor_id += 1 @@ -108,14 +103,9 @@ class Serializer: == self.resolve(MappingNode, node.value, True)) self.emit(MappingStartEvent(alias, node.tag, implicit, flow_style=node.flow_style)) - if hasattr(node.value, 'keys'): - for key in node.value.keys(): - self.serialize_node(key, node, None) - self.serialize_node(node.value[key], node, key) - else: - for key, value in node.value: - self.serialize_node(key, node, None) - 
self.serialize_node(value, node, key) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) self.emit(MappingEndEvent()) self.ascend_resolver() diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/tokens.py b/scripts/external_libs/yaml-3.11/yaml/tokens.py index 4fe4522e..4d0b48a3 100644..100755 --- a/scripts/external_libs/PyYAML-3.01/lib/yaml/tokens.py +++ b/scripts/external_libs/yaml-3.11/yaml/tokens.py @@ -1,5 +1,5 @@ -class Token: +class Token(object): def __init__(self, start_mark, end_mark): self.start_mark = start_mark self.end_mark = end_mark diff --git a/scripts/stl/ipv4_udp_9000.pcap b/scripts/stl/ipv4_udp_9000.pcap Binary files differnew file mode 100644 index 00000000..86385997 --- /dev/null +++ b/scripts/stl/ipv4_udp_9000.pcap diff --git a/scripts/stl/ipv4_udp_9k.pcap b/scripts/stl/ipv4_udp_9k.pcap Binary files differnew file mode 100644 index 00000000..c5466f78 --- /dev/null +++ b/scripts/stl/ipv4_udp_9k.pcap diff --git a/scripts/stl/syn_attack_sample.yaml b/scripts/stl/syn_attack_sample.yaml index 298638f1..c5734e43 100644 --- a/scripts/stl/syn_attack_sample.yaml +++ b/scripts/stl/syn_attack_sample.yaml @@ -69,7 +69,7 @@ "name" : "ip_dst", # varible to write "add_value" : 0x40000000, # 16.0.0.0 is src base "is_big_endian" : true, # write as big edian - "pkt_offset" : 32, # write tuple_gen.port into udp.src_port + "pkt_offset" : 30, # write tuple_gen.port into udp.src_port }, { diff --git a/scripts/stl/syn_packet.pcap b/scripts/stl/syn_packet.pcap Binary files differindex 6740e911..93325547 100644 --- a/scripts/stl/syn_packet.pcap +++ b/scripts/stl/syn_packet.pcap diff --git a/scripts/stl/udp_rand_size.yaml b/scripts/stl/udp_rand_size.yaml new file mode 100644 index 00000000..2bbad4a0 --- /dev/null +++ b/scripts/stl/udp_rand_size.yaml @@ -0,0 +1,42 @@ +### UDP 1500 random pkt size ### +##################################### +- name: "random" + stream: + self_start: True + packet: + binary: stl/udp_1518B_no_crc.pcap + mode: + type: continuous + pps: 100 + rx_stats: [] + + # random packet size 60-1500 update ip.len/checksum/udp.len + vm: + instructions: + - type : "flow_var" # define random varible 60-1500 + name : "var_rand_pkt_size" + op : "random" + init_value : 0 + min_value : 60 + max_value : 1500 # range must be smaller than pkt size + size : 2 + + - type : "trim_pkt_size" # trim packet size + name : "var_rand_pkt_size" + + - type : "write_flow_var" # update ip.len + name : "var_rand_pkt_size" + add_value : -14 + is_big_endian: true + pkt_offset : 16 + + - type : "fix_checksum_ipv4" # update ip.checksum + pkt_offset : 14 + + - type : "write_flow_var" # update udp.len + name : "var_rand_pkt_size" + add_value : -34 + is_big_endian : true + pkt_offset : 38 + + diff --git a/scripts/stl/udp_rand_size_9k.yaml b/scripts/stl/udp_rand_size_9k.yaml new file mode 100644 index 00000000..ecdd7ae0 --- /dev/null +++ b/scripts/stl/udp_rand_size_9k.yaml @@ -0,0 +1,42 @@ +### UDP 9K+14(MAC) random pkt size ### +##################################### +- name: "random" + stream: + self_start: True + packet: + binary: stl/ipv4_udp_9k.pcap # should not include CRC 9K ip+14 = 9230 +FCS(by NIC)=9234 + mode: + type: continuous + pps: 100 + rx_stats: [] + + # random packet size 60-max packet size update ip.len/checksum/udp.len + vm: + instructions: + - type : "flow_var" # define random varible 60-9230 + name : "var_rand_pkt_size" + op : "random" + init_value : 0 + min_value : 60 + max_value : 9230 # range must be smaller than pkt size + size 
: 2 + + - type : "trim_pkt_size" # trim packet size + name : "var_rand_pkt_size" + + - type : "write_flow_var" # update ip.len + name : "var_rand_pkt_size" + add_value : -14 + is_big_endian: true + pkt_offset : 16 + + - type : "fix_checksum_ipv4" # update ip.checksum + pkt_offset : 14 + + - type : "write_flow_var" # update udp.len + name : "var_rand_pkt_size" + add_value : -34 + is_big_endian : true + pkt_offset : 38 + + diff --git a/src/bp_sim.cpp b/src/bp_sim.cpp index 6342b7f3..31ce0440 100755 --- a/src/bp_sim.cpp +++ b/src/bp_sim.cpp @@ -66,7 +66,7 @@ void CGlobalMemory::Dump(FILE *fd){ int i=0; for (i=0; i<MBUF_SIZE; i++) { - if ( (i>MBUF_2048) && (i<MBUF_DP_FLOWS)){ + if ( (i>MBUF_9k) && (i<MBUF_DP_FLOWS)){ continue; } if ( i<TRAFFIC_MBUF_64 ){ @@ -95,6 +95,8 @@ void CGlobalMemory::set(const CPlatformMemoryYamlInfo &info,float mul){ m_mbuf[MBUF_512] += info.m_mbuf[TRAFFIC_MBUF_512]; m_mbuf[MBUF_1024] += info.m_mbuf[TRAFFIC_MBUF_1024]; m_mbuf[MBUF_2048] += info.m_mbuf[TRAFFIC_MBUF_2048]; + m_mbuf[MBUF_4096] += info.m_mbuf[TRAFFIC_MBUF_4096]; + m_mbuf[MBUF_9k] += info.m_mbuf[MBUF_9k]; } @@ -494,7 +496,10 @@ void CRteMemPool::dump(FILE *fd){ DUMP_MBUF("mbuf_256",m_mbuf_pool_256); DUMP_MBUF("mbuf_512",m_mbuf_pool_512); DUMP_MBUF("mbuf_1024",m_mbuf_pool_1024); - DUMP_MBUF("mbuf_2048",m_big_mbuf_pool); + DUMP_MBUF("mbuf_2048",m_mbuf_pool_2048); + DUMP_MBUF("mbuf_4096",m_mbuf_pool_4096); + DUMP_MBUF("mbuf_9k",m_mbuf_pool_9k); + } //////////////////////////////////////// @@ -506,12 +511,14 @@ void CGlobalInfo::free_pools(){ for (i=0; i<(int)MAX_SOCKETS_SUPPORTED; i++) { if (lpSocket->is_sockets_enable((socket_id_t)i)) { lpmem= &m_mem_pool[i]; - utl_rte_mempool_delete(lpmem->m_big_mbuf_pool); utl_rte_mempool_delete(lpmem->m_small_mbuf_pool); utl_rte_mempool_delete(lpmem->m_mbuf_pool_128); utl_rte_mempool_delete(lpmem->m_mbuf_pool_256); utl_rte_mempool_delete(lpmem->m_mbuf_pool_512); utl_rte_mempool_delete(lpmem->m_mbuf_pool_1024); + utl_rte_mempool_delete(lpmem->m_mbuf_pool_2048); + utl_rte_mempool_delete(lpmem->m_mbuf_pool_4096); + utl_rte_mempool_delete(lpmem->m_mbuf_pool_9k); } utl_rte_mempool_delete(m_mem_pool[0].m_mbuf_global_nodes); } @@ -531,12 +538,6 @@ void CGlobalInfo::init_pools(uint32_t rx_buffers){ lpmem= &m_mem_pool[i]; lpmem->m_pool_id=i; - lpmem->m_big_mbuf_pool = utl_rte_mempool_create("big-pkt-const", - (lp->get_2k_num_blocks()+rx_buffers), - CONST_MBUF_SIZE, - 32, - (i<<5)+ 1,i); - assert(lpmem->m_big_mbuf_pool); /* this include the packet from 0-64 this is for small packets */ lpmem->m_small_mbuf_pool =utl_rte_mempool_create("small-pkt-const", @@ -577,6 +578,26 @@ void CGlobalInfo::init_pools(uint32_t rx_buffers){ assert(lpmem->m_mbuf_pool_1024); + lpmem->m_mbuf_pool_2048=utl_rte_mempool_create("_2048-pkt-const", + lp->m_mbuf[MBUF_2048], + CONST_2048_MBUF_SIZE, + 32,(i<<5)+ 5,i); + + assert(lpmem->m_mbuf_pool_2048); + + lpmem->m_mbuf_pool_4096=utl_rte_mempool_create("_4096-pkt-const", + lp->m_mbuf[MBUF_4096], + CONST_4096_MBUF_SIZE, + 32,(i<<5)+ 5,i); + + assert(lpmem->m_mbuf_pool_4096); + + lpmem->m_mbuf_pool_9k=utl_rte_mempool_create("_9k-pkt-const", + lp->m_mbuf[MBUF_9k]+rx_buffers, + CONST_9k_MBUF_SIZE, + 32,(i<<5)+ 5,i); + + assert(lpmem->m_mbuf_pool_9k); } } @@ -1190,9 +1211,9 @@ void CPacketIndication::ProcessIpPacket(CPacketParser *parser, return; } - if ( m_packet->pkt_len > MAX_BUF_SIZE -FIRST_PKT_SIZE ){ + if ( m_packet->pkt_len > MAX_PKT_SIZE ){ m_cnt->m_tcp_udp_pkt_length_error++; - printf("ERROR packet is too big, not supported jumbo packets that larger than 
%d \n",MAX_BUF_SIZE); + printf("ERROR packet is too big, not supported jumbo packets that larger than %d \n",MAX_PKT_SIZE); return; } @@ -1200,9 +1221,6 @@ void CPacketIndication::ProcessIpPacket(CPacketParser *parser, m_packet->pkt_len = l3.m_ipv4->getTotalLength() + getIpOffset(); if (m_packet->pkt_len < 60) { m_packet->pkt_len = 60; } - - - m_cnt->m_valid_udp_tcp++; m_payload_len = l3.m_ipv4->getTotalLength() - (payload_offset_from_ip); m_payload = (uint8_t *)(packetBase +offset); @@ -3146,7 +3164,8 @@ int CNodeGenerator::open_file(std::string file_name, /* ser preview mode */ m_v_if->set_review_mode(preview_mode); m_v_if->open_file(file_name); - m_cnt = 0; + m_cnt = 0; + m_limit = 0; return (0); } @@ -3161,10 +3180,12 @@ int CNodeGenerator::close_file(CFlowGenListPerThread * thread){ int CNodeGenerator::update_stl_stats(CGenNodeStateless *node_sl){ m_cnt++; + #ifdef _DEBUG if ( m_preview_mode.getVMode() >2 ){ fprintf(stdout," %4lu ,", (ulong)m_cnt); node_sl->Dump(stdout); } + #endif return (0); } @@ -3179,6 +3200,10 @@ int CNodeGenerator::update_stats(CGenNode * node){ return (0); } +bool CNodeGenerator::has_limit_reached() { + /* do we have a limit and has it passed ? */ + return ( (m_limit > 0) && (m_cnt >= m_limit) ); +} bool CFlowGenListPerThread::Create(uint32_t thread_id, uint32_t core_id, @@ -3514,9 +3539,6 @@ int CNodeGenerator::flush_file(dsec_t max_time, } } - //#ifndef RTE_DPDK - //thread->check_msgs(); - //#endif uint8_t type=node->m_type; @@ -3524,16 +3546,21 @@ int CNodeGenerator::flush_file(dsec_t max_time, m_p_queue.pop(); CGenNodeStateless *node_sl = (CGenNodeStateless *)node; - #ifdef _DEBUG - update_stl_stats(node_sl); - #endif - /* if the stream has been deactivated - end */ if ( unlikely( node_sl->is_mask_for_free() ) ) { thread->free_node(node); } else { node_sl->handle(thread); + + #ifdef TREX_SIM + update_stl_stats(node_sl); + if (has_limit_reached()) { + thread->m_stateless_dp_info.stop_traffic(node_sl->get_port_id(), false, 0); + } + #endif + } + }else{ if ( likely( type == CGenNode::FLOW_PKT ) ) { @@ -3974,9 +4001,11 @@ void CFlowGenListPerThread::check_msgs(void) { void CFlowGenListPerThread::start_stateless_simulation_file(std::string erf_file_name, - CPreviewMode &preview){ + CPreviewMode &preview, + uint64_t limit){ m_preview_mode = preview; m_node_gen.open_file(erf_file_name,&m_preview_mode); + m_node_gen.set_packet_limit(limit); } void CFlowGenListPerThread::stop_stateless_simulation_file(){ @@ -3987,7 +4016,6 @@ void CFlowGenListPerThread::start_stateless_daemon_simulation(){ m_cur_time_sec = 0; m_stateless_dp_info.run_once(); - } @@ -4677,13 +4705,11 @@ int CErfIFStl::send_node(CGenNode * _no_to_use){ rte_pktmbuf_free(m); } - - BP_ASSERT(m_writer); - bool res=m_writer->write_packet(m_raw); - - - BP_ASSERT(res); + + int rc = write_pkt(m_raw); + BP_ASSERT(rc == 0); } + return (0); } @@ -4721,13 +4747,9 @@ int CErfIF::send_node(CGenNode * node){ //utl_DumpBuffer(stdout,p, 12,0); - BP_ASSERT(m_writer); - - bool res=m_writer->write_packet(m_raw); - - //utl_DumpBuffer(stdout,m_raw->raw,m_raw->pkt_len,0); + int rc = write_pkt(m_raw); + BP_ASSERT(rc == 0); - BP_ASSERT(res); rte_pktmbuf_free(m); } return (0); diff --git a/src/bp_sim.h b/src/bp_sim.h index da8e8780..b9a42027 100755 --- a/src/bp_sim.h +++ b/src/bp_sim.h @@ -411,10 +411,6 @@ public: #define CONST_NB_MBUF 16380 - -#define MAX_BUF_SIZE (2048) -#define CONST_MBUF_SIZE (MAX_BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) - /* this is the first small part of the packet that we manipulate */ 
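The pool-size defines and the size-tiered `pktmbuf_alloc` below replace the single 2K "big" pool, so jumbo packets up to the new 9K limit get a right-sized mbuf. A rough sketch of the selection logic, with tier boundaries taken from the header below (illustration only; the real code picks among the named rte_mempool objects):

    # pick the smallest pool whose buffer still fits the packet
    POOL_TIERS = [128, 256, 512, 1024, 2048, 4096, 9 * 1024 + 64]   # last one is MAX_PKT_ALIGN_BUF_9K

    def pick_pool(pkt_size):
        for tier in POOL_TIERS:
            if pkt_size < tier:
                return tier
        raise AssertionError("larger than the 9K jumbo pool")

    print(pick_pool(1500))   # 2048
    print(pick_pool(9000))   # 9280 -> the new 9K pool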
#define FIRST_PKT_SIZE 64 #define CONST_SMALL_MBUF_SIZE (FIRST_PKT_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) @@ -424,13 +420,19 @@ public: #define _256_MBUF_SIZE 256 #define _512_MBUF_SIZE 512 #define _1024_MBUF_SIZE 1024 +#define _2048_MBUF_SIZE 2048 +#define _4096_MBUF_SIZE 4096 +#define MAX_PKT_ALIGN_BUF_9K (9*1024+64) +#define MBUF_PKT_PREFIX ( sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM ) - -#define CONST_128_MBUF_SIZE (128 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) -#define CONST_256_MBUF_SIZE (256 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) -#define CONST_512_MBUF_SIZE (512 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) -#define CONST_1024_MBUF_SIZE (1024 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define CONST_128_MBUF_SIZE (128 + MBUF_PKT_PREFIX ) +#define CONST_256_MBUF_SIZE (256 + MBUF_PKT_PREFIX ) +#define CONST_512_MBUF_SIZE (512 + MBUF_PKT_PREFIX) +#define CONST_1024_MBUF_SIZE (1024 + MBUF_PKT_PREFIX) +#define CONST_2048_MBUF_SIZE (2048 + MBUF_PKT_PREFIX) +#define CONST_4096_MBUF_SIZE (4096 + MBUF_PKT_PREFIX) +#define CONST_9k_MBUF_SIZE (MAX_PKT_ALIGN_BUF_9K + MBUF_PKT_PREFIX) class CPreviewMode { @@ -774,6 +776,7 @@ public: uint16_t m_run_flags; uint8_t m_mac_splitter; uint8_t m_l_pkt_mode; + uint16_t m_debug_pkt_proto; trex_run_mode_e m_run_mode; @@ -1118,9 +1121,13 @@ public: m = _rte_pktmbuf_alloc(m_mbuf_pool_512); }else if (size < _1024_MBUF_SIZE) { m = _rte_pktmbuf_alloc(m_mbuf_pool_1024); + }else if (size < _2048_MBUF_SIZE) { + m = _rte_pktmbuf_alloc(m_mbuf_pool_2048); + }else if (size < _4096_MBUF_SIZE) { + m = _rte_pktmbuf_alloc(m_mbuf_pool_4096); }else{ - assert(size<MAX_BUF_SIZE); - m = _rte_pktmbuf_alloc(m_big_mbuf_pool); + assert(size<MAX_PKT_ALIGN_BUF_9K); + m = _rte_pktmbuf_alloc(m_mbuf_pool_9k); } return (m); } @@ -1129,21 +1136,22 @@ public: return ( _rte_pktmbuf_alloc(m_small_mbuf_pool) ); } - inline rte_mbuf_t * pktmbuf_alloc_big(){ - return ( _rte_pktmbuf_alloc(m_big_mbuf_pool) ); - } void dump(FILE *fd); void dump_in_case_of_error(FILE *fd); public: - rte_mempool_t * m_big_mbuf_pool; /* pool for const packets */ rte_mempool_t * m_small_mbuf_pool; /* pool for start packets */ + rte_mempool_t * m_mbuf_pool_128; rte_mempool_t * m_mbuf_pool_256; rte_mempool_t * m_mbuf_pool_512; rte_mempool_t * m_mbuf_pool_1024; + rte_mempool_t * m_mbuf_pool_2048; + rte_mempool_t * m_mbuf_pool_4096; + rte_mempool_t * m_mbuf_pool_9k; + rte_mempool_t * m_mbuf_global_nodes; uint32_t m_pool_id; }; @@ -1162,11 +1170,7 @@ public: return ( m_mem_pool[socket].pktmbuf_alloc_small() ); } - static inline rte_mbuf_t * pktmbuf_alloc_big(socket_id_t socket){ - return ( m_mem_pool[socket].pktmbuf_alloc_big() ); - } - - + /** * try to allocate small buffers too @@ -1832,7 +1836,7 @@ public: protected: - void fill_raw_packet(rte_mbuf_t * m,CGenNode * node,pkt_dir_t dir); + virtual void fill_raw_packet(rte_mbuf_t * m,CGenNode * node,pkt_dir_t dir); CFileWriterBase * m_writer; CCapPktRaw * m_raw; @@ -1846,6 +1850,41 @@ public: virtual int send_node(CGenNode * node); }; +/** + * same as regular STL but no I/O (dry run) + * + * @author imarom (07-Jan-16) + */ +class CErfIFStlNull : public CErfIFStl { +public: + + virtual int open_file(std::string file_name) { + return (0); + } + + virtual int write_pkt(CCapPktRaw *pkt_raw) { + return (0); + } + + virtual int close_file(void) { + return (0); + } + + virtual void fill_raw_packet(rte_mbuf_t * m,CGenNode * node,pkt_dir_t dir) { + + } + + virtual int update_mac_addr_from_global_cfg(pkt_dir_t dir, uint8_t * p){ + 
return (0); + } + + + virtual int flush_tx_queue(void){ + return (0); + + } + +}; static inline int fill_pkt(CCapPktRaw * raw,rte_mbuf_t * m){ @@ -1929,6 +1968,12 @@ public: add_node(node); } + /** + * set packet limit for the generator + */ + void set_packet_limit(uint64_t limit) { + m_limit = limit; + } void DumpHist(FILE *fd){ fprintf(fd,"\n"); @@ -1947,7 +1992,7 @@ private: } int update_stats(CGenNode * node); int update_stl_stats(CGenNodeStateless *node_sl); - + bool has_limit_reached(); FORCE_NO_INLINE bool handle_slow_messages(uint8_t type, CGenNode * node, @@ -1963,6 +2008,7 @@ public: CFlowGenListPerThread * m_parent; CPreviewMode m_preview_mode; uint64_t m_cnt; + uint64_t m_limit; CTimeHistogram m_realtime_his; }; @@ -3095,7 +3141,11 @@ public: SIZE_512 = 512, SIZE_1024 = 1024, SIZE_2048 = 2048, - MASK_SIZE =6 + SIZE_4096 = 4096, + SIZE_8192 = 8192, + SIZE_16384 = 16384, + + MASK_SIZE =9 }; void clear(){ @@ -3477,7 +3527,7 @@ public: void start_stateless_daemon_simulation(); /* open a file for simulation */ - void start_stateless_simulation_file(std::string erf_file_name,CPreviewMode &preview); + void start_stateless_simulation_file(std::string erf_file_name,CPreviewMode &preview, uint64_t limit = 0); /* close a file for simulation */ void stop_stateless_simulation_file(); diff --git a/src/common/captureFile.h b/src/common/captureFile.h index 027f1fcf..16a6120b 100755 --- a/src/common/captureFile.h +++ b/src/common/captureFile.h @@ -35,7 +35,7 @@ typedef enum capture_type { LAST_TYPE } capture_type_e; -#define MAX_PKT_SIZE (2048) +#define MAX_PKT_SIZE (9*1024+22) /* 9k IP +14+4 FCS +some spare */ #define READER_MAX_PACKET_SIZE MAX_PKT_SIZE diff --git a/src/debug.cpp b/src/debug.cpp new file mode 100644 index 00000000..e5e207e1 --- /dev/null +++ b/src/debug.cpp @@ -0,0 +1,330 @@ +/* + Copyright (c) 2016-2016 Cisco Systems, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +// DPDK c++ issue +#define UINT8_MAX 255 +#define UINT16_MAX 0xFFFF +// DPDK c++ issue + +#include <stdio.h> +#include <unistd.h> +#include <string.h> +#include <assert.h> +#include <rte_mbuf.h> +#include <rte_pci.h> +#include <rte_ethdev.h> +#include <common/basic_utils.h> +#include "main_dpdk.h" +#include "debug.h" + +const uint8_t udp_pkt[] = { + 0x00,0x00,0x00,0x01,0x00,0x00, + 0x00,0x00,0x00,0x01,0x00,0x00, + 0x08,0x00, + + 0x45,0x00,0x00,0x81, + 0xaf,0x7e,0x00,0x00, + 0xfe,0x06,0xd9,0x23, + 0x01,0x01,0x01,0x01, + 0x3d,0xad,0x72,0x1b, + + 0x11,0x11, + 0x11,0x11, + 0x00,0x6d, + 0x00,0x00, + + 0x64,0x31,0x3a,0x61, + 0x64,0x32,0x3a,0x69,0x64, + 0x32,0x30,0x3a,0xd0,0x0e, + 0xa1,0x4b,0x7b,0xbd,0xbd, + 0x16,0xc6,0xdb,0xc4,0xbb,0x43, + 0xf9,0x4b,0x51,0x68,0x33,0x72, + 0x20,0x39,0x3a,0x69,0x6e,0x66,0x6f, + 0x5f,0x68,0x61,0x73,0x68,0x32,0x30,0x3a,0xee,0xc6,0xa3, + 0xd3,0x13,0xa8,0x43,0x06,0x03,0xd8,0x9e,0x3f,0x67,0x6f, + 0xe7,0x0a,0xfd,0x18,0x13,0x8d,0x65,0x31,0x3a,0x71,0x39, + 0x3a,0x67,0x65,0x74,0x5f,0x70,0x65,0x65,0x72,0x73,0x31, + 0x3a,0x74,0x38,0x3a,0x3d,0xeb,0x0c,0xbf,0x0d,0x6a,0x0d, + 0xa5,0x31,0x3a,0x79,0x31,0x3a,0x71,0x65,0x87,0xa6,0x7d, + 0xe7 +}; + +CTrexDebug::CTrexDebug(CPhyEthIF m_ports_arg[12], int max_ports) { + m_test = NULL; + m_ports = m_ports_arg; + m_max_ports = max_ports; +} + +int CTrexDebug::rcv_send(int port, int queue_id) { + CPhyEthIF * lp = &m_ports[port]; + rte_mbuf_t * rx_pkts[32]; + printf(" test rx port:%d queue:%d \n",port,queue_id); + printf(" --------------\n"); + uint16_t cnt = lp->rx_burst(queue_id,rx_pkts,32); + int i; + + for (i=0; i < (int)cnt; i++) { + rte_mbuf_t * m = rx_pkts[i]; + int pkt_size = rte_pktmbuf_pkt_len(m); + char *p = rte_pktmbuf_mtod(m, char*); + utl_DumpBuffer(stdout, p, pkt_size, 0); + rte_pktmbuf_free(m); + } + return 0; +} + +int CTrexDebug::rcv_send_all(int queue_id) { + int i; + for (i=0; i<m_max_ports; i++) { + rcv_send(i,queue_id); + } + return 0; +} + +// For playing around, and testing packet sending in debug mode +rte_mbuf_t *CTrexDebug::create_test_pkt(int pkt_type) { + uint8_t proto; + int pkt_size = 0; + // uint8_t dst_mac[6] = {0xd4, 0x8c, 0xb5, 0xc9, 0x54, 0x2b}; + uint8_t dst_mac[6] = {0x79, 0xa2, 0xe6, 0xd5, 0x39, 0x25}; + uint8_t src_mac[6] = {0xa0, 0x36, 0x9f, 0x38, 0xa4, 0x02}; + uint16_t l2_proto = 0x0008; + uint8_t ip_header[] = { + 0x45,0x02,0x00,0x30, + 0x00,0x00,0x40,0x00, + 0xff,0x01,0xbd,0x04, + 0x10,0x0,0x0,0x1, //SIP + 0x30,0x0,0x0,0x1, //DIP + // 0x82, 0x0b, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 // IP option. change 45 to 48 (header len) if using it. 
+ }; + uint8_t udp_header[] = {0x11, 0x11, 0x11,0x11, 0x00, 0x6d, 0x00, 0x00}; + uint8_t udp_data[] = {0x64,0x31,0x3a,0x61, + 0x64,0x32,0x3a,0x69,0x64, + 0x32,0x30,0x3a,0xd0,0x0e, + 0xa1,0x4b,0x7b,0xbd,0xbd, + 0x16,0xc6,0xdb,0xc4,0xbb,0x43, + 0xf9,0x4b,0x51,0x68,0x33,0x72, + 0x20,0x39,0x3a,0x69,0x6e,0x66,0x6f, + 0x5f,0x68,0x61,0x73,0x68,0x32,0x30,0x3a,0xee,0xc6,0xa3, + 0xd3,0x13,0xa8,0x43,0x06,0x03,0xd8,0x9e,0x3f,0x67,0x6f, + 0xe7,0x0a,0xfd,0x18,0x13,0x8d,0x65,0x31,0x3a,0x71,0x39, + 0x3a,0x67,0x65,0x74,0x5f,0x70,0x65,0x65,0x72,0x73,0x31, + 0x3a,0x74,0x38,0x3a,0x3d,0xeb,0x0c,0xbf,0x0d,0x6a,0x0d, + 0xa5,0x31,0x3a,0x79,0x31,0x3a,0x71,0x65,0x87,0xa6,0x7d, + 0xe7 + }; + uint8_t tcp_header[] = {0xab, 0xcd, 0x00, 0x80, // src, dst ports + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // seq num, ack num + 0x50, 0x00, 0xff, 0xff, // Header size, flags, window size + 0x00, 0x00, 0x00, 0x00, // checksum ,urgent pointer + }; + + uint8_t tcp_data[] = {0x8, 0xa, 0x1, 0x2, 0x3, 0x4, 0x3, 0x4, 0x6, 0x5}; + + uint8_t icmp_header[] = { + 0x08, 0x00, + 0xb8, 0x21, //checksum + 0xaa, 0xbb, // id + 0x00, 0x01, // Sequence number + }; + uint8_t icmp_data[] = { + 0xd6, 0x6e, 0x64, 0x34, // magic + 0x6a, 0xad, 0x0f, 0x00, //64 bit counter + 0x00, 0x56, 0x34, 0x12, + 0x78, 0x56, 0x34, 0x12, 0x00, 0x00 // seq + }; + + switch (pkt_type) { + case 1: + proto = IPPROTO_ICMP; + pkt_size = 14 + sizeof(ip_header) + sizeof(icmp_header) + sizeof (icmp_data); + break; + case 2: + proto = IPPROTO_UDP; + pkt_size = 14 + sizeof(ip_header) + sizeof(udp_header) + sizeof (udp_data); + break; + case 3: + proto = IPPROTO_TCP; + pkt_size = 14 + sizeof(ip_header) + sizeof(tcp_header) + sizeof (tcp_data); + break; + default: + return NULL; + } + + rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc(0, pkt_size); + if ( unlikely(m == 0) ) { + printf("ERROR no packets \n"); + return (NULL); + } + char *p = rte_pktmbuf_append(m, pkt_size); + assert(p); + + /* set pkt data */ + memcpy(p, dst_mac, sizeof(dst_mac)); p += sizeof(dst_mac); + memcpy(p, src_mac, sizeof(src_mac)); p += sizeof(src_mac); + memcpy(p, &l2_proto, sizeof(l2_proto)); p += sizeof(l2_proto); + struct IPHeader *ip = (IPHeader *)p; + memcpy(p, ip_header, sizeof(ip_header)); p += sizeof(ip_header); + ip->setProtocol(proto); + ip->setTotalLength(pkt_size - 14); + + struct TCPHeader *tcp = (TCPHeader *)p; + struct ICMPHeader *icmp= (ICMPHeader *)p; + switch (pkt_type) { + case 1: + memcpy(p, icmp_header, sizeof(icmp_header)); p += sizeof(icmp_header); + memcpy(p, icmp_data, sizeof(icmp_data)); p += sizeof(icmp_data); + icmp->updateCheckSum(sizeof(icmp_header) + sizeof(icmp_data)); + break; + case 2: + memcpy(p, udp_header, sizeof(udp_header)); p += sizeof(udp_header); + memcpy(p, udp_data, sizeof(udp_data)); p += sizeof(udp_data); + break; + case 3: + memcpy(p, tcp_header, sizeof(tcp_header)); p += sizeof(tcp_header); + memcpy(p, tcp_data, sizeof(tcp_data)); p += sizeof(tcp_data); + tcp->setSynFlag(true); + printf("Sending TCP header:"); + tcp->dump(stdout); + break; + default: + return NULL; + } + + ip->updateCheckSum(); + return m; +} + +rte_mbuf_t *CTrexDebug::create_pkt(uint8_t *pkt, int pkt_size) { + rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc(0, pkt_size); + if ( unlikely(m == 0) ) { + printf("ERROR no packets \n"); + return 0; + } + + char *p = rte_pktmbuf_append(m, pkt_size); + assert(p); + /* set pkt data */ + memcpy(p, pkt, pkt_size); + return m; +} + +rte_mbuf_t *CTrexDebug::create_pkt_indirect(rte_mbuf_t *m, uint32_t new_pkt_size){ + rte_mbuf_t *d = 
CGlobalInfo::pktmbuf_alloc(0, 60); + assert(d); + + rte_pktmbuf_attach(d, m); + d->data_len = new_pkt_size; + d->pkt_len = new_pkt_size; + return d; +} + +rte_mbuf_t *CTrexDebug::create_udp_9k_pkt() { + rte_mbuf_t *m; + uint16_t pkt_size = 9*1024+21; + uint8_t *p = (uint8_t *)malloc(9*1024+22); + assert(p); + memset(p, 0x55, pkt_size); + memcpy(p, (uint8_t*)udp_pkt, sizeof(udp_pkt)); + m = create_pkt(p, pkt_size); + free(p); + return m; +} + +int CTrexDebug::test_send_pkts(rte_mbuf_t *m, uint16_t queue_id, int num_pkts, int port) { + CPhyEthIF * lp = &m_ports[port]; + rte_mbuf_t * tx_pkts[32]; + if (num_pkts > 32) { + num_pkts = 32; + } + + int i; + for (i=0; i < num_pkts; i++) { + rte_mbuf_refcnt_update(m, 1); + tx_pkts[i] = m; + } + uint16_t res = lp->tx_burst(queue_id, tx_pkts, num_pkts); + if ((num_pkts - res) > 0) { + m_test_drop += (num_pkts - res); + } + return (0); +} + +int CTrexDebug::set_promisc_all(bool enable) { + int i; + for (i=0; i < m_max_ports; i++) { + CPhyEthIF *_if = &m_ports[i]; + _if->set_promiscuous(enable); + } + + return 0; +} + +int CTrexDebug::test_send(uint pkt_type) { + set_promisc_all(true); + rte_mbuf_t *m, *d; + if (pkt_type < 1 || pkt_type > 4) { + printf("Unsupported packet type %d\n", pkt_type); + printf("Supported packet types are: %d(ICMP), %d(UDP), %d(TCP) %d(9k UDP)\n", 1, 2, 3, 4); + exit(-1); + } + + if (pkt_type == 4) { + m = create_udp_9k_pkt(); + assert (m); + d = create_pkt_indirect(m, 9*1024+18); + } else { + d = create_test_pkt(pkt_type); + } + if (d == NULL) { + printf("Packet creation failed\n"); + exit(-1); + } + + printf("Sending packet:\n"); + utl_DumpBuffer(stdout, rte_pktmbuf_mtod(d, char *), 64, 0); + + test_send_pkts(d, 0, 2, 0); + test_send_pkts(d, 0, 1, 1); + + delay(1000); + + printf(" ---------\n"); + printf(" rx queue 0 \n"); + printf(" ---------\n"); + rcv_send_all(0); + printf("\n\n"); + + printf(" ---------\n"); + printf(" rx queue 1 \n"); + printf(" ---------\n"); + rcv_send_all(1); + printf(" ---------\n"); + + delay(1000); + + int j=0; + for (j=0; j<m_max_ports; j++) { + CPhyEthIF * lp=&m_ports[j]; + printf(" port : %d \n",j); + printf(" ----------\n"); + lp->update_counters(); + lp->get_stats().Dump(stdout); + lp->dump_stats_extended(stdout); + } + + return (0); +} diff --git a/src/debug.h b/src/debug.h new file mode 100644 index 00000000..fe37c186 --- /dev/null +++ b/src/debug.h @@ -0,0 +1,42 @@ +/* +Copyright (c) 2016-2016 Cisco Systems, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
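Note on the burst helper above: test_send_pkts() places the same mbuf into the tx array several times and calls rte_mbuf_refcnt_update(m, 1) once per queued copy, because the driver releases one reference for every frame it transmits. A minimal standard-C++ analogue of that ownership contract (illustrative only, no DPDK types):

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

// Stand-in for an rte_mbuf: one reference count, released when it reaches zero.
struct Buf {
    int refcnt = 1;
};

static int refcnt_update(Buf *b, int value) { return b->refcnt += value; }

static void buf_free(Buf *b) {             // what the driver does per transmitted frame
    if (--b->refcnt == 0)
        delete b;
}

int main() {
    Buf *m = new Buf();                     // analogue of create_test_pkt()
    const int num_pkts = 8;

    std::vector<Buf *> tx_ring;
    for (int i = 0; i < num_pkts; i++) {
        refcnt_update(m, 1);                // +1 per queued copy, as in test_send_pkts()
        tx_ring.push_back(m);
    }

    for (Buf *pkt : tx_ring)                // "transmit": one reference dropped per frame
        buf_free(pkt);

    assert(m->refcnt == 1);                 // caller still owns its original reference
    std::printf("refcnt after burst: %d\n", m->refcnt);
    buf_free(m);                            // release the original reference
    return 0;
}
```

Without the per-copy bump, the first transmitted frame would free the buffer and the remaining copies would dangle.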
+*/ + +#ifndef _TREX_DEBUG_H +#define _TREX_DEBUG_H +#include "mbuf.h" + +class CTrexDebug { + rte_mbuf_t *m_test; + uint64_t m_test_drop; + CPhyEthIF *m_ports; + uint32_t m_max_ports; + + int rcv_send(int port,int queue_id); + int rcv_send_all(int queue_id); + rte_mbuf_t *create_pkt(uint8_t *pkt,int pkt_size); + rte_mbuf_t *create_pkt_indirect(rte_mbuf_t *m, uint32_t new_pkt_size); + rte_mbuf_t *create_udp_pkt(); + rte_mbuf_t *create_udp_9k_pkt(); + int set_promisc_all(bool enable); + int test_send_pkts(rte_mbuf_t *, uint16_t queue_id, int pkt, int port); + rte_mbuf_t *create_test_pkt(int proto); + + public: + CTrexDebug(CPhyEthIF *m_ports_arg, int max_ports); + int test_send(uint pkt_type); +}; + +#endif diff --git a/src/gtest/trex_stateless_gtest.cpp b/src/gtest/trex_stateless_gtest.cpp index 73b7536a..70f397b1 100644 --- a/src/gtest/trex_stateless_gtest.cpp +++ b/src/gtest/trex_stateless_gtest.cpp @@ -22,6 +22,7 @@ limitations under the License. #include "bp_sim.h" #include <common/gtest.h> #include <common/basic_utils.h> +#include <trex_stateless.h> #include <trex_stateless_dp_core.h> #include <trex_stateless_messaging.h> #include <trex_streams_compiler.h> @@ -1027,6 +1028,203 @@ TEST_F(basic_vm, vm_syn_attack) { } +void run_vm_program( StreamVm & vm, + std::string input_pcap_file, + std::string out_file_name, + int num_pkts + ){ + + CPcapLoader pcap; + pcap.load_pcap_file(input_pcap_file,0); + + printf(" packet size : %lu \n",(ulong)pcap.m_raw.pkt_len); + vm.compile(pcap.m_raw.pkt_len); + + StreamVmDp * lpDpVm =vm.generate_dp_object(); + + uint32_t program_size=vm.get_dp_instruction_buffer()->get_program_size(); + + printf (" program size : %lu \n",(ulong)program_size); + + vm.Dump(stdout); + + std::string out_file_full ="exp/"+out_file_name +".pcap"; + std::string out_file_ex_full ="exp/"+out_file_name +"-ex.pcap"; + + CFileWriterBase * lpWriter=CCapWriterFactory::CreateWriter(LIBPCAP,(char *)out_file_full.c_str()); + assert(lpWriter); + + + StreamDPVmInstructionsRunner runner; + + uint32_t random_per_thread=0; + + int i; + for (i=0; i<num_pkts; i++) { + + runner.run(&random_per_thread, + lpDpVm->get_program_size(), + lpDpVm->get_program(), + lpDpVm->get_bss(), + (uint8_t*)pcap.m_raw.raw); + uint16_t new_pkt_size=runner.get_new_pkt_size(); + assert(new_pkt_size>0); + if (new_pkt_size ==0) { + assert(lpWriter->write_packet(&pcap.m_raw)); + }else{ + /* we can only reduce */ + if (new_pkt_size>pcap.m_raw.pkt_len) { + new_pkt_size=pcap.m_raw.pkt_len; + } + CCapPktRaw np(new_pkt_size); + np.time_sec = pcap.m_raw.time_sec; + np.time_nsec = pcap.m_raw.time_nsec; + np.pkt_cnt = pcap.m_raw.pkt_cnt; + memcpy(np.raw,pcap.m_raw.raw,new_pkt_size); + assert(lpWriter->write_packet(&np)); + } + } + + delete lpWriter; + + CErfCmp cmp; + delete lpDpVm; + + bool res1=cmp.compare(out_file_full.c_str() ,out_file_ex_full.c_str()); + EXPECT_EQ(1, res1?1:0); +} + + +TEST_F(basic_vm, vm_inc_size_64_128) { + + StreamVm vm; + + vm.add_instruction( new StreamVmInstructionFlowMan( "rand_pkt_size_var", + 2, // size var must be 16bit size + StreamVmInstructionFlowMan::FLOW_VAR_OP_INC, + 127, + 128, + 256)); + + vm.add_instruction( new StreamVmInstructionChangePktSize( "rand_pkt_size_var")); + + /* src ip */ + /*14+ 2 , remove the */ + + vm.add_instruction( new StreamVmInstructionWriteToPkt( "rand_pkt_size_var",16, -14,true) + ); + + vm.add_instruction( new StreamVmInstructionFixChecksumIpv4(14) ); + + /* update UDP length */ + vm.add_instruction( new StreamVmInstructionWriteToPkt( "rand_pkt_size_var",32+6, 
-(14+20),true) + ); + + run_vm_program(vm,"stl/udp_1518B_no_crc.pcap","stl_vm_inc_size_64_128",20); +} + +TEST_F(basic_vm, vm_random_size_64_128) { + + StreamVm vm; + srand(0x1234); + + vm.add_instruction( new StreamVmInstructionFlowMan( "rand_pkt_size_var", + 2, // size var must be 16bit size + StreamVmInstructionFlowMan::FLOW_VAR_OP_RANDOM, + 0, + 128, + 256)); + + vm.add_instruction( new StreamVmInstructionChangePktSize( "rand_pkt_size_var")); + + /* src ip */ + /*14+ 2 , remove the */ + + vm.add_instruction( new StreamVmInstructionWriteToPkt( "rand_pkt_size_var",16, -14,true) + ); + + vm.add_instruction( new StreamVmInstructionFixChecksumIpv4(14) ); + + /* update UDP length */ + vm.add_instruction( new StreamVmInstructionWriteToPkt( "rand_pkt_size_var",32+6, -(14+20),true) + ); + + + run_vm_program(vm,"stl/udp_1518B_no_crc.pcap","stl_vm_rand_size_64_128",20); + +} + + + +/* should have exception packet size is smaller than range */ +TEST_F(basic_vm, vm_random_size_64_127_128) { + + StreamVm vm; + srand(0x1234); + + vm.add_instruction( new StreamVmInstructionFlowMan( "rand_pkt_size_var", + 2, // size var must be 16bit size + StreamVmInstructionFlowMan::FLOW_VAR_OP_RANDOM, + 127, + 128, + 256)); + + vm.add_instruction( new StreamVmInstructionChangePktSize( "rand_pkt_size_var")); + + /* src ip */ + /*14+ 2 , remove the */ + + vm.add_instruction( new StreamVmInstructionWriteToPkt( "rand_pkt_size_var",16, -14,true) + ); + + vm.add_instruction( new StreamVmInstructionFixChecksumIpv4(14) ); + + /* update UDP length */ + vm.add_instruction( new StreamVmInstructionWriteToPkt( "rand_pkt_size_var",32+6, -(14+20),true) + ); + + bool fail=false; + + try { + run_vm_program(vm,"stl/udp_64B_no_crc.pcap","stl_vm_rand_size_64B_127_128",20); + } catch (const TrexException &ex) { + fail=true; + } + + EXPECT_EQ(true, fail); + +} + + +TEST_F(basic_vm, vm_random_size_500b_0_9k) { + + StreamVm vm; + srand(0x1234); + + vm.add_instruction( new StreamVmInstructionFlowMan( "rand_pkt_size_var", + 2, // size var must be 16bit size + StreamVmInstructionFlowMan::FLOW_VAR_OP_RANDOM, + 0, + 0, + 9*1024)); + + vm.add_instruction( new StreamVmInstructionChangePktSize( "rand_pkt_size_var")); + + /* src ip */ + /*14+ 2 , remove the */ + + vm.add_instruction( new StreamVmInstructionWriteToPkt( "rand_pkt_size_var",16, -14,true) + ); + + vm.add_instruction( new StreamVmInstructionFixChecksumIpv4(14) ); + + /* update UDP length */ + vm.add_instruction( new StreamVmInstructionWriteToPkt( "rand_pkt_size_var",32+6, -(14+20),true) + ); + + run_vm_program(vm,"stl/udp_594B_no_crc.pcap","stl_vm_rand_size_512B_64_128",10); + +} diff --git a/src/main.cpp b/src/main.cpp index ba6e258a..a2d06067 100755 --- a/src/main.cpp +++ b/src/main.cpp @@ -23,6 +23,7 @@ limitations under the License. 
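All four size-change tests above follow one recipe: a 16-bit flow variable feeds StreamVmInstructionChangePktSize, the same variable is written into the IPv4 total-length field (offset 16, add -14 to strip the Ethernet header) and the UDP length field (offset 32+6, add -(14+20) to strip Ethernet plus IPv4), and the IPv4 checksum at offset 14 is then fixed. A self-contained sketch of that header arithmetic on a raw buffer (offsets assume an untagged Ethernet/IPv4/UDP frame; this is not TRex code):

```cpp
#include <arpa/inet.h>   // htons
#include <cstdint>
#include <cstdio>
#include <cstring>

// RFC 1071 ones'-complement sum over the 20-byte IPv4 header.
static uint16_t ipv4_checksum(const uint8_t *hdr, size_t len) {
    uint32_t sum = 0;
    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (hdr[i] << 8) | hdr[i + 1];
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return static_cast<uint16_t>(~sum);
}

// Patch the length fields after trimming the frame to new_frame_size bytes.
static void fix_lengths(uint8_t *pkt, uint16_t new_frame_size) {
    const uint16_t ip_total_len = new_frame_size - 14;        // drop Ethernet
    const uint16_t udp_len      = new_frame_size - (14 + 20); // drop Ethernet + IPv4

    uint16_t v = htons(ip_total_len);
    std::memcpy(pkt + 16, &v, 2);           // IPv4 total length (14 + 2)

    v = htons(udp_len);
    std::memcpy(pkt + 38, &v, 2);           // UDP length (14 + 20 + 4), i.e. offset 32+6

    pkt[24] = pkt[25] = 0;                  // zero IPv4 checksum field (14 + 10)
    v = htons(ipv4_checksum(pkt + 14, 20));
    std::memcpy(pkt + 24, &v, 2);
}

int main() {
    uint8_t frame[128] = {0};
    frame[14] = 0x45;                       // IPv4, 20-byte header
    fix_lengths(frame, 96);                 // pretend the VM trimmed the frame to 96 bytes
    std::printf("ip len=%u udp len=%u\n",
                (frame[16] << 8) | frame[17], (frame[38] << 8) | frame[39]);
    return 0;
}
```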
#include "bp_sim.h" #include "os_time.h" +#include <unordered_map> #include <string> #include <common/arg/SimpleGlob.h> @@ -35,7 +36,8 @@ using namespace std; // An enum for all the option types enum { OPT_HELP, OPT_CFG, OPT_NODE_DUMP, OP_STATS, OPT_FILE_OUT, OPT_UT, OPT_PCAP, OPT_IPV6, OPT_MAC_FILE, - OPT_SL, OPT_DP_CORE_COUNT, OPT_DP_CORE_INDEX}; + OPT_SL, OPT_DP_CORE_COUNT, OPT_DP_CORE_INDEX, OPT_LIMIT, + OPT_DRY_RUN}; @@ -73,14 +75,13 @@ static CSimpleOpt::SOption parser_options[] = { OPT_SL, "--sl", SO_NONE }, { OPT_DP_CORE_COUNT, "--cores", SO_REQ_SEP }, { OPT_DP_CORE_INDEX, "--core_index", SO_REQ_SEP }, + { OPT_LIMIT, "--limit", SO_REQ_SEP }, + { OPT_DRY_RUN, "--dry", SO_NONE }, SO_END_OF_OPTIONS }; - - - static int usage(){ printf(" Usage: bp_sim [OPTION] -f cfg.yaml -o outfile.erf \n"); @@ -119,9 +120,7 @@ static int usage(){ static int parse_options(int argc, char *argv[], CParserOption* po, - opt_type_e &type, - int &dp_core_count, - int &dp_core_index) { + std::unordered_map<std::string, int> ¶ms) { CSimpleOpt args(argc, argv, parser_options); @@ -131,16 +130,13 @@ static int parse_options(int argc, po->preview.setFileWrite(true); /* by default - type is stateful */ - type = OPT_TYPE_SF; - - dp_core_count = 1; - dp_core_index = 0; + params["type"] = OPT_TYPE_SF; while ( args.Next() ){ if (args.LastError() == SO_SUCCESS) { switch (args.OptionId()) { case OPT_UT : - type = OPT_TYPE_GTEST; + params["type"] = OPT_TYPE_GTEST; return (0); break; @@ -149,7 +145,7 @@ static int parse_options(int argc, return -1; case OPT_SL: - type = OPT_TYPE_SL; + params["type"] = OPT_TYPE_SL; break; case OPT_CFG: @@ -179,11 +175,19 @@ static int parse_options(int argc, break; case OPT_DP_CORE_COUNT: - dp_core_count = atoi(args.OptionArg()); + params["dp_core_count"] = atoi(args.OptionArg()); break; case OPT_DP_CORE_INDEX: - dp_core_index = atoi(args.OptionArg()); + params["dp_core_index"] = atoi(args.OptionArg()); + break; + + case OPT_LIMIT: + params["limit"] = atoi(args.OptionArg()); + break; + + case OPT_DRY_RUN: + params["dry"] = 1; break; default: @@ -215,16 +219,18 @@ static int parse_options(int argc, } } - if (dp_core_count != -1) { - if ( (dp_core_count < 1) || (dp_core_count > 8) ) { + /* did the user configure dp core count or dp core index ? 
*/ + + if (params.count("dp_core_count") > 0) { + if (!in_range(params["dp_core_count"], 1, 8)) { printf("dp core count must be a value between 1 and 8\n"); return (-1); } } - if (dp_core_index != -1) { - if ( (dp_core_index < 0) || (dp_core_index >= dp_core_count) ) { - printf("dp core count must be a value between 0 and cores - 1\n"); + if (params.count("dp_core_index") > 0) { + if (!in_range(params["dp_core_index"], 0, params["dp_core_count"] - 1)) { + printf("dp core index must be a value between 0 and cores - 1\n"); return (-1); } } @@ -235,14 +241,14 @@ static int parse_options(int argc, int main(int argc , char * argv[]){ - opt_type_e type; - int dp_core_count; - int dp_core_index; + std::unordered_map<std::string, int> params; - if ( parse_options(argc, argv, &CGlobalInfo::m_options , type, dp_core_count, dp_core_index) != 0) { + if ( parse_options(argc, argv, &CGlobalInfo::m_options , params) != 0) { exit(-1); } + opt_type_e type = (opt_type_e) params["type"]; + switch (type) { case OPT_TYPE_GTEST: { @@ -259,7 +265,31 @@ int main(int argc , char * argv[]){ case OPT_TYPE_SL: { SimStateless &st = SimStateless::get_instance(); - return st.run(CGlobalInfo::m_options.cfg_file, CGlobalInfo::m_options.out_file, 2, dp_core_count, dp_core_index); + + if (params.count("dp_core_count") == 0) { + params["dp_core_count"] = 1; + } + + if (params.count("dp_core_index") == 0) { + params["dp_core_index"] = -1; + } + + if (params.count("limit") == 0) { + params["limit"] = 5000; + } + + if (params.count("dry") == 0) { + params["dry"] = 0; + } + + return st.run(CGlobalInfo::m_options.cfg_file, + CGlobalInfo::m_options.out_file, + 2, + params["dp_core_count"], + params["dp_core_index"], + params["limit"], + (params["dry"] == 1) + ); } } } diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp index 0d534b2e..d3dcb0c5 100755 --- a/src/main_dpdk.cpp +++ b/src/main_dpdk.cpp @@ -4,7 +4,7 @@ */ /* -Copyright (c) 2015-2015 Cisco Systems, Inc. +Copyright (c) 2015-2016 Cisco Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
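The parse_options() rework above drops the individual out-parameters in favour of a single std::unordered_map<std::string, int>: count() tells whether an option was given, validation runs only for supplied options, and the caller fills in defaults right before use. A compact standalone sketch of the same pattern (values mirror the defaults applied in main()):

```cpp
#include <cstdio>
#include <string>
#include <unordered_map>

static bool in_range(int x, int low, int high) {
    return (x >= low) && (x <= high);
}

int main() {
    std::unordered_map<std::string, int> params;

    // Pretend the option parser saw "--cores 4" but neither "--limit" nor "--dry".
    params["dp_core_count"] = 4;

    // Validation only runs for options the user actually supplied.
    if (params.count("dp_core_count") && !in_range(params["dp_core_count"], 1, 8)) {
        std::fprintf(stderr, "dp core count must be a value between 1 and 8\n");
        return -1;
    }

    // The caller owns the defaults, applied exactly once, right before use.
    if (params.count("dp_core_index") == 0) params["dp_core_index"] = -1;
    if (params.count("limit") == 0)         params["limit"] = 5000;
    if (params.count("dry") == 0)           params["dry"] = 0;

    std::printf("cores=%d index=%d limit=%d dry=%d\n",
                params["dp_core_count"], params["dp_core_index"],
                params["limit"], params["dry"]);
    return 0;
}
```

The map keeps the parse_options() signature stable as new options such as --limit and --dry are added.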
@@ -79,6 +79,8 @@ extern "C" { #include "msg_manager.h" #include "platform_cfg.h" #include "latency.h" +#include "main_dpdk.h" +#include "debug.h" #include <internal_api/trex_platform_api.h> @@ -119,8 +121,8 @@ static inline int get_is_latency_thread_enable(){ } struct port_cfg_t; -class CPhyEthIF; -class CPhyEthIFStats ; +//class CPhyEthIF; +//class CPhyEthIFStats ; class CTRexExtendedDriverBase { public: @@ -307,6 +309,27 @@ private: uint8_t ttl); }; +class CTRexExtendedDriverBaseVIC : public CTRexExtendedDriverBase40G { +public: + CTRexExtendedDriverBaseVIC(){ + } + + static CTRexExtendedDriverBase * create(){ + return ( new CTRexExtendedDriverBaseVIC() ); + } + + virtual bool is_hardware_filter_is_supported(){ + return (false); + } + + bool flow_control_disable_supported(){return false;} + + virtual void update_configuration(port_cfg_t * cfg); + +}; + + + typedef CTRexExtendedDriverBase * (*create_object_t) (void); @@ -361,7 +384,7 @@ private: register_driver(std::string("rte_em_pmd"),CTRexExtendedDriverBase1GVm::create); register_driver(std::string("rte_vmxnet3_pmd"),CTRexExtendedDriverBase1GVm::create); register_driver(std::string("rte_virtio_pmd"),CTRexExtendedDriverBase1GVm::create); - register_driver(std::string("rte_enic_pmd"),CTRexExtendedDriverBase1GVm::create); + register_driver(std::string("rte_enic_pmd"),CTRexExtendedDriverBaseVIC::create); @@ -481,8 +504,8 @@ enum { OPT_HELP, OPT_VLAN, OPT_VIRT_ONE_TX_RX_QUEUE, OPT_PREFIX, - OPT_MAC_SPLIT - + OPT_MAC_SPLIT, + OPT_SEND_DEBUG_PKT }; @@ -541,6 +564,7 @@ static CSimpleOpt::SOption parser_options[] = { OPT_VIRT_ONE_TX_RX_QUEUE, "--vm-sim", SO_NONE }, { OPT_PREFIX, "--prefix", SO_REQ_SEP }, { OPT_MAC_SPLIT, "--mac-spread", SO_REQ_SEP }, + { OPT_SEND_DEBUG_PKT, "--send-debug-pkt", SO_REQ_SEP }, SO_END_OF_OPTIONS }; @@ -594,6 +618,9 @@ static int usage(){ printf(" -m : factor of bandwidth \n"); printf(" \n"); + printf(" --send-debug-pkt [proto] : Do not run traffic generator. Just send debug packet and dump receive queue."); + printf(" Supported protocols are 1 for icmp, 2 for UDP, 3 for TCP, 4 for 9K UDP\n"); + printf(" \n"); printf(" -k [sec] : run latency test before starting the test. it will wait for x sec sending packet and x sec after that \n"); printf(" \n"); @@ -876,6 +903,12 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t po->preview.setDestMacSplit(true); break; + case OPT_SEND_DEBUG_PKT: + sscanf(args.OptionArg(),"%d", &tmp_data); + po->m_debug_pkt_proto = (uint8_t)tmp_data; + break; + + default: usage(); return -1; @@ -1017,7 +1050,7 @@ struct port_cfg_t { m_tx_conf.tx_thresh.wthresh = TX_WTHRESH; m_port_conf.rxmode.jumbo_frame=1; - m_port_conf.rxmode.max_rx_pkt_len =2000; + m_port_conf.rxmode.max_rx_pkt_len =9*1024+22; m_port_conf.rxmode.hw_strip_crc=1; } @@ -1111,31 +1144,6 @@ typedef struct cnt_name_ { #define MY_REG(a) {a,(char *)#a} - -class CPhyEthIFStats { - -public: - uint64_t ipackets; /**< Total number of successfully received packets. */ - uint64_t ibytes; /**< Total number of successfully received bytes. */ - - uint64_t f_ipackets; /**< Total number of successfully received packets - filter SCTP*/ - uint64_t f_ibytes; /**< Total number of successfully received bytes. - filter SCTP */ - - uint64_t opackets; /**< Total number of successfully transmitted packets.*/ - uint64_t obytes; /**< Total number of successfully transmitted bytes. */ - - uint64_t ierrors; /**< Total number of erroneous received packets. 
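The VIC support above hooks into the existing name-based driver factory: register_driver() binds a PMD name such as "rte_enic_pmd" to a create_object_t function, so routing ENIC ports to CTRexExtendedDriverBaseVIC::create is a single table entry. The registry container itself sits outside this hunk; a typical shape for such a registry is sketched below (illustrative, not the TRex implementation):

```cpp
#include <cstdio>
#include <map>
#include <string>

struct DriverBase {
    virtual ~DriverBase() = default;
    virtual bool is_hardware_filter_supported() const { return true; }
};

struct DriverVIC : DriverBase {
    static DriverBase *create() { return new DriverVIC(); }
    bool is_hardware_filter_supported() const override { return false; }
};

typedef DriverBase *(*create_object_t)(void);

static std::map<std::string, create_object_t> g_registry;

static void register_driver(const std::string &name, create_object_t factory) {
    g_registry[name] = factory;
}

static DriverBase *create_driver(const std::string &pmd_name) {
    auto it = g_registry.find(pmd_name);
    return (it != g_registry.end()) ? it->second() : nullptr;
}

int main() {
    register_driver("rte_enic_pmd", DriverVIC::create);

    DriverBase *drv = create_driver("rte_enic_pmd");
    std::printf("hw filter supported: %d\n",
                drv ? drv->is_hardware_filter_supported() : -1);
    delete drv;
    return 0;
}
```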
*/ - uint64_t oerrors; /**< Total number of failed transmitted packets. */ - uint64_t imcasts; /**< Total number of multicast received packets. */ - uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */ - - -public: - void Clear(); - void Dump(FILE *fd); - void DumpAll(FILE *fd); -}; - void CPhyEthIFStats::Clear(){ ipackets =0; @@ -1184,165 +1192,6 @@ void CPhyEthIFStats::Dump(FILE *fd){ DP_A(rx_nombuf); } - - -class CPhyEthIF { -public: - CPhyEthIF (){ - m_port_id=0; - m_rx_queue=0; - } - bool Create(uint8_t portid){ - m_port_id = portid; - m_last_rx_rate = 0.0; - m_last_tx_rate = 0.0; - m_last_tx_pps = 0.0; - return (true); - } - void Delete(); - - void set_rx_queue(uint8_t rx_queue){ - m_rx_queue=rx_queue; - } - - - void configure(uint16_t nb_rx_queue, - uint16_t nb_tx_queue, - const struct rte_eth_conf *eth_conf); - - void macaddr_get(struct ether_addr *mac_addr); - - void get_stats(CPhyEthIFStats *stats); - - void get_stats_1g(CPhyEthIFStats *stats); - - - void rx_queue_setup(uint16_t rx_queue_id, - uint16_t nb_rx_desc, - unsigned int socket_id, - const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mb_pool); - - void tx_queue_setup(uint16_t tx_queue_id, - uint16_t nb_tx_desc, - unsigned int socket_id, - const struct rte_eth_txconf *tx_conf); - - void configure_rx_drop_queue(); - - void configure_rx_duplicate_rules(); - - void start(); - - void stop(); - - void update_link_status(); - - bool is_link_up(){ - return (m_link.link_status?true:false); - } - - void dump_link(FILE *fd); - - void disable_flow_control(); - - void set_promiscuous(bool enable); - - void add_mac(char * mac); - - - bool get_promiscuous(); - - void dump_stats(FILE *fd); - - void update_counters(); - - - void stats_clear(); - - uint8_t get_port_id(){ - return (m_port_id); - } - - float get_last_tx_rate(){ - return (m_last_tx_rate); - } - - float get_last_rx_rate(){ - return (m_last_rx_rate); - } - - float get_last_tx_pps_rate(){ - return (m_last_tx_pps); - } - - float get_last_rx_pps_rate(){ - return (m_last_rx_pps); - } - - CPhyEthIFStats & get_stats(){ - return ( m_stats ); - } - - void flush_rx_queue(void); - -public: - - inline uint16_t tx_burst(uint16_t queue_id, - struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); - - inline uint16_t rx_burst(uint16_t queue_id, - struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); - - - inline uint32_t pci_reg_read(uint32_t reg_off){ - void *reg_addr; - uint32_t reg_v; - reg_addr = (void *)((char *)m_dev_info.pci_dev->mem_resource[0].addr + - reg_off); - reg_v = *((volatile uint32_t *)reg_addr); - return rte_le_to_cpu_32(reg_v); - } - - - inline void pci_reg_write(uint32_t reg_off, - uint32_t reg_v){ - void *reg_addr; - - reg_addr = (void *)((char *)m_dev_info.pci_dev->mem_resource[0].addr + - reg_off); - *((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v); - } - - void dump_stats_extended(FILE *fd); - - uint8_t get_rte_port_id(void){ - return ( m_port_id ); - } -private: - uint8_t m_port_id; - uint8_t m_rx_queue; - struct rte_eth_link m_link; - uint64_t m_sw_try_tx_pkt; - uint64_t m_sw_tx_drop_pkt; - CBwMeasure m_bw_tx; - CBwMeasure m_bw_rx; - CPPSMeasure m_pps_tx; - CPPSMeasure m_pps_rx; - - CPhyEthIFStats m_stats; - - float m_last_tx_rate; - float m_last_rx_rate; - float m_last_tx_pps; - float m_last_rx_pps; -public: - struct rte_eth_dev_info m_dev_info; -}; - - void CPhyEthIF::flush_rx_queue(void){ rte_mbuf_t * rx_pkts[32]; @@ -1467,8 +1316,8 @@ void CPhyEthIF::configure_rx_duplicate_rules(){ void CPhyEthIF::configure_rx_drop_queue(){ - - if ( 
get_vm_one_queue_enable() ) { + // In debug mode, we want to see all packets. Don't want to disable any queue. + if ( get_vm_one_queue_enable() || (CGlobalInfo::m_options.m_debug_pkt_proto != 0)) { return; } if ( CGlobalInfo::m_options.is_latency_disabled()==false ) { @@ -1790,25 +1639,6 @@ void CPhyEthIF::stats_clear(){ m_stats.Clear(); } -inline uint16_t CPhyEthIF::tx_burst(uint16_t queue_id, - struct rte_mbuf **tx_pkts, - uint16_t nb_pkts){ - uint16_t ret = rte_eth_tx_burst(m_port_id, queue_id, tx_pkts, nb_pkts); - return (ret); -} - - -inline uint16_t CPhyEthIF::rx_burst(uint16_t queue_id, - struct rte_mbuf **rx_pkts, - uint16_t nb_pkts){ - return (rte_eth_rx_burst(m_port_id, queue_id, - rx_pkts, nb_pkts)); - -} - - - - class CCorePerPort { public: CCorePerPort (){ @@ -2069,10 +1899,12 @@ int CCoreEthIF::send_pkt(CCorePerPort * lp_port, CVirtualIFPerSideStats * lp_stats ){ + //printf(" %lu \n",(ulong)rte_pktmbuf_pkt_len(m)); //rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m)); - lp_stats->m_tx_pkt +=1; - lp_stats->m_tx_bytes += (rte_pktmbuf_pkt_len(m)+4); + /* too expensive remove this for now */ + //lp_stats->m_tx_pkt +=1; + //lp_stats->m_tx_bytes += (rte_pktmbuf_pkt_len(m)+4); uint16_t len = lp_port->m_len; lp_port->m_table[len]=m; @@ -2137,6 +1969,7 @@ int CCoreEthIFStateless::send_node(CGenNode * no){ m=node_sl->alloc_node_with_vm(); assert(m); } + send_pkt(lp_port,m,lp_stats); return (0); @@ -2685,13 +2518,7 @@ void CGlobalStats::Dump(FILE *fd,DumpFormat mode){ } - - - - - - -struct CGlobalTRex { +class CGlobalTRex { public: CGlobalTRex (){ @@ -2699,50 +2526,37 @@ public: m_max_cores=1; m_cores_to_dual_ports=0; m_max_queues_per_port=0; - m_test =NULL; m_fl_was_init=false; m_expected_pps=0.0; m_expected_cps=0.0; m_expected_bps=0.0; m_trex_stateless = NULL; } -public: bool Create(); void Delete(); - int ixgbe_prob_init(); int cores_prob_init(); int queues_prob_init(); int ixgbe_start(); int ixgbe_rx_queue_flush(); int ixgbe_configure_mg(); - - bool is_all_links_are_up(bool dump=false); - int set_promisc_all(bool enable); - int reset_counters(); -public: - private: /* try to stop all datapath cores */ void try_stop_all_dp(); /* send message to all dp cores */ int send_message_all_dp(TrexStatelessCpToDpMsgBase *msg); - void check_for_dp_message_from_core(int thread_id); void check_for_dp_messages(); public: - int start_send_master(); int start_master_stateless(); - int run_in_core(virtual_thread_id_t virt_core_id); int stop_core(virtual_thread_id_t virt_core_id); - int core_for_latency(){ if ( (!get_is_latency_thread_enable()) ){ return (-1); @@ -2751,14 +2565,10 @@ public: } } - int run_in_laterncy_core(); - int run_in_master(); int stop_master(); - - - /* return the minimum number of dp cores need to support the active ports + /* return the minimum number of dp cores needed to support the active ports this is for c==1 or m_cores_mul==1 */ int get_base_num_cores(){ @@ -2768,8 +2578,6 @@ public: int get_cores_tx(){ /* 0 - master num_of_cores - - - last for latency */ if ( (!get_is_latency_thread_enable()) ){ return (m_max_cores - 1 ); @@ -2778,69 +2586,27 @@ public: } } - - - -public: - int test_send(); - - - - int rcv_send(int port,int queue_id); - int rcv_send_all(int queue_id); - private: bool is_all_cores_finished(); - int test_send_pkts(uint16_t queue_id, - int pkt, - int port); - - - int create_pkt(uint8_t *pkt,int pkt_size); - int create_udp_pkt(); - int create_icmp_pkt(); - - - public: void dump_stats(FILE *fd, std::string & json,CGlobalStats::DumpFormat format); - void 
dump_template_info(std::string & json); - bool sanity_check(); - void update_stats(void); void get_stats(CGlobalStats & stats); - - void dump_post_test_stats(FILE *fd); - void dump_config(FILE *fd); public: port_cfg_t m_port_cfg; - - /* - exaple1 : - req=4 ,m_max_ports =4 ,c=1 , l=1 - - ==> - m_max_cores = 4/2+1+1 =4; - m_cores_mul = 1 - - - */ - uint32_t m_max_ports; /* active number of ports supported options are 2,4,8,10,12 */ uint32_t m_max_cores; /* current number of cores , include master and latency ==> ( master)1+c*(m_max_ports>>1)+1( latency ) */ uint32_t m_cores_mul; /* how cores multipler given c=4 ==> m_cores_mul */ - uint32_t m_max_queues_per_port; uint32_t m_cores_to_dual_ports; /* number of ports that will handle dual ports */ uint16_t m_latency_tx_queue_id; - // statistic CPPSMeasure m_cps; float m_expected_pps; @@ -2848,32 +2614,20 @@ public: float m_expected_bps;//bps float m_last_total_cps; - - CPhyEthIF m_ports[BP_MAX_PORTS]; - CCoreEthIF m_cores_vif_sf[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserve - stateful */ - CCoreEthIFStateless m_cores_vif_sl[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserve - stateless*/ + CCoreEthIF m_cores_vif_sf[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateful */ + CCoreEthIFStateless m_cores_vif_sl[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateless*/ CCoreEthIF * m_cores_vif[BP_MAX_CORES]; - - CParserOption m_po ; CFlowGenList m_fl; bool m_fl_was_init; - volatile uint8_t m_signal[BP_MAX_CORES] __rte_cache_aligned ; - CLatencyManager m_mg; CTrexGlobalIoMode m_io_modes; private: - -private: - rte_mbuf_t * m_test; - uint64_t m_test_drop; - CLatencyHWPort m_latency_vports[BP_MAX_PORTS]; /* read hardware driver */ CLatencyVmPort m_latency_vm_vports[BP_MAX_PORTS]; /* vm driver */ - CLatencyPktInfo m_latency_pkt; TrexPublisher m_zmq_publisher; @@ -2881,244 +2635,6 @@ public: TrexStateless *m_trex_stateless; }; - - -int CGlobalTRex::rcv_send(int port,int queue_id){ - - CPhyEthIF * lp=&m_ports[port]; - rte_mbuf_t * rx_pkts[32]; - printf(" test rx port:%d queue:%d \n",port,queue_id); - printf(" --------------\n"); - uint16_t cnt=lp->rx_burst(queue_id,rx_pkts,32); - - int i; - for (i=0; i<(int)cnt;i++) { - rte_mbuf_t * m=rx_pkts[i]; - int pkt_size=rte_pktmbuf_pkt_len(m); - char *p=rte_pktmbuf_mtod(m, char*); - utl_DumpBuffer(stdout,p,pkt_size,0); - rte_pktmbuf_free(m); - } - return (0); -} - -int CGlobalTRex::rcv_send_all(int queue_id){ - int i; - for (i=0; i<m_max_ports; i++) { - rcv_send(i,queue_id); - } - return (0); -} - - - - -int CGlobalTRex::test_send(){ - int i; - - set_promisc_all(true); - create_udp_pkt(); - - CRx_check_header rx_check_header; - (void)rx_check_header; - - rx_check_header.m_time_stamp=0x1234567; - rx_check_header.m_option_type=RX_CHECK_V4_OPT_TYPE; - rx_check_header.m_option_len=RX_CHECK_V4_OPT_LEN; - rx_check_header.m_magic=2; - rx_check_header.m_pkt_id=7; - rx_check_header.m_flow_id=9; - rx_check_header.m_flags=11; - - - assert(m_test); - for (i=0; i<1; i++) { - //test_send_pkts(0,1,0); - //test_send_pkts(m_latency_tx_queue_id,12,0); - //test_send_pkts(m_latency_tx_queue_id,1,1); - //test_send_pkts(m_latency_tx_queue_id,1,2); - //test_send_pkts(m_latency_tx_queue_id,1,3); - test_send_pkts(0,1,0); - test_send_pkts(0,2,1); - - /*delay(1000); - fprintf(stdout," --------------------------------\n"); - fprintf(stdout," after sending to port %d \n",i); - fprintf(stdout," --------------------------------\n"); - dump_stats(stdout); - fprintf(stdout," 
--------------------------------\n");*/ - } - //test_send_pkts(m_latency_tx_queue_id,1,1); - //test_send_pkts(m_latency_tx_queue_id,1,2); - //test_send_pkts(m_latency_tx_queue_id,1,3); - - - printf(" ---------\n"); - printf(" rx queue 0 \n"); - printf(" ---------\n"); - rcv_send_all(0); - printf("\n\n"); - - printf(" ---------\n"); - printf(" rx queue 1 \n"); - printf(" ---------\n"); - rcv_send_all(1); - printf(" ---------\n"); - - delay(1000); - - #if 1 - int j=0; - for (j=0; j<m_max_ports; j++) { - CPhyEthIF * lp=&m_ports[j]; - printf(" port : %d \n",j); - printf(" ----------\n"); - - lp->update_counters(); - lp->get_stats().Dump(stdout); - lp->dump_stats_extended(stdout); - } - /*for (j=0; j<4; j++) { - CPhyEthIF * lp=&m_ports[j]; - lp->dump_stats_extended(stdout); - }*/ - #endif - - fprintf(stdout," drop : %llu \n", (unsigned long long)m_test_drop); - return (0); -} - - - -const uint8_t udp_pkt[]={ - 0x00,0x00,0x00,0x01,0x00,0x00, - 0x00,0x00,0x00,0x01,0x00,0x00, - 0x08,0x00, - - 0x45,0x00,0x00,0x81, - 0xaf,0x7e,0x00,0x00, - 0xfe,0x06,0xd9,0x23, - 0x01,0x01,0x01,0x01, - 0x3d,0xad,0x72,0x1b, - - 0x11,0x11, - 0x11,0x11, - - 0x00,0x6d, - 0x00,0x00, - - 0x64,0x31,0x3a,0x61, - 0x64,0x32,0x3a,0x69,0x64, - 0x32,0x30,0x3a,0xd0,0x0e, - 0xa1,0x4b,0x7b,0xbd,0xbd, - 0x16,0xc6,0xdb,0xc4,0xbb,0x43, - 0xf9,0x4b,0x51,0x68,0x33,0x72, - 0x20,0x39,0x3a,0x69,0x6e,0x66,0x6f, - 0x5f,0x68,0x61,0x73,0x68,0x32,0x30,0x3a,0xee,0xc6,0xa3, - 0xd3,0x13,0xa8,0x43,0x06,0x03,0xd8,0x9e,0x3f,0x67,0x6f, - 0xe7,0x0a,0xfd,0x18,0x13,0x8d,0x65,0x31,0x3a,0x71,0x39, - 0x3a,0x67,0x65,0x74,0x5f,0x70,0x65,0x65,0x72,0x73,0x31, - 0x3a,0x74,0x38,0x3a,0x3d,0xeb,0x0c,0xbf,0x0d,0x6a,0x0d, - 0xa5,0x31,0x3a,0x79,0x31,0x3a,0x71,0x65,0x87,0xa6,0x7d, - 0xe7 -}; - - -const uint8_t icmp_pkt1[]={ - 0x00,0x00,0x00,0x01,0x00,0x00, - 0x00,0x00,0x00,0x01,0x00,0x00, - 0x08,0x00, - - 0x45,0x02,0x00,0x30, - 0x00,0x00,0x40,0x00, - 0xff,0x01,0xbd,0x04, - 0x9b,0xe6,0x18,0x9b, //SIP - 0xcb,0xff,0xfc,0xc2, //DIP - - 0x08, 0x00, - 0x01, 0x02, //checksum - 0xaa, 0xbb, // id - 0x00, 0x00, // Sequence number - - 0x11,0x22,0x33,0x44, // magic - 0x00,0x00,0x00,0x00, //64 bit counter - 0x00,0x00,0x00,0x00, - 0x00,0x01,0xa0,0x00, //seq - 0x00,0x00,0x00,0x00, - -}; - - - - -int CGlobalTRex::create_pkt(uint8_t *pkt,int pkt_size){ - rte_mempool_t * mp= CGlobalInfo::m_mem_pool[0].m_big_mbuf_pool ; - - rte_mbuf_t * m=rte_pktmbuf_alloc(mp); - if ( unlikely(m==0) ) { - printf("ERROR no packets \n"); - return (0); - } - char *p=rte_pktmbuf_append(m, pkt_size); - assert(p); - /* set pkt data */ - memcpy(p,pkt,pkt_size); - //m->ol_flags = PKT_TX_VLAN_PKT; - //m->pkt.vlan_tci =200; - - m_test = m; - - return (0); -} - -int CGlobalTRex::create_udp_pkt(){ - return (create_pkt((uint8_t*)udp_pkt,sizeof(udp_pkt))); -} - -int CGlobalTRex::create_icmp_pkt(){ - return (create_pkt((uint8_t*)icmp_pkt1,sizeof(icmp_pkt1))); -} - - -/* test by sending 10 packets ...*/ -int CGlobalTRex::test_send_pkts(uint16_t queue_id, - int pkt, - int port){ - - CPhyEthIF * lp=&m_ports[port]; - rte_mbuf_t * tx_pkts[32]; - if (pkt >32 ) { - pkt =32; - } - - int i; - for (i=0; i<pkt; i++) { - rte_mbuf_refcnt_update(m_test,1); - tx_pkts[i]=m_test; - } - uint16_t res=lp->tx_burst(queue_id,tx_pkts,pkt); - if ((pkt-res)>0) { - m_test_drop+=(pkt-res); - } - return (0); -} - - - - - -int CGlobalTRex::set_promisc_all(bool enable){ - int i; - for (i=0; i<m_max_ports; i++) { - CPhyEthIF * _if=&m_ports[i]; - _if->set_promiscuous(enable); - } - - return (0); -} - - - int CGlobalTRex::reset_counters(){ int i; for 
(i=0; i<m_max_ports; i++) { @@ -3290,6 +2806,11 @@ int CGlobalTRex::ixgbe_start(void){ /* last TX queue if for latency check */ if ( get_vm_one_queue_enable() ) { /* one tx one rx */ + + /* VMXNET3 does claim to support 16K but somehow does not work */ + /* reduce to 2000 */ + m_port_cfg.m_port_conf.rxmode.max_rx_pkt_len = 2000; + _if->configure(1, 1, &m_port_cfg.m_port_conf); @@ -3298,7 +2819,7 @@ int CGlobalTRex::ixgbe_start(void){ m_latency_tx_queue_id= m_cores_to_dual_ports; socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i); - assert(CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool); + assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048); @@ -3307,7 +2828,7 @@ int CGlobalTRex::ixgbe_start(void){ RTE_TEST_RX_DESC_VM_DEFAULT, socket_id, &m_port_cfg.m_rx_conf, - CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool); + CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048); int qid; for ( qid=0; qid<(m_max_queues_per_port); qid++) { @@ -3327,7 +2848,7 @@ int CGlobalTRex::ixgbe_start(void){ m_latency_tx_queue_id= m_cores_to_dual_ports; socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i); - assert(CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool); + assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048); /* drop queue */ @@ -3335,7 +2856,7 @@ int CGlobalTRex::ixgbe_start(void){ RTE_TEST_RX_DESC_DEFAULT, socket_id, &m_port_cfg.m_rx_conf, - CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool); + CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048); /* set the filter queue */ @@ -3345,7 +2866,7 @@ int CGlobalTRex::ixgbe_start(void){ RTE_TEST_RX_LATENCY_DESC_DEFAULT, socket_id, &m_port_cfg.m_rx_conf, - CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool); + CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k); int qid; for ( qid=0; qid<(m_max_queues_per_port+1); qid++) { @@ -4713,16 +4234,11 @@ int main_test(int argc , char * argv[]){ g_trex.start_send_master(); } - - /* TBD_FDIR */ -#if 0 - printf(" test_send \n"); - g_trex.test_send(); - // while (1) { - delay(10000); - exit(0); - // } -#endif + if (CGlobalInfo::m_options.m_debug_pkt_proto != 0) { + CTrexDebug debug = CTrexDebug(g_trex.m_ports, g_trex.m_max_ports); + debug.test_send(CGlobalInfo::m_options.m_debug_pkt_proto); + exit(1); + } if ( CGlobalInfo::m_options.preview.getOnlyLatency() ){ rte_eal_mp_remote_launch(latency_one_lcore, NULL, CALL_MASTER); @@ -5072,6 +4588,14 @@ void CTRexExtendedDriverBase40G::clear_extended_stats(CPhyEthIF * _if){ } + +void CTRexExtendedDriverBaseVIC::update_configuration(port_cfg_t * cfg){ + cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH; + cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH; + cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH; + cfg->m_port_conf.rxmode.max_rx_pkt_len =9*1000-10; +} + void CTRexExtendedDriverBase40G::update_configuration(port_cfg_t * cfg){ cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH; cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH; diff --git a/src/main_dpdk.h b/src/main_dpdk.h new file mode 100644 index 00000000..e2c0cdb2 --- /dev/null +++ b/src/main_dpdk.h @@ -0,0 +1,158 @@ +/* +Copyright (c) 2015-2016 Cisco Systems, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef MAIN_DPDK_H +#define MAIN_DPDK_H + +#include "bp_sim.h" + +class CPhyEthIFStats { + +public: + uint64_t ipackets; /**< Total number of successfully received packets. */ + uint64_t ibytes; /**< Total number of successfully received bytes. */ + uint64_t f_ipackets; /**< Total number of successfully received packets - filter SCTP*/ + uint64_t f_ibytes; /**< Total number of successfully received bytes. - filter SCTP */ + uint64_t opackets; /**< Total number of successfully transmitted packets.*/ + uint64_t obytes; /**< Total number of successfully transmitted bytes. */ + uint64_t ierrors; /**< Total number of erroneous received packets. */ + uint64_t oerrors; /**< Total number of failed transmitted packets. */ + uint64_t imcasts; /**< Total number of multicast received packets. */ + uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */ + +public: + void Clear(); + void Dump(FILE *fd); + void DumpAll(FILE *fd); +}; + +class CPhyEthIF { +public: + CPhyEthIF (){ + m_port_id=0; + m_rx_queue=0; + } + bool Create(uint8_t portid){ + m_port_id = portid; + m_last_rx_rate = 0.0; + m_last_tx_rate = 0.0; + m_last_tx_pps = 0.0; + return (true); + } + void Delete(); + + void set_rx_queue(uint8_t rx_queue){ + m_rx_queue=rx_queue; + } + + void configure(uint16_t nb_rx_queue, + uint16_t nb_tx_queue, + const struct rte_eth_conf *eth_conf); + void macaddr_get(struct ether_addr *mac_addr); + void get_stats(CPhyEthIFStats *stats); + void get_stats_1g(CPhyEthIFStats *stats); + void rx_queue_setup(uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + void tx_queue_setup(uint16_t tx_queue_id, + uint16_t nb_tx_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + void configure_rx_drop_queue(); + void configure_rx_duplicate_rules(); + void start(); + void stop(); + void update_link_status(); + bool is_link_up(){ + return (m_link.link_status?true:false); + } + void dump_link(FILE *fd); + void disable_flow_control(); + void set_promiscuous(bool enable); + void add_mac(char * mac); + bool get_promiscuous(); + void dump_stats(FILE *fd); + void update_counters(); + void stats_clear(); + uint8_t get_port_id(){ + return (m_port_id); + } + float get_last_tx_rate(){ + return (m_last_tx_rate); + } + float get_last_rx_rate(){ + return (m_last_rx_rate); + } + float get_last_tx_pps_rate(){ + return (m_last_tx_pps); + } + float get_last_rx_pps_rate(){ + return (m_last_rx_pps); + } + CPhyEthIFStats & get_stats(){ + return ( m_stats ); + } + void flush_rx_queue(void); + +public: + inline uint16_t tx_burst(uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + return rte_eth_tx_burst(m_port_id, queue_id, tx_pkts, nb_pkts); + } + inline uint16_t rx_burst(uint16_t queue_id, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { + return rte_eth_rx_burst(m_port_id, queue_id, rx_pkts, nb_pkts); + } + inline uint32_t pci_reg_read(uint32_t reg_off) { + void *reg_addr; + uint32_t reg_v; + reg_addr = (void *)((char *)m_dev_info.pci_dev->mem_resource[0].addr + + reg_off); + reg_v = *((volatile uint32_t 
*)reg_addr); + return rte_le_to_cpu_32(reg_v); + } + inline void pci_reg_write(uint32_t reg_off, + uint32_t reg_v) { + void *reg_addr; + + reg_addr = (void *)((char *)m_dev_info.pci_dev->mem_resource[0].addr + + reg_off); + *((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v); + } + void dump_stats_extended(FILE *fd); + uint8_t get_rte_port_id(void) { + return m_port_id; + } +private: + uint8_t m_port_id; + uint8_t m_rx_queue; + struct rte_eth_link m_link; + uint64_t m_sw_try_tx_pkt; + uint64_t m_sw_tx_drop_pkt; + CBwMeasure m_bw_tx; + CBwMeasure m_bw_rx; + CPPSMeasure m_pps_tx; + CPPSMeasure m_pps_rx; + CPhyEthIFStats m_stats; + float m_last_tx_rate; + float m_last_rx_rate; + float m_last_tx_pps; + float m_last_rx_pps; +public: + struct rte_eth_dev_info m_dev_info; +}; + +#endif diff --git a/src/pal/linux/mbuf.cpp b/src/pal/linux/mbuf.cpp index 26a54fe9..7e9f4775 100755 --- a/src/pal/linux/mbuf.cpp +++ b/src/pal/linux/mbuf.cpp @@ -89,7 +89,7 @@ void utl_rte_mempool_delete(rte_mempool_t * & pool){ uint16_t rte_mbuf_refcnt_update(rte_mbuf_t *m, int16_t value) { utl_rte_pktmbuf_check(m); - uint32_t a=sanb_atomic_add_return_32_old(&m->refcnt_reserved,1); + uint32_t a=sanb_atomic_add_return_32_old(&m->refcnt_reserved, value); return (a); } @@ -136,11 +136,11 @@ rte_mbuf_t *rte_pktmbuf_alloc(rte_mempool_t *mp){ m->buf_addr =(char *)((char *)m+sizeof(rte_mbuf_t)+RTE_PKTMBUF_HEADROOM) ; rte_pktmbuf_reset(m); + return (m); } - void rte_pktmbuf_free_seg(rte_mbuf_t *m){ utl_rte_pktmbuf_check(m); @@ -150,8 +150,10 @@ void rte_pktmbuf_free_seg(rte_mbuf_t *m){ if ( md != m ) { rte_pktmbuf_detach(m); - if (rte_mbuf_refcnt_update(md, -1) == 0) - free(md); + if (rte_mbuf_refcnt_update(md, -1) == 0) { + free(md); + } + } free(m); diff --git a/src/platform_cfg.cpp b/src/platform_cfg.cpp index ca42aa31..15834544 100755 --- a/src/platform_cfg.cpp +++ b/src/platform_cfg.cpp @@ -35,9 +35,17 @@ void CPlatformMemoryYamlInfo::reset(){ m_mbuf[MBUF_64] = m_mbuf[MBUF_64]*2; m_mbuf[MBUF_2048] = CONST_NB_MBUF_2_10G/2; + m_mbuf[MBUF_4096] = 128; + m_mbuf[MBUF_9k] = 512; + + m_mbuf[TRAFFIC_MBUF_64] = m_mbuf[MBUF_64] * 4; m_mbuf[TRAFFIC_MBUF_2048] = CONST_NB_MBUF_2_10G * 8; + m_mbuf[TRAFFIC_MBUF_4096] = 128; + m_mbuf[TRAFFIC_MBUF_9k] = 512; + + m_mbuf[MBUF_DP_FLOWS] = (1024*1024/2); m_mbuf[MBUF_GLOBAL_FLOWS] =(10*1024/2); } @@ -47,7 +55,11 @@ const std::string names []={ "MBUF_256", "MBUF_512", "MBUF_1024", - "MBUF_2048", + "MBUF_2048", + "MBUF_4096", + "MBUF_9K", + + "TRAFFIC_MBUF_64", "TRAFFIC_MBUF_128", @@ -55,6 +67,9 @@ const std::string names []={ "TRAFFIC_MBUF_512", "TRAFFIC_MBUF_1024", "TRAFFIC_MBUF_2048", + "TRAFFIC_MBUF_4096", + "TRAFFIC_MBUF_9K", + "MBUF_DP_FLOWS", "MBUF_GLOBAL_FLOWS" @@ -214,6 +229,15 @@ void operator >> (const YAML::Node& node, CPlatformMemoryYamlInfo & plat_info) { node["mbuf_2048"] >> plat_info.m_mbuf[MBUF_2048]; } + if ( node.FindValue("mbuf_4096") ){ + node["mbuf_4096"] >> plat_info.m_mbuf[MBUF_4096]; + } + + if ( node.FindValue("mbuf_9k") ){ + node["mbuf_9k"] >> plat_info.m_mbuf[MBUF_9k]; + } + + if ( node.FindValue("traffic_mbuf_64") ){ node["traffic_mbuf_64"] >> plat_info.m_mbuf[TRAFFIC_MBUF_64]; } @@ -238,6 +262,15 @@ void operator >> (const YAML::Node& node, CPlatformMemoryYamlInfo & plat_info) { node["traffic_mbuf_2048"] >> plat_info.m_mbuf[TRAFFIC_MBUF_2048]; } + if ( node.FindValue("traffic_mbuf_4096") ){ + node["traffic_mbuf_4096"] >> plat_info.m_mbuf[TRAFFIC_MBUF_4096]; + } + + if ( node.FindValue("traffic_mbuf_9k") ){ + node["traffic_mbuf_9k"] >> 
plat_info.m_mbuf[TRAFFIC_MBUF_9k]; + } + + if ( node.FindValue("dp_flows") ){ node["dp_flows"] >> plat_info.m_mbuf[MBUF_DP_FLOWS]; } diff --git a/src/platform_cfg.h b/src/platform_cfg.h index 4fc3c3dd..e8f93d0b 100755 --- a/src/platform_cfg.h +++ b/src/platform_cfg.h @@ -31,25 +31,31 @@ limitations under the License. #define CONST_NB_MBUF_2_10G (16380/4) -typedef enum { MBUF_64 =0, // per dual port, per NUMA +typedef enum { MBUF_64 , // per dual port, per NUMA + + MBUF_128 , + MBUF_256 , + MBUF_512 , + MBUF_1024 , + MBUF_2048 , + MBUF_4096 , + MBUF_9k , - MBUF_128 =1, - MBUF_256 =2, - MBUF_512 =3, - MBUF_1024 =4, - MBUF_2048 =5, // per NUMA - TRAFFIC_MBUF_64 =6, - TRAFFIC_MBUF_128 =7, - TRAFFIC_MBUF_256 =8, - TRAFFIC_MBUF_512 =9, - TRAFFIC_MBUF_1024 =10, - TRAFFIC_MBUF_2048 =11, - - MBUF_DP_FLOWS =12, - MBUF_GLOBAL_FLOWS =13, - MBUF_SIZE =14 + TRAFFIC_MBUF_64 , + TRAFFIC_MBUF_128 , + TRAFFIC_MBUF_256 , + TRAFFIC_MBUF_512 , + TRAFFIC_MBUF_1024 , + TRAFFIC_MBUF_2048 , + TRAFFIC_MBUF_4096 , + TRAFFIC_MBUF_9k , + + + MBUF_DP_FLOWS , + MBUF_GLOBAL_FLOWS , + MBUF_SIZE } mbuf_sizes_t; const std::string * get_mbuf_names(void); diff --git a/src/rpc-server/commands/trex_rpc_cmd_stream.cpp b/src/rpc-server/commands/trex_rpc_cmd_stream.cpp index 51db0b20..95cd895b 100644 --- a/src/rpc-server/commands/trex_rpc_cmd_stream.cpp +++ b/src/rpc-server/commands/trex_rpc_cmd_stream.cpp @@ -175,6 +175,16 @@ TrexRpcCmdAddStream::parse_vm_instr_checksum(const Json::Value &inst, TrexStream stream->m_vm.add_instruction(new StreamVmInstructionFixChecksumIpv4(pkt_offset)); } + +void +TrexRpcCmdAddStream::parse_vm_instr_trim_pkt_size(const Json::Value &inst, TrexStream *stream, Json::Value &result){ + + std::string flow_var_name = parse_string(inst, "name", result); + + stream->m_vm.add_instruction(new StreamVmInstructionChangePktSize(flow_var_name)); +} + + void TrexRpcCmdAddStream::parse_vm_instr_tuple_flow_var(const Json::Value &inst, TrexStream *stream, Json::Value &result){ @@ -255,7 +265,7 @@ TrexRpcCmdAddStream::parse_vm(const Json::Value &vm, TrexStream *stream, Json::V for (int i = 0; i < instructions.size(); i++) { const Json::Value & inst = parse_object(instructions, i, result); - auto vm_types = {"fix_checksum_ipv4", "flow_var", "write_flow_var","tuple_flow_var"}; + auto vm_types = {"fix_checksum_ipv4", "flow_var", "write_flow_var","tuple_flow_var","trim_pkt_size"}; std::string vm_type = parse_choice(inst, "type", vm_types, result); // checksum instruction @@ -269,7 +279,10 @@ TrexRpcCmdAddStream::parse_vm(const Json::Value &vm, TrexStream *stream, Json::V parse_vm_instr_write_flow_var(inst, stream, result); } else if (vm_type == "tuple_flow_var") { - parse_vm_instr_tuple_flow_var(inst, stream, result); + parse_vm_instr_tuple_flow_var(inst, stream, result); + + } else if (vm_type == "trim_pkt_size") { + parse_vm_instr_trim_pkt_size(inst, stream, result); } else { /* internal error */ throw TrexRpcException("internal error"); diff --git a/src/rpc-server/commands/trex_rpc_cmds.h b/src/rpc-server/commands/trex_rpc_cmds.h index f4651d7b..b1750053 100644 --- a/src/rpc-server/commands/trex_rpc_cmds.h +++ b/src/rpc-server/commands/trex_rpc_cmds.h @@ -96,6 +96,8 @@ void parse_vm(const Json::Value &vm, TrexStream *stream, Json::Value &result); void parse_vm_instr_checksum(const Json::Value &inst, TrexStream *stream, Json::Value &result); void parse_vm_instr_flow_var(const Json::Value &inst, TrexStream *stream, Json::Value &result); void parse_vm_instr_tuple_flow_var(const Json::Value &inst, TrexStream *stream, 
Json::Value &result); +void parse_vm_instr_trim_pkt_size(const Json::Value &inst, TrexStream *stream, Json::Value &result); + void parse_vm_instr_write_flow_var(const Json::Value &inst, TrexStream *stream, Json::Value &result); ); diff --git a/src/sim/trex_sim.h b/src/sim/trex_sim.h index cc02fd75..8feb7bc0 100644 --- a/src/sim/trex_sim.h +++ b/src/sim/trex_sim.h @@ -32,6 +32,12 @@ class TrexStateless; class TrexPublisher; class DpToCpHandler; + +static inline bool +in_range(int x, int low, int high) { + return ( (x >= low) && (x <= high) ); +} + /** * interface for a sim target * @@ -102,7 +108,9 @@ public: const std::string &out_filename, int port_count, int dp_core_count, - int dp_core_index); + int dp_core_index, + int limit, + bool is_dry_run); TrexStateless * get_stateless_obj() { return m_trex_stateless; @@ -121,12 +129,22 @@ private: void execute_json(const std::string &json_filename); void run_dp(const std::string &out_filename); - void run_dp_core(int core_index, const std::string &out_filename); + + void run_dp_core(int core_index, + const std::string &out_filename, + uint64_t &simulated_pkts, + uint64_t &written_pkts); void flush_dp_to_cp_messages_core(int core_index); void validate_response(const Json::Value &resp); + bool should_capture_core(int i); + bool is_multiple_capture(); + uint64_t get_limit_per_core(int core_index); + + void show_intro(const std::string &out_filename); + bool is_verbose() { return m_verbose; } @@ -136,11 +154,14 @@ private: TrexPublisher *m_publisher; CFlowGenList m_fl; CErfIFStl m_erf_vif; + CErfIFStlNull m_null_erf_vif; bool m_verbose; int m_port_count; int m_dp_core_count; int m_dp_core_index; + uint64_t m_limit; + bool m_is_dry_run; }; #endif /* __TREX_SIM_H__ */ diff --git a/src/sim/trex_sim_stateless.cpp b/src/sim/trex_sim_stateless.cpp index 2821644f..215315e0 100644 --- a/src/sim/trex_sim_stateless.cpp +++ b/src/sim/trex_sim_stateless.cpp @@ -26,9 +26,34 @@ limitations under the License. 
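With the RPC hooks above, a client requests the new packet-size instruction by sending a VM instruction object whose "type" is "trim_pkt_size" and whose "name" refers to an existing 16-bit flow variable; those are the only two keys the parser reads. A small jsoncpp snippet that builds such an object (the enclosing stream/VM envelope is omitted here):

```cpp
#include <json/json.h>
#include <iostream>

int main() {
    // One VM instruction as TrexRpcCmdAddStream::parse_vm_instr_trim_pkt_size() expects it:
    // the dispatch key "type" plus the flow-variable "name" whose value becomes the new size.
    Json::Value inst;
    inst["type"] = "trim_pkt_size";
    inst["name"] = "rand_pkt_size_var";

    Json::StyledWriter writer;
    std::cout << writer.write(inst);
    return 0;
}
```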
#include <json/json.h> #include <stdexcept> #include <sstream> +#include <trex_streams_compiler.h> using namespace std; +/****** utils ******/ +static string format_num(double num, const string &suffix = "") { + const char x[] = {' ','K','M','G','T','P'}; + + double my_num = num; + + for (int i = 0; i < sizeof(x); i++) { + if (std::abs(my_num) < 1000.0) { + stringstream ss; + + char buf[100]; + snprintf(buf, sizeof(buf), "%.2f", my_num); + + ss << buf << " " << x[i] << suffix; + return ss.str(); + + } else { + my_num /= 1000.0; + } + } + + return "NaN"; +} + TrexStateless * get_stateless_obj() { return SimStateless::get_instance().get_stateless_obj(); } @@ -57,8 +82,12 @@ public: virtual void get_global_stats(TrexPlatformGlobalStats &stats) const { } + virtual void get_interface_info(uint8_t interface_id, std::string &driver_name, driver_speed_e &speed) const { + driver_name = "TEST"; + speed = TrexPlatformApi::SPEED_10G; } + virtual void get_interface_stats(uint8_t interface_id, TrexPlatformInterfaceStats &stats) const { } @@ -120,6 +149,8 @@ SimStateless::SimStateless() { m_dp_core_count = -1; m_dp_core_index = -1; m_port_count = -1; + m_limit = 0; + m_is_dry_run = false; /* override ownership checks */ TrexRpcCommand::test_set_override_ownership(true); @@ -131,15 +162,20 @@ SimStateless::run(const string &json_filename, const string &out_filename, int port_count, int dp_core_count, - int dp_core_index) { + int dp_core_index, + int limit, + bool is_dry_run) { assert(dp_core_count > 0); - assert(dp_core_index >= 0); - assert(dp_core_index < dp_core_count); + + /* -1 means its not set or positive value between 0 and the dp core count - 1*/ + assert( (dp_core_index == -1) || ( in_range(dp_core_index, 0, dp_core_count - 1)) ); m_dp_core_count = dp_core_count; m_dp_core_index = dp_core_index; m_port_count = port_count; + m_limit = limit; + m_is_dry_run = is_dry_run; prepare_dataplane(); prepare_control_plane(); @@ -216,7 +252,11 @@ SimStateless::prepare_dataplane() { m_fl.generate_p_thread_info(m_dp_core_count); for (int i = 0; i < m_dp_core_count; i++) { - m_fl.m_threads_info[i]->set_vif(&m_erf_vif); + if (should_capture_core(i)) { + m_fl.m_threads_info[i]->set_vif(&m_erf_vif); + } else { + m_fl.m_threads_info[i]->set_vif(&m_null_erf_vif); + } } } @@ -263,36 +303,125 @@ SimStateless::validate_response(const Json::Value &resp) { } +static inline bool is_debug() { + #ifdef DEBUG + return true; + #else + return false; + #endif +} + +void +SimStateless::show_intro(const std::string &out_filename) { + uint64_t bps = 0; + uint64_t pps = 0; + + std::cout << "\nGeneral info:\n"; + std::cout << "------------\n\n"; + + std::cout << "image type: " << (is_debug() ? "debug" : "release") << "\n"; + std::cout << "I/O output: " << (m_is_dry_run ? 
"*DRY*" : out_filename) << "\n"; + + if (m_limit > 0) { + std::cout << "packet limit: " << m_limit << "\n"; + } else { + std::cout << "packet limit: " << "*NO LIMIT*" << "\n"; + } + + if (m_dp_core_index != -1) { + std::cout << "core recording: " << m_dp_core_index << "\n"; + } else { + std::cout << "core recording: merge all\n"; + } + + std::cout << "\nConfiguration info:\n"; + std::cout << "-------------------\n\n"; + + std::cout << "ports: " << m_port_count << "\n"; + std::cout << "cores: " << m_dp_core_count << "\n"; + + + std::cout << "\nPort Config:\n"; + std::cout << "------------\n\n"; + + TrexStatelessPort *port = get_stateless_obj()->get_port_by_id(0); + + std::cout << "stream count: " << port->get_stream_count() << "\n"; + + port->get_port_effective_rate(bps, pps); + + std::cout << "max BPS: " << format_num(bps, "bps") << "\n"; + std::cout << "max PPS: " << format_num(pps, "pps") << "\n"; + + std::cout << "\n\nStarting simulation...\n"; +} void SimStateless::run_dp(const std::string &out_filename) { + uint64_t simulated_pkts_cnt = 0; + uint64_t written_pkts_cnt = 0; - for (int i = 0; i < m_dp_core_count; i++) { - if (i == m_dp_core_index) { - run_dp_core(i, out_filename); - } else { - run_dp_core(i, "/dev/null"); + show_intro(out_filename); + + if (is_multiple_capture()) { + for (int i = 0; i < m_dp_core_count; i++) { + std::stringstream ss; + ss << out_filename << "-" << i; + run_dp_core(i, ss.str(), simulated_pkts_cnt, written_pkts_cnt); + } + + } else { + for (int i = 0; i < m_dp_core_count; i++) { + run_dp_core(i, out_filename, simulated_pkts_cnt, written_pkts_cnt); } } - CFlowGenListPerThread *lpt = m_fl.m_threads_info[m_dp_core_index]; - + std::cout << "\n\nSimulation summary:\n"; + std::cout << "-------------------\n\n"; + std::cout << "simulated " << simulated_pkts_cnt << " packets\n"; + + if (m_is_dry_run) { + std::cout << "*DRY RUN* - no packets were written\n"; + } else { + std::cout << "written " << written_pkts_cnt << " packets " << "to '" << out_filename << "'\n\n"; + } + std::cout << "\n"; - std::cout << "ports: " << m_port_count << "\n"; - std::cout << "cores: " << m_dp_core_count << "\n"; - std::cout << "core index: " << m_dp_core_index << "\n"; - std::cout << "\nwritten " << lpt->m_node_gen.m_cnt << " packets " << "to '" << out_filename << "'\n\n"; +} + + +uint64_t +SimStateless::get_limit_per_core(int core_index) { + /* global no limit ? 
*/ + if (m_limit == 0) { + return (0); + } else { + uint64_t l = std::max((uint64_t)1, m_limit / m_dp_core_count); + if (core_index == 0) { + l += (m_limit % m_dp_core_count); + } + return l; + } } void -SimStateless::run_dp_core(int core_index, const std::string &out_filename) { +SimStateless::run_dp_core(int core_index, + const std::string &out_filename, + uint64_t &simulated_pkts, + uint64_t &written_pkts) { CFlowGenListPerThread *lpt = m_fl.m_threads_info[core_index]; - lpt->start_stateless_simulation_file((std::string)out_filename, CGlobalInfo::m_options.preview); + lpt->start_stateless_simulation_file((std::string)out_filename, CGlobalInfo::m_options.preview, get_limit_per_core(core_index)); lpt->start_stateless_daemon_simulation(); flush_dp_to_cp_messages_core(core_index); + + simulated_pkts += lpt->m_node_gen.m_cnt; + + if (should_capture_core(core_index)) { + written_pkts += lpt->m_node_gen.m_cnt; + } } @@ -316,3 +445,30 @@ SimStateless::flush_dp_to_cp_messages_core(int core_index) { delete msg; } } + +bool +SimStateless::should_capture_core(int i) { + + /* dry run - no core should be recordered */ + if (m_is_dry_run) { + return false; + } + + /* no specific core index ? record all */ + if (m_dp_core_index == -1) { + return true; + } else { + return (i == m_dp_core_index); + } +} + +bool +SimStateless::is_multiple_capture() { + /* dry run - no core should be recordered */ + if (m_is_dry_run) { + return false; + } + + return ( (m_dp_core_count > 1) && (m_dp_core_index == -1) ); +} + diff --git a/src/stateless/cp/trex_stateless_port.cpp b/src/stateless/cp/trex_stateless_port.cpp index aa34e87b..05283d5f 100644 --- a/src/stateless/cp/trex_stateless_port.cpp +++ b/src/stateless/cp/trex_stateless_port.cpp @@ -76,6 +76,11 @@ TrexStatelessPort::TrexStatelessPort(uint8_t port_id, const TrexPlatformApi *api m_graph_obj = NULL; } +TrexStatelessPort::~TrexStatelessPort() { + if (m_graph_obj) { + delete m_graph_obj; + } +} /** * acquire the port @@ -589,6 +594,22 @@ TrexStatelessPort::validate(void) { } + +void +TrexStatelessPort::get_port_effective_rate(uint64_t &bps, uint64_t &pps) { + + if (get_stream_count() == 0) { + return; + } + + if (!m_graph_obj) { + generate_streams_graph(); + } + + bps = m_graph_obj->get_max_bps() * m_factor; + pps = m_graph_obj->get_max_pps() * m_factor; +} + /************* Trex Port Owner **************/ TrexPortOwner::TrexPortOwner() { diff --git a/src/stateless/cp/trex_stateless_port.h b/src/stateless/cp/trex_stateless_port.h index a529d38f..c3785b0c 100644 --- a/src/stateless/cp/trex_stateless_port.h +++ b/src/stateless/cp/trex_stateless_port.h @@ -134,6 +134,8 @@ public: TrexStatelessPort(uint8_t port_id, const TrexPlatformApi *api); + ~TrexStatelessPort(); + /** * acquire port * throws TrexException in case of an error @@ -259,6 +261,10 @@ public: return m_stream_table.get_stream_by_id(stream_id); } + int get_stream_count() { + return m_stream_table.size(); + } + void get_id_list(std::vector<uint32_t> &id_list) { m_stream_table.get_id_list(id_list); } @@ -298,6 +304,17 @@ public: return m_owner; } + + /** + * get the port effective rate (on a started / paused port) + * + * @author imarom (07-Jan-16) + * + * @param bps + * @param pps + */ + void get_port_effective_rate(uint64_t &bps, uint64_t &pps); + private: diff --git a/src/stateless/cp/trex_stream.h b/src/stateless/cp/trex_stream.h index b4f19111..a164f266 100644 --- a/src/stateless/cp/trex_stream.h +++ b/src/stateless/cp/trex_stream.h @@ -32,6 +32,8 @@ limitations under the License. 
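SimStateless::get_limit_per_core() above splits the global --limit across the DP cores: each core gets limit / core_count (at least 1) and core 0 absorbs the remainder, so whenever the limit allows at least one packet per core the budgets sum back to the requested total. A standalone check of that arithmetic:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>

// Mirrors the splitting rule used by SimStateless::get_limit_per_core().
static uint64_t limit_per_core(uint64_t limit, int core_count, int core_index) {
    if (limit == 0)                       // 0 means "no limit"
        return 0;
    uint64_t l = std::max<uint64_t>(1, limit / core_count);
    if (core_index == 0)
        l += limit % core_count;          // core 0 picks up the remainder
    return l;
}

int main() {
    const uint64_t limit = 5000;
    const int cores = 3;

    uint64_t total = 0;
    for (int i = 0; i < cores; i++) {
        uint64_t l = limit_per_core(limit, cores, i);
        std::printf("core %d: %llu packets\n", i, (unsigned long long)l);
        total += l;
    }
    assert(total == limit);               // holds since limit >= cores in this example
    return 0;
}
```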
#include <trex_stream_vm.h> #include <stdio.h> #include <string.h> +#include <common/captureFile.h> + class TrexRpcCmdAddStream; @@ -123,8 +125,8 @@ public: virtual ~TrexStream(); /* defines the min max per packet supported */ - static const uint32_t MIN_PKT_SIZE_BYTES = 1; - static const uint32_t MAX_PKT_SIZE_BYTES = 9000; + static const uint32_t MIN_PKT_SIZE_BYTES = 60; + static const uint32_t MAX_PKT_SIZE_BYTES = MAX_PKT_SIZE; /* provides storage for the stream json*/ void store_stream_json(const Json::Value &stream_json); diff --git a/src/stateless/cp/trex_stream_vm.cpp b/src/stateless/cp/trex_stream_vm.cpp index a3f585ad..b992d1ab 100644 --- a/src/stateless/cp/trex_stream_vm.cpp +++ b/src/stateless/cp/trex_stream_vm.cpp @@ -107,11 +107,14 @@ void StreamVmInstructionFlowMan::Dump(FILE *fd){ void StreamVmInstructionWriteToPkt::Dump(FILE *fd){ - fprintf(fd," write_pkt , %s ,%lu, add, %ld, big, %lu \n",m_flow_var_name.c_str(),(ulong)m_pkt_offset,(long)m_add_value,(ulong)(m_is_big_endian?1:0)); } +void StreamVmInstructionChangePktSize::Dump(FILE *fd){ + fprintf(fd," pkt_size_change , %s \n",m_flow_var_name.c_str() ); +} + void StreamVmInstructionFlowClient::Dump(FILE *fd){ @@ -192,6 +195,7 @@ void StreamVm::build_flow_var_table() { m_cur_var_offset=0; uint32_t ins_id=0; m_is_random_var=false; + m_is_change_pkt_size=false; /* scan all flow var instruction and build */ for (auto inst : m_inst_list) { @@ -201,6 +205,10 @@ void StreamVm::build_flow_var_table() { m_is_random_var =true; } } + + if ( inst->get_instruction_type() == StreamVmInstruction::itPKT_SIZE_CHANGE ){ + m_is_change_pkt_size=true; + } } /* if we found allocate BSS +4 bytes */ @@ -296,6 +304,51 @@ void StreamVm::build_flow_var_table() { ins_id++; } + + ins_id=0; + + /* second interation for sanity check and fixups*/ + for (auto inst : m_inst_list) { + + + if (inst->get_instruction_type() == StreamVmInstruction::itPKT_SIZE_CHANGE ) { + StreamVmInstructionChangePktSize *lpPkt =(StreamVmInstructionChangePktSize *)inst; + + VmFlowVarRec var; + if ( var_lookup(lpPkt->m_flow_var_name ,var) == false){ + + std::stringstream ss; + ss << "instruction id '" << ins_id << "' packet size with no valid flow varible name '" << lpPkt->m_flow_var_name << "'" ; + err(ss.str()); + } + + if ( var.m_size_bytes != 2 ) { + std::stringstream ss; + ss << "instruction id '" << ins_id << "' packet size change should point to a flow varible with size 2 "; + err(ss.str()); + } + + if ( var.m_ins.m_ins_flowv->m_max_value > m_pkt_size) { + var.m_ins.m_ins_flowv->m_max_value =m_pkt_size; + } + + if (var.m_ins.m_ins_flowv->m_min_value > m_pkt_size) { + var.m_ins.m_ins_flowv->m_min_value = m_pkt_size; + } + + + if ( var.m_ins.m_ins_flowv->m_min_value >= var.m_ins.m_ins_flowv->m_max_value ) { + std::stringstream ss; + ss << "instruction id '" << ins_id << "' min packet size " << var.m_ins.m_ins_flowv->m_min_value << " is bigger or eq to max packet size " << var.m_ins.m_ins_flowv->m_max_value; + err(ss.str()); + } + + if ( var.m_ins.m_ins_flowv->m_min_value < 60) { + var.m_ins.m_ins_flowv->m_min_value =60; + } + } + }/* for */ + } void StreamVm::alloc_bss(){ @@ -474,6 +527,8 @@ void StreamVm::build_program(){ ss << "instruction id '" << ins_id << "' packet write with packet_offset " << lpPkt->m_pkt_offset + var.m_size_bytes << " bigger than packet size "<< m_pkt_size; err(ss.str()); } + + add_field_cnt(lpPkt->m_pkt_offset + var.m_size_bytes); @@ -555,6 +610,32 @@ void StreamVm::build_program(){ } } + + if (ins_type == StreamVmInstruction::itPKT_SIZE_CHANGE ) { + 
@@ -474,6 +527,8 @@ void StreamVm::build_program(){
                 ss << "instruction id '" << ins_id << "' packet write with packet_offset " << lpPkt->m_pkt_offset + var.m_size_bytes << " bigger than packet size "<< m_pkt_size;
                 err(ss.str());
             }
+
+            add_field_cnt(lpPkt->m_pkt_offset + var.m_size_bytes);
 
@@ -555,6 +610,32 @@ void StreamVm::build_program(){
             }
         }
 
+        if (ins_type == StreamVmInstruction::itPKT_SIZE_CHANGE ) {
+            StreamVmInstructionChangePktSize *lpPkt =(StreamVmInstructionChangePktSize *)inst;
+
+            VmFlowVarRec var;
+            if ( var_lookup(lpPkt->m_flow_var_name ,var) == false){
+                std::stringstream ss;
+                ss << "instruction id '" << ins_id << "' packet size change with no valid flow variable name '" << lpPkt->m_flow_var_name << "'" ;
+                err(ss.str());
+            }
+
+            if ( var.m_size_bytes != 2 ) {
+                std::stringstream ss;
+                ss << "instruction id '" << ins_id << "' packet size change should point to a flow variable with size 2 ";
+                err(ss.str());
+            }
+
+            uint8_t flow_offset = get_var_offset(lpPkt->m_flow_var_name);
+
+            StreamDPOpPktSizeChange pkt_size_ch;
+            pkt_size_ch.m_op          = StreamDPVmInstructions::itPKT_SIZE_CHANGE;
+            pkt_size_ch.m_flow_offset = flow_offset;
+            m_instructions.add_command(&pkt_size_ch,sizeof(pkt_size_ch));
+        }
+
         ins_id++;
     }
 }
@@ -790,6 +871,8 @@ void StreamDPVmInstructions::Dump(FILE *fd){
     StreamDPOpPktWr64 *lpw64;
     StreamDPOpClientsLimit   *lp_client;
     StreamDPOpClientsUnLimit *lp_client_unlimited;
+    StreamDPOpPktSizeChange  *lp_pkt_size_change;
+
 
     while ( p < p_end) {
@@ -901,6 +984,12 @@ void StreamDPVmInstructions::Dump(FILE *fd){
             p+=sizeof(StreamDPOpClientsUnLimit);
             break;
 
+        case itPKT_SIZE_CHANGE :
+            lp_pkt_size_change =(StreamDPOpPktSizeChange *)p;
+            lp_pkt_size_change->dump(fd,"pkt_size_c");
+            p+=sizeof(StreamDPOpPktSizeChange);
+            break;
+
         default:
             assert(0);
 
@@ -955,4 +1044,7 @@ void StreamDPOpClientsUnLimit::dump(FILE *fd,std::string opt){
     fprintf(fd," %10s  op:%lu, flow_offset: %lu (%x-%x) flags:%x \n",  opt.c_str(),(ulong)m_op,(ulong)m_flow_offset,m_min_ip,m_max_ip,m_flags);
 }
 
+void StreamDPOpPktSizeChange::dump(FILE *fd,std::string opt){
+    fprintf(fd," %10s  op:%lu, flow_offset: %lu \n",  opt.c_str(),(ulong)m_op,(ulong)m_flow_offset);
+}
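Note: build_program() serializes every control-plane instruction into a flat byte buffer of packed structs, with the opcode in the first byte; the new size-change instruction is just two bytes (opcode plus flow-var offset). The snippet below is a standalone sketch of that encoding step, using illustrative stand-ins (OpPktSizeChange, add_command) rather than the real TRex types.

#include <cstdint>
#include <cstdio>
#include <vector>

/* Illustrative stand-in for the packed DP op; the real struct lives in
   trex_stream_vm.h. The first byte is always the opcode. */
enum { OP_PKT_SIZE_CHANGE = 8 };

struct __attribute__((packed)) OpPktSizeChange {
    uint8_t m_op;
    uint8_t m_flow_offset;   /* where the 2-byte size variable sits in the flow-var block */
};

/* add_command()-style append: copy the packed struct byte by byte */
static void add_command(std::vector<uint8_t> &program, const void *cmd, size_t len) {
    const uint8_t *p = static_cast<const uint8_t *>(cmd);
    program.insert(program.end(), p, p + len);
}

int main() {
    std::vector<uint8_t> program;

    OpPktSizeChange op;
    op.m_op          = OP_PKT_SIZE_CHANGE;
    op.m_flow_offset = 4;   /* resolved via get_var_offset() in the real code */
    add_command(program, &op, sizeof(op));

    printf("program size: %zu bytes, opcode: %u\n", program.size(), (unsigned)program[0]);
    return 0;
}

Keeping the ops packed and self-describing is what lets Dump() and the data-plane runner walk the same buffer with nothing more than a switch on the first byte.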
diff --git a/src/stateless/cp/trex_stream_vm.h b/src/stateless/cp/trex_stream_vm.h
index 891e5b51..edc4f730 100644
--- a/src/stateless/cp/trex_stream_vm.h
+++ b/src/stateless/cp/trex_stream_vm.h
@@ -396,6 +396,21 @@ public:
 
 } __attribute__((packed));
 
+struct StreamDPOpPktSizeChange {
+    uint8_t m_op;
+    uint8_t m_flow_offset; /* offset to the flow var */
+
+    inline void run(uint8_t * flow_var_base,uint16_t & new_size) {
+        uint16_t * p_flow_var = (uint16_t*)(flow_var_base+m_flow_offset);
+        new_size = (*p_flow_var);
+    }
+
+    void dump(FILE *fd,std::string opt);
+
+} __attribute__((packed));
+
+
 /* datapath instructions */
 class StreamDPVmInstructions {
 public:
@@ -422,7 +437,9 @@ public:
         itPKT_WR32   ,
         itPKT_WR64   ,
         itCLIENT_VAR ,
-        itCLIENT_VAR_UNLIMIT
+        itCLIENT_VAR_UNLIMIT ,
+        itPKT_SIZE_CHANGE
+
     };
 
@@ -443,12 +460,20 @@ private:
 
 class StreamDPVmInstructionsRunner {
 public:
+    StreamDPVmInstructionsRunner(){
+        m_new_pkt_size=0;
+    }
     inline void run(uint32_t * per_thread_random,
                     uint32_t program_size,
                     uint8_t * program,  /* program */
                     uint8_t * flow_var, /* flow var */
                     uint8_t * pkt);     /* pkt */
 
+    inline uint16_t get_new_pkt_size(){
+        return (m_new_pkt_size);
+    }
+private:
+    uint16_t m_new_pkt_size;
 };
 
 
@@ -456,7 +481,8 @@ inline void StreamDPVmInstructionsRunner::run(uint32_t * per_thread_random,
                                               uint32_t program_size,
                                               uint8_t * program,  /* program */
                                               uint8_t * flow_var, /* flow var */
-                                              uint8_t * pkt){
+                                              uint8_t * pkt
+                                              ){
 
     uint8_t * p=program;
 
@@ -473,6 +499,8 @@ inline void StreamDPVmInstructionsRunner::run(uint32_t * per_thread_random,
         StreamDPOpPktWr16  *lpw16;
         StreamDPOpPktWr32  *lpw32;
         StreamDPOpPktWr64  *lpw64;
+        StreamDPOpPktSizeChange *lpw_pkt_size;
+
         StreamDPOpClientsLimit   *lpcl;
         StreamDPOpClientsUnLimit *lpclu;
     } ua ;
@@ -586,6 +614,13 @@ inline void StreamDPVmInstructionsRunner::run(uint32_t * per_thread_random,
             ua.lpw64->wr(flow_var,pkt);
             p+=sizeof(StreamDPOpPktWr64);
             break;
+
+        case StreamDPVmInstructions::itPKT_SIZE_CHANGE :
+            ua.lpw_pkt_size =(StreamDPOpPktSizeChange *)p;
+            ua.lpw_pkt_size->run(flow_var,m_new_pkt_size);
+            p+=sizeof(StreamDPOpPktSizeChange);
+            break;
+
         default:
             assert(0);
         }
@@ -607,7 +642,8 @@ public:
         itFIX_IPV4_CS = 4,
         itFLOW_MAN    = 5,
         itPKT_WR      = 6,
-        itFLOW_CLIENT = 7
+        itFLOW_CLIENT = 7 ,
+        itPKT_SIZE_CHANGE = 8
     };
 
@@ -880,7 +916,7 @@ public:
 
 /**
- * write flow var to packet
+ * write flow var to packet, hhaim
  *
  */
 class StreamVmInstructionWriteToPkt : public StreamVmInstruction {
@@ -925,6 +961,36 @@ public:
 
 };
 
+
+/**
+ * change packet size
+ *
+ */
+class StreamVmInstructionChangePktSize : public StreamVmInstruction {
+public:
+
+    StreamVmInstructionChangePktSize(const std::string &flow_var_name) :
+        m_flow_var_name(flow_var_name)
+    {}
+
+    virtual instruction_type_t get_instruction_type() const {
+        return ( StreamVmInstruction::itPKT_SIZE_CHANGE );
+    }
+
+    virtual void Dump(FILE *fd);
+
+    virtual StreamVmInstruction * clone() {
+        return new StreamVmInstructionChangePktSize(m_flow_var_name);
+    }
+
+public:
+
+    /* name of the flow var that holds the new packet size */
+    std::string m_flow_var_name;
+};
+
+
 /**
  * describes a VM program for DP
  *
@@ -939,6 +1005,7 @@ public:
         m_program_size=0;
         m_max_pkt_offset_change=0;
         m_prefix_size = 0;
+        m_is_pkt_size_var=false;
     }
 
     StreamVmDp( uint8_t * bss,
@@ -946,7 +1013,8 @@ public:
                 uint8_t * prog,
                 uint16_t prog_size,
                 uint16_t max_pkt_offset,
-                uint16_t prefix_size
+                uint16_t prefix_size,
+                bool a_is_pkt_size_var
                 ){
 
         if (bss) {
@@ -972,6 +1040,7 @@ public:
 
         m_max_pkt_offset_change = max_pkt_offset;
         m_prefix_size = prefix_size;
+        m_is_pkt_size_var=a_is_pkt_size_var;
     }
 
     ~StreamVmDp(){
@@ -993,7 +1062,8 @@ public:
                                 m_program_ptr,
                                 m_program_size,
                                 m_max_pkt_offset_change,
-                                m_prefix_size
+                                m_prefix_size,
+                                m_is_pkt_size_var
                                 );
         assert(lp);
         return (lp);
@@ -1035,6 +1105,14 @@ public:
         m_prefix_size = prefix_size;
     }
 
+    void set_pkt_size_is_var(bool pkt_size_var){
+        m_is_pkt_size_var=pkt_size_var;
+    }
+    bool is_pkt_size_var(){
+        return (m_is_pkt_size_var);
+    }
+
+
 private:
     uint8_t * m_bss_ptr;     /* pointer to the data section */
     uint8_t * m_program_ptr; /* pointer to the program */
@@ -1042,6 +1120,7 @@ private:
     uint16_t  m_program_size; /* program size*/
     uint16_t  m_max_pkt_offset_change;
    uint16_t   m_prefix_size;
+    bool      m_is_pkt_size_var;
 };
 
 
@@ -1093,7 +1172,8 @@ public:
                                    get_dp_instruction_buffer()->get_program(),
                                    get_dp_instruction_buffer()->get_program_size(),
                                    get_max_packet_update_offset(),
-                                   get_prefix_size()
+                                   get_prefix_size(),
+                                   is_var_pkt_size()
                                    );
         assert(lp);
         return (lp);
@@ -1142,6 +1222,11 @@ public:
         return m_prefix_size;
     }
 
+    bool is_var_pkt_size(){
+        return (m_is_change_pkt_size);
+    }
+
+
     bool is_compiled() {
         return m_is_compiled;
     }
@@ -1197,6 +1282,7 @@ private:
 
 private:
     bool       m_is_random_var;
+    bool       m_is_change_pkt_size;
     bool       m_is_compiled;
     uint16_t   m_prefix_size;
     uint16_t   m_pkt_size;
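Note: on the data plane, StreamDPVmInstructionsRunner walks that byte program, overlays the packed struct on the current cursor, and executes it; a size-change op copies the 16-bit flow variable into m_new_pkt_size, and a value of 0 after run() means the packet size is unchanged. The sketch below mirrors that loop in a standalone form; the class and type names are illustrative, and memcpy replaces the pointer-punning read used in the real op.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

enum { OP_PKT_SIZE_CHANGE = 8 };

struct __attribute__((packed)) OpPktSizeChange {
    uint8_t m_op;
    uint8_t m_flow_offset;

    void run(const uint8_t *flow_var_base, uint16_t &new_size) const {
        /* read the 2-byte size variable out of the flow-var block */
        memcpy(&new_size, flow_var_base + m_flow_offset, sizeof(new_size));
    }
};

/* Mirrors StreamDPVmInstructionsRunner: m_new_pkt_size stays 0 unless a
   size-change op executed, which is how the caller tells the two cases apart. */
class Runner {
public:
    Runner() : m_new_pkt_size(0) {}

    void run(const uint8_t *program, uint32_t program_size, const uint8_t *flow_var) {
        const uint8_t *p   = program;
        const uint8_t *end = program + program_size;
        while (p < end) {
            switch (*p) {
            case OP_PKT_SIZE_CHANGE:
                reinterpret_cast<const OpPktSizeChange *>(p)->run(flow_var, m_new_pkt_size);
                p += sizeof(OpPktSizeChange);
                break;
            default:
                assert(0);
            }
        }
    }

    uint16_t get_new_pkt_size() const { return m_new_pkt_size; }

private:
    uint16_t m_new_pkt_size;
};

int main() {
    const OpPktSizeChange op = { OP_PKT_SIZE_CHANGE, 4 };
    uint8_t flow_var[8] = {0};
    const uint16_t size = 128;
    memcpy(flow_var + 4, &size, sizeof(size));

    Runner runner;
    runner.run(reinterpret_cast<const uint8_t *>(&op), sizeof(op), flow_var);
    printf("new size: %u\n", (unsigned)runner.get_new_pkt_size());   /* 128; 0 would mean unchanged */
    return 0;
}

The zero sentinel is what keeps the fast path untouched when a stream's VM has no size-change instruction at all.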
diff --git a/src/stateless/dp/trex_stateless_dp_core.cpp b/src/stateless/dp/trex_stateless_dp_core.cpp
index 0a9a88ab..a80efc08 100644
--- a/src/stateless/dp/trex_stateless_dp_core.cpp
+++ b/src/stateless/dp/trex_stateless_dp_core.cpp
@@ -149,10 +149,34 @@ rte_mbuf_t * CGenNodeStateless::alloc_node_with_vm(){
                m_vm_flow_var,
                (uint8_t*)p);
 
+    uint16_t pkt_new_size=runner.get_new_pkt_size();
+    if ( likely( pkt_new_size == 0) ) {
+        /* no packet size change */
+        rte_mbuf_t * m_const = get_const_mbuf();
+        if ( m_const != NULL) {
+            utl_rte_pktmbuf_add_after(m,m_const);
+        }
+        return (m);
+    }
+
+    /* the packet size has changed - a few adjustments are needed */
     rte_mbuf_t * m_const = get_const_mbuf();
-    if ( m_const != NULL) {
-        utl_rte_pktmbuf_add_after(m,m_const);
+    if ( (m_const == 0 ) || (pkt_new_size<=prefix_size) ) {
+        /* one mbuf , just trim it */
+        m->data_len = pkt_new_size;
+        m->pkt_len  = pkt_new_size;
+        return (m);
+    }
+
+    rte_mbuf_t * mi= CGlobalInfo::pktmbuf_alloc_small(get_socket_id());
+    assert(mi);
+    rte_pktmbuf_attach(mi,m_const);
+    utl_rte_pktmbuf_add_after2(m,mi);
+
+    if ( pkt_new_size < m->pkt_len) {
+        /* need to trim it */
+        mi->data_len = (pkt_new_size - prefix_size);
+        m->pkt_len   = pkt_new_size;
     }
     return (m);
 }
@@ -617,6 +641,12 @@ TrexStatelessDpCore::add_stream(TrexStatelessDpPerPort * lp_port,
     }
 
+    if ( lpDpVm->is_pkt_size_var() ) {
+        // mark the node as variable packet size
+        node->set_var_pkt_size();
+    }
+
+
     if (lpDpVm->get_prefix_size() > pkt_size ) {
         lpDpVm->set_prefix_size(pkt_size);
     }
diff --git a/src/stateless/dp/trex_stream_node.h b/src/stateless/dp/trex_stream_node.h
index 70a66e6a..dfa4cc13 100644
--- a/src/stateless/dp/trex_stream_node.h
+++ b/src/stateless/dp/trex_stream_node.h
@@ -60,7 +60,9 @@ public:
         SL_NODE_FLAGS_DIR        =1, //USED by master
         SL_NODE_FLAGS_MBUF_CACHE =2, //USED by master
 
-        SL_NODE_CONST_MBUF       =4
+        SL_NODE_CONST_MBUF       =4,
+
+        SL_NODE_VAR_PKT_SIZE     =8
 
     };
@@ -282,6 +284,14 @@ public:
         }
     }
 
+    inline void set_var_pkt_size(){
+        m_flags |= SL_NODE_VAR_PKT_SIZE;
+    }
+
+    inline bool is_var_pkt_size(){
+        return ( ( m_flags & SL_NODE_VAR_PKT_SIZE )?true:false);
+    }
+
+
     inline void set_const_mbuf(rte_mbuf_t * m){
         m_cache_mbuf=(void *)m;
         m_flags |= SL_NODE_CONST_MBUF;
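Note: with the runner reporting a new size, alloc_node_with_vm() now has three outcomes: keep the packet as before (new size 0), trim the single writable mbuf (no const mbuf, or the new size fits inside the prefix), or attach an indirect mbuf over the shared const part and trim the chain. The snippet below summarizes that decision as a standalone function with an enum result; the real code manipulates rte_mbuf chains instead, and the names here are illustrative only.

#include <cassert>
#include <cstdint>
#include <cstdio>

/* Illustrative summary of the three paths taken after the VM runs. */
enum resize_action_t {
    KEEP_AS_IS,          /* new size == 0: no size-change op ran */
    TRIM_SINGLE_MBUF,    /* no const mbuf, or the new size fits in the writable prefix */
    ATTACH_AND_TRIM      /* chain an indirect mbuf over the const part, then trim */
};

static resize_action_t pick_resize_action(uint16_t new_pkt_size,
                                          uint16_t prefix_size,
                                          bool has_const_mbuf) {
    if (new_pkt_size == 0) {
        return KEEP_AS_IS;
    }
    if (!has_const_mbuf || (new_pkt_size <= prefix_size)) {
        return TRIM_SINGLE_MBUF;
    }
    return ATTACH_AND_TRIM;
}

int main() {
    /* 64B packet requested on a stream with a 128B writable prefix: trim only */
    assert(pick_resize_action(64, 128, true) == TRIM_SINGLE_MBUF);
    /* 512B packet that extends into the shared const payload: attach + trim */
    assert(pick_resize_action(512, 128, true) == ATTACH_AND_TRIM);
    /* runner reported 0: behave exactly as before this change */
    assert(pick_resize_action(0, 128, true) == KEEP_AS_IS);
    printf("all resize cases covered\n");
    return 0;
}

Attaching an indirect mbuf rather than copying keeps the shared constant payload zero-copy even when the per-packet size varies; the VM already guarantees the new size never exceeds the stream's base packet size.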