Diffstat (limited to 'resources/libraries/python')
81 files changed, 5491 insertions, 5859 deletions
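The bulk of the patch applies a handful of recurring Python 2 to Python 3 conversions. As orientation, here is a standalone sketch of the three most frequent ones (illustration only, not part of any changed file):

    from ipaddress import ip_address

    host = u"10.0.0.1"

    # str.format() is replaced by f-strings throughout.
    assert "Failed on host {host}".format(host=host) == f"Failed on host {host}"

    # Py2's 'hexstring'.decode('hex') becomes bytes.fromhex().
    assert bytes.fromhex("0a00") == b"\x0a\x00"

    # unicode() wrappers around ip_address() are dropped: Py3 str is unicode.
    assert ip_address(u"10.0.0.1").packed == b"\x0a\x00\x00\x01"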
diff --git a/resources/libraries/python/Classify.py b/resources/libraries/python/Classify.py index 6d4b84c1cc..4d05079eec 100644 --- a/resources/libraries/python/Classify.py +++ b/resources/libraries/python/Classify.py @@ -13,7 +13,6 @@ """Classify utilities library.""" -import binascii import re from ipaddress import ip_address @@ -21,16 +20,16 @@ from ipaddress import ip_address from robot.api import logger from resources.libraries.python.Constants import Constants -from resources.libraries.python.topology import Topology from resources.libraries.python.PapiExecutor import PapiSocketExecutor +from resources.libraries.python.topology import Topology -class Classify(object): +class Classify: """Classify utilities.""" @staticmethod - def _build_mac_mask(dst_mac='', src_mac='', ether_type=''): - """Build MAC ACL mask data in hexstring format. + def _build_mac_mask(dst_mac=u"", src_mac=u"", ether_type=u""): + """Build MAC ACL mask data in bytes format. :param dst_mac: Source MAC address <0-ffffffffffff>. :param src_mac: Destination MAC address <0-ffffffffffff>. @@ -38,18 +37,19 @@ class Classify(object): :type dst_mac: str :type src_mac: str :type ether_type: str - :returns: MAC ACL mask in hexstring format. - :rtype: str + :returns: MAC ACL mask in bytes format. + :rtype: bytes """ - - return ('{!s:0>12}{!s:0>12}{!s:0>4}'.format( - dst_mac.replace(':', ''), src_mac.replace(':', ''), - ether_type)).decode('hex').rstrip('\0') + return bytes.fromhex( + f"{dst_mac.replace(u':', u'')!s:0>12}" + f"{src_mac.replace(u':', u'')!s:0>12}" + f"{ether_type!s:0>4}" + ).rstrip(b'\0') @staticmethod - def _build_ip_mask(proto='', src_ip='', dst_ip='', src_port='', - dst_port=''): - """Build IP ACL mask data in hexstring format. + def _build_ip_mask( + proto=u"", src_ip=u"", dst_ip=u"", src_port=u"", dst_port=u""): + """Build IP ACL mask data in bytes format. :param proto: Protocol number <0-ff>. :param src_ip: Source ip address <0-ffffffff>. @@ -61,18 +61,18 @@ class Classify(object): :type dst_ip: str :type src_port: str :type dst_port: str - :returns: IP mask in hexstring format. - :rtype: str + :returns: IP mask in bytes format. + :rtype: bytes """ - - return ('{!s:0>20}{!s:0>12}{!s:0>8}{!s:0>4}{!s:0>4}'.format( - proto, src_ip, dst_ip, src_port, dst_port)).decode('hex').\ rstrip('\0') + return bytes.fromhex( + f"{proto!s:0>20}{src_ip!s:0>12}{dst_ip!s:0>8}{src_port!s:0>4}" + f"{dst_port!s:0>4}" + ).rstrip(b'\0') @staticmethod - def _build_ip6_mask(next_hdr='', src_ip='', dst_ip='', src_port='', - dst_port=''): - """Build IPv6 ACL mask data in hexstring format. + def _build_ip6_mask( + next_hdr=u"", src_ip=u"", dst_ip=u"", src_port=u"", dst_port=u""): + """Build IPv6 ACL mask data in bytes format. :param next_hdr: Next header number <0-ff>. :param src_ip: Source ip address <0-ffffffff>. @@ -84,17 +84,17 @@ class Classify(object): :type dst_ip: str :type src_port: str :type dst_port: str - :returns: IPv6 ACL mask in hexstring format. - :rtype: str + :returns: IPv6 ACL mask in bytes format. + :rtype: bytes """ - - return ('{!s:0>14}{!s:0>34}{!s:0>32}{!s:0>4}{!s:0>4}'.format( - next_hdr, src_ip, dst_ip, src_port, dst_port)).decode('hex').\ rstrip('\0') + return bytes.fromhex( + f"{next_hdr!s:0>14}{src_ip!s:0>34}{dst_ip!s:0>32}{src_port!s:0>4}" + f"{dst_port!s:0>4}" + ).rstrip(b'\0') @staticmethod - def _build_mac_match(dst_mac='', src_mac='', ether_type=''): - """Build MAC ACL match data in hexstring format. 
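For illustration, the new mask builder can be exercised standalone; this sketch mirrors the patched _build_mac_mask (leading underscore dropped) and shows the bytes it now returns:

    def build_mac_mask(dst_mac=u"", src_mac=u"", ether_type=u""):
        """Standalone mirror of the patched _build_mac_mask."""
        return bytes.fromhex(
            f"{dst_mac.replace(u':', u'')!s:0>12}"
            f"{src_mac.replace(u':', u'')!s:0>12}"
            f"{ether_type!s:0>4}"
        ).rstrip(b"\0")

    # A destination-MAC-only mask: six 0xff bytes, trailing zeros stripped.
    assert build_mac_mask(dst_mac=u"ff:ff:ff:ff:ff:ff") == 6 * b"\xff"
    assert build_mac_mask() == b""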
+ def _build_mac_match(dst_mac=u"", src_mac=u"", ether_type=u""): + """Build MAC ACL match data in bytes format. :param dst_mac: Source MAC address <x:x:x:x:x:x>. :param src_mac: Destination MAC address <x:x:x:x:x:x>. @@ -102,17 +102,19 @@ class Classify(object): :type dst_mac: str :type src_mac: str :type ether_type: str - :returns: MAC ACL match data in hexstring format. - :rtype: str + :returns: MAC ACL match data in bytes format. + :rtype: bytes """ - - return ('{!s:0>12}{!s:0>12}{!s:0>4}'.format( - dst_mac.replace(':', ''), src_mac.replace(':', ''), - ether_type)).decode('hex').rstrip('\0') + return bytes.fromhex( + f"{dst_mac.replace(u':', u'')!s:0>12}" + f"{src_mac.replace(u':', u'')!s:0>12}" + f"{ether_type!s:0>4}" + ).rstrip(b'\0') @staticmethod - def _build_ip_match(proto=0, src_ip='', dst_ip='', src_port=0, dst_port=0): - """Build IP ACL match data in byte-string format. + def _build_ip_match( + proto=0, src_ip=4*b"\0", dst_ip=4*b"\0", src_port=0, dst_port=0): + """Build IP ACL match data in bytes format. :param proto: Protocol number with valid option "x". :param src_ip: Source ip address in packed format. @@ -120,21 +122,22 @@ class Classify(object): :param src_port: Source port number "x". :param dst_port: Destination port number "x". :type proto: int - :type src_ip: str - :type dst_ip: str + :type src_ip: bytes + :type dst_ip: bytes :type src_port: int :type dst_port: int :returns: IP ACL match data in byte-string format. :rtype: str """ - - return ('{!s:0>20}{!s:0>12}{!s:0>8}{!s:0>4}{!s:0>4}'.format( - hex(proto)[2:], src_ip, dst_ip, hex(src_port)[2:], - hex(dst_port)[2:])).decode('hex').rstrip('\0') + return bytes.fromhex( + f"{hex(proto)[2:]!s:0>20}{src_ip.hex()!s:0>12}{dst_ip.hex()!s:0>8}" + f"{hex(src_port)[2:]!s:0>4}{hex(dst_port)[2:]!s:0>4}" + ).rstrip(b'\0') @staticmethod - def _build_ip6_match(next_hdr=0, src_ip='', dst_ip='', src_port=0, - dst_port=0): + def _build_ip6_match( + next_hdr=0, src_ip=16*b"\0", dst_ip=16*b"\0", src_port=0, + dst_port=0): """Build IPv6 ACL match data in byte-string format. :param next_hdr: Next header number with valid option "x". @@ -143,17 +146,18 @@ class Classify(object): :param src_port: Source port number "x". :param dst_port: Destination port number "x". :type next_hdr: int - :type src_ip: str - :type dst_ip: str + :type src_ip: bytes + :type dst_ip: bytes :type src_port: int :type dst_port: int - :returns: IPv6 ACL match data in byte-string format. - :rtype: str + :returns: IPv6 ACL match data in bytes format. + :rtype: bytes """ - - return ('{!s:0>14}{!s:0>34}{!s:0>32}{!s:0>4}{!s:0>4}'.format( - hex(next_hdr)[2:], src_ip, dst_ip, hex(src_port)[2:], - hex(dst_port)[2:])).decode('hex').rstrip('\0') + return bytes.fromhex( + f"{hex(next_hdr)[2:]!s:0>14}{src_ip.hex()!s:0>34}" + f"{dst_ip.hex()!s:0>32}{hex(src_port)[2:]!s:0>4}" + f"{hex(dst_port)[2:]!s:0>4}" + ).rstrip(b'\0') @staticmethod def _classify_add_del_table( @@ -191,7 +195,7 @@ class Classify(object): (Default value = 0) :type node: dict :type is_add: int - :type mask: str + :type mask: bytes :type match_n_vectors: int :type table_index: int :type nbuckets: int @@ -207,7 +211,7 @@ class Classify(object): match_n: Number of match vectors. 
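The match builders above now take packed addresses (bytes) rather than hex strings; under that calling convention they can be driven like this (standalone sketch of the patched _build_ip_match, leading underscore dropped):

    from ipaddress import ip_address

    def build_ip_match(proto=0, src_ip=4 * b"\0", dst_ip=4 * b"\0",
                       src_port=0, dst_port=0):
        """Standalone mirror of the patched _build_ip_match."""
        return bytes.fromhex(
            f"{hex(proto)[2:]!s:0>20}{src_ip.hex()!s:0>12}"
            f"{dst_ip.hex()!s:0>8}{hex(src_port)[2:]!s:0>4}"
            f"{hex(dst_port)[2:]!s:0>4}"
        ).rstrip(b"\0")

    # Match TCP (proto 6) from 10.0.0.1, any destination, source port 80.
    match = build_ip_match(
        proto=6, src_ip=ip_address(u"10.0.0.1").packed, src_port=80
    )
    assert len(match) == 22 and match[-1] == 0x50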
:rtype: tuple(int, int, int) """ - cmd = 'classify_add_del_table' + cmd = u"classify_add_del_table" args = dict( is_add=is_add, table_index=table_index, @@ -222,14 +226,13 @@ class Classify(object): mask_len=len(mask), mask=mask ) - err_msg = "Failed to create a classify table on host {host}".format( - host=node['host']) + err_msg = f"Failed to create a classify table on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd, **args).get_reply(err_msg) - return int(reply["new_table_index"]), int(reply["skip_n_vectors"]),\ - int(reply["match_n_vectors"]) + return int(reply[u"new_table_index"]), int(reply[u"skip_n_vectors"]),\ + int(reply[u"match_n_vectors"]) @staticmethod def _classify_add_del_session( @@ -267,14 +270,14 @@ class Classify(object): :type node: dict :type is_add: int :type table_index: int - :type match: str + :type match: bytes :type opaque_index: int :type hit_next_index: int :type advance: int :type action: int :type metadata: int """ - cmd = 'classify_add_del_session' + cmd = u"classify_add_del_session" args = dict( is_add=is_add, table_index=table_index, @@ -286,8 +289,7 @@ class Classify(object): match_len=len(match), match=match ) - err_msg = "Failed to create a classify session on host {host}".format( - host=node['host']) + err_msg = f"Failed to create a classify session on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -303,15 +305,14 @@ class Classify(object): :type rules: list :type tag: str """ - cmd = "macip_acl_add" + cmd = u"macip_acl_add" args = dict( r=rules, count=len(rules), tag=tag ) - err_msg = "Failed to create a classify session on host {host}".format( - host=node['host']) + err_msg = f"Failed to add MACIP ACL on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -329,8 +330,8 @@ class Classify(object): :type acl_type: str :type acls: list """ - cmd = "acl_interface_set_acl_list" - n_input = len(acls) if acl_type == "input" else 0 + cmd = u"acl_interface_set_acl_list" + n_input = len(acls) if acl_type == u"input" else 0 args = dict( sw_if_index=sw_if_index, acls=acls, @@ -338,8 +339,8 @@ class Classify(object): count=len(acls) ) - err_msg = "Failed to set acl list for interface {idx} on host {host}".\ - format(idx=sw_if_index, host=node['host']) + err_msg = f"Failed to set acl list for interface {sw_if_index} " \ + f"on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -357,7 +358,7 @@ class Classify(object): :type rules: list :type tag: str """ - cmd = "acl_add_replace" + cmd = u"acl_add_replace" args = dict( tag=tag.encode("utf-8"), acl_index=4294967295 if acl_idx is None else acl_idx, @@ -365,8 +366,7 @@ class Classify(object): r=rules ) - err_msg = "Failed to add/replace acls on host {host}".format( - host=node['host']) + err_msg = f"Failed to add/replace ACLs on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -397,32 +397,30 @@ class Classify(object): ip6=Classify._build_ip6_mask ) - if ip_version == "ip4" or ip_version == "ip6": - netmask = binascii.hexlify(ip_address(unicode(netmask)).packed) + if ip_version in (u"ip4", u"ip6"): + netmask = ip_address(netmask).packed else: - raise ValueError("IP version {ver} is not supported.".format( - ver=ip_version)) + raise ValueError(f"IP version {ip_version} is not supported.") - if direction == "src": - mask = 
mask_f[ip_version](src_ip=netmask) - elif direction == "dst": - mask = mask_f[ip_version](dst_ip=netmask) + if direction == u"src": + mask = mask_f[ip_version](src_ip=netmask.hex()) + elif direction == u"dst": + mask = mask_f[ip_version](dst_ip=netmask.hex()) else: - raise ValueError("Direction {dir} is not supported.".format( - dir=direction)) + raise ValueError(f"Direction {direction} is not supported.") # Add l2 ethernet header to mask - mask = 14 * '\0' + mask + mask = 14 * b'\0' + mask # Get index of the first significant mask octet - i = len(mask) - len(mask.lstrip('\0')) + i = len(mask) - len(mask.lstrip(b'\0')) # Compute skip_n parameter skip_n = i // 16 # Remove octets to be skipped from the mask mask = mask[skip_n*16:] # Pad mask to an even multiple of the vector size - mask = mask + (16 - len(mask) % 16 if len(mask) % 16 else 0) * '\0' + mask = mask + (16 - len(mask) % 16 if len(mask) % 16 else 0) * b'\0' # Compute match_n parameter match_n = len(mask) // 16 @@ -473,27 +471,25 @@ class Classify(object): deny=1 ) - if ip_version == "ip4" or ip_version == "ip6": - address = binascii.hexlify(ip_address(unicode(address)).packed) + if ip_version in (u"ip4", u"ip6"): + address = ip_address(address).packed else: - raise ValueError("IP version {ver} is not supported.".format( - ver=ip_version)) + raise ValueError(f"IP version {ip_version} is not supported.") - if direction == "src": + if direction == u"src": match = match_f[ip_version](src_ip=address) - elif direction == "dst": + elif direction == u"dst": match = match_f[ip_version](dst_ip=address) else: - raise ValueError("Direction {dir} is not supported.".format( - dir=direction)) + raise ValueError(f"Direction {direction} is not supported.") # Prepend match with l2 ethernet header part - match = 14 * '\0' + match + match = 14 * b'\0' + match # Pad match to match skip_n_vector + match_n_vector size match = match + ((match_n + skip_n) * 16 - len(match) if len(match) < (match_n + skip_n) * 16 - else 0) * '\0' + else 0) * b'\0' Classify._classify_add_del_session( node, @@ -506,83 +502,6 @@ class Classify(object): ) @staticmethod - def compute_classify_hex_mask(ip_version, protocol, direction): - """Compute classify hex mask for TCP or UDP packet matching. - - :param ip_version: Version of IP protocol. - :param protocol: Type of protocol. - :param direction: Traffic direction. - :type ip_version: str - :type protocol: str - :type direction: str - :returns: Classify hex mask. - :rtype: str - :raises ValueError: If protocol is not TCP or UDP. - :raises ValueError: If direction is not source or destination or - source + destination. - """ - if protocol in ('TCP', 'UDP'): - base_mask = Classify._compute_base_mask(ip_version) - - if direction == 'source': - return base_mask + 'FFFF0000' - elif direction == 'destination': - return base_mask + '0000FFFF' - elif direction == 'source + destination': - return base_mask + 'FFFFFFFF' - else: - raise ValueError("Invalid direction!") - else: - raise ValueError("Invalid protocol!") - - @staticmethod - def compute_classify_hex_value(hex_mask, source_port, destination_port): - """Compute classify hex value for TCP or UDP packet matching. - - :param hex_mask: Classify hex mask. - :param source_port: Source TCP/UDP port. - :param destination_port: Destination TCP/UDP port. - :type hex_mask: str - :type source_port: str - :type destination_port: str - :returns: Classify hex value. 
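The skip_n/match_n arithmetic above is easiest to see in isolation. A minimal sketch of the same steps, assuming 16-byte classify vectors as in the code, for an IPv4 source-address mask:

    def vectorize_mask(mask):
        """Redo the skip/match vector math from the table-creation code."""
        mask = 14 * b"\0" + mask              # l2 ethernet header part
        lead = len(mask) - len(mask.lstrip(b"\0"))
        skip_n = lead // 16                   # whole zero vectors skipped
        mask = mask[skip_n * 16:]
        if len(mask) % 16:                    # pad to vector size multiple
            mask += (16 - len(mask) % 16) * b"\0"
        return skip_n, len(mask) // 16

    # _build_ip_mask(src_ip=u"ffffffff") yields 12 zero bytes + 4 * 0xff:
    assert vectorize_mask(12 * b"\0" + 4 * b"\xff") == (1, 1)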
- :rtype: str - """ - source_port_hex = Classify._port_convert(source_port) - destination_port_hex = Classify._port_convert(destination_port) - - return hex_mask[:-8] + source_port_hex + destination_port_hex - - @staticmethod - def _port_convert(port): - """Convert port number for classify hex table format. - - :param port: TCP/UDP port number. - :type port: str - :returns: TCP/UDP port number in 4-digit hexadecimal format. - :rtype: str - """ - return '{0:04x}'.format(int(port)) - - @staticmethod - def _compute_base_mask(ip_version): - """Compute base classify hex mask based on IP version. - - :param ip_version: Version of IP protocol. - :type ip_version: str - :returns: Base hex mask. - :rtype: str - """ - if ip_version == 'ip4': - return 68 * '0' - # base value of classify hex table for IPv4 TCP/UDP ports - elif ip_version == 'ip6': - return 108 * '0' - # base value of classify hex table for IPv6 TCP/UDP ports - else: - raise ValueError("Invalid IP version!") - - @staticmethod def get_classify_table_data(node, table_index): """Retrieve settings for classify table by ID. @@ -593,9 +512,8 @@ class Classify(object): :returns: Classify table settings. :rtype: dict """ - cmd = 'classify_table_info' - err_msg = "Failed to get 'classify_table_info' on host {host}".format( - host=node['host']) + cmd = u"classify_table_info" + err_msg = f"Failed to get 'classify_table_info' on host {node[u'host']}" args = dict( table_id=int(table_index) ) @@ -614,7 +532,7 @@ class Classify(object): :returns: List of classify session settings. :rtype: list or dict """ - cmd = "classify_session_dump" + cmd = u"classify_session_dump" args = dict( table_id=int(table_index) ) @@ -633,7 +551,8 @@ class Classify(object): :rtype: str """ return PapiSocketExecutor.run_cli_cmd( - node, "show classify tables verbose") + node, u"show classify tables verbose" + ) @staticmethod def vpp_log_plugin_acl_settings(node): @@ -643,7 +562,7 @@ class Classify(object): :param node: VPP node. :type node: dict """ - PapiSocketExecutor.dump_and_log(node, ["acl_dump", ]) + PapiSocketExecutor.dump_and_log(node, [u"acl_dump", ]) @staticmethod def vpp_log_plugin_acl_interface_assignment(node): @@ -653,7 +572,7 @@ class Classify(object): :param node: VPP node. :type node: dict """ - PapiSocketExecutor.dump_and_log(node, ["acl_interface_list_dump", ]) + PapiSocketExecutor.dump_and_log(node, [u"acl_interface_list_dump", ]) @staticmethod def set_acl_list_for_interface(node, interface, acl_type, acl_idx=None): @@ -669,20 +588,19 @@ class Classify(object): :type acl_type: str :type acl_idx: list """ - if isinstance(interface, basestring): + if isinstance(interface, str): sw_if_index = Topology.get_interface_sw_index(node, interface) else: sw_if_index = int(interface) acls = acl_idx if isinstance(acl_idx, list) else list() - Classify._acl_interface_set_acl_list(node=node, - sw_if_index=sw_if_index, - acl_type=acl_type, - acls=acls) + Classify._acl_interface_set_acl_list( + node=node, sw_if_index=sw_if_index, acl_type=acl_type, acls=acls + ) @staticmethod - def add_replace_acl_multi_entries(node, acl_idx=None, rules=None, tag=""): + def add_replace_acl_multi_entries(node, acl_idx=None, rules=None, tag=u""): """Add a new ACL or replace the existing one. To replace an existing ACL, pass the ID of this ACL. 
@@ -695,54 +613,54 @@ class Classify(object): :type rules: str :type tag: str """ - reg_ex_src_ip = re.compile(r'(src [0-9a-fA-F.:/\d{1,2}]*)') - reg_ex_dst_ip = re.compile(r'(dst [0-9a-fA-F.:/\d{1,2}]*)') - reg_ex_sport = re.compile(r'(sport \d{1,5})') - reg_ex_dport = re.compile(r'(dport \d{1,5})') - reg_ex_proto = re.compile(r'(proto \d{1,5})') + reg_ex_src_ip = re.compile(r"(src [0-9a-fA-F.:/\d{1,2}]*)") + reg_ex_dst_ip = re.compile(r"(dst [0-9a-fA-F.:/\d{1,2}]*)") + reg_ex_sport = re.compile(r"(sport \d{1,5})") + reg_ex_dport = re.compile(r"(dport \d{1,5})") + reg_ex_proto = re.compile(r"(proto \d{1,5})") acl_rules = list() - for rule in rules.split(", "): + for rule in rules.split(u", "): acl_rule = dict() - acl_rule["is_permit"] = 1 if "permit" in rule else 0 - acl_rule["is_ipv6"] = 1 if "ipv6" in rule else 0 + acl_rule[u"is_permit"] = 1 if u"permit" in rule else 0 + acl_rule[u"is_ipv6"] = 1 if u"ipv6" in rule else 0 groups = re.search(reg_ex_src_ip, rule) if groups: - grp = groups.group(1).split(' ')[1].split('/') - acl_rule["src_ip_addr"] = ip_address(unicode(grp[0])).packed - acl_rule["src_ip_prefix_len"] = int(grp[1]) + grp = groups.group(1).split(u" ")[1].split(u"/") + acl_rule[u"src_ip_addr"] = ip_address(grp[0]).packed + acl_rule[u"src_ip_prefix_len"] = int(grp[1]) groups = re.search(reg_ex_dst_ip, rule) if groups: - grp = groups.group(1).split(' ')[1].split('/') - acl_rule["dst_ip_addr"] = ip_address(unicode(grp[0])).packed - acl_rule["dst_ip_prefix_len"] = int(grp[1]) + grp = groups.group(1).split(u" ")[1].split(u"/") + acl_rule[u"dst_ip_addr"] = ip_address(grp[0]).packed + acl_rule[u"dst_ip_prefix_len"] = int(grp[1]) groups = re.search(reg_ex_sport, rule) if groups: - port = int(groups.group(1).split(' ')[1]) - acl_rule["srcport_or_icmptype_first"] = port - acl_rule["srcport_or_icmptype_last"] = port + port = int(groups.group(1).split(u" ")[1]) + acl_rule[u"srcport_or_icmptype_first"] = port + acl_rule[u"srcport_or_icmptype_last"] = port else: - acl_rule["srcport_or_icmptype_first"] = 0 - acl_rule["srcport_or_icmptype_last"] = 65535 + acl_rule[u"srcport_or_icmptype_first"] = 0 + acl_rule[u"srcport_or_icmptype_last"] = 65535 groups = re.search(reg_ex_dport, rule) if groups: - port = int(groups.group(1).split(' ')[1]) - acl_rule["dstport_or_icmpcode_first"] = port - acl_rule["dstport_or_icmpcode_last"] = port + port = int(groups.group(1).split(u" ")[1]) + acl_rule[u"dstport_or_icmpcode_first"] = port + acl_rule[u"dstport_or_icmpcode_last"] = port else: - acl_rule["dstport_or_icmpcode_first"] = 0 - acl_rule["dstport_or_icmpcode_last"] = 65535 + acl_rule[u"dstport_or_icmpcode_first"] = 0 + acl_rule[u"dstport_or_icmpcode_last"] = 65535 groups = re.search(reg_ex_proto, rule) if groups: proto = int(groups.group(1).split(' ')[1]) - acl_rule["proto"] = proto + acl_rule[u"proto"] = proto else: - acl_rule["proto"] = 0 + acl_rule[u"proto"] = 0 acl_rules.append(acl_rule) @@ -750,7 +668,7 @@ class Classify(object): node, acl_idx=acl_idx, rules=acl_rules, tag=tag) @staticmethod - def add_macip_acl_multi_entries(node, rules=""): + def add_macip_acl_multi_entries(node, rules=u""): """Add a new MACIP ACL. :param node: VPP node to set MACIP ACL on. 
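The rule strings parsed above follow a fixed keyword format; one field can be walked through by hand like this (the rule text is hypothetical, the regex is the one from the code):

    import re
    from ipaddress import ip_address

    rule = u"ipv4 permit src 192.168.0.0/24 sport 1024"
    groups = re.search(r"(src [0-9a-fA-F.:/\d{1,2}]*)", rule)
    addr, plen = groups.group(1).split(u" ")[1].split(u"/")
    # Py3: ip_address() takes str directly, no unicode() wrapper.
    assert ip_address(addr).packed == b"\xc0\xa8\x00\x00"
    assert int(plen) == 24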
@@ -758,31 +676,31 @@ class Classify(object): :type node: dict :type rules: str """ - reg_ex_ip = re.compile(r'(ip [0-9a-fA-F.:/\d{1,2}]*)') - reg_ex_mac = re.compile(r'(mac \S\S:\S\S:\S\S:\S\S:\S\S:\S\S)') - reg_ex_mask = re.compile(r'(mask \S\S:\S\S:\S\S:\S\S:\S\S:\S\S)') + reg_ex_ip = re.compile(r"(ip [0-9a-fA-F.:/\d{1,2}]*)") + reg_ex_mac = re.compile(r"(mac \S\S:\S\S:\S\S:\S\S:\S\S:\S\S)") + reg_ex_mask = re.compile(r"(mask \S\S:\S\S:\S\S:\S\S:\S\S:\S\S)") acl_rules = list() - for rule in rules.split(", "): + for rule in rules.split(u", "): acl_rule = dict() - acl_rule["is_permit"] = 1 if "permit" in rule else 0 - acl_rule["is_ipv6"] = 1 if "ipv6" in rule else 0 + acl_rule[u"is_permit"] = 1 if u"permit" in rule else 0 + acl_rule[u"is_ipv6"] = 1 if u"ipv6" in rule else 0 groups = re.search(reg_ex_mac, rule) if groups: - mac = groups.group(1).split(' ')[1].replace(':', '') - acl_rule["src_mac"] = binascii.unhexlify(unicode(mac)) + mac = groups.group(1).split(u" ")[1].replace(u":", u"") + acl_rule[u"src_mac"] = bytes.fromhex(mac) groups = re.search(reg_ex_mask, rule) if groups: - mask = groups.group(1).split(' ')[1].replace(':', '') - acl_rule["src_mac_mask"] = binascii.unhexlify(unicode(mask)) + mask = groups.group(1).split(u" ")[1].replace(u":", u"") + acl_rule[u"src_mac_mask"] = bytes.fromhex(mask) groups = re.search(reg_ex_ip, rule) if groups: - grp = groups.group(1).split(' ')[1].split('/') - acl_rule["src_ip_addr"] = ip_address(unicode(grp[0])).packed - acl_rule["src_ip_prefix_len"] = int(grp[1]) + grp = groups.group(1).split(u" ")[1].split(u"/") + acl_rule[u"src_ip_addr"] = ip_address((grp[0])).packed + acl_rule[u"src_ip_prefix_len"] = int(grp[1]) acl_rules.append(acl_rule) @@ -796,7 +714,7 @@ class Classify(object): :param node: VPP node. :type node: dict """ - PapiSocketExecutor.dump_and_log(node, ["macip_acl_dump", ]) + PapiSocketExecutor.dump_and_log(node, [u"macip_acl_dump", ]) @staticmethod def add_del_macip_acl_interface(node, interface, action, acl_idx): @@ -812,16 +730,15 @@ class Classify(object): :type acl_idx: str or int :raises RuntimeError: If unable to set MACIP ACL for the interface. """ - if isinstance(interface, basestring): + if isinstance(interface, str): sw_if_index = Topology.get_interface_sw_index(node, interface) else: sw_if_index = interface - is_add = 1 if action == "add" else 0 + is_add = 1 if action == u"add" else 0 - cmd = 'macip_acl_interface_add_del' - err_msg = "Failed to get 'macip_acl_interface' on host {host}".format( - host=node['host']) + cmd = u"macip_acl_interface_add_del" + err_msg = f"Failed to get 'macip_acl_interface' on host {node[u'host']}" args = dict( is_add=is_add, sw_if_index=int(sw_if_index), @@ -837,9 +754,8 @@ class Classify(object): :param node: VPP node. :type node: dict """ - cmd = 'macip_acl_interface_get' - err_msg = "Failed to get 'macip_acl_interface' on host {host}".format( - host=node['host']) + cmd = u"macip_acl_interface_get" + err_msg = f"Failed to get 'macip_acl_interface' on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply(err_msg) logger.info(reply) diff --git a/resources/libraries/python/Constants.py b/resources/libraries/python/Constants.py index 3fa9ae28dc..eee9177a18 100644 --- a/resources/libraries/python/Constants.py +++ b/resources/libraries/python/Constants.py @@ -42,7 +42,7 @@ def get_str_from_env(env_var_names, default_value): :returns: The value read, or default value. 
:rtype: str """ - prefixes = ("FDIO_CSIT_", "CSIT_", "") + prefixes = (u"FDIO_CSIT_", u"CSIT_", u"") if not isinstance(env_var_names, (list, tuple)): env_var_names = [env_var_names] for name in env_var_names: @@ -65,7 +65,7 @@ def get_int_from_env(env_var_names, default_value): :returns: The value read, or default value. :rtype: int """ - env_str = get_str_from_env(env_var_names, "") + env_str = get_str_from_env(env_var_names, u"") try: return int(env_str) except ValueError: @@ -84,7 +84,7 @@ def get_float_from_env(env_var_names, default_value): :returns: The value read, or default value. :rtype: float """ - env_str = get_str_from_env(env_var_names, "") + env_str = get_str_from_env(env_var_names, u"") try: return float(env_str) except ValueError: @@ -101,8 +101,8 @@ def get_pessimistic_bool_from_env(env_var_names): :returns: The value read, or False. :rtype: bool """ - env_str = get_str_from_env(env_var_names, "").lower() - return True if env_str in ("true", "yes", "y", "1") else False + env_str = get_str_from_env(env_var_names, u"").lower() + return bool(env_str in (u"true", u"yes", u"y", u"1")) def get_optimistic_bool_from_env(env_var_names): @@ -115,11 +115,11 @@ def get_optimistic_bool_from_env(env_var_names): :returns: The value read, or True. :rtype: bool """ - env_str = get_str_from_env(env_var_names, "").lower() - return False if env_str in ("false", "no", "n", "0") else True + env_str = get_str_from_env(env_var_names, u"").lower() + return bool(env_str not in (u"false", u"no", u"n", u"0")) -class Constants(object): +class Constants: """Constants used in CSIT. TODO: Yaml files are easier for humans to edit. @@ -128,34 +128,34 @@ class Constants(object): """ # OpenVPP testing directory location at topology nodes - REMOTE_FW_DIR = '/tmp/openvpp-testing' + REMOTE_FW_DIR = u"/tmp/openvpp-testing" # shell scripts location - RESOURCES_LIB_SH = 'resources/libraries/bash' + RESOURCES_LIB_SH = u"resources/libraries/bash" # Python API provider location - RESOURCES_PAPI_PROVIDER = 'resources/tools/papi/vpp_papi_provider.py' + RESOURCES_PAPI_PROVIDER = u"resources/tools/papi/vpp_papi_provider.py" # vat templates location - RESOURCES_TPL_VAT = 'resources/templates/vat' + RESOURCES_TPL_VAT = u"resources/templates/vat" # Kubernetes templates location - RESOURCES_TPL_K8S = 'resources/templates/kubernetes' + RESOURCES_TPL_K8S = u"resources/templates/kubernetes" # KernelVM templates location - RESOURCES_TPL_VM = 'resources/templates/vm' + RESOURCES_TPL_VM = u"resources/templates/vm" # Container templates location - RESOURCES_TPL_CONTAINER = 'resources/templates/container' + RESOURCES_TPL_CONTAINER = u"resources/templates/container" # HTTP Server www root directory - RESOURCES_TP_WRK_WWW = 'resources/traffic_profiles/wrk/www' + RESOURCES_TP_WRK_WWW = u"resources/traffic_profiles/wrk/www" # OpenVPP VAT binary name - VAT_BIN_NAME = 'vpp_api_test' + VAT_BIN_NAME = u"vpp_api_test" # VPP service unit name - VPP_UNIT = 'vpp' + VPP_UNIT = u"vpp" # Number of system CPU cores. 
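Two details of the helpers above, shown standalone: the prefix order means FDIO_CSIT_X overrides CSIT_X overrides X, and the bool(...) rewrites keep the original truth tables. A sketch; the lookup via os.environ is assumed from context:

    import os

    prefixes = (u"FDIO_CSIT_", u"CSIT_", u"")
    os.environ.pop(u"FDIO_CSIT_DEMO", None)   # make the demo deterministic
    os.environ[u"CSIT_DEMO"] = u"from-CSIT"
    # First hit in prefix order wins, as in get_str_from_env().
    value = next(
        v for v in (os.environ.get(p + u"DEMO") for p in prefixes)
        if v is not None
    )
    assert value == u"from-CSIT"

    def optimistic(env_str):
        """Same truth table as get_optimistic_bool_from_env, sans lookup."""
        return bool(env_str.lower() not in (u"false", u"no", u"n", u"0"))

    assert optimistic(u"") and optimistic(u"yes") and not optimistic(u"No")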
CPU_CNT_SYSTEM = 1 @@ -164,120 +164,122 @@ class Constants(object): CPU_CNT_MAIN = 1 # QEMU binary path - QEMU_BIN_PATH = '/usr/bin' + QEMU_BIN_PATH = u"/usr/bin" # QEMU VM kernel image path - QEMU_VM_KERNEL = '/opt/boot/vmlinuz' + QEMU_VM_KERNEL = u"/opt/boot/vmlinuz" # QEMU VM kernel initrd path - QEMU_VM_KERNEL_INITRD = '/opt/boot/initrd.img' + QEMU_VM_KERNEL_INITRD = u"/opt/boot/initrd.img" # QEMU VM nested image path - QEMU_VM_IMAGE = '/var/lib/vm/vhost-nested.img' + QEMU_VM_IMAGE = u"/var/lib/vm/vhost-nested.img" # QEMU VM DPDK path - QEMU_VM_DPDK = '/opt/dpdk-19.02' + QEMU_VM_DPDK = u"/opt/dpdk-19.02" # Docker container SUT image - DOCKER_SUT_IMAGE_UBUNTU = 'snergster/csit-sut:latest' + DOCKER_SUT_IMAGE_UBUNTU = u"snergster/csit-sut:latest" # Docker container arm SUT image - DOCKER_SUT_IMAGE_UBUNTU_ARM = 'snergster/csit-arm-sut:latest' + DOCKER_SUT_IMAGE_UBUNTU_ARM = u"snergster/csit-arm-sut:latest" # TRex install directory - TREX_INSTALL_DIR = '/opt/trex-core-2.61' + TREX_INSTALL_DIR = u"/opt/trex-core-2.61" # Sysctl kernel.core_pattern - KERNEL_CORE_PATTERN = '/tmp/%p-%u-%g-%s-%t-%h-%e.core' + KERNEL_CORE_PATTERN = u"/tmp/%p-%u-%g-%s-%t-%h-%e.core" # Core dump directory - CORE_DUMP_DIR = '/tmp' + CORE_DUMP_DIR = u"/tmp" # Equivalent to ~0 used in vpp code BITWISE_NON_ZERO = 0xffffffff # Default path to VPP API socket. - SOCKSVR_PATH = "/run/vpp/api.sock" + SOCKSVR_PATH = u"/run/vpp/api.sock" # Number of trials to execute in MRR test. - PERF_TRIAL_MULTIPLICITY = get_int_from_env("PERF_TRIAL_MULTIPLICITY", 10) + PERF_TRIAL_MULTIPLICITY = get_int_from_env(u"PERF_TRIAL_MULTIPLICITY", 10) # Duration of one trial in MRR test. - PERF_TRIAL_DURATION = get_float_from_env("PERF_TRIAL_DURATION", 1.0) + PERF_TRIAL_DURATION = get_float_from_env(u"PERF_TRIAL_DURATION", 1.0) # UUID string of DUT1 /tmp volume created outside of the # DUT1 docker in case of vpp-device test. ${EMPTY} value means that # /tmp directory is inside the DUT1 docker. - DUT1_UUID = get_str_from_env("DUT1_UUID", "") + DUT1_UUID = get_str_from_env(u"DUT1_UUID", u"") # Default path to VPP API Stats socket. - SOCKSTAT_PATH = "/run/vpp/stats.sock" + SOCKSTAT_PATH = u"/run/vpp/stats.sock" # Global "kill switch" for CRC checking during runtime. - FAIL_ON_CRC_MISMATCH = get_pessimistic_bool_from_env("FAIL_ON_CRC_MISMATCH") + FAIL_ON_CRC_MISMATCH = get_pessimistic_bool_from_env( + u"FAIL_ON_CRC_MISMATCH" + ) # Mapping from NIC name to its bps limit. NIC_NAME_TO_BPS_LIMIT = { - "Cisco-VIC-1227": 10000000000, - "Cisco-VIC-1385": 24500000000, - "Intel-X520-DA2": 10000000000, - "Intel-X553": 10000000000, - "Intel-X710": 10000000000, - "Intel-XL710": 24500000000, - "Intel-XXV710": 24500000000, - "Mellanox-CX556A": 100000000000, - "virtual": 100000000, + u"Cisco-VIC-1227": 10000000000, + u"Cisco-VIC-1385": 24500000000, + u"Intel-X520-DA2": 10000000000, + u"Intel-X553": 10000000000, + u"Intel-X710": 10000000000, + u"Intel-XL710": 24500000000, + u"Intel-XXV710": 24500000000, + u"Mellanox-CX556A": 100000000000, + u"virtual": 100000000, } # Mapping from NIC name to its pps limit. 
NIC_NAME_TO_PPS_LIMIT = { - "Cisco-VIC-1227": 14880952, - "Cisco-VIC-1385": 18750000, - "Intel-X520-DA2": 14880952, - "Intel-X553": 14880952, - "Intel-X710": 14880952, - "Intel-XL710": 18750000, - "Intel-XXV710": 18750000, - "Mellanox-CX556A": 60000000, #148809523, - "virtual": 14880952, + u"Cisco-VIC-1227": 14880952, + u"Cisco-VIC-1385": 18750000, + u"Intel-X520-DA2": 14880952, + u"Intel-X553": 14880952, + u"Intel-X710": 14880952, + u"Intel-XL710": 18750000, + u"Intel-XXV710": 18750000, + u"Mellanox-CX556A": 60000000, # 148809523, + u"virtual": 14880952, } # Suite file names use codes for NICs. NIC_NAME_TO_CODE = { - "Cisco-VIC-1227": "10ge2p1vic1227", - "Cisco-VIC-1385": "40ge2p1vic1385", - "Intel-X520-DA2": "10ge2p1x520", - "Intel-X553": "10ge2p1x553", - "Intel-X710": "10ge2p1x710", - "Intel-XL710": "40ge2p1xl710", - "Intel-XXV710": "25ge2p1xxv710", - "Mellanox-CX556A": "100ge2p1cx556a", + u"Cisco-VIC-1227": u"10ge2p1vic1227", + u"Cisco-VIC-1385": u"40ge2p1vic1385", + u"Intel-X520-DA2": u"10ge2p1x520", + u"Intel-X553": u"10ge2p1x553", + u"Intel-X710": u"10ge2p1x710", + u"Intel-XL710": u"40ge2p1xl710", + u"Intel-XXV710": u"25ge2p1xxv710", + u"Mellanox-CX556A": u"100ge2p1cx556a", } # TODO CSIT-1481: Crypto HW should be read from topology file instead. NIC_NAME_TO_CRYPTO_HW = { - "Intel-X553": "HW_C3xxx", - "Intel-X710": "HW_DH895xcc", - "Intel-XL710": "HW_DH895xcc", + u"Intel-X553": u"HW_C3xxx", + u"Intel-X710": u"HW_DH895xcc", + u"Intel-XL710": u"HW_DH895xcc", } PERF_TYPE_TO_KEYWORD = { - "mrr": "Traffic should pass with maximum rate", - "ndrpdr": "Find NDR and PDR intervals using optimized search", - "soak": "Find critical load using PLRsearch", + u"mrr": u"Traffic should pass with maximum rate", + u"ndrpdr": u"Find NDR and PDR intervals using optimized search", + u"soak": u"Find critical load using PLRsearch", } PERF_TYPE_TO_SUITE_DOC_VER = { - "mrr" : '''fication:* In MaxReceivedRate tests TG sends traffic\\ + u"mrr": u'''fication:* In MaxReceivedRate tests TG sends traffic\\ | ... | at line rate and reports total received packets over trial period.\\''', # TODO: Figure out how to include the full "*[Ver] TG verification:*" # while keeping this readable and without breaking line length limit. - "ndrpdr": '''fication:* TG finds and reports throughput NDR (Non Drop\\ + u"ndrpdr": u'''ication:* TG finds and reports throughput NDR (Non Drop\\ | ... | Rate) with zero packet loss tolerance and throughput PDR (Partial Drop\\ | ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage\\ | ... | of packets transmitted. NDR and PDR are discovered for different\\ | ... | Ethernet L2 frame sizes using MLRsearch library.\\''', - "soak": '''fication:* TG sends traffic at dynamically computed\\ + u"soak": u'''fication:* TG sends traffic at dynamically computed\\ | ... | rate as PLRsearch algorithm gathers data and improves its estimate\\ | ... | of a rate at which a prescribed small fraction of packets\\ | ... | would be lost. After set time, the search stops\\ @@ -285,9 +287,8 @@ class Constants(object): } PERF_TYPE_TO_TEMPLATE_DOC_VER = { - "mrr": '''Measure MaxReceivedRate for ${frame_size}B frames\\ + u"mrr": u'''Measure MaxReceivedRate for ${frame_size}B frames\\ | | ... 
| using burst trials throughput test.\\''', - "ndrpdr": '''Measure NDR and PDR values using MLRsearch algorithm.\\''', - "soak": '''Estimate critical rate using PLRsearch algorithm.\\''', + u"ndrpdr": u"Measure NDR and PDR values using MLRsearch algorithm.\\", + u"soak": u"Estimate critical rate using PLRsearch algorithm.\\", } - diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py index b552e4d9e6..fc61eea3bd 100644 --- a/resources/libraries/python/ContainerUtils.py +++ b/resources/libraries/python/ContainerUtils.py @@ -16,21 +16,24 @@ """Library to manipulate Containers.""" -from string import Template from collections import OrderedDict, Counter +from io import open +from string import Template -from resources.libraries.python.ssh import SSH from resources.libraries.python.Constants import Constants +from resources.libraries.python.ssh import SSH from resources.libraries.python.topology import Topology, SocketType from resources.libraries.python.VppConfigGenerator import VppConfigGenerator -__all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"] +__all__ = [ + u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container" +] -SUPERVISOR_CONF = '/etc/supervisor/supervisord.conf' +SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf" -class ContainerManager(object): +class ContainerManager: """Container lifecycle management class.""" def __init__(self, engine): @@ -43,8 +46,7 @@ class ContainerManager(object): try: self.engine = globals()[engine]() except KeyError: - raise NotImplementedError('{engine} is not implemented.'. - format(engine=engine)) + raise NotImplementedError(f"{engine} is not implemented.") self.containers = OrderedDict() def get_container_by_name(self, name): @@ -59,8 +61,7 @@ class ContainerManager(object): try: return self.containers[name] except KeyError: - raise RuntimeError('Failed to get container with name: {name}'. - format(name=name)) + raise RuntimeError(f"Failed to get container with name: {name}") def construct_container(self, **kwargs): """Construct container object on node with specified parameters. @@ -75,11 +76,13 @@ class ContainerManager(object): setattr(self.engine.container, key, kwargs[key]) # Set additional environmental variables - setattr(self.engine.container, 'env', - 'MICROSERVICE_LABEL={label}'.format(label=kwargs['name'])) + setattr( + self.engine.container, u"env", + f"MICROSERVICE_LABEL={kwargs[u'name']}" + ) # Store container instance - self.containers[kwargs['name']] = self.engine.container + self.containers[kwargs[u"name"]] = self.engine.container def construct_containers(self, **kwargs): """Construct 1..N container(s) on node with specified name. @@ -90,10 +93,10 @@ class ContainerManager(object): :param kwargs: Named parameters. 
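Note the division change in the hunk below: under Python 3, / is true division, so the per-DUT container count feeding the memif ID math must be floored with //. In isolation, with a hypothetical host list:

    from collections import Counter

    hosts = [u"10.0.0.1", u"10.0.0.1", u"10.0.0.2", u"10.0.0.2"]
    dut_cnt = len(Counter(hosts))       # 2 distinct DUTs
    mod = len(hosts) // dut_cnt         # 2 containers per DUT, an int
    assert mod == 2 and isinstance(mod, int)
    assert len(hosts) / dut_cnt == 2.0  # Py3 `/` would yield a float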
:param kwargs: dict """ - name = kwargs['name'] - for i in range(kwargs['count']): + name = kwargs[u"name"] + for i in range(kwargs[u"count"]): # Name will contain ordinal suffix - kwargs['name'] = ''.join([name, str(i+1)]) + kwargs[u"name"] = u"".join([name, str(i+1)]) # Create container self.construct_container(i=i, **kwargs) @@ -163,9 +166,15 @@ class ContainerManager(object): :param kwargs: dict """ # Count number of DUTs based on node's host information - dut_cnt = len(Counter([self.containers[container].node['host'] - for container in self.containers])) - mod = len(self.containers)/dut_cnt + dut_cnt = len( + Counter( + [ + self.containers[container].node[u"host"] + for container in self.containers + ] + ) + ) + mod = len(self.containers) // dut_cnt for i, container in enumerate(self.containers): mid1 = i % mod + 1 @@ -173,36 +182,37 @@ class ContainerManager(object): sid1 = i % mod * 2 + 1 sid2 = i % mod * 2 + 2 self.engine.container = self.containers[container] - guest_dir = self.engine.container.mnt[0].split(':')[1] - - if chain_topology == 'chain': - self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2, - sid1=sid1, sid2=sid2, - guest_dir=guest_dir, - **kwargs) - elif chain_topology == 'cross_horiz': - self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2, - sid1=sid1, sid2=sid2, - guest_dir=guest_dir, - **kwargs) - elif chain_topology == 'chain_functional': - self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2, - sid1=sid1, sid2=sid2, - guest_dir=guest_dir, - **kwargs) - elif chain_topology == 'chain_ip4': - self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2, - sid1=sid1, sid2=sid2, - guest_dir=guest_dir, - **kwargs) - elif chain_topology == 'pipeline_ip4': - self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2, - sid1=sid1, sid2=sid2, - guest_dir=guest_dir, - **kwargs) + guest_dir = self.engine.container.mnt[0].split(u":")[1] + + if chain_topology == u"chain": + self._configure_vpp_chain_l2xc( + mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, + guest_dir=guest_dir, **kwargs + ) + elif chain_topology == u"cross_horiz": + self._configure_vpp_cross_horiz( + mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, + guest_dir=guest_dir, **kwargs + ) + elif chain_topology == u"chain_functional": + self._configure_vpp_chain_functional( + mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, + guest_dir=guest_dir, **kwargs + ) + elif chain_topology == u"chain_ip4": + self._configure_vpp_chain_ip4( + mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, + guest_dir=guest_dir, **kwargs + ) + elif chain_topology == u"pipeline_ip4": + self._configure_vpp_pipeline_ip4( + mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, + guest_dir=guest_dir, **kwargs + ) else: - raise RuntimeError('Container topology {name} not implemented'. - format(name=chain_topology)) + raise RuntimeError( + f"Container topology {chain_topology} not implemented" + ) def _configure_vpp_chain_l2xc(self, **kwargs): """Configure VPP in chain topology with l2xc. @@ -212,13 +222,14 @@ class ContainerManager(object): """ self.engine.create_vpp_startup_config() self.engine.create_vpp_exec_config( - 'memif_create_chain_l2xc.exec', - mid1=kwargs['mid1'], mid2=kwargs['mid2'], - sid1=kwargs['sid1'], sid2=kwargs['sid2'], - socket1='{guest_dir}/memif-{c.name}-{sid1}'. - format(c=self.engine.container, **kwargs), - socket2='{guest_dir}/memif-{c.name}-{sid2}'. 
- format(c=self.engine.container, **kwargs)) + u"memif_create_chain_l2xc.exec", + mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"], + sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"], + socket1=f"{kwargs[u'guest_dir']}/memif-" + f"{self.engine.container.name}-{kwargs[u'sid1']}", + socket2=f"{kwargs[u'guest_dir']}/memif-" + f"{self.engine.container.name}-{kwargs[u'sid2']}" + ) def _configure_vpp_cross_horiz(self, **kwargs): """Configure VPP in cross horizontal topology (single memif). @@ -226,22 +237,23 @@ class ContainerManager(object): :param kwargs: Named parameters. :param kwargs: dict """ - if 'DUT1' in self.engine.container.name: + if u"DUT1" in self.engine.container.name: if_pci = Topology.get_interface_pci_addr( - self.engine.container.node, kwargs['dut1_if']) + self.engine.container.node, kwargs[u"dut1_if"]) if_name = Topology.get_interface_name( - self.engine.container.node, kwargs['dut1_if']) - if 'DUT2' in self.engine.container.name: + self.engine.container.node, kwargs[u"dut1_if"]) + if u"DUT2" in self.engine.container.name: if_pci = Topology.get_interface_pci_addr( - self.engine.container.node, kwargs['dut2_if']) + self.engine.container.node, kwargs[u"dut2_if"]) if_name = Topology.get_interface_name( - self.engine.container.node, kwargs['dut2_if']) + self.engine.container.node, kwargs[u"dut2_if"]) self.engine.create_vpp_startup_config_dpdk_dev(if_pci) self.engine.create_vpp_exec_config( - 'memif_create_cross_horizon.exec', - mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name, - socket1='{guest_dir}/memif-{c.name}-{sid1}'. - format(c=self.engine.container, **kwargs)) + u"memif_create_cross_horizon.exec", + mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name, + socket1=f"{kwargs[u'guest_dir']}/memif-" + f"{self.engine.container.name}-{kwargs[u'sid1']}" + ) def _configure_vpp_chain_functional(self, **kwargs): """Configure VPP in chain topology with l2xc (functional). @@ -251,14 +263,15 @@ class ContainerManager(object): """ self.engine.create_vpp_startup_config_func_dev() self.engine.create_vpp_exec_config( - 'memif_create_chain_functional.exec', - mid1=kwargs['mid1'], mid2=kwargs['mid2'], - sid1=kwargs['sid1'], sid2=kwargs['sid2'], - socket1='{guest_dir}/memif-{c.name}-{sid1}'. - format(c=self.engine.container, **kwargs), - socket2='{guest_dir}/memif-{c.name}-{sid2}'. - format(c=self.engine.container, **kwargs), - rx_mode='interrupt') + u"memif_create_chain_functional.exec", + mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"], + sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"], + socket1=f"{kwargs[u'guest_dir']}/memif-" + f"{self.engine.container.name}-{kwargs[u'sid1']}", + socket2=f"{kwargs[u'guest_dir']}/memif-" + f"{self.engine.container.name}-{kwargs[u'sid2']}", + rx_mode=u"interrupt" + ) def _configure_vpp_chain_ip4(self, **kwargs): """Configure VPP in chain topology with ip4. 
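The vif MAC strings built in the chain-ip4 code just below use f-string hex formatting; {mid:02X} zero-pads to two uppercase hex digits, matching the old '{0:02X}'.format(mid) spelling:

    mid1, mid2 = 1, 10
    mac1 = f"52:54:00:00:{mid1:02X}:01"
    mac2 = f"52:54:00:00:{mid2:02X}:02"
    assert (mac1, mac2) == (u"52:54:00:00:01:01", u"52:54:00:00:0A:02")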
@@ -268,23 +281,24 @@ class ContainerManager(object): """ self.engine.create_vpp_startup_config() - vif1_mac = kwargs['tg_if1_mac'] \ - if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \ - else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1) - vif2_mac = kwargs['tg_if2_mac'] \ - if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \ - else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1) + vif1_mac = kwargs[u"tg_if1_mac"] \ + if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \ + else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02" + vif2_mac = kwargs[u"tg_if2_mac"] \ + if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\ + else f"52:54:00:00:{(kwargs['mid2'] + 1):02X}:01" self.engine.create_vpp_exec_config( - 'memif_create_chain_ip4.exec', - mid1=kwargs['mid1'], mid2=kwargs['mid2'], - sid1=kwargs['sid1'], sid2=kwargs['sid2'], - socket1='{guest_dir}/memif-{c.name}-{sid1}'. - format(c=self.engine.container, **kwargs), - socket2='{guest_dir}/memif-{c.name}-{sid2}'. - format(c=self.engine.container, **kwargs), - mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']), - mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']), - vif1_mac=vif1_mac, vif2_mac=vif2_mac) + u"memif_create_chain_ip4.exec", + mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"], + sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"], + socket1=f"{kwargs[u'guest_dir']}/memif-" + f"{self.engine.container.name}-{kwargs[u'sid1']}", + socket2=f"{kwargs[u'guest_dir']}/memif-" + f"{self.engine.container.name}-{kwargs[u'sid2']}", + mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01", + mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02", + vif1_mac=vif1_mac, vif2_mac=vif2_mac + ) def _configure_vpp_pipeline_ip4(self, **kwargs): """Configure VPP in pipeline topology with ip4. @@ -293,40 +307,36 @@ class ContainerManager(object): :param kwargs: dict """ self.engine.create_vpp_startup_config() - node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 - mid1 = kwargs['mid1'] - mid2 = kwargs['mid2'] - role1 = 'master' - role2 = 'master' \ - if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\ - else 'slave' - kwargs['mid2'] = kwargs['mid2'] \ - if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\ - else kwargs['mid2'] + 1 - vif1_mac = kwargs['tg_if1_mac'] \ - if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \ - else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1) - vif2_mac = kwargs['tg_if2_mac'] \ - if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \ - else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1) - socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\ - format(c=self.engine.container, **kwargs) \ - if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\ - format(c=self.engine.container, **kwargs) - socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\ - format(c=self.engine.container, **kwargs) \ - if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \ - else '{guest_dir}/memif-pipe-{mid2}'.\ - format(c=self.engine.container, **kwargs) + node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 + mid1 = kwargs[u"mid1"] + mid2 = kwargs[u"mid2"] + role1 = u"master" + role2 = u"master" if node in (kwargs[u"nodes"], 1) else u"slave" + kwargs[u"mid2"] = kwargs[u"mid2"] if node in (kwargs[u"nodes"], 1) \ + else kwargs[u"mid2"] + 1 + vif1_mac = kwargs[u"tg_if1_mac"] \ + if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \ + else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02" + vif2_mac = kwargs[u"tg_if2_mac"] \ + if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\ + else 
f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01" + socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\ + f"{kwargs[u'sid1']}" if node == 1 \ + else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}" + socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\ + f"{kwargs[u'sid2']}" \ + if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \ + else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}" self.engine.create_vpp_exec_config( - 'memif_create_pipeline_ip4.exec', - mid1=kwargs['mid1'], mid2=kwargs['mid2'], - sid1=kwargs['sid1'], sid2=kwargs['sid2'], + u"memif_create_pipeline_ip4.exec", + mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"], + sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"], socket1=socket1, socket2=socket2, role1=role1, role2=role2, - mac1='52:54:00:00:{0:02X}:01'.format(mid1), - mac2='52:54:00:00:{0:02X}:02'.format(mid2), - vif1_mac=vif1_mac, vif2_mac=vif2_mac) + mac1=f"52:54:00:00:{mid1:02X}:01", + mac2=f"52:54:00:00:{mid2:02X}:02", + vif1_mac=vif1_mac, vif2_mac=vif2_mac + ) def stop_all_containers(self): """Stop all containers.""" @@ -341,7 +351,7 @@ class ContainerManager(object): self.engine.destroy() -class ContainerEngine(object): +class ContainerEngine: """Abstract class for container engine.""" def __init__(self): @@ -395,61 +405,67 @@ class ContainerEngine(object): def install_supervisor(self): """Install supervisord inside a container.""" if isinstance(self, LXC): - self.execute('sleep 3; apt-get update') - self.execute('apt-get install -y supervisor') - self.execute('echo "{config}" > {config_file} && ' - 'supervisord -c {config_file}'. - format( - config='[unix_http_server]\n' - 'file = /tmp/supervisor.sock\n\n' - '[rpcinterface:supervisor]\n' - 'supervisor.rpcinterface_factory = supervisor.' - 'rpcinterface:make_main_rpcinterface\n\n' - '[supervisorctl]\n' - 'serverurl = unix:///tmp/supervisor.sock\n\n' - '[supervisord]\n' - 'pidfile = /tmp/supervisord.pid\n' - 'identifier = supervisor\n' - 'directory = /tmp\n' - 'logfile = /tmp/supervisord.log\n' - 'loglevel = debug\n' - 'nodaemon = false\n\n', - config_file=SUPERVISOR_CONF)) + self.execute(u"sleep 3; apt-get update") + self.execute(u"apt-get install -y supervisor") + config = \ + u"[unix_http_server]\n" \ + u"file = /tmp/supervisor.sock\n\n" \ + u"[rpcinterface:supervisor]\n" \ + u"supervisor.rpcinterface_factory = " \ + u"supervisor.rpcinterface:make_main_rpcinterface\n\n" \ + u"[supervisorctl]\n" \ + u"serverurl = unix:///tmp/supervisor.sock\n\n" \ + u"[supervisord]\n" \ + u"pidfile = /tmp/supervisord.pid\n" \ + u"identifier = supervisor\n" \ + u"directory = /tmp\n" \ + u"logfile = /tmp/supervisord.log\n" \ + u"loglevel = debug\n" \ + u"nodaemon = false\n\n" + self.execute( + f'echo "{config}" > {SUPERVISOR_CONF} && ' + f'supervisord -c {SUPERVISOR_CONF}' + ) def start_vpp(self): """Start VPP inside a container.""" - self.execute('echo "{config}" >> {config_file} && ' - 'supervisorctl reload'. 
- format( - config='[program:vpp]\n' - 'command = /usr/bin/vpp -c /etc/vpp/startup.conf\n' - 'autostart = false\n' - 'autorestart = false\n' - 'redirect_stderr = true\n' - 'priority = 1', - config_file=SUPERVISOR_CONF)) - self.execute('supervisorctl start vpp') + config = \ + u"[program:vpp]\n" \ + u"command = /usr/bin/vpp -c /etc/vpp/startup.conf\n" \ + u"autostart = false\n" \ + u"autorestart = false\n" \ + u"redirect_stderr = true\n" \ + u"priority = 1" + self.execute( + f'echo "{config}" >> {SUPERVISOR_CONF} && supervisorctl reload' + ) + self.execute(u"supervisorctl start vpp") + + # pylint: disable=import-outside-toplevel from robot.libraries.BuiltIn import BuiltIn topo_instance = BuiltIn().get_library_instance( - 'resources.libraries.python.topology.Topology') + u"resources.libraries.python.topology.Topology" + ) topo_instance.add_new_socket( self.container.node, SocketType.PAPI, self.container.name, - '{root}/tmp/vpp_sockets/{name}/api.sock'. - format(root=self.container.root, name=self.container.name)) + f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/" + f"api.sock" + ) topo_instance.add_new_socket( self.container.node, SocketType.STATS, self.container.name, - '{root}/tmp/vpp_sockets/{name}/stats.sock'. - format(root=self.container.root, name=self.container.name)) + f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/" + f"stats.sock" + ) def restart_vpp(self): """Restart VPP service inside a container.""" - self.execute('supervisorctl restart vpp') - self.execute('cat /tmp/supervisord.log') + self.execute(u"supervisorctl restart vpp") + self.execute(u"cat /tmp/supervisord.log") def create_base_vpp_startup_config(self): """Create base startup configuration of VPP on container. @@ -464,14 +480,14 @@ class ContainerEngine(object): vpp_config.set_node(self.container.node) vpp_config.add_unix_cli_listen() vpp_config.add_unix_nodaemon() - vpp_config.add_unix_exec('/tmp/running.exec') + vpp_config.add_unix_exec(u"/tmp/running.exec") vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH) - vpp_config.add_statseg_per_node_counters(value='on') + vpp_config.add_statseg_per_node_counters(value=u"on") # We will pop the first core from the list to be a main core vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0))) # If more cores in the list, the rest will be used as workers. if cpuset_cpus: - corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus) + corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus) vpp_config.add_cpu_corelist_workers(corelist_workers) return vpp_config @@ -480,12 +496,14 @@ class ContainerEngine(object): """Create startup configuration of VPP without DPDK on container. """ vpp_config = self.create_base_vpp_startup_config() - vpp_config.add_plugin('disable', 'dpdk_plugin.so') + vpp_config.add_plugin(u"disable", u"dpdk_plugin.so") # Apply configuration - self.execute('mkdir -p /etc/vpp/') - self.execute('echo "{config}" | tee /etc/vpp/startup.conf' - .format(config=vpp_config.get_config_str())) + self.execute(u"mkdir -p /etc/vpp/") + self.execute( + f'echo "{vpp_config.get_config_str()}" | ' + f'tee /etc/vpp/startup.conf' + ) def create_vpp_startup_config_dpdk_dev(self, *devices): """Create startup configuration of VPP with DPDK on container. 
@@ -496,15 +514,17 @@ class ContainerEngine(object): vpp_config = self.create_base_vpp_startup_config() vpp_config.add_dpdk_dev(*devices) vpp_config.add_dpdk_no_tx_checksum_offload() - vpp_config.add_dpdk_log_level('debug') - vpp_config.add_plugin('disable', 'default') - vpp_config.add_plugin('enable', 'dpdk_plugin.so') - vpp_config.add_plugin('enable', 'memif_plugin.so') + vpp_config.add_dpdk_log_level(u"debug") + vpp_config.add_plugin(u"disable", u"default") + vpp_config.add_plugin(u"enable", u"dpdk_plugin.so") + vpp_config.add_plugin(u"enable", u"memif_plugin.so") # Apply configuration - self.execute('mkdir -p /etc/vpp/') - self.execute('echo "{config}" | tee /etc/vpp/startup.conf' - .format(config=vpp_config.get_config_str())) + self.execute(u"mkdir -p /etc/vpp/") + self.execute( + f'echo "{vpp_config.get_config_str()}" | ' + f'tee /etc/vpp/startup.conf' + ) def create_vpp_startup_config_func_dev(self): """Create startup configuration of VPP on container for functional @@ -515,15 +535,17 @@ class ContainerEngine(object): vpp_config.set_node(self.container.node) vpp_config.add_unix_cli_listen() vpp_config.add_unix_nodaemon() - vpp_config.add_unix_exec('/tmp/running.exec') + vpp_config.add_unix_exec(u"/tmp/running.exec") vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH) - vpp_config.add_statseg_per_node_counters(value='on') - vpp_config.add_plugin('disable', 'dpdk_plugin.so') + vpp_config.add_statseg_per_node_counters(value=u"on") + vpp_config.add_plugin(u"disable", u"dpdk_plugin.so") # Apply configuration - self.execute('mkdir -p /etc/vpp/') - self.execute('echo "{config}" | tee /etc/vpp/startup.conf' - .format(config=vpp_config.get_config_str())) + self.execute(u"mkdir -p /etc/vpp/") + self.execute( + f'echo "{vpp_config.get_config_str()}" | ' + f'tee /etc/vpp/startup.conf' + ) def create_vpp_exec_config(self, template_file, **kwargs): """Create VPP exec configuration on container. @@ -533,15 +555,13 @@ class ContainerEngine(object): :type template_file: str :type kwargs: dict """ - running = '/tmp/running.exec' + running = u"/tmp/running.exec" - template = '{res}/{tpl}'.format( - res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file) + template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}" - with open(template, 'r') as src_file: + with open(template, "r") as src_file: src = Template(src_file.read()) - self.execute('echo "{out}" > {running}'.format( - out=src.safe_substitute(**kwargs), running=running)) + self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}') def is_container_running(self): """Check if container is running.""" @@ -566,29 +586,34 @@ class ContainerEngine(object): :raises RuntimeError: If applying cgroup settings via cgset failed. 
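The _configure_cgroup body that follows repeats one pattern five times: run a command via sudo, then raise on a non-zero return code. Factored out it would look like this (hypothetical helper, not in the patch; FakeSSH stands in for the container's SSH session):

    class FakeSSH:
        """Stand-in for resources.libraries.python.ssh.SSH (illustration)."""
        def exec_command_sudo(self, cmd, timeout=None):
            return 0, u"", u""

    def sudo_or_raise(ssh, cmd, msg):
        """Run `cmd` via sudo and raise RuntimeError(msg) on failure."""
        ret, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(msg)

    sudo_or_raise(
        FakeSSH(), u"cgset -r cpuset.cpu_exclusive=0 /",
        u"Failed to apply cgroup settings."
    )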
""" ret, _, _ = self.container.ssh.exec_command_sudo( - 'cgset -r cpuset.cpu_exclusive=0 /') + u"cgset -r cpuset.cpu_exclusive=0 /" + ) if int(ret) != 0: - raise RuntimeError('Failed to apply cgroup settings.') + raise RuntimeError(u"Failed to apply cgroup settings.") ret, _, _ = self.container.ssh.exec_command_sudo( - 'cgset -r cpuset.mem_exclusive=0 /') + u"cgset -r cpuset.mem_exclusive=0 /" + ) if int(ret) != 0: - raise RuntimeError('Failed to apply cgroup settings.') + raise RuntimeError(u"Failed to apply cgroup settings.") ret, _, _ = self.container.ssh.exec_command_sudo( - 'cgcreate -g cpuset:/{name}'.format(name=name)) + f"cgcreate -g cpuset:/{name}" + ) if int(ret) != 0: - raise RuntimeError('Failed to copy cgroup settings from root.') + raise RuntimeError(u"Failed to copy cgroup settings from root.") ret, _, _ = self.container.ssh.exec_command_sudo( - 'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name)) + f"cgset -r cpuset.cpu_exclusive=0 /{name}" + ) if int(ret) != 0: - raise RuntimeError('Failed to apply cgroup settings.') + raise RuntimeError(u"Failed to apply cgroup settings.") ret, _, _ = self.container.ssh.exec_command_sudo( - 'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name)) + f"cgset -r cpuset.mem_exclusive=0 /{name}" + ) if int(ret) != 0: - raise RuntimeError('Failed to apply cgroup settings.') + raise RuntimeError(u"Failed to apply cgroup settings.") class LXC(ContainerEngine): @@ -611,21 +636,21 @@ class LXC(ContainerEngine): else: return - target_arch = 'arm64' \ - if Topology.get_node_arch(self.container.node) == 'aarch64' \ - else 'amd64' + target_arch = u"arm64" \ + if Topology.get_node_arch(self.container.node) == u"aarch64" \ + else u"amd64" - image = self.container.image if self.container.image else\ - "-d ubuntu -r bionic -a {arch}".format(arch=target_arch) + image = self.container.image if self.container.image \ + else f"-d ubuntu -r bionic -a {target_arch}" - cmd = 'lxc-create -t download --name {c.name} -- {image} '\ - '--no-validate'.format(c=self.container, image=image) + cmd = f"lxc-create -t download --name {self.container.name} " \ + f"-- {image} --no-validate" ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800) if int(ret) != 0: - raise RuntimeError('Failed to create container.') + raise RuntimeError(u"Failed to create container.") - self._configure_cgroup('lxc') + self._configure_cgroup(u"lxc") def create(self): """Create/deploy an application inside a container on system. @@ -635,54 +660,61 @@ class LXC(ContainerEngine): if self.container.mnt: # LXC fix for tmpfs # https://github.com/lxc/lxc/issues/434 + mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults" ret, _, _ = self.container.ssh.exec_command_sudo( - "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'". - format(e="lxc.mount.entry = tmpfs run tmpfs defaults", - c=self.container)) + f"sh -c \"echo '{mnt_e}' >> " + f"/var/lib/lxc/{self.container.name}/config\"" + ) if int(ret) != 0: - raise RuntimeError('Failed to write {c.name} config.'. - format(c=self.container)) + raise RuntimeError( + f"Failed to write {self.container.name} config." 
+ ) for mount in self.container.mnt: - host_dir, guest_dir = mount.split(':') - options = 'bind,create=dir' \ - if guest_dir.endswith('/') else 'bind,create=file' - entry = 'lxc.mount.entry = {host_dir} {guest_dir} none ' \ - '{options} 0 0'.format( - host_dir=host_dir, guest_dir=guest_dir[1:], - options=options) + host_dir, guest_dir = mount.split(u":") + options = u"bind,create=dir" if guest_dir.endswith(u"/") \ + else u"bind,create=file" + entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \ + f"none {options} 0 0" self.container.ssh.exec_command_sudo( - "sh -c 'mkdir -p {host_dir}'".format(host_dir=host_dir)) + f"sh -c \"mkdir -p {host_dir}\"" + ) ret, _, _ = self.container.ssh.exec_command_sudo( - "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'". - format(e=entry, c=self.container)) + f"sh -c \"echo '{entry}' " + f">> /var/lib/lxc/{self.container.name}/config\"" + ) if int(ret) != 0: - raise RuntimeError('Failed to write {c.name} config.' - .format(c=self.container)) + raise RuntimeError( + f"Failed to write {self.container.name} config." + ) - cpuset_cpus = '{0}'.format( - ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\ - if self.container.cpuset_cpus else '' + cpuset_cpus = u",".join( + f"{cpu!s}" for cpu in self.container.cpuset_cpus) \ + if self.container.cpuset_cpus else u"" ret, _, _ = self.container.ssh.exec_command_sudo( - 'lxc-start --name {c.name} --daemon'.format(c=self.container)) + f"lxc-start --name {self.container.name} --daemon" + ) if int(ret) != 0: - raise RuntimeError('Failed to start container {c.name}.'. - format(c=self.container)) - self._lxc_wait('RUNNING') + raise RuntimeError( + f"Failed to start container {self.container.name}." + ) + self._lxc_wait(u"RUNNING") # Workaround for LXC to be able to allocate all cpus including isolated. ret, _, _ = self.container.ssh.exec_command_sudo( - 'cgset --copy-from / lxc/') + u"cgset --copy-from / lxc/" + ) if int(ret) != 0: - raise RuntimeError('Failed to copy cgroup to LXC') + raise RuntimeError(u"Failed to copy cgroup to LXC") ret, _, _ = self.container.ssh.exec_command_sudo( - 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'. - format(c=self.container, cpus=cpuset_cpus)) + f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}" + ) if int(ret) != 0: - raise RuntimeError('Failed to set cpuset.cpus to container ' - '{c.name}.'.format(c=self.container)) + raise RuntimeError( + f"Failed to set cpuset.cpus to container {self.container.name}." + ) def execute(self, command): """Start a process inside a running container. @@ -694,65 +726,69 @@ class LXC(ContainerEngine): :type command: str :raises RuntimeError: If running the command failed. """ - env = '--keep-env {0}'.format( - ' '.join('--set-var %s' % env for env in self.container.env))\ - if self.container.env else '' + env = u"--keep-env " + u" ".join( + f"--set-var {env!s}" for env in self.container.env) \ + if self.container.env else u"" - cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\ - "exit $?'".format(env=env, c=self.container, command=command) + cmd = f"lxc-attach {env} --name {self.container.name} " \ + f"-- /bin/sh -c '{command}; exit $?'" ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180) if int(ret) != 0: - raise RuntimeError('Failed to run command inside container ' - '{c.name}.'.format(c=self.container)) + raise RuntimeError( + f"Failed to run command inside container {self.container.name}." + ) def stop(self): """Stop a container. 
:raises RuntimeError: If stopping the container failed. """ - cmd = 'lxc-stop --name {c.name}'.format(c=self.container) + cmd = f"lxc-stop --name {self.container.name}" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to stop container {c.name}.' - .format(c=self.container)) - self._lxc_wait('STOPPED|FROZEN') + raise RuntimeError( + f"Failed to stop container {self.container.name}." + ) + self._lxc_wait(u"STOPPED|FROZEN") def destroy(self): """Destroy a container. :raises RuntimeError: If destroying container failed. """ - cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container) + cmd = f"lxc-destroy --force --name {self.container.name}" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to destroy container {c.name}.' - .format(c=self.container)) + raise RuntimeError( + f"Failed to destroy container {self.container.name}." + ) def info(self): """Query and shows information about a container. :raises RuntimeError: If getting info about a container failed. """ - cmd = 'lxc-info --name {c.name}'.format(c=self.container) + cmd = f"lxc-info --name {self.container.name}" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to get info about container {c.name}.' - .format(c=self.container)) + raise RuntimeError( + f"Failed to get info about container {self.container.name}." + ) def system_info(self): """Check the current kernel for LXC support. :raises RuntimeError: If checking LXC support failed. """ - cmd = 'lxc-checkconfig' + cmd = u"lxc-checkconfig" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to check LXC support.') + raise RuntimeError(u"Failed to check LXC support.") def is_container_running(self): """Check if container is running on node. @@ -761,14 +797,14 @@ class LXC(ContainerEngine): :rtype: bool :raises RuntimeError: If getting info about a container failed. """ - cmd = 'lxc-info --no-humanize --state --name {c.name}'\ - .format(c=self.container) + cmd = f"lxc-info --no-humanize --state --name {self.container.name}" ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to get info about container {c.name}.' - .format(c=self.container)) - return True if 'RUNNING' in stdout else False + raise RuntimeError( + f"Failed to get info about container {self.container.name}." + ) + return u"RUNNING" in stdout def is_container_present(self): """Check if container is existing on node. @@ -777,10 +813,10 @@ class LXC(ContainerEngine): :rtype: bool :raises RuntimeError: If getting info about a container failed. """ - cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container) + cmd = f"lxc-info --no-humanize --name {self.container.name}" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) - return False if int(ret) else True + return not ret def _lxc_wait(self, state): """Wait for a specific container state. @@ -789,13 +825,14 @@ class LXC(ContainerEngine): :type state: str :raises RuntimeError: If waiting for state of a container failed. 
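`stop()` deliberately pairs `lxc-stop` with `_lxc_wait` instead of trusting the stop command's exit status, and `lxc-wait` accepts an ORed state list, which is why the literal is `STOPPED|FROZEN`. A standalone sketch of the same sequence, assuming a host with the LXC tools and sufficient privileges; `subprocess` stands in for the SSH layer and the helper name is hypothetical:

```python
import subprocess

def lxc_stop_and_wait(name, states="STOPPED|FROZEN"):
    """Stop a container, then block until it reaches a terminal state."""
    subprocess.run(["lxc-stop", "--name", name], check=True)
    # lxc-wait exits 0 as soon as the container is in ANY of the listed
    # states, so either STOPPED or FROZEN unblocks the wait.
    subprocess.run(["lxc-wait", "--name", name, "--state", states], check=True)
```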
""" - cmd = 'lxc-wait --name {c.name} --state "{s}"'\ - .format(c=self.container, s=state) + cmd = f"lxc-wait --name {self.container.name} --state '{state}'" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to wait for state "{s}" of container ' - '{c.name}.'.format(s=state, c=self.container)) + raise RuntimeError( + f"Failed to wait for state '{state}' " + f"of container {self.container.name}." + ) class Docker(ContainerEngine): @@ -818,61 +855,58 @@ class Docker(ContainerEngine): if not self.container.image: img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \ - if Topology.get_node_arch(self.container.node) == 'aarch64' \ + if Topology.get_node_arch(self.container.node) == u"aarch64" \ else Constants.DOCKER_SUT_IMAGE_UBUNTU - setattr(self.container, 'image', img) + setattr(self.container, u"image", img) - cmd = 'docker pull {image}'.format(image=self.container.image) + cmd = f"docker pull {self.container.image}" ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800) if int(ret) != 0: - raise RuntimeError('Failed to create container {c.name}.' - .format(c=self.container)) + raise RuntimeError( + f"Failed to create container {self.container.name}." + ) if self.container.cpuset_cpus: - self._configure_cgroup('docker') + self._configure_cgroup(u"docker") def create(self): """Create/deploy container. :raises RuntimeError: If creating a container failed. """ - cpuset_cpus = '--cpuset-cpus={0}'.format( - ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\ - if self.container.cpuset_cpus else '' + cpuset_cpus = u"--cpuset-cpus=" + u",".join( + f"{cpu!s}" for cpu in self.container.cpuset_cpus) \ + if self.container.cpuset_cpus else u"" - cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\ - if self.container.cpuset_mems is not None else '' + cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \ + if self.container.cpuset_mems is not None else u"" # Temporary workaround - disabling due to bug in memif - cpuset_mems = '' + cpuset_mems = u"" - env = '{0}'.format( - ' '.join('--env %s' % env for env in self.container.env))\ - if self.container.env else '' + env = u" ".join(f"--env {env!s}" for env in self.container.env) \ + if self.container.env else u"" - command = '{0}'.format(self.container.command)\ - if self.container.command else '' + command = str(self.container.command) if self.container.command else u"" - publish = '{0}'.format( - ' '.join('--publish %s' % var for var in self.container.publish))\ - if self.container.publish else '' + publish = u" ".join( + f"--publish {var!s}" for var in self.container.publish + ) if self.container.publish else u"" - volume = '{0}'.format( - ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\ - if self.container.mnt else '' + volume = u" ".join( + f"--volume {mnt!s}" for mnt in self.container.mnt) \ + if self.container.mnt else u"" - cmd = 'docker run '\ - '--privileged --detach --interactive --tty --rm '\ - '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\ - '{env} {volume} --name {container.name} {container.image} '\ - '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems, - container=self.container, command=command, - env=env, publish=publish, volume=volume) + cmd = f"docker run --privileged --detach --interactive --tty --rm " \ + f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \ + f"{env} {volume} --name {self.container.name} " \ + f"{self.container.image} {command}" ret, _, _ = 
self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to create container {c.name}' - .format(c=self.container)) + raise RuntimeError( + f"Failed to create container {self.container.name}" + ) self.info() @@ -886,60 +920,64 @@ class Docker(ContainerEngine): :type command: str :raises RuntimeError: If running the command in a container failed. """ - cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\ - "exit $?'".format(c=self.container, command=command) + cmd = f"docker exec --interactive {self.container.name} " \ + f"/bin/sh -c '{command}; exit $?'" ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180) if int(ret) != 0: - raise RuntimeError('Failed to execute command in container ' - '{c.name}.'.format(c=self.container)) + raise RuntimeError( + f"Failed to execute command in container {self.container.name}." + ) def stop(self): """Stop running container. :raises RuntimeError: If stopping a container failed. """ - cmd = 'docker stop {c.name}'.format(c=self.container) + cmd = f"docker stop {self.container.name}" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to stop container {c.name}.' - .format(c=self.container)) + raise RuntimeError( + f"Failed to stop container {self.container.name}." + ) def destroy(self): """Remove a container. :raises RuntimeError: If removing a container failed. """ - cmd = 'docker rm --force {c.name}'.format(c=self.container) + cmd = f"docker rm --force {self.container.name}" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to destroy container {c.name}.' - .format(c=self.container)) + raise RuntimeError( + f"Failed to destroy container {self.container.name}." + ) def info(self): """Return low-level information on Docker objects. :raises RuntimeError: If getting info about a container failed. """ - cmd = 'docker inspect {c.name}'.format(c=self.container) + cmd = f"docker inspect {self.container.name}" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to get info about container {c.name}.' - .format(c=self.container)) + raise RuntimeError( + f"Failed to get info about container {self.container.name}." + ) def system_info(self): """Display the docker system-wide information. :raises RuntimeError: If displaying system information failed. """ - cmd = 'docker system info' + cmd = u"docker system info" ret, _, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to get system info.') + raise RuntimeError(u"Failed to get system info.") def is_container_present(self): """Check if container is present on node. @@ -948,14 +986,14 @@ class Docker(ContainerEngine): :rtype: bool :raises RuntimeError: If getting info about a container failed. """ - cmd = 'docker ps --all --quiet --filter name={c.name}'\ - .format(c=self.container) + cmd = f"docker ps --all --quiet --filter name={self.container.name}" ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to get info about container {c.name}.' - .format(c=self.container)) - return True if stdout else False + raise RuntimeError( + f"Failed to get info about container {self.container.name}." + ) + return bool(stdout) def is_container_running(self): """Check if container is running on node. @@ -964,23 +1002,19 @@ class Docker(ContainerEngine): :rtype: bool :raises RuntimeError: If getting info about a container failed. 
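The `docker run` assembly in `create()` renders every optional attribute to either a flag fragment or an empty string, so the final f-string can interpolate all of them unconditionally and the shell tokenizer collapses the leftover whitespace. A runnable sketch of the same pattern, with ordinary arguments standing in for the `Container` attributes:

```python
def docker_run_cmd(name, image, cpuset_cpus=None, env=None, publish=None,
                   mnt=None, command=""):
    """Mirror of the optional-fragment pattern used by create()."""
    cpuset = f"--cpuset-cpus={','.join(str(c) for c in cpuset_cpus)}" \
        if cpuset_cpus else ""
    envs = " ".join(f"--env {e}" for e in env) if env else ""
    ports = " ".join(f"--publish {p}" for p in publish) if publish else ""
    vols = " ".join(f"--volume {m}" for m in mnt) if mnt else ""
    return f"docker run --privileged --detach --interactive --tty --rm " \
           f"--cgroup-parent docker {cpuset} {ports} {envs} {vols} " \
           f"--name {name} {image} {command}"

print(docker_run_cmd("dut1-memif", "ubuntu:18.04", cpuset_cpus=[2, 3],
                     mnt=["/tmp:/mnt/host"]))
```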
""" - cmd = 'docker ps --quiet --filter name={c.name}'\ - .format(c=self.container) + cmd = f"docker ps --quiet --filter name={self.container.name}" ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd) if int(ret) != 0: - raise RuntimeError('Failed to get info about container {c.name}.' - .format(c=self.container)) - return True if stdout else False + raise RuntimeError( + f"Failed to get info about container {self.container.name}." + ) + return bool(stdout) -class Container(object): +class Container: """Container class.""" - def __init__(self): - """Initialize Container object.""" - pass - def __getattr__(self, attr): """Get attribute custom implementation. @@ -1007,9 +1041,9 @@ class Container(object): self.__dict__[attr] except KeyError: # Creating new attribute - if attr == 'node': - self.__dict__['ssh'] = SSH() - self.__dict__['ssh'].connect(value) + if attr == u"node": + self.__dict__[u"ssh"] = SSH() + self.__dict__[u"ssh"].connect(value) self.__dict__[attr] = value else: # Updating attribute base of type diff --git a/resources/libraries/python/Cop.py b/resources/libraries/python/Cop.py index 24b0debdc4..29530b1277 100644 --- a/resources/libraries/python/Cop.py +++ b/resources/libraries/python/Cop.py @@ -17,7 +17,7 @@ from resources.libraries.python.PapiExecutor import PapiSocketExecutor from resources.libraries.python.topology import Topology -class Cop(object): +class Cop: """COP utilities.""" @staticmethod @@ -38,17 +38,17 @@ class Cop(object): :type default_cop: int :raises ValueError: If parameter 'ip_version' has incorrect value. """ - if ip_version not in ('ip4', 'ip6'): - raise ValueError('IP version is not in correct format') + if ip_version not in (u"ip4", u"ip6"): + raise ValueError(u"IP version is not in correct format") - cmd = 'cop_whitelist_enable_disable' - err_msg = 'Failed to add COP whitelist on interface {ifc} on host' \ - ' {host}'.format(ifc=interface, host=node['host']) + cmd = u"cop_whitelist_enable_disable" + err_msg = f"Failed to add COP whitelist on interface {interface} " \ + f"on host {node[u'host']}" args = dict( sw_if_index=Topology.get_interface_sw_index(node, interface), fib_id=int(fib_id), - ip4=True if ip_version == 'ip4' else False, - ip6=True if ip_version == 'ip6' else False, + ip4=bool(ip_version == u"ip4"), + ip6=bool(ip_version == u"ip6"), default_cop=default_cop ) @@ -68,14 +68,14 @@ class Cop(object): :raises ValueError: If parameter 'state' has incorrect value. 
""" state = state.lower() - if state in ('enable', 'disable'): - enable = True if state == 'enable' else False + if state in (u"enable", u"disable"): + enable = bool(state == u"enable") else: - raise ValueError("Possible state values are 'enable' or 'disable'") + raise ValueError(u"Possible state values are 'enable' or 'disable'") - cmd = 'cop_interface_enable_disable' - err_msg = 'Failed to enable/disable COP on interface {ifc} on host' \ - ' {host}'.format(ifc=interface, host=node['host']) + cmd = u"cop_interface_enable_disable" + err_msg = f"Failed to enable/disable COP on interface {interface} " \ + f"on host {node[u'host']}" args = dict( sw_if_index=Topology.get_interface_sw_index(node, interface), enable_disable=enable diff --git a/resources/libraries/python/CoreDumpUtil.py b/resources/libraries/python/CoreDumpUtil.py index 63c6d92a4c..9d9369aa56 100644 --- a/resources/libraries/python/CoreDumpUtil.py +++ b/resources/libraries/python/CoreDumpUtil.py @@ -13,25 +13,23 @@ """Core dump library.""" -from time import time - from resources.libraries.python.Constants import Constants from resources.libraries.python.DUTSetup import DUTSetup from resources.libraries.python.LimitUtil import LimitUtil from resources.libraries.python.SysctlUtil import SysctlUtil -from resources.libraries.python.ssh import exec_cmd_no_error, scp_node +from resources.libraries.python.ssh import exec_cmd_no_error from resources.libraries.python.topology import NodeType -__all__ = ["CoreDumpUtil"] +__all__ = [u"CoreDumpUtil"] -class CoreDumpUtil(object): +class CoreDumpUtil: """Class contains methods for processing core dumps.""" # Use one instance of class for all tests. If the functionality should # be enabled per suite or per test case, change the scope to "TEST SUITE" or # "TEST CASE" respectively. - ROBOT_LIBRARY_SCOPE = 'GLOBAL' + ROBOT_LIBRARY_SCOPE = u"GLOBAL" def __init__(self): """Initialize CoreDumpUtil class.""" @@ -72,9 +70,9 @@ class CoreDumpUtil(object): # environment, and either have a core dump pipe handler that knows # to treat privileged core dumps with care, or specific directory # defined for catching core dumps. If a core dump happens without a - # pipe handler or fully qualifid path, a message will be emitted to + # pipe handler or fully qualified path, a message will be emitted to # syslog warning about the lack of a correct setting. - SysctlUtil.set_sysctl_value(node, 'fs.suid_dumpable', 2) + SysctlUtil.set_sysctl_value(node, u"fs.suid_dumpable", 2) # Specify a core dumpfile pattern name (for the output filename). 
# %p pid @@ -84,8 +82,9 @@ class CoreDumpUtil(object): # %t UNIX time of dump # %h hostname # %e executable filename (may be shortened) - SysctlUtil.set_sysctl_value(node, 'kernel.core_pattern', - Constants.KERNEL_CORE_PATTERN) + SysctlUtil.set_sysctl_value( + node, u"kernel.core_pattern", Constants.KERNEL_CORE_PATTERN + ) self._corekeeper_configured = True @@ -100,10 +99,10 @@ class CoreDumpUtil(object): """ if isinstance(pid, list): for item in pid: - LimitUtil.set_pid_limit(node, item, 'core', 'unlimited') + LimitUtil.set_pid_limit(node, item, u"core", u"unlimited") LimitUtil.get_pid_limit(node, item) else: - LimitUtil.set_pid_limit(node, pid, 'core', 'unlimited') + LimitUtil.set_pid_limit(node, pid, u"core", u"unlimited") LimitUtil.get_pid_limit(node, pid) def enable_coredump_limit_vpp_on_all_duts(self, nodes): @@ -114,7 +113,7 @@ class CoreDumpUtil(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT and self.is_core_limit_enabled(): + if node[u"type"] == NodeType.DUT and self.is_core_limit_enabled(): vpp_pid = DUTSetup.get_vpp_pid(node) self.enable_coredump_limit(node, vpp_pid) @@ -129,18 +128,17 @@ class CoreDumpUtil(object): :type disable_on_success: bool """ for node in nodes.values(): - command = ('for f in {dir}/*.core; do ' - 'sudo gdb /usr/bin/vpp ${{f}} ' - '--eval-command="set pagination off" ' - '--eval-command="thread apply all bt" ' - '--eval-command="quit"; ' - 'sudo rm -f ${{f}}; done' - .format(dir=Constants.CORE_DUMP_DIR)) + command = f"for f in {Constants.CORE_DUMP_DIR}/*.core; do " \ + f"sudo gdb /usr/bin/vpp ${{f}} " \ + f"--eval-command=\"set pagination off\" " \ + f"--eval-command=\"thread apply all bt\" " \ + f"--eval-command=\"quit\"; " \ + f"sudo rm -f ${{f}}; done" try: exec_cmd_no_error(node, command, timeout=3600) if disable_on_success: self.set_core_limit_disabled() except RuntimeError: - # If compress was not sucessfull ignore error and skip further + # If compress was not successful ignore error and skip further # processing. continue diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py index 91db83eb5c..842c16d7ef 100644 --- a/resources/libraries/python/CpuUtils.py +++ b/resources/libraries/python/CpuUtils.py @@ -19,10 +19,10 @@ from resources.libraries.python.Constants import Constants from resources.libraries.python.ssh import exec_cmd_no_error from resources.libraries.python.topology import Topology -__all__ = ["CpuUtils"] +__all__ = [u"CpuUtils"] -class CpuUtils(object): +class CpuUtils: """CPU utilities""" # Number of threads per core. @@ -54,7 +54,7 @@ class CpuUtils(object): :rtype: bool """ cpu_mems = [item[-4:] for item in cpu_info] - cpu_mems_len = len(cpu_mems) / CpuUtils.NR_OF_THREADS + cpu_mems_len = len(cpu_mems) // CpuUtils.NR_OF_THREADS count = 0 for cpu_mem in cpu_mems[:cpu_mems_len]: if cpu_mem in cpu_mems[cpu_mems_len:]: @@ -71,17 +71,18 @@ class CpuUtils(object): :param nodes: DICT__nodes from Topology.DICT__nodes. :type nodes: dict :raises RuntimeError: If an ssh command retrieving cpu information - fails. + fails. 
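Given the specifier table above, a dump from `vpp` (pid 1234, uid/gid 0, SIGSEGV, UNIX time 1560000000, host `dut1`) under a pattern like `/tmp/%p-%u-%g-%s-%t-%h-%e.core` would land in `/tmp/1234-0-0-11-1560000000-dut1-vpp.core`. The actual `Constants.KERNEL_CORE_PATTERN` value is not shown in this diff, so the pattern here is illustrative only:

```python
# Hypothetical pattern in the spirit of the specifier table above; the
# real value comes from Constants.KERNEL_CORE_PATTERN.
pattern = "/tmp/%p-%u-%g-%s-%t-%h-%e.core"  # pid-uid-gid-sig-time-host-exe

# Roughly what the SysctlUtil call amounts to on the DUT (run with sudo):
print(f"sysctl -w kernel.core_pattern={pattern}")
```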
""" for node in nodes.values(): - stdout, _ = exec_cmd_no_error(node, 'uname -m') - node['arch'] = stdout.strip() - stdout, _ = exec_cmd_no_error(node, 'lscpu -p') - node['cpuinfo'] = list() - for line in stdout.split("\n"): - if line and line[0] != "#": - node['cpuinfo'].append([CpuUtils.__str2int(x) for x in - line.split(",")]) + stdout, _ = exec_cmd_no_error(node, u"uname -m") + node[u"arch"] = stdout.strip() + stdout, _ = exec_cmd_no_error(node, u"lscpu -p") + node[u"cpuinfo"] = list() + for line in stdout.split(u"\n"): + if line and line[0] != u"#": + node[u"cpuinfo"].append( + [CpuUtils.__str2int(x) for x in line.split(u",")] + ) @staticmethod def cpu_node_count(node): @@ -93,11 +94,11 @@ class CpuUtils(object): :rtype: int :raises RuntimeError: If node cpuinfo is not available. """ - cpu_info = node.get("cpuinfo") + cpu_info = node.get(u"cpuinfo") if cpu_info is not None: - return node["cpuinfo"][-1][3] + 1 - else: - raise RuntimeError("Node cpuinfo not available.") + return node[u"cpuinfo"][-1][3] + 1 + + raise RuntimeError(u"Node cpuinfo not available.") @staticmethod def cpu_list_per_node(node, cpu_node, smt_used=False): @@ -115,13 +116,13 @@ class CpuUtils(object): or if SMT is not enabled. """ cpu_node = int(cpu_node) - cpu_info = node.get("cpuinfo") + cpu_info = node.get(u"cpuinfo") if cpu_info is None: - raise RuntimeError("Node cpuinfo not available.") + raise RuntimeError(u"Node cpuinfo not available.") smt_enabled = CpuUtils.is_smt_enabled(cpu_info) if not smt_enabled and smt_used: - raise RuntimeError("SMT is not enabled.") + raise RuntimeError(u"SMT is not enabled.") cpu_list = [] for cpu in cpu_info: @@ -133,13 +134,13 @@ class CpuUtils(object): if smt_enabled and not smt_used: cpu_list_len = len(cpu_list) - cpu_list = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS] + cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS] return cpu_list @staticmethod - def cpu_slice_of_list_per_node(node, cpu_node, skip_cnt=0, cpu_cnt=0, - smt_used=False): + def cpu_slice_of_list_per_node( + node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False): """Return string of node related list of CPU numbers. :param node: Node dictionary with cpuinfo. @@ -160,26 +161,25 @@ class CpuUtils(object): cpu_list_len = len(cpu_list) if cpu_cnt + skip_cnt > cpu_list_len: - raise RuntimeError("cpu_cnt + skip_cnt > length(cpu list).") + raise RuntimeError(u"cpu_cnt + skip_cnt > length(cpu list).") if cpu_cnt == 0: cpu_cnt = cpu_list_len - skip_cnt if smt_used: - cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS] - cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:] - cpu_list = [cpu for cpu in cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]] - cpu_list_ex = [cpu for cpu in - cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]] + cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS] + cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:] + cpu_list = cpu_list_0[skip_cnt:skip_cnt + cpu_cnt] + cpu_list_ex = cpu_list_1[skip_cnt:skip_cnt + cpu_cnt] cpu_list.extend(cpu_list_ex) else: - cpu_list = [cpu for cpu in cpu_list[skip_cnt:skip_cnt + cpu_cnt]] + cpu_list = cpu_list[skip_cnt:skip_cnt + cpu_cnt] return cpu_list @staticmethod - def cpu_list_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",", - smt_used=False): + def cpu_list_per_node_str( + node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u",", smt_used=False): """Return string of node related list of CPU numbers. :param node: Node dictionary with cpuinfo. 
@@ -197,15 +197,15 @@ class CpuUtils(object): :returns: Cpu numbers related to numa from argument. :rtype: str """ - cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node, - skip_cnt=skip_cnt, - cpu_cnt=cpu_cnt, - smt_used=smt_used) + cpu_list = CpuUtils.cpu_slice_of_list_per_node( + node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt, + smt_used=smt_used + ) return sep.join(str(cpu) for cpu in cpu_list) @staticmethod - def cpu_range_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-", - smt_used=False): + def cpu_range_per_node_str( + node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u"-", smt_used=False): """Return string of node related range of CPU numbers, e.g. 0-4. :param node: Node dictionary with cpuinfo. @@ -223,27 +223,25 @@ class CpuUtils(object): :returns: String of node related range of CPU numbers. :rtype: str """ - cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node, - skip_cnt=skip_cnt, - cpu_cnt=cpu_cnt, - smt_used=smt_used) + cpu_list = CpuUtils.cpu_slice_of_list_per_node( + node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt, + smt_used=smt_used + ) if smt_used: cpu_list_len = len(cpu_list) - cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS] - cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:] - cpu_range = "{}{}{},{}{}{}".format(cpu_list_0[0], sep, - cpu_list_0[-1], - cpu_list_1[0], sep, - cpu_list_1[-1]) + cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS] + cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:] + cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \ + f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}" else: - cpu_range = "{}{}{}".format(cpu_list[0], sep, cpu_list[-1]) + cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}" return cpu_range @staticmethod - def cpu_slice_of_list_for_nf(node, cpu_node, nf_chains=1, nf_nodes=1, - nf_chain=1, nf_node=1, nf_dtc=1, nf_mtcr=2, - nf_dtcr=1, skip_cnt=0): + def cpu_slice_of_list_for_nf( + node, cpu_node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1, + nf_dtc=1, nf_mtcr=2, nf_dtcr=1, skip_cnt=0): """Return list of DUT node related list of CPU numbers. The main computing unit is physical core count. @@ -253,9 +251,9 @@ class CpuUtils(object): :param nf_nodes: Number of NF nodes in chain. :param nf_chain: Chain number indexed from 1. :param nf_node: Node number indexed from 1. - :param nf_dtc: Amount of physical cores for NF dataplane. + :param nf_dtc: Amount of physical cores for NF data plane. :param nf_mtcr: NF main thread per core ratio. - :param nf_dtcr: NF dataplane thread per core ratio. + :param nf_dtcr: NF data plane thread per core ratio. :param skip_cnt: Skip first "skip_cnt" CPUs. :type node: dict :param cpu_node: int. @@ -273,18 +271,18 @@ class CpuUtils(object): placement is not possible due to wrong parameters. """ if not 1 <= nf_chain <= nf_chains: - raise RuntimeError("ChainID is out of range!") + raise RuntimeError(u"ChainID is out of range!") if not 1 <= nf_node <= nf_nodes: - raise RuntimeError("NodeID is out of range!") + raise RuntimeError(u"NodeID is out of range!") - smt_used = CpuUtils.is_smt_enabled(node['cpuinfo']) + smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"]) cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used) # CPU thread sibling offset. 
- sib = len(cpu_list) / CpuUtils.NR_OF_THREADS + sib = len(cpu_list) // CpuUtils.NR_OF_THREADS dtc_is_integer = isinstance(nf_dtc, int) if not smt_used and not dtc_is_integer: - raise RuntimeError("Cannot allocate if SMT is not enabled!") + raise RuntimeError(u"Cannot allocate if SMT is not enabled!") # TODO: Please reword the following todo if it is still relevant # TODO: Workaround as we are using physical core as main unit, we must # adjust number of physical dataplane cores in case of float for further @@ -298,7 +296,7 @@ class CpuUtils(object): dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr if (skip_cnt + mt_req + dt_req) > (sib if smt_used else len(cpu_list)): - raise RuntimeError("Not enough CPU cores available for placement!") + raise RuntimeError(u"Not enough CPU cores available for placement!") offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes mt_skip = skip_cnt + (offset % mt_req) @@ -319,8 +317,9 @@ class CpuUtils(object): return result @staticmethod - def get_affinity_nf(nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, - nf_node=1, vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1): + def get_affinity_nf( + nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1, + vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1): """Get affinity of NF (network function). Result will be used to compute the amount of CPUs and also affinity. @@ -331,10 +330,10 @@ class CpuUtils(object): :param nf_nodes: Number of NF nodes in chain. :param nf_chain: Chain number indexed from 1. :param nf_node: Node number indexed from 1. - :param vs_dtc: Amount of physical cores for vswitch dataplane. - :param nf_dtc: Amount of physical cores for NF dataplane. + :param vs_dtc: Amount of physical cores for vswitch data plane. + :param nf_dtc: Amount of physical cores for NF data plane. :param nf_mtcr: NF main thread per core ratio. - :param nf_dtcr: NF dataplane thread per core ratio. + :param nf_dtcr: NF data plane thread per core ratio. 
:type nodes: dict :type node: dict :type nf_chains: int @@ -350,11 +349,9 @@ class CpuUtils(object): """ skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc - interface_list = [] - interface_list.append( - BuiltIn().get_variable_value('${{{node}_if1}}'.format(node=node))) - interface_list.append( - BuiltIn().get_variable_value('${{{node}_if2}}'.format(node=node))) + interface_list = list() + interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if1}}")) + interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if2}}")) cpu_node = Topology.get_interfaces_numa_node( nodes[node], *interface_list) @@ -362,5 +359,5 @@ class CpuUtils(object): return CpuUtils.cpu_slice_of_list_for_nf( node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains, nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node, - nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt) - + nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt + ) diff --git a/resources/libraries/python/DMM/DMMConstants.py b/resources/libraries/python/DMM/DMMConstants.py index 265320a1d8..c4899df84a 100644 --- a/resources/libraries/python/DMM/DMMConstants.py +++ b/resources/libraries/python/DMM/DMMConstants.py @@ -13,7 +13,7 @@ """This file defines the constants variables for the DMM test.""" -class DMMConstants(object): +class DMMConstants: """Define the directory path for the DMM test.""" # DMM testing directory location at topology nodes diff --git a/resources/libraries/python/DMM/SetupDMMTest.py b/resources/libraries/python/DMM/SetupDMMTest.py index 993e11924e..d84cbd544f 100644 --- a/resources/libraries/python/DMM/SetupDMMTest.py +++ b/resources/libraries/python/DMM/SetupDMMTest.py @@ -139,7 +139,7 @@ def setup_node(args): logger.console('Setup of node {0} done'.format(node['host'])) return True -class SetupDMMTest(object): +class SetupDMMTest: """Setup suite run on topology nodes. Many VAT/CLI based tests need the scripts at remote hosts before executing diff --git a/resources/libraries/python/DMM/SingleCliSer.py b/resources/libraries/python/DMM/SingleCliSer.py index 8d7b648ea4..aa77eaa445 100644 --- a/resources/libraries/python/DMM/SingleCliSer.py +++ b/resources/libraries/python/DMM/SingleCliSer.py @@ -25,7 +25,7 @@ from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error from resources.libraries.python.DMM.DMMConstants import DMMConstants as con from resources.libraries.python.topology import Topology -class SingleCliSer(object): +class SingleCliSer: """Test DMM with single client-server topology.""" @staticmethod diff --git a/resources/libraries/python/DPDK/DPDKTools.py b/resources/libraries/python/DPDK/DPDKTools.py index d7a780b223..ecb23fb4c3 100644 --- a/resources/libraries/python/DPDK/DPDKTools.py +++ b/resources/libraries/python/DPDK/DPDKTools.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at: @@ -16,12 +16,12 @@ from robot.api import logger -from resources.libraries.python.ssh import SSH, exec_cmd_no_error from resources.libraries.python.Constants import Constants +from resources.libraries.python.ssh import SSH, exec_cmd_no_error from resources.libraries.python.topology import NodeType, Topology -class DPDKTools(object): +class DPDKTools: """This class implements: - Initialization of DPDK environment, - Cleanup of DPDK environment. @@ -41,7 +41,7 @@ class DPDKTools(object): :type dut_if2: str :raises RuntimeError: If it fails to bind the interfaces to igb_uio. """ - if dut_node['type'] == NodeType.DUT: + if dut_node[u"type"] == NodeType.DUT: pci_address1 = Topology.get_interface_pci_addr(dut_node, dut_if1) pci_address2 = Topology.get_interface_pci_addr(dut_node, dut_if2) @@ -49,17 +49,15 @@ class DPDKTools(object): ssh.connect(dut_node) arch = Topology.get_node_arch(dut_node) - cmd = '{fwdir}/tests/dpdk/dpdk_scripts/init_dpdk.sh '\ - '{pci1} {pci2} {arch}'.format(fwdir=Constants.REMOTE_FW_DIR, - pci1=pci_address1, - pci2=pci_address2, - arch=arch) + cmd = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \ + f"/init_dpdk.sh {pci_address1} {pci_address2} {arch}" ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=600) if ret_code != 0: - raise RuntimeError('Failed to bind the interfaces to igb_uio ' - 'at node {name}'.\ - format(name=dut_node['host'])) + raise RuntimeError( + f"Failed to bind the interfaces to igb_uio at node " + f"{dut_node['host']}" + ) @staticmethod def cleanup_dpdk_environment(dut_node, dut_if1, dut_if2): @@ -75,7 +73,7 @@ class DPDKTools(object): :type dut_if2: str :raises RuntimeError: If it fails to cleanup the dpdk. """ - if dut_node['type'] == NodeType.DUT: + if dut_node[u"type"] == NodeType.DUT: pci_address1 = Topology.get_interface_pci_addr(dut_node, dut_if1) if1_driver = Topology.get_interface_driver(dut_node, dut_if1) pci_address2 = Topology.get_interface_pci_addr(dut_node, dut_if2) @@ -84,15 +82,15 @@ class DPDKTools(object): ssh = SSH() ssh.connect(dut_node) - cmd = '{fwdir}/tests/dpdk/dpdk_scripts/cleanup_dpdk.sh ' \ - '{drv1} {pci1} {drv2} {pci2}'.\ - format(fwdir=Constants.REMOTE_FW_DIR, drv1=if1_driver, - pci1=pci_address1, drv2=if2_driver, pci2=pci_address2) + cmd = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \ + f"/cleanup_dpdk.sh {if1_driver} {pci_address1} {if2_driver} " \ + f"{pci_address2}" ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=600) if ret_code != 0: - raise RuntimeError('Failed to cleanup the dpdk at node {name}'. - format(name=dut_node['host'])) + raise RuntimeError( + f"Failed to cleanup the dpdk at node {dut_node[u'host']}" + ) @staticmethod def install_dpdk_test(node): @@ -106,17 +104,16 @@ class DPDKTools(object): """ arch = Topology.get_node_arch(node) - command = ('{fwdir}/tests/dpdk/dpdk_scripts/install_dpdk.sh {arch}'. - format(fwdir=Constants.REMOTE_FW_DIR, arch=arch)) - message = 'Install the DPDK failed!' + command = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \ + f"/install_dpdk.sh {arch}" + message = u"Install the DPDK failed!" exec_cmd_no_error(node, command, timeout=600, message=message) - command = ('cat {fwdir}/dpdk*/VERSION'. - format(fwdir=Constants.REMOTE_FW_DIR)) - message = 'Get DPDK version failed!' + command = f"cat {Constants.REMOTE_FW_DIR}/dpdk*/VERSION" + message = u"Get DPDK version failed!" 
stdout, _ = exec_cmd_no_error(node, command, message=message) - logger.info('DPDK Version: {version}'.format(version=stdout)) + logger.info(f"DPDK Version: {stdout}") @staticmethod def install_dpdk_test_on_all_duts(nodes): @@ -127,6 +124,6 @@ class DPDKTools(object): :type nodes: dict :returns: nothing """ - for node in nodes.values(): - if node['type'] == NodeType.DUT: + for node in list(nodes.values()): + if node[u"type"] == NodeType.DUT: DPDKTools.install_dpdk_test(node) diff --git a/resources/libraries/python/DPDK/L2fwdTest.py b/resources/libraries/python/DPDK/L2fwdTest.py index 70ca93c512..56a055cfc5 100644 --- a/resources/libraries/python/DPDK/L2fwdTest.py +++ b/resources/libraries/python/DPDK/L2fwdTest.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -15,17 +15,17 @@ DUT nodes. """ -from resources.libraries.python.ssh import SSH from resources.libraries.python.Constants import Constants +from resources.libraries.python.ssh import SSH from resources.libraries.python.topology import NodeType, Topology -class L2fwdTest(object): +class L2fwdTest: """Setup the DPDK for l2fwd performance test.""" @staticmethod - def start_the_l2fwd_test(dut_node, cpu_cores, nb_cores, queue_nums, - jumbo_frames): + def start_the_l2fwd_test( + dut_node, cpu_cores, nb_cores, queue_nums, jumbo_frames): """ Execute the l2fwd on the dut_node. @@ -42,19 +42,18 @@ class L2fwdTest(object): :type jumbo_frames: bool :raises RuntimeError: If the script "run_l2fwd.sh" fails. """ - if dut_node['type'] == NodeType.DUT: + if dut_node[u"type"] == NodeType.DUT: ssh = SSH() ssh.connect(dut_node) arch = Topology.get_node_arch(dut_node) - jumbo = 'yes' if jumbo_frames else 'no' - cmd = '{fwdir}/tests/dpdk/dpdk_scripts/run_l2fwd.sh {cpu_cores} ' \ - '{nb_cores} {queues} {jumbo} {arch}'.\ - format(fwdir=Constants.REMOTE_FW_DIR, cpu_cores=cpu_cores, - nb_cores=nb_cores, queues=queue_nums, - jumbo=jumbo, arch=arch) + jumbo = u"yes" if jumbo_frames else u"no" + cmd = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \ + f"/run_l2fwd.sh {cpu_cores} {nb_cores} {queue_nums} {jumbo} " \ + f"{arch}" ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=600) if ret_code != 0: - raise RuntimeError('Failed to execute l2fwd test at node ' - '{name}'.format(name=dut_node['host'])) + raise RuntimeError( + f"Failed to execute l2fwd test at node {dut_node['host']}" + ) diff --git a/resources/libraries/python/DPDK/L3fwdTest.py b/resources/libraries/python/DPDK/L3fwdTest.py index 623075297b..0a045c01bf 100644 --- a/resources/libraries/python/DPDK/L3fwdTest.py +++ b/resources/libraries/python/DPDK/L3fwdTest.py @@ -15,17 +15,18 @@ This module exists to provide the l3fwd test for DPDK on topology nodes. """ -from resources.libraries.python.ssh import SSH from resources.libraries.python.Constants import Constants +from resources.libraries.python.ssh import SSH from resources.libraries.python.topology import NodeType, Topology -class L3fwdTest(object): +class L3fwdTest: """Test the DPDK l3fwd performance.""" @staticmethod - def start_the_l3fwd_test(nodes_info, dut_node, dut_if1, dut_if2, - nb_cores, lcores_list, queue_nums, jumbo_frames): + def start_the_l3fwd_test( + nodes_info, dut_node, dut_if1, dut_if2, nb_cores, lcores_list, + queue_nums, jumbo_frames): """ Execute the l3fwd on the dut_node. 
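In the l3fwd hunk that follows, the `--config` argument is assembled from `(port, queue, lcore)` triples: with a single core every queue shares that one lcore, otherwise lcores are consumed in order across the queues of both ports. A sketch of that assembly (the initialisation of `index` falls outside the hunk, so starting at zero is an assumption):

```python
def build_port_config(lcores, nb_cores, queue_nums, nb_ports=2):
    """Rebuild the (port, queue, lcore) triple list used for --config."""
    port_config = ""
    index = 0                        # assumed start; not shown in the hunk
    for port in range(nb_ports):
        for queue in range(queue_nums):
            index = 0 if nb_cores == 1 else index
            port_config += f"({port}, {queue}, {lcores[index]}),"
            index += 1
    return port_config.rstrip(",")

print(build_port_config([2, 4], nb_cores=2, queue_nums=1))
# (0, 0, 2),(1, 0, 4)
```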
@@ -47,11 +48,12 @@ class L3fwdTest(object): :type queue_nums: str :type jumbo_frames: bool """ - if dut_node['type'] == NodeType.DUT: - adj_mac0, adj_mac1 = L3fwdTest.get_adj_mac(nodes_info, dut_node, - dut_if1, dut_if2) + if dut_node[u"type"] == NodeType.DUT: + adj_mac0, adj_mac1 = L3fwdTest.get_adj_mac( + nodes_info, dut_node, dut_if1, dut_if2 + ) - list_cores = [int(item) for item in lcores_list.split(',')] + list_cores = [int(item) for item in lcores_list.split(u",")] # prepare the port config param nb_cores = int(nb_cores) @@ -60,23 +62,22 @@ class L3fwdTest(object): for port in range(0, 2): for queue in range(0, int(queue_nums)): index = 0 if nb_cores == 1 else index - port_config += '({port}, {queue}, {core}),'.\ - format(port=port, queue=queue, core=list_cores[index]) + port_config += f"({port}, {queue}, {list_cores[index]})," index += 1 ssh = SSH() ssh.connect(dut_node) - cmd = '{fwdir}/tests/dpdk/dpdk_scripts/run_l3fwd.sh ' \ - '"{lcores}" "{ports}" {mac1} {mac2} {jumbo}'.\ - format(fwdir=Constants.REMOTE_FW_DIR, lcores=lcores_list, - ports=port_config.rstrip(','), mac1=adj_mac0, - mac2=adj_mac1, jumbo='yes' if jumbo_frames else 'no') + cmd = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \ + f"/run_l3fwd.sh \"{lcores_list}\" " \ + f"\"{port_config.rstrip(u',')}\" " \ + f"{adj_mac0} {adj_mac1} {u'yes' if jumbo_frames else u'no'}" ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=600) if ret_code != 0: - raise Exception('Failed to execute l3fwd test at node {name}' - .format(name=dut_node['host'])) + raise Exception( + f"Failed to execute l3fwd test at node {dut_node[u'host']}" + ) @staticmethod def get_adj_mac(nodes_info, dut_node, dut_if1, dut_if2): @@ -102,12 +103,14 @@ class L3fwdTest(object): # detect which is the port 0 if min(if_pci0, if_pci1) != if_pci0: if_key0, if_key1 = if_key1, if_key0 - L3fwdTest.patch_l3fwd(dut_node, 'patch_l3fwd_flip_routes') + L3fwdTest.patch_l3fwd(dut_node, u"patch_l3fwd_flip_routes") - adj_node0, adj_if_key0 = Topology.get_adjacent_node_and_interface( \ - nodes_info, dut_node, if_key0) - adj_node1, adj_if_key1 = Topology.get_adjacent_node_and_interface( \ - nodes_info, dut_node, if_key1) + adj_node0, adj_if_key0 = Topology.get_adjacent_node_and_interface( + nodes_info, dut_node, if_key0 + ) + adj_node1, adj_if_key1 = Topology.get_adjacent_node_and_interface( + nodes_info, dut_node, if_key1 + ) adj_mac0 = Topology.get_interface_mac(adj_node0, adj_if_key0) adj_mac1 = Topology.get_interface_mac(adj_node1, adj_if_key1) @@ -131,11 +134,10 @@ class L3fwdTest(object): ssh.connect(node) ret_code, _, _ = ssh.exec_command( - '{fwdir}/tests/dpdk/dpdk_scripts/patch_l3fwd.sh {arch} ' - '{fwdir}/tests/dpdk/dpdk_scripts/{patch}'. - format(fwdir=Constants.REMOTE_FW_DIR, arch=arch, patch=patch), - timeout=600) + f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts/patch_l3fwd.sh " + f"{arch} {Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts/{patch}", + timeout=600 + ) if ret_code != 0: - raise RuntimeError('Patch of l3fwd failed.') - + raise RuntimeError(u"Patch of l3fwd failed.") diff --git a/resources/libraries/python/DUTSetup.py b/resources/libraries/python/DUTSetup.py index 9ae06c138e..1cca974893 100644 --- a/resources/libraries/python/DUTSetup.py +++ b/resources/libraries/python/DUTSetup.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at: @@ -20,7 +20,7 @@ from resources.libraries.python.ssh import SSH, exec_cmd_no_error from resources.libraries.python.topology import NodeType, Topology -class DUTSetup(object): +class DUTSetup: """Contains methods for setting up DUTs.""" @staticmethod @@ -32,19 +32,16 @@ class DUTSetup(object): :type node: dict :type service: str """ - if DUTSetup.running_in_container(node): - command = ('echo $(< /tmp/*supervisor*.log)') - else: - command = ('journalctl --no-pager --unit={name} ' - '--since="$(echo `systemctl show -p ' - 'ActiveEnterTimestamp {name}` | ' - 'awk \'{{print $2 $3}}\')"'. - format(name=service)) - message = 'Node {host} failed to get logs from unit {name}'.\ - format(host=node['host'], name=service) + command = u"echo $(< /tmp/*supervisor*.log)"\ + if DUTSetup.running_in_container(node) \ + else f"journalctl --no-pager --unit={service} " \ + f"--since=\"$(echo `systemctl show -p ActiveEnterTimestamp " \ + f"{service}` | awk \'{{print $2 $3}}\')\"" + message = f"Node {node[u'host']} failed to get logs from unit {service}" - exec_cmd_no_error(node, command, timeout=30, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=30, sudo=True, message=message + ) @staticmethod def get_service_logs_on_all_duts(nodes, service): @@ -56,7 +53,7 @@ class DUTSetup(object): :type service: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: DUTSetup.get_service_logs(node, service) @staticmethod @@ -68,15 +65,14 @@ class DUTSetup(object): :type node: dict :type service: str """ - if DUTSetup.running_in_container(node): - command = 'supervisorctl restart {name}'.format(name=service) - else: - command = 'service {name} restart'.format(name=service) - message = 'Node {host} failed to restart service {name}'.\ - format(host=node['host'], name=service) + command = f"supervisorctl restart {service}" \ + if DUTSetup.running_in_container(node) \ + else f"service {service} restart" + message = f"Node {node[u'host']} failed to restart service {service}" exec_cmd_no_error( - node, command, timeout=180, sudo=True, message=message) + node, command, timeout=180, sudo=True, message=message + ) DUTSetup.get_service_logs(node, service) @@ -84,13 +80,13 @@ class DUTSetup(object): def restart_service_on_all_duts(nodes, service): """Restart the named service on all DUTs. - :param node: Nodes in the topology. + :param nodes: Nodes in the topology. :param service: Service unit name. - :type node: dict + :type nodes: dict :type service: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: DUTSetup.restart_service(node, service) @staticmethod @@ -103,15 +99,14 @@ class DUTSetup(object): :type service: str """ # TODO: change command to start once all parent function updated. 
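The service helpers in this file all branch on the same question: supervisord manages processes inside containers, systemd (via `service`/`journalctl`) on bare hosts. A reduced sketch of that dispatch; the real check is `DUTSetup.running_in_container()`, so the `/.dockerenv` probe below is only a common stand-in heuristic:

```python
import os

def running_in_container():
    """Simplified stand-in for DUTSetup.running_in_container()."""
    return os.path.exists("/.dockerenv")

def restart_cmd(service):
    return f"supervisorctl restart {service}" if running_in_container() \
        else f"service {service} restart"

print(restart_cmd("vpp"))
```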
- if DUTSetup.running_in_container(node): - command = 'supervisorctl restart {name}'.format(name=service) - else: - command = 'service {name} restart'.format(name=service) - message = 'Node {host} failed to start service {name}'.\ - format(host=node['host'], name=service) + command = f"supervisorctl restart {service}" \ + if DUTSetup.running_in_container(node) \ + else f"service {service} restart" + message = f"Node {node[u'host']} failed to start service {service}" exec_cmd_no_error( - node, command, timeout=180, sudo=True, message=message) + node, command, timeout=180, sudo=True, message=message + ) DUTSetup.get_service_logs(node, service) @@ -119,13 +114,13 @@ class DUTSetup(object): def start_service_on_all_duts(nodes, service): """Start up the named service on all DUTs. - :param node: Nodes in the topology. + :param nodes: Nodes in the topology. :param service: Service unit name. - :type node: dict + :type nodes: dict :type service: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: DUTSetup.start_service(node, service) @staticmethod @@ -137,15 +132,14 @@ class DUTSetup(object): :type node: dict :type service: str """ - if DUTSetup.running_in_container(node): - command = 'supervisorctl stop {name}'.format(name=service) - else: - command = 'service {name} stop'.format(name=service) - message = 'Node {host} failed to stop service {name}'.\ - format(host=node['host'], name=service) + command = f"supervisorctl stop {service}" \ + if DUTSetup.running_in_container(node) \ + else f"service {service} stop" + message = f"Node {node[u'host']} failed to stop service {service}" exec_cmd_no_error( - node, command, timeout=180, sudo=True, message=message) + node, command, timeout=180, sudo=True, message=message + ) DUTSetup.get_service_logs(node, service) @@ -153,13 +147,13 @@ class DUTSetup(object): def stop_service_on_all_duts(nodes, service): """Stop the named service on all DUTs. - :param node: Nodes in the topology. + :param nodes: Nodes in the topology. :param service: Service unit name. - :type node: dict + :type nodes: dict :type service: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: DUTSetup.stop_service(node, service) @staticmethod @@ -175,28 +169,30 @@ class DUTSetup(object): ssh = SSH() ssh.connect(node) + retval = None for i in range(3): - logger.trace('Try {}: Get VPP PID'.format(i)) - ret_code, stdout, stderr = ssh.exec_command('pidof vpp') + logger.trace(f"Try {i}: Get VPP PID") + ret_code, stdout, stderr = ssh.exec_command(u"pidof vpp") if int(ret_code): - raise RuntimeError('Not possible to get PID of VPP process ' - 'on node: {0}\n {1}'. - format(node['host'], stdout + stderr)) + raise RuntimeError( + f"Not possible to get PID of VPP process on node: " + f"{node[u'host']}\n {stdout + stderr}" + ) pid_list = stdout.split() if len(pid_list) == 1: - return int(stdout) + retval = int(stdout) elif not pid_list: - logger.debug("No VPP PID found on node {0}". - format(node['host'])) + logger.debug(f"No VPP PID found on node {node[u'host']}") continue else: - logger.debug("More then one VPP PID found on node {0}". 
- format(node['host'])) - return [int(pid) for pid in pid_list] + logger.debug( + f"More then one VPP PID found on node {node[u'host']}" + ) + retval = [int(pid) for pid in pid_list] - return None + return retval @staticmethod def get_vpp_pids(nodes): @@ -209,8 +205,8 @@ class DUTSetup(object): """ pids = dict() for node in nodes.values(): - if node['type'] == NodeType.DUT: - pids[node['host']] = DUTSetup.get_vpp_pid(node) + if node[u"type"] == NodeType.DUT: + pids[node[u"host"]] = DUTSetup.get_vpp_pid(node) return pids @staticmethod @@ -239,8 +235,9 @@ class DUTSetup(object): # QAT is not initialized and we want to initialize with numvfs DUTSetup.crypto_device_init(node, crypto_type, numvfs) else: - raise RuntimeError('QAT device failed to create VFs on {host}'. - format(host=node['host'])) + raise RuntimeError( + f"QAT device failed to create VFs on {node[u'host']}" + ) @staticmethod def crypto_device_init(node, crypto_type, numvfs): @@ -255,15 +252,16 @@ class DUTSetup(object): :returns: nothing :raises RuntimeError: If failed to stop VPP or QAT failed to initialize. """ - if crypto_type == "HW_DH895xcc": - kernel_mod = "qat_dh895xcc" - kernel_drv = "dh895xcc" - elif crypto_type == "HW_C3xxx": - kernel_mod = "qat_c3xxx" - kernel_drv = "c3xxx" + if crypto_type == u"HW_DH895xcc": + kernel_mod = u"qat_dh895xcc" + kernel_drv = u"dh895xcc" + elif crypto_type == u"HW_C3xxx": + kernel_mod = u"qat_c3xxx" + kernel_drv = u"c3xxx" else: - raise RuntimeError('Unsupported crypto device type on {host}'. - format(host=node['host'])) + raise RuntimeError( + f"Unsupported crypto device type on {node[u'host']}" + ) pci_addr = Topology.get_cryptodev(node) @@ -274,7 +272,8 @@ class DUTSetup(object): DUTSetup.stop_service(node, Constants.VPP_UNIT) current_driver = DUTSetup.get_pci_dev_driver( - node, pci_addr.replace(':', r'\:')) + node, pci_addr.replace(u":", r"\:") + ) if current_driver is not None: DUTSetup.pci_driver_unbind(node, pci_addr) @@ -299,13 +298,13 @@ class DUTSetup(object): :rtype: int :raises RuntimeError: If failed to get Virtual Function PCI address. """ - command = "sh -c "\ - "'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\ - format(pci=pf_pci_addr, vf_id=vf_id) - message = 'Failed to get virtual function PCI address.' + command = f"sh -c \"basename $(readlink " \ + f"/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id})\"" + message = u"Failed to get virtual function PCI address." - stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True, - message=message) + stdout, _ = exec_cmd_no_error( + node, command, timeout=30, sudo=True, message=message + ) return stdout.strip() @@ -321,19 +320,20 @@ class DUTSetup(object): :rtype: int :raises RuntimeError: If PCI device is not SR-IOV capable. """ - command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\ - format(pci=pf_pci_addr.replace(':', r'\:')) - message = 'PCI device {pci} is not a SR-IOV device.'.\ - format(pci=pf_pci_addr) + pci = pf_pci_addr.replace(u":", r"\:") + command = f"cat /sys/bus/pci/devices/{pci}/sriov_numvfs" + message = f"PCI device {pf_pci_addr} is not a SR-IOV device." for _ in range(3): - stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True, - message=message) + stdout, _ = exec_cmd_no_error( + node, command, timeout=30, sudo=True, message=message + ) try: sriov_numvfs = int(stdout) except ValueError: - logger.trace('Reading sriov_numvfs info failed on {host}'. 
- format(host=node['host'])) + logger.trace( + f"Reading sriov_numvfs info failed on {node[u'host']}" + ) else: return sriov_numvfs @@ -350,14 +350,15 @@ class DUTSetup(object): :type numvfs: int :raises RuntimeError: Failed to create VFs on PCI. """ - command = "sh -c "\ - "'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\ - format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:')) - message = 'Failed to create {num} VFs on {pci} device on {host}'.\ - format(num=numvfs, pci=pf_pci_addr, host=node['host']) + pci = pf_pci_addr.replace(u":", r"\:") + command = f"sh -c \"echo {numvfs} | " \ + f"tee /sys/bus/pci/devices/{pci}/sriov_numvfs\"" + message = f"Failed to create {numvfs} VFs on {pf_pci_addr} device " \ + f"on {node[u'host']}" - exec_cmd_no_error(node, command, timeout=120, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=120, sudo=True, message=message + ) @staticmethod def pci_driver_unbind(node, pci_addr): @@ -369,14 +370,14 @@ class DUTSetup(object): :type pci_addr: str :raises RuntimeError: If PCI device unbind failed. """ - command = "sh -c "\ - "'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\ - format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:')) - message = 'Failed to unbind PCI device {pci} on {host}'.\ - format(pci=pci_addr, host=node['host']) + pci = pci_addr.replace(u":", r"\:") + command = f"sh -c \"echo {pci_addr} | " \ + f"tee /sys/bus/pci/devices/{pci}/driver/unbind\"" + message = f"Failed to unbind PCI device {pci_addr} on {node[u'host']}" - exec_cmd_no_error(node, command, timeout=120, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=120, sudo=True, message=message + ) @staticmethod def pci_driver_bind(node, pci_addr, driver): @@ -390,29 +391,29 @@ class DUTSetup(object): :type driver: str :raises RuntimeError: If PCI device bind failed. """ - message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\ - format(pci=pci_addr, driver=driver, host=node['host']) - - command = "sh -c "\ - "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\ - format(driver=driver, pci=pci_addr.replace(':', r'\:')) + message = f"Failed to bind PCI device {pci_addr} to {driver} " \ + f"on host {node[u'host']}" + pci = pci_addr.replace(u":", r"\:") + command = f"sh -c \"echo {driver} | " \ + f"tee /sys/bus/pci/devices/{pci}/driver_override\"" - exec_cmd_no_error(node, command, timeout=120, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=120, sudo=True, message=message + ) - command = "sh -c "\ - "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\ - format(pci=pci_addr, driver=driver) + command = f"sh -c \"echo {pci_addr} | " \ + f"tee /sys/bus/pci/drivers/{driver}/bind\"" - exec_cmd_no_error(node, command, timeout=120, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=120, sudo=True, message=message + ) - command = "sh -c "\ - "'echo | tee /sys/bus/pci/devices/{pci}/driver_override'".\ - format(pci=pci_addr.replace(':', r'\:')) + command = f"sh -c \"echo | " \ + f"tee /sys/bus/pci/devices/{pci}/driver_override\"" - exec_cmd_no_error(node, command, timeout=120, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=120, sudo=True, message=message + ) @staticmethod def pci_vf_driver_unbind(node, pf_pci_addr, vf_id): @@ -427,18 +428,15 @@ class DUTSetup(object): :raises RuntimeError: If Virtual Function unbind failed. 
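`pci_driver_bind` above is a three-step sysfs sequence: pin the device to the wanted driver via `driver_override`, poke the driver's `bind` file to trigger the probe, then clear the override so later rebinds fall back to normal ID matching. (The generated commands additionally escape colons as `\:` for the remote shell; the sketch skips that detail.) A sketch that prints the equivalent command sequence, using an illustrative PCI address and driver:

```python
def bind_commands(pci_addr, driver):
    """Return the three shell commands equivalent to pci_driver_bind."""
    dev = f"/sys/bus/pci/devices/{pci_addr}"
    return [
        # 1. Pin the device to the wanted driver for the next probe.
        f'sh -c "echo {driver} | tee {dev}/driver_override"',
        # 2. Trigger the probe/bind.
        f'sh -c "echo {pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind"',
        # 3. Clear the override so future rebinds use normal ID matching.
        f'sh -c "echo | tee {dev}/driver_override"',
    ]

for cmd in bind_commands("0000:00:08.0", "vfio-pci"):
    print(cmd)
```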
""" vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id) - vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\ - format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id) + pf_pci = pf_pci_addr.replace(u":", r"\:") + vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}" - command = "sh -c "\ - "'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\ - format(vf_pci_addr=vf_pci_addr, vf_path=vf_path) + command = f"sh -c \"echo {vf_pci_addr} | tee {vf_path}/driver/unbind\"" + message = f"Failed to unbind VF {vf_pci_addr} on {node[u'host']}" - message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\ - format(vf_pci_addr=vf_pci_addr, host=node['host']) - - exec_cmd_no_error(node, command, timeout=120, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=120, sudo=True, message=message + ) @staticmethod def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver): @@ -455,32 +453,29 @@ class DUTSetup(object): :raises RuntimeError: If PCI device bind failed. """ vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id) - vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\ - format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id) + pf_pci = pf_pci_addr.replace(u":", r'\:') + vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}" - message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\ - format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host']) + message = f"Failed to bind VF {vf_pci_addr} to {driver} " \ + f"on {node[u'host']}" + command = f"sh -c \"echo {driver} | tee {vf_path}/driver_override\"" - command = "sh -c "\ - "'echo {driver} | tee {vf_path}/driver_override'".\ - format(driver=driver, vf_path=vf_path) - - exec_cmd_no_error(node, command, timeout=120, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=120, sudo=True, message=message + ) - command = "sh -c "\ - "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\ - format(vf_pci_addr=vf_pci_addr, driver=driver) + command = f"sh -c \"echo {vf_pci_addr} | " \ + f"tee /sys/bus/pci/drivers/{driver}/bind\"" - exec_cmd_no_error(node, command, timeout=120, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=120, sudo=True, message=message + ) - command = "sh -c "\ - "'echo | tee {vf_path}/driver_override'".\ - format(vf_path=vf_path) + command = f"sh -c \"echo | tee {vf_path}/driver_override\"" - exec_cmd_no_error(node, command, timeout=120, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=120, sudo=True, message=message + ) @staticmethod def get_pci_dev_driver(node, pci_addr): @@ -510,13 +505,12 @@ class DUTSetup(object): ssh.connect(node) for i in range(3): - logger.trace('Try number {0}: Get PCI device driver'.format(i)) + logger.trace(f"Try number {i}: Get PCI device driver") - cmd = 'lspci -vmmks {0}'.format(pci_addr) + cmd = f"lspci -vmmks {pci_addr}" ret_code, stdout, _ = ssh.exec_command(cmd) if int(ret_code): - raise RuntimeError("'{0}' failed on '{1}'" - .format(cmd, node['host'])) + raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'") for line in stdout.splitlines(): if not line: @@ -524,21 +518,22 @@ class DUTSetup(object): name = None value = None try: - name, value = line.split("\t", 1) + name, value = line.split(u"\t", 1) except ValueError: - if name == "Driver:": + if name == u"Driver:": return None - if name == 'Driver:': + if name == u"Driver:": return value if i < 2: - logger.trace('Driver for PCI device {} not found, 
executing ' - 'pci rescan and retrying'.format(pci_addr)) - cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"' + logger.trace( + f"Driver for PCI device {pci_addr} not found, " + f"executing pci rescan and retrying" + ) + cmd = u"sh -c \"echo 1 > /sys/bus/pci/rescan\"" ret_code, _, _ = ssh.exec_command_sudo(cmd) if int(ret_code) != 0: - raise RuntimeError("'{0}' failed on '{1}'" - .format(cmd, node['host'])) + raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'") return None @@ -555,13 +550,14 @@ class DUTSetup(object): :type force_load: bool :raises RuntimeError: If module is not loaded or failed to load. """ - command = 'grep -w {module} /proc/modules'.format(module=module) - message = 'Kernel module {module} is not loaded on host {host}'.\ - format(module=module, host=node['host']) + command = f"grep -w {module} /proc/modules" + message = f"Kernel module {module} is not loaded " \ + f"on host {node[u'host']}" try: - exec_cmd_no_error(node, command, timeout=30, sudo=False, - message=message) + exec_cmd_no_error( + node, command, timeout=30, sudo=False, message=message + ) except RuntimeError: if force_load: # Module is not loaded and we want to load it @@ -574,15 +570,15 @@ class DUTSetup(object): """Verify if kernel module is loaded on all DUTs. If parameter force load is set to True, then try to load the modules. - :param node: DUT nodes. + :param nodes: DUT nodes. :param module: Module to verify. :param force_load: If True then try to load module. - :type node: dict + :type nodes: dict :type module: str :type force_load: bool """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: DUTSetup.verify_kernel_module(node, module, force_load) @staticmethod @@ -590,11 +586,11 @@ class DUTSetup(object): """Verify if uio driver kernel module is loaded on all DUTs. If module is not present it will try to load it. - :param node: DUT nodes. - :type node: dict + :param nodes: DUT nodes. + :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: uio_driver = Topology.get_uio_driver(node) DUTSetup.verify_kernel_module(node, uio_driver, force_load=True) @@ -609,9 +605,8 @@ class DUTSetup(object): :returns: nothing :raises RuntimeError: If loading failed. """ - command = 'modprobe {module}'.format(module=module) - message = 'Failed to load {module} on host {host}'.\ - format(module=module, host=node['host']) + command = f"modprobe {module}" + message = f"Failed to load {module} on host {node[u'host']}" exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message) @@ -627,31 +622,46 @@ class DUTSetup(object): :raises RuntimeError: If failed to remove or install VPP. """ for node in nodes.values(): - message = 'Failed to install VPP on host {host}!'.\ - format(host=node['host']) - if node['type'] == NodeType.DUT: - command = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true' + message = f"Failed to install VPP on host {node[u'host']}!" + if node[u"type"] == NodeType.DUT: + command = u"ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true" exec_cmd_no_error(node, command, sudo=True) - command = '. /etc/lsb-release; echo "${DISTRIB_ID}"' + command = u". /etc/lsb-release; echo \"${DISTRIB_ID}\"" stdout, _ = exec_cmd_no_error(node, command) - if stdout.strip() == 'Ubuntu': - exec_cmd_no_error(node, 'apt-get purge -y "*vpp*" || true', - timeout=120, sudo=True) - exec_cmd_no_error(node, 'dpkg -i --force-all {dir}*.deb'. 
-                                  format(dir=vpp_pkg_dir), timeout=120,
-                                  sudo=True, message=message)
-                exec_cmd_no_error(node, 'dpkg -l | grep vpp', sudo=True)
+            if stdout.strip() == u"Ubuntu":
+                exec_cmd_no_error(
+                    node, u"apt-get purge -y '*vpp*' || true",
+                    timeout=120, sudo=True
+                )
+                # workaround to avoid installation of vpp-api-python
+                exec_cmd_no_error(
+                    node, f"rm -f {vpp_pkg_dir}vpp-api-python.deb",
+                    timeout=120, sudo=True
+                )
+                exec_cmd_no_error(
+                    node, f"dpkg -i --force-all {vpp_pkg_dir}*.deb",
+                    timeout=120, sudo=True, message=message
+                )
+                exec_cmd_no_error(node, u"dpkg -l | grep vpp", sudo=True)
                 if DUTSetup.running_in_container(node):
                     DUTSetup.restart_service(node, Constants.VPP_UNIT)
             else:
-                exec_cmd_no_error(node, 'yum -y remove "*vpp*" || true',
-                                  timeout=120, sudo=True)
-                exec_cmd_no_error(node, 'rpm -ivh {dir}*.rpm'.
-                                  format(dir=vpp_pkg_dir), timeout=120,
-                                  sudo=True, message=message)
-                exec_cmd_no_error(node, 'rpm -qai *vpp*', sudo=True)
+                exec_cmd_no_error(
+                    node, u"yum -y remove '*vpp*' || true",
+                    timeout=120, sudo=True
+                )
+                # workaround to avoid installation of vpp-api-python
+                exec_cmd_no_error(
+                    node, f"rm -f {vpp_pkg_dir}vpp-api-python.rpm",
+                    timeout=120, sudo=True
+                )
+                exec_cmd_no_error(
+                    node, f"rpm -ivh {vpp_pkg_dir}*.rpm",
+                    timeout=120, sudo=True, message=message
+                )
+                exec_cmd_no_error(node, u"rpm -qai *vpp*", sudo=True)
                 DUTSetup.restart_service(node, Constants.VPP_UNIT)

     @staticmethod
@@ -661,14 +671,15 @@ class DUTSetup(object):
         :param node: Topology node.
         :type node: dict
         :returns: True if running in docker container, false if not or failed
-            to detect.
+                to detect.
         :rtype: bool
         """
-        command = "fgrep docker /proc/1/cgroup"
-        message = 'Failed to get cgroup settings.'
+        command = u"fgrep docker /proc/1/cgroup"
+        message = u"Failed to get cgroup settings."
         try:
-            exec_cmd_no_error(node, command, timeout=30, sudo=False,
-                              message=message)
+            exec_cmd_no_error(
+                node, command, timeout=30, sudo=False, message=message
+            )
         except RuntimeError:
             return False
         return True

@@ -685,10 +696,9 @@ class DUTSetup(object):
         :rtype: str
         :raises RuntimeError: If getting output failed.
         """
-        command = "docker inspect --format='"\
-            "{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
-        message = 'Failed to get directory of {uuid} on host {host}'.\
-            format(uuid=uuid, host=node['host'])
+        command = f"docker inspect " \
+            f"--format='{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}"
+        message = f"Failed to get directory of {uuid} on host {node[u'host']}"

         stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
         return stdout.strip()

@@ -708,16 +718,17 @@ class DUTSetup(object):

         for _ in range(3):
             ret_code, stdout, _ = ssh.exec_command_sudo(
-                "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
+                u"grep Hugepagesize /proc/meminfo | awk '{ print $2 }'"
+            )
             if ret_code == 0:
                 try:
                     huge_size = int(stdout)
                 except ValueError:
-                    logger.trace('Reading huge page size information failed')
+                    logger.trace(u"Reading huge page size information failed")
                 else:
                     break
         else:
-            raise RuntimeError('Getting huge page size information failed.')
+            raise RuntimeError(u"Getting huge page size information failed.")
         return huge_size

     @staticmethod
@@ -738,17 +749,18 @@ class DUTSetup(object):

         for _ in range(3):
             ret_code, stdout, _ = ssh.exec_command_sudo(
-                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
-                format(huge_size))
+                f"cat /sys/kernel/mm/hugepages/hugepages-{huge_size}kB/"
+                f"free_hugepages"
+            )
             if ret_code == 0:
                 try:
                     huge_free = int(stdout)
                 except ValueError:
-                    logger.trace('Reading free huge pages information failed')
+                    logger.trace(u"Reading free huge pages information failed")
                 else:
                     break
         else:
-            raise RuntimeError('Getting free huge pages information failed.')
+            raise RuntimeError(u"Getting free huge pages information failed.")
         return huge_free

     @staticmethod
@@ -759,7 +771,6 @@ class DUTSetup(object):
         :param huge_size: Size of hugepages.
         :type node: dict
         :type huge_size: int
-
         :returns: Total number of huge pages in system.
         :rtype: int
         :raises RuntimeError: If reading failed for three times.
@@ -770,17 +781,18 @@ class DUTSetup(object):

         for _ in range(3):
             ret_code, stdout, _ = ssh.exec_command_sudo(
-                'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
-                format(huge_size))
+                f"cat /sys/kernel/mm/hugepages/hugepages-{huge_size}kB/"
+                f"nr_hugepages"
+            )
             if ret_code == 0:
                 try:
                     huge_total = int(stdout)
                 except ValueError:
-                    logger.trace('Reading total huge pages information failed')
+                    logger.trace(u"Reading total huge pages information failed")
                 else:
                     break
         else:
-            raise RuntimeError('Getting total huge pages information failed.')
+            raise RuntimeError(u"Getting total huge pages information failed.")
         return huge_total

     @staticmethod
@@ -796,9 +808,8 @@ class DUTSetup(object):
         :type huge_mnt: str
         :type mem_size: str
         :type allocate: bool
-
         :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
-            or increasing map count failed.
+                or increasing map count failed.
         """
         # TODO: split function into smaller parts.
         ssh = SSH()
@@ -809,7 +820,8 @@ class DUTSetup(object):
         huge_free = DUTSetup.get_huge_page_free(node, huge_size)
         huge_total = DUTSetup.get_huge_page_total(node, huge_size)

-        # Check if memory reqested is available on host
+        # Check if memory requested is available on host
+        mem_size = int(mem_size)
         if (mem_size * 1024) > (huge_free * huge_size):
             # If we want to allocate hugepage dynamically
             if allocate:
@@ -818,43 +830,50 @@ class DUTSetup(object):
                 max_map_count = huge_to_allocate*4
                 # Increase maximum number of memory map areas a process may have
                 ret_code, _, _ = ssh.exec_command_sudo(
-                    'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
-                    format(max_map_count))
+                    f"echo \"{max_map_count}\" | "
+                    f"sudo tee /proc/sys/vm/max_map_count"
+                )
                 if int(ret_code) != 0:
-                    raise RuntimeError('Increase map count failed on {host}'.
-                                       format(host=node['host']))
+                    raise RuntimeError(
+                        f"Increase map count failed on {node[u'host']}"
+                    )
                 # Increase hugepage count
                 ret_code, _, _ = ssh.exec_command_sudo(
-                    'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
-                    format(huge_to_allocate))
+                    f"echo \"{huge_to_allocate}\" | "
+                    f"sudo tee /proc/sys/vm/nr_hugepages"
+                )
                 if int(ret_code) != 0:
-                    raise RuntimeError('Mount huge pages failed on {host}'.
-                                       format(host=node['host']))
+                    raise RuntimeError(
+                        f"Mount huge pages failed on {node[u'host']}"
+                    )
-            # If we do not want to allocate dynamicaly end with error
+            # If we do not want to allocate dynamically end with error
             else:
-                raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
-                                   format(huge_free, huge_free * huge_size))
+                raise RuntimeError(
+                    f"Not enough free huge pages: {huge_free}, "
+                    f"{huge_free * huge_size} MB"
+                )

         # Check if huge pages mount point exist
         has_huge_mnt = False
-        ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
+        ret_code, stdout, _ = ssh.exec_command(u"cat /proc/mounts")
         if int(ret_code) == 0:
             for line in stdout.splitlines():
                 # Try to find something like:
-                # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
+                # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
                 mount = line.split()
-                if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
+                if mount[2] == u"hugetlbfs" and mount[1] == huge_mnt:
                     has_huge_mnt = True
                     break

         # If huge page mount point not exist create one
         if not has_huge_mnt:
-            ret_code, _, _ = ssh.exec_command_sudo(
-                'mkdir -p {mnt}'.format(mnt=huge_mnt))
+            ret_code, _, _ = ssh.exec_command_sudo(f"mkdir -p {huge_mnt}")
             if int(ret_code) != 0:
-                raise RuntimeError('Create mount dir failed on {host}'.
-                                   format(host=node['host']))
+                raise RuntimeError(
+                    f"Create mount dir failed on {node[u'host']}"
+                )

         ret_code, _, _ = ssh.exec_command_sudo(
-            'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
-            format(mnt=huge_mnt))
+            f"mount -t hugetlbfs -o pagesize=2048k none {huge_mnt}"
+        )
         if int(ret_code) != 0:
-            raise RuntimeError('Mount huge pages failed on {host}'.
-                               format(host=node['host']))
+            raise RuntimeError(
+                f"Mount huge pages failed on {node[u'host']}"
+            )
diff --git a/resources/libraries/python/Dhcp.py b/resources/libraries/python/Dhcp.py
index e69ee0bcb8..ec2c895bc3 100644
--- a/resources/libraries/python/Dhcp.py
+++ b/resources/libraries/python/Dhcp.py
@@ -17,7 +17,7 @@

 from resources.libraries.python.PapiExecutor import PapiSocketExecutor


-class DhcpProxy(object):
+class DhcpProxy:
     """DHCP Proxy utilities."""

     @staticmethod
@@ -31,10 +31,9 @@ class DhcpProxy(object):
         :returns: DHCP relay data.
         :rtype: list
         """
-        cmd = 'dhcp_proxy_dump'
-        args = dict(is_ip6=1 if ip_version == 'ipv6' else 0)
-        err_msg = 'Failed to get DHCP proxy dump on host {host}'.format(
-            host=node['host'])
+        cmd = u"dhcp_proxy_dump"
+        args = dict(is_ip6=1 if ip_version == u"ipv6" else 0)
+        err_msg = f"Failed to get DHCP proxy dump on host {node[u'host']}"

         with PapiSocketExecutor(node) as papi_exec:
             details = papi_exec.add(cmd, **args).get_details(err_msg)
diff --git a/resources/libraries/python/DpdkUtil.py b/resources/libraries/python/DpdkUtil.py
index bbd6987be4..3a04cbd021 100644
--- a/resources/libraries/python/DpdkUtil.py
+++ b/resources/libraries/python/DpdkUtil.py
@@ -17,7 +17,7 @@
 from resources.libraries.python.OptionString import OptionString
 from resources.libraries.python.ssh import exec_cmd_no_error


-class DpdkUtil(object):
+class DpdkUtil:
     """Utilities for DPDK."""

     @staticmethod
@@ -29,17 +29,19 @@ class DpdkUtil(object):
         :returns: EAL parameters.
         :rtype: OptionString
         """
-        options = OptionString(prefix='-')
-        options.add('v')
+        options = OptionString(prefix=u"-")
+        options.add(u"v")
         # Set the hexadecimal bitmask of the cores to run on.
-        options.add_with_value_from_dict('l', 'eal_corelist', kwargs)
+        options.add_with_value_from_dict(u"l", u"eal_corelist", kwargs)
         # Set master core.
-        options.add_with_value('-master-lcore', '0')
+        options.add_with_value(u"-master-lcore", u"0")
         # Load an external driver. Multiple -d options are allowed.
options.add_with_value_if_from_dict( - 'd', '/usr/lib/librte_pmd_virtio.so', 'eal_driver', kwargs, True) + u"d", u"/usr/lib/librte_pmd_virtio.so", u"eal_driver", kwargs, True + ) options.add_if_from_dict( - '-in-memory', 'eal_in_memory', kwargs, False) + u"-in-memory", u"eal_in_memory", kwargs, False + ) return options @staticmethod @@ -51,50 +53,54 @@ class DpdkUtil(object): :returns: PMD parameters. :rtype: OptionString """ - options = OptionString(prefix='--') + options = OptionString(prefix=u"--") # Set the forwarding mode: io, mac, mac_retry, mac_swap, flowgen, # rxonly, txonly, csum, icmpecho, ieee1588 options.add_equals_from_dict( - 'forward-mode', 'pmd_fwd_mode', kwargs, 'io') + u"forward-mode", u"pmd_fwd_mode", kwargs, u"io" + ) # Set the number of packets per burst to N. - options.add_equals('burst', 64) + options.add_equals(u"burst", 64) # Set the number of descriptors in the TX rings to N. - options.add_equals_from_dict('txd', 'pmd_txd', kwargs, 1024) + options.add_equals_from_dict(u"txd", u"pmd_txd", kwargs, 1024) # Set the number of descriptors in the RX rings to N. - options.add_equals_from_dict('rxd', 'pmd_rxd', kwargs, 1024) + options.add_equals_from_dict(u"rxd", u"pmd_rxd", kwargs, 1024) # Set the number of queues in the TX to N. - options.add_equals_from_dict('txq', 'pmd_txq', kwargs, 1) + options.add_equals_from_dict(u"txq", u"pmd_txq", kwargs, 1) # Set the number of queues in the RX to N. - options.add_equals_from_dict('rxq', 'pmd_rxq', kwargs, 1) + options.add_equals_from_dict(u"rxq", u"pmd_rxq", kwargs, 1) # Set the hexadecimal bitmask of offloads. - options.add_equals_from_dict('tx-offloads', 'pmd_tx_offloads', kwargs) + options.add_equals_from_dict(u"tx-offloads", u"pmd_tx_offloads", kwargs) # Set the number of mbufs to be allocated in the mbuf pools. - options.add_equals_from_dict('total-num-mbufs', 'pmd_num_mbufs', kwargs) + options.add_equals_from_dict( + u"total-num-mbufs", u"pmd_num_mbufs", kwargs + ) # Disable hardware VLAN. options.add_if_from_dict( - 'disable-hw-vlan', 'pmd_disable_hw_vlan', kwargs, True) + u"disable-hw-vlan", u"pmd_disable_hw_vlan", kwargs, True + ) # Set the MAC address XX:XX:XX:XX:XX:XX of the peer port N - options.add_equals_from_dict('eth-peer', 'pmd_eth_peer_0', kwargs) - options.add_equals_from_dict('eth-peer', 'pmd_eth_peer_1', kwargs) + options.add_equals_from_dict(u"eth-peer", u"pmd_eth_peer_0", kwargs) + options.add_equals_from_dict(u"eth-peer", u"pmd_eth_peer_1", kwargs) # Set the max packet length. - options.add_equals_from_dict('max-pkt-len', 'pmd_max_pkt_len', kwargs) + options.add_equals_from_dict(u"max-pkt-len", u"pmd_max_pkt_len", kwargs) # Set the number of forwarding cores based on coremask. - options.add_equals_from_dict('nb-cores', 'pmd_nb_cores', kwargs) + options.add_equals_from_dict(u"nb-cores", u"pmd_nb_cores", kwargs) return options @staticmethod def get_testpmd_cmdline(**kwargs): """Get DPDK testpmd command line arguments. - :param args: Key-value testpmd parameters. - :type args: dict + :param kwargs: Key-value testpmd parameters. + :type kwargs: dict :returns: Command line string. :rtype: OptionString """ options = OptionString() - options.add('testpmd') + options.add(u"testpmd") options.extend(DpdkUtil.get_eal_options(**kwargs)) - options.add('--') + options.add(u"--") options.extend(DpdkUtil.get_pmd_options(**kwargs)) return options @@ -103,14 +109,14 @@ class DpdkUtil(object): """Start DPDK testpmd app on VM node. :param node: VM Node to start testpmd on. - :param args: Key-value testpmd parameters. 
+ :param kwargs: Key-value testpmd parameters. :type node: dict :type kwargs: dict """ cmd_options = OptionString() - cmd_options.add("/start-testpmd.sh") + cmd_options.add(u"/start-testpmd.sh") cmd_options.extend(DpdkUtil.get_eal_options(**kwargs)) - cmd_options.add('--') + cmd_options.add(u"--") cmd_options.extend(DpdkUtil.get_pmd_options(**kwargs)) exec_cmd_no_error(node, cmd_options, sudo=True, disconnect=True) @@ -122,5 +128,5 @@ class DpdkUtil(object): :type node: dict :returns: nothing """ - cmd = "/stop-testpmd.sh" # Completed string, simpler than OptionString. + cmd = u"/stop-testpmd.sh" # Completed string, simple one. exec_cmd_no_error(node, cmd, sudo=True, disconnect=True) diff --git a/resources/libraries/python/DropRateSearch.py b/resources/libraries/python/DropRateSearch.py index e87ef95434..49e64d9219 100644 --- a/resources/libraries/python/DropRateSearch.py +++ b/resources/libraries/python/DropRateSearch.py @@ -20,7 +20,6 @@ from enum import Enum, unique @unique class SearchDirection(Enum): """Direction of linear search.""" - TOP_DOWN = 1 BOTTOM_UP = 2 @@ -28,7 +27,6 @@ class SearchDirection(Enum): @unique class SearchResults(Enum): """Result of the drop rate search.""" - SUCCESS = 1 FAILURE = 2 SUSPICIOUS = 3 @@ -37,7 +35,6 @@ class SearchResults(Enum): @unique class RateType(Enum): """Type of rate units.""" - PERCENTAGE = 1 PACKETS_PER_SECOND = 2 BITS_PER_SECOND = 3 @@ -46,7 +43,6 @@ class RateType(Enum): @unique class LossAcceptanceType(Enum): """Type of the loss acceptance criteria.""" - FRAMES = 1 PERCENTAGE = 2 @@ -54,16 +50,13 @@ class LossAcceptanceType(Enum): @unique class SearchResultType(Enum): """Type of search result evaluation.""" - BEST_OF_N = 1 WORST_OF_N = 2 -class DropRateSearch(object): +class DropRateSearch(metaclass=ABCMeta): """Abstract class with search algorithm implementation.""" - __metaclass__ = ABCMeta - def __init__(self): # duration of traffic run (binary, linear) self._duration = 60 @@ -86,8 +79,8 @@ class DropRateSearch(object): # permitted values: LossAcceptanceType self._loss_acceptance_type = LossAcceptanceType.FRAMES # size of frames to send - self._frame_size = "64" - # binary convergence criterium type is self._rate_type + self._frame_size = u"64" + # binary convergence criterion type is self._rate_type self._binary_convergence_threshold = 5000 # numbers of traffic runs during one rate step self._max_attempts = 1 @@ -105,11 +98,11 @@ class DropRateSearch(object): :returns: Latency stats. :rtype: list """ - pass @abstractmethod - def measure_loss(self, rate, frame_size, loss_acceptance, - loss_acceptance_type, traffic_profile, skip_warmup=False): + def measure_loss( + self, rate, frame_size, loss_acceptance, loss_acceptance_type, + traffic_profile, skip_warmup=False): """Send traffic from TG and measure count of dropped frames. :param rate: Offered traffic load. @@ -118,7 +111,7 @@ class DropRateSearch(object): :param loss_acceptance_type: Type of permitted loss. :param traffic_profile: Module name to use for traffic generation. :param skip_warmup: Start TRex without warmup traffic if true. - :type rate: int + :type rate: float :type frame_size: str :type loss_acceptance: float :type loss_acceptance_type: LossAcceptanceType @@ -127,7 +120,6 @@ class DropRateSearch(object): :returns: Drop threshold exceeded? (True/False) :rtype: bool """ - pass def set_search_rate_boundaries(self, max_rate, min_rate): """Set search boundaries: min,max. 
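(Aside: the abstract methods above only define a contract; nothing in this hunk shows a concrete implementation. The following is a minimal sketch of how a subclass would wire into the search API, assuming the class defaults for search direction and step. _ToySearch, its 50.0 cut-off and the profile name are invented for illustration and are not part of this commit.)

    # Hypothetical subclass, illustration only.
    class _ToySearch(DropRateSearch):

        def get_latency(self):
            # No real traffic generator here, so no latency stats.
            return list()

        def measure_loss(
                self, rate, frame_size, loss_acceptance, loss_acceptance_type,
                traffic_profile, skip_warmup=False):
            # Pretend any rate at or below 50.0 meets the loss criteria;
            # linear_search treats a True result as a passing rate.
            return rate <= 50.0

    toy = _ToySearch()
    toy.set_search_rate_boundaries(max_rate=100.0, min_rate=1.0)
    toy.linear_search(start_rate=100.0, traffic_profile=u"dummy-profile")
    rate, latency = toy.verify_search_result()

With the defaults, the search steps the rate until measure_loss first reports a pass, then verify_search_result hands back that rate together with get_latency().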
@@ -140,53 +132,55 @@ class DropRateSearch(object): :raises ValueError: If min rate is lower than 0 or higher than max rate. """ if float(min_rate) <= 0: - raise ValueError("min_rate must be higher than 0") + msg = u"min_rate must be higher than 0" elif float(min_rate) > float(max_rate): - raise ValueError("min_rate must be lower than max_rate") + msg = u"min_rate must be lower than max_rate" else: self._rate_max = float(max_rate) self._rate_min = float(min_rate) + return + raise ValueError(msg) def set_loss_acceptance(self, loss_acceptance): - """Set loss acceptance treshold for PDR search. + """Set loss acceptance threshold for PDR search. - :param loss_acceptance: Loss acceptance treshold for PDR search. + :param loss_acceptance: Loss acceptance threshold for PDR search. :type loss_acceptance: str :returns: nothing :raises ValueError: If loss acceptance is lower than zero. """ - if float(loss_acceptance) < 0: - raise ValueError("Loss acceptance must be higher or equal 0") - else: + if float(loss_acceptance) >= 0: self._loss_acceptance = float(loss_acceptance) + else: + raise ValueError(u"Loss acceptance must be higher or equal 0") def get_loss_acceptance(self): - """Return configured loss acceptance treshold. + """Return configured loss acceptance threshold. - :returns: Loss acceptance treshold. + :returns: Loss acceptance threshold. :rtype: float """ return self._loss_acceptance def set_loss_acceptance_type_percentage(self): - """Set loss acceptance treshold type to percentage. + """Set loss acceptance threshold type to percentage. :returns: nothing """ self._loss_acceptance_type = LossAcceptanceType.PERCENTAGE def set_loss_acceptance_type_frames(self): - """Set loss acceptance treshold type to frames. + """Set loss acceptance threshold type to frames. :returns: nothing """ self._loss_acceptance_type = LossAcceptanceType.FRAMES def loss_acceptance_type_is_percentage(self): - """Return true if loss acceptance treshold type is percentage, + """Return true if loss acceptance threshold type is percentage, false otherwise. - :returns: True if loss acceptance treshold type is percentage. + :returns: True if loss acceptance threshold type is percentage. :rtype: boolean """ return self._loss_acceptance_type == LossAcceptanceType.PERCENTAGE @@ -229,10 +223,10 @@ class DropRateSearch(object): :returns: nothing :raises Exception: If rate type is unknown. """ - if rate_type not in RateType: - raise Exception("rate_type unknown: {}".format(rate_type)) - else: + if rate_type in RateType: self._rate_type = rate_type + else: + raise Exception(f"rate_type unknown: {rate_type}") def set_search_frame_size(self, frame_size): """Set size of frames to send. @@ -263,7 +257,7 @@ class DropRateSearch(object): def set_binary_convergence_threshold(self, convergence): """Set convergence for binary search. - :param convergence: Treshold value number. + :param convergence: Threshold value number. :type convergence: float :returns: nothing """ @@ -272,7 +266,7 @@ class DropRateSearch(object): def get_binary_convergence_threshold(self): """Get convergence for binary search. - :returns: Treshold value number. + :returns: Threshold value number. :rtype: float """ return self._binary_convergence_threshold @@ -285,13 +279,14 @@ class DropRateSearch(object): :raises ValueError: If rate type is unknown. 
""" if self._rate_type == RateType.PERCENTAGE: - return "%" + retval = u"%" elif self._rate_type == RateType.BITS_PER_SECOND: - return "bps" + retval = u"bps" elif self._rate_type == RateType.PACKETS_PER_SECOND: - return "pps" + retval = u"pps" else: - raise ValueError("RateType unknown") + raise ValueError(u"RateType unknown") + return retval def set_max_attempts(self, max_attempts): """Set maximum number of traffic runs during one rate step. @@ -304,7 +299,7 @@ class DropRateSearch(object): if int(max_attempts) > 0: self._max_attempts = int(max_attempts) else: - raise ValueError("Max attempt must by greater than zero") + raise ValueError(u"Max attempt must by greater than zero") def get_max_attempts(self): """Return maximum number of traffic runs during one rate step. @@ -336,10 +331,10 @@ class DropRateSearch(object): :returns: nothing :raises ValueError: If search type is unknown. """ - if search_type not in SearchResultType: - raise ValueError("search_type unknown: {}".format(search_type)) - else: + if search_type in SearchResultType: self._search_result_type = search_type + else: + raise ValueError(f"search_type unknown: {search_type}") @staticmethod def _get_best_of_n(res_list): @@ -375,11 +370,12 @@ class DropRateSearch(object): :raises ValueError: If search result type is unknown. """ if self._search_result_type == SearchResultType.BEST_OF_N: - return self._get_best_of_n(res_list) + retval = self._get_best_of_n(res_list) elif self._search_result_type == SearchResultType.WORST_OF_N: - return self._get_worst_of_n(res_list) + retval = self._get_worst_of_n(res_list) else: - raise ValueError("Unknown search result type") + raise ValueError(u"Unknown search result type") + return retval def linear_search(self, start_rate, traffic_profile): """Linear search of rate with loss below acceptance criteria. @@ -391,9 +387,8 @@ class DropRateSearch(object): :returns: nothing :raises ValueError: If start rate is not in range. """ - if not self._rate_min <= float(start_rate) <= self._rate_max: - raise ValueError("Start rate is not in min,max range") + raise ValueError(u"Start rate is not in min,max range") rate = float(start_rate) # the last but one step @@ -403,9 +398,12 @@ class DropRateSearch(object): while True: res = [] for dummy in range(self._max_attempts): - res.append(self.measure_loss( - rate, self._frame_size, self._loss_acceptance, - self._loss_acceptance_type, traffic_profile)) + res.append( + self.measure_loss( + rate, self._frame_size, self._loss_acceptance, + self._loss_acceptance_type, traffic_profile + ) + ) res = self._get_res_based_on_search_type(res) @@ -419,21 +417,19 @@ class DropRateSearch(object): # one last step with rate set to _rate_min rate = self._rate_min continue - else: - self._search_result = SearchResults.FAILURE - self._search_result_rate = None - return - else: - continue + self._search_result = SearchResults.FAILURE + self._search_result_rate = None + return + continue # no loss => non/partial drop rate found elif res: self._search_result = SearchResults.SUCCESS self._search_result_rate = rate return else: - raise RuntimeError("Unknown search result") + raise RuntimeError(u"Unknown search result") else: - raise Exception("Unknown search direction") + raise Exception(u"Unknown search direction") def verify_search_result(self): """Fail if search was not successful. @@ -442,13 +438,14 @@ class DropRateSearch(object): :rtype: tuple :raises Exception: If search failed. 
""" - if self._search_result in [ - SearchResults.SUCCESS, SearchResults.SUSPICIOUS]: + if self._search_result in \ + [SearchResults.SUCCESS, SearchResults.SUSPICIOUS]: return self._search_result_rate, self.get_latency() - raise Exception('Search FAILED') + raise Exception(u"Search FAILED") - def binary_search(self, b_min, b_max, traffic_profile, skip_max_rate=False, - skip_warmup=False): + def binary_search( + self, b_min, b_max, traffic_profile, skip_max_rate=False, + skip_warmup=False): """Binary search of rate with loss below acceptance criteria. :param b_min: Min range rate. @@ -464,13 +461,12 @@ class DropRateSearch(object): :returns: nothing :raises ValueError: If input values are not valid. """ - if not self._rate_min <= float(b_min) <= self._rate_max: - raise ValueError("Min rate is not in min,max range") + raise ValueError(u"Min rate is not in min,max range") if not self._rate_min <= float(b_max) <= self._rate_max: - raise ValueError("Max rate is not in min,max range") + raise ValueError(u"Max rate is not in min,max range") if float(b_max) < float(b_min): - raise ValueError("Min rate is greater than max rate") + raise ValueError(u"Min rate is greater than max rate") # rate is half of interval + start of interval if not using max rate rate = ((float(b_max) - float(b_min)) / 2) + float(b_min) \ @@ -492,7 +488,8 @@ class DropRateSearch(object): res.append(self.measure_loss( rate, self._frame_size, self._loss_acceptance, self._loss_acceptance_type, traffic_profile, - skip_warmup=skip_warmup)) + skip_warmup=skip_warmup + )) res = self._get_res_based_on_search_type(res) @@ -514,11 +511,10 @@ class DropRateSearch(object): :returns: nothing :raises RuntimeError: If linear search failed. """ - self.linear_search(start_rate, traffic_profile) - if self._search_result in [SearchResults.SUCCESS, - SearchResults.SUSPICIOUS]: + if self._search_result in \ + [SearchResults.SUCCESS, SearchResults.SUSPICIOUS]: b_min = self._search_result_rate b_max = self._search_result_rate + self._rate_linear_step @@ -537,15 +533,16 @@ class DropRateSearch(object): # we will use binary search to refine search in one linear step self.binary_search(b_min, b_max, traffic_profile, True) - # linear and binary search succeed - if self._search_result == SearchResults.SUCCESS: - return + # linear search succeed but binary failed or suspicious - else: + if self._search_result != SearchResults.SUCCESS: self._search_result = SearchResults.SUSPICIOUS self._search_result_rate = temp_rate + # linear and binary search succeed + else: + return else: - raise RuntimeError("Linear search FAILED") + raise RuntimeError(u"Linear search FAILED") @staticmethod def floats_are_close_equal(num_a, num_b, rel_tol=1e-9, abs_tol=0.0): @@ -553,23 +550,24 @@ class DropRateSearch(object): :param num_a: First number to compare. :param num_b: Second number to compare. - :param rel_tol=1e-9: The relative tolerance. - :param abs_tol=0.0: The minimum absolute tolerance level. + :param rel_tol: The relative tolerance. + :param abs_tol: The minimum absolute tolerance level. (Optional, + default value: 0.0) :type num_a: float :type num_b: float :type rel_tol: float :type abs_tol: float :returns: Returns True if num_a is close in value to num_b or equal. - False otherwise. + False otherwise. :rtype: boolean :raises ValueError: If input values are not valid. 
""" - if num_a == num_b: return True if rel_tol < 0.0 or abs_tol < 0.0: - raise ValueError('Error tolerances must be non-negative') + raise ValueError(u"Error tolerances must be non-negative") - return abs(num_b - num_a) <= max(rel_tol * max(abs(num_a), abs(num_b)), - abs_tol) + return abs(num_b - num_a) <= max( + rel_tol * max(abs(num_a), abs(num_b)), abs_tol + ) diff --git a/resources/libraries/python/FilteredLogger.py b/resources/libraries/python/FilteredLogger.py index a04eb67476..3df5714837 100644 --- a/resources/libraries/python/FilteredLogger.py +++ b/resources/libraries/python/FilteredLogger.py @@ -26,17 +26,18 @@ Logger.console() is not supported. import logging _LEVELS = { - "TRACE": logging.DEBUG // 2, - "DEBUG": logging.DEBUG, - "INFO": logging.INFO, - "HTML": logging.INFO, - "WARN": logging.WARN, - "ERROR": logging.ERROR, - "CRITICAL": logging.CRITICAL, - "NONE": logging.CRITICAL, + u"TRACE": logging.DEBUG // 2, + u"DEBUG": logging.DEBUG, + u"INFO": logging.INFO, + u"HTML": logging.INFO, + u"WARN": logging.WARN, + u"ERROR": logging.ERROR, + u"CRITICAL": logging.CRITICAL, + u"NONE": logging.CRITICAL, } -class FilteredLogger(object): + +class FilteredLogger: """Instances of this class have the similar API to robot.api.logger. TODO: Support html argument? @@ -57,7 +58,7 @@ class FilteredLogger(object): self.logger_module = logger_module self.min_level_num = _LEVELS[min_level.upper()] - def write(self, message, farg=None, level="INFO"): + def write(self, message, farg=None, level=u"INFO"): """Forwards the message to logger if min_level is reached. Formatting using '%' operator is used when farg argument is suplied. @@ -76,20 +77,20 @@ class FilteredLogger(object): def trace(self, message, farg=None): """Forward the message using the ``TRACE`` level.""" - self.write(message, farg=farg, level="TRACE") + self.write(message, farg=farg, level=u"TRACE") def debug(self, message, farg=None): """Forward the message using the ``DEBUG`` level.""" - self.write(message, farg=farg, level="DEBUG") + self.write(message, farg=farg, level=u"DEBUG") def info(self, message, farg=None): """Forward the message using the ``INFO`` level.""" - self.write(message, farg=farg, level="INFO") + self.write(message, farg=farg, level=u"INFO") def warn(self, message, farg=None): """Forward the message using the ``WARN`` level.""" - self.write(message, farg=farg, level="WARN") + self.write(message, farg=farg, level=u"WARN") def error(self, message, farg=None): """Forward the message using the ``ERROR`` level.""" - self.write(message, farg=farg, level="ERROR") + self.write(message, farg=farg, level=u"ERROR") diff --git a/resources/libraries/python/GBP.py b/resources/libraries/python/GBP.py index 3625640853..3d4d249b10 100644 --- a/resources/libraries/python/GBP.py +++ b/resources/libraries/python/GBP.py @@ -14,6 +14,7 @@ """GBP utilities library.""" from enum import IntEnum + from ipaddress import ip_address from resources.libraries.python.IPUtil import IPUtil @@ -68,7 +69,7 @@ class GBPHashMode(IntEnum): GBP_API_HASH_MODE_SYMETRIC = 3 -class GBP(object): +class GBP: """GBP utilities.""" @staticmethod @@ -90,9 +91,8 @@ class GBP(object): :type ip4_uu_sw_if_index: int :type ip6_uu_sw_if_index: int """ - cmd = 'gbp_route_domain_add' - err_msg = 'Failed to add GBP route domain on {node}!'\ - .format(node=node['host']) + cmd = u"gbp_route_domain_add" + err_msg = f"Failed to add GBP route domain on {node[u'host']}!" 
         args_in = dict(
             rd=dict(
@@ -126,14 +126,14 @@ class GBP(object):
         :type uu_fwd_sw_if_index: int
         :type bm_flood_sw_if_index: int
         """
-        cmd = 'gbp_bridge_domain_add'
-        err_msg = 'Failed to add GBP bridge domain on {node}!'\
-            .format(node=node['host'])
+        cmd = u"gbp_bridge_domain_add"
+        err_msg = f"Failed to add GBP bridge domain on {node[u'host']}!"

         args_in = dict(
             bd=dict(
                 flags=getattr(
-                    GBPBridgeDomainFlags, 'GBP_BD_API_FLAG_NONE').value,
+                    GBPBridgeDomainFlags, u"GBP_BD_API_FLAG_NONE"
+                ).value,
                 bvi_sw_if_index=bvi_sw_if_index,
                 uu_fwd_sw_if_index=uu_fwd_sw_if_index,
                 bm_flood_sw_if_index=bm_flood_sw_if_index,
@@ -166,9 +166,8 @@ class GBP(object):
         :type uplink_sw_if_index: int
         :type remote_ep_timeout: int
         """
-        cmd = 'gbp_endpoint_group_add'
-        err_msg = 'Failed to add GBP endpoint group on {node}!'\
-            .format(node=node['host'])
+        cmd = u"gbp_endpoint_group_add"
+        err_msg = f"Failed to add GBP endpoint group on {node[u'host']}!"

         args_in = dict(
             epg=dict(
@@ -201,17 +200,13 @@ class GBP(object):
         :type mac_addr: str
         :type sclass: int
         """
-        cmd = 'gbp_endpoint_add'
-        err_msg = 'Failed to add GBP endpoint on {node}!'\
-            .format(node=node['host'])
+        cmd = u"gbp_endpoint_add"
+        err_msg = f"Failed to add GBP endpoint on {node[u'host']}!"

         ips = list()
-        ips.append(IPUtil.create_ip_address_object(
-            ip_address(unicode(ip_addr))))
-        tun_src = IPUtil.create_ip_address_object(
-            ip_address(unicode('0.0.0.0')))
-        tun_dst = IPUtil.create_ip_address_object(
-            ip_address(unicode('0.0.0.0')))
+        ips.append(IPUtil.create_ip_address_object(ip_address(ip_addr)))
+        tun_src = IPUtil.create_ip_address_object(ip_address(u"0.0.0.0"))
+        tun_dst = IPUtil.create_ip_address_object(ip_address(u"0.0.0.0"))

         args_in = dict(
             endpoint=dict(
@@ -221,7 +216,8 @@ class GBP(object):
                 mac=L2Util.mac_to_bin(mac_addr),
                 sclass=sclass,
                 flags=getattr(
-                    GBPEndpointFlags, 'GBP_API_ENDPOINT_FLAG_EXTERNAL').value,
+                    GBPEndpointFlags, u"GBP_API_ENDPOINT_FLAG_EXTERNAL"
+                ).value,
                 tun=dict(
                     src=tun_src,
                     dst=tun_dst
@@ -245,9 +241,8 @@ class GBP(object):
         :type bd_id: int
         :type rd_id: int
         """
-        cmd = 'gbp_ext_itf_add_del'
-        err_msg = 'Failed to add external GBP interface on {node}!'\
-            .format(node=node['host'])
+        cmd = u"gbp_ext_itf_add_del"
+        err_msg = f"Failed to add external GBP interface on {node[u'host']}!"

         args_in = dict(
             is_add=1,
             ext_itf=dict(
                 sw_if_index=sw_if_index,
                 bd_id=bd_id,
                 rd_id=rd_id,
-                flags=getattr(GBPExtItfFlags, 'GBP_API_EXT_ITF_F_NONE').value
+                flags=getattr(GBPExtItfFlags, u"GBP_API_EXT_ITF_F_NONE").value
             )
         )

@@ -281,19 +276,19 @@ class GBP(object):
         :type rd_id: int
         :type sw_if_index: int
         """
-        cmd = 'gbp_subnet_add_del'
-        err_msg = 'Failed to add GBP subnet on {node}!'\
-            .format(node=node['host'])
+        cmd = u"gbp_subnet_add_del"
+        err_msg = f"Failed to add GBP subnet on {node[u'host']}!"

         args_in = dict(
             is_add=1,
             subnet=dict(
-                type=getattr(GBPSubnetType, 'GBP_API_SUBNET_L3_OUT').value,
+                type=getattr(GBPSubnetType, u"GBP_API_SUBNET_L3_OUT").value,
                 sw_if_index=sw_if_index,
                 sclass=sclass,
                 prefix=dict(
                     address=IPUtil.create_ip_address_object(
-                        ip_address(unicode(address))),
+                        ip_address(address)
+                    ),
                     len=int(subnet_length)
                 ),
                 rd_id=rd_id
@@ -318,14 +313,13 @@ class GBP(object):
         :type acl_index: int
         :type hash_mode: str
         """
-        cmd = 'gbp_contract_add_del'
-        err_msg = 'Failed to add GBP contract on {node}!'\
-            .format(node=node['host'])
+        cmd = u"gbp_contract_add_del"
+        err_msg = f"Failed to add GBP contract on {node[u'host']}!"
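(A note on the getattr(...) lookups used for these flag, type and action values: on an IntEnum they are plain name-based attribute access, equivalent to indexing the enum class by name. A tiny self-contained illustration follows; Color is a made-up enum, not part of this API.)

    from enum import IntEnum

    class Color(IntEnum):  # made-up enum, illustration only
        RED = 1

    assert getattr(Color, u"RED").value == Color[u"RED"].value == 1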
- hash_mode = 'GBP_API_HASH_MODE_SRC_IP' if hash_mode is None \ + hash_mode = u"GBP_API_HASH_MODE_SRC_IP" if hash_mode is None \ else hash_mode rule_permit = dict( - action=getattr(GBPRuleAction, 'GBP_API_RULE_PERMIT').value, + action=getattr(GBPRuleAction, u"GBP_API_RULE_PERMIT").value, nh_set=dict( hash_mode=getattr(GBPHashMode, hash_mode).value, n_nhs=8, diff --git a/resources/libraries/python/HTTPRequest.py b/resources/libraries/python/HTTPRequest.py deleted file mode 100644 index 0f650a89a1..0000000000 --- a/resources/libraries/python/HTTPRequest.py +++ /dev/null @@ -1,333 +0,0 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implementation of HTTP requests GET, PUT, POST and DELETE used in -communication with Honeycomb. - -The HTTP requests are implemented in the class HTTPRequest which uses -requests.request. -""" - -from ipaddress import IPv6Address, AddressValueError -from enum import IntEnum, unique - -from robot.api.deco import keyword -from robot.api import logger -from robot.libraries.BuiltIn import BuiltIn - -from requests import request, RequestException, Timeout, TooManyRedirects, \ - HTTPError, ConnectionError -from requests.auth import HTTPBasicAuth - - -@unique -class HTTPCodes(IntEnum): - """HTTP status codes""" - OK = 200 # HTTP standard code name. # pylint: disable=invalid-name - ACCEPTED = 201 - UNAUTHORIZED = 401 - FORBIDDEN = 403 - NOT_FOUND = 404 - CONFLICT = 409 - INTERNAL_SERVER_ERROR = 500 - SERVICE_UNAVAILABLE = 503 - - -class HTTPRequestError(Exception): - """Exception raised by HTTPRequest objects. - - When raising this exception, put this information to the message in this - order: - - - short description of the encountered problem, - - relevant messages if there are any collected, e.g., from caught - exception, - - relevant data if there are any collected. - - The logging is performed on two levels: 1. error - short description of the - problem; 2. debug - detailed information. - """ - - def __init__(self, msg, details='', enable_logging=True): - """Sets the exception message and enables / disables logging. - - It is not wanted to log errors when using these keywords together - with keywords like "Wait until keyword succeeds". So you can disable - logging by setting enable_logging to False. - - :param msg: Message to be displayed and logged. - :param enable_logging: When True, logging is enabled, otherwise - logging is disabled. - :type msg: str - :type enable_logging: bool - """ - super(HTTPRequestError, self).__init__() - self._msg = "{0}: {1}".format(self.__class__.__name__, msg) - self._details = details - if enable_logging: - logger.info(self._msg) - logger.debug(self._details) - - def __repr__(self): - return repr(self._msg) - - def __str__(self): - return str(self._msg) - - -class HTTPRequest(object): - """A class implementing HTTP requests GET, PUT, POST and DELETE used in - communication with Honeycomb. 
- - The communication with Honeycomb and processing of all exceptions is done in - the method _http_request which uses requests.request to send requests and - receive responses. The received status code and content of response are - logged on the debug level. - All possible exceptions raised by requests.request are also processed there. - - The other methods (get, put, post and delete) use _http_request to send - corresponding request. - - These methods must not be used as keywords in tests. Use keywords - implemented in the module HoneycombAPIKeywords instead. - """ - - def __init__(self): - pass - - @staticmethod - def create_full_url(ip_addr, port, path): - """Creates full url including host, port, and path to data. - - :param ip_addr: Server IP. - :param port: Communication port. - :param path: Path to data. - :type ip_addr: str - :type port: str or int - :type path: str - :returns: Full url. - :rtype: str - """ - - try: - IPv6Address(unicode(ip_addr)) - # IPv6 address must be in brackets - ip_addr = "[{0}]".format(ip_addr) - except (AttributeError, AddressValueError): - pass - - return "http://{ip}:{port}{path}".format(ip=ip_addr, port=port, - path=path) - - @staticmethod - def _http_request(method, node, path, enable_logging=True, **kwargs): - """Sends specified HTTP request and returns status code and response - content. - - :param method: The method to be performed on the resource identified by - the given request URI. - :param node: Honeycomb node. - :param path: URL path, e.g. /index.html. - :param enable_logging: Used to suppress errors when checking Honeycomb - state during suite setup and teardown. - :param kwargs: Named parameters accepted by request.request: - params -- (optional) Dictionary or bytes to be sent in the query - string for the Request. - data -- (optional) Dictionary, bytes, or file-like object to - send in the body of the Request. - json -- (optional) json data to send in the body of the Request. - headers -- (optional) Dictionary of HTTP Headers to send with - the Request. - cookies -- (optional) Dict or CookieJar object to send with the - Request. - files -- (optional) Dictionary of 'name': file-like-objects - (or {'name': ('filename', fileobj)}) for multipart encoding upload. - timeout (float or tuple) -- (optional) How long to wait for the - server to send data before giving up, as a float, or a (connect - timeout, read timeout) tuple. - allow_redirects (bool) -- (optional) Boolean. Set to True if POST/ - PUT/DELETE redirect following is allowed. - proxies -- (optional) Dictionary mapping protocol to the URL of - the proxy. - verify -- (optional) whether the SSL cert will be verified. - A CA_BUNDLE path can also be provided. Defaults to True. - stream -- (optional) if False, the response content will be - immediately downloaded. - cert -- (optional) if String, path to ssl client cert file (.pem). - If Tuple, ('cert', 'key') pair. - :type method: str - :type node: dict - :type path: str - :type enable_logging: bool - :type kwargs: dict - :returns: Status code and content of response. - :rtype: tuple - :raises HTTPRequestError: If - 1. it is not possible to connect, - 2. invalid HTTP response comes from server, - 3. request exceeded the configured number of maximum re-directions, - 4. request timed out, - 5. there is any other unexpected HTTP request exception. 
- """ - timeout = kwargs["timeout"] - - use_odl = BuiltIn().get_variable_value("${use_odl_client}") - - if use_odl: - port = 8181 - # Using default ODL Restconf port - # TODO: add node["honeycomb"]["odl_port"] to topology, use it here - odl_url_part = "/network-topology:network-topology/topology/" \ - "topology-netconf/node/vpp/yang-ext:mount" - else: - port = node["honeycomb"]["port"] - odl_url_part = "" - - try: - path = path.format(odl_url_part=odl_url_part) - except KeyError: - pass - - url = HTTPRequest.create_full_url(node['host'], - port, - path) - try: - auth = HTTPBasicAuth(node['honeycomb']['user'], - node['honeycomb']['passwd']) - rsp = request(method, url, auth=auth, verify=False, **kwargs) - - logger.debug("Status code: {0}".format(rsp.status_code)) - logger.debug("Response: {0}".format(rsp.content)) - - return rsp.status_code, rsp.content - - except ConnectionError as err: - # Switching the logging on / off is needed only for - # "requests.ConnectionError" - raise HTTPRequestError("Not possible to connect to {0}:{1}.". - format(node['host'], - node['honeycomb']['port']), - repr(err), enable_logging=enable_logging) - except HTTPError as err: - raise HTTPRequestError("Invalid HTTP response from {0}.". - format(node['host']), repr(err)) - except TooManyRedirects as err: - raise HTTPRequestError("Request exceeded the configured number " - "of maximum re-directions.", repr(err)) - except Timeout as err: - raise HTTPRequestError("Request timed out. Timeout is set to {0}.". - format(timeout), repr(err)) - except RequestException as err: - raise HTTPRequestError("Unexpected HTTP request exception.", - repr(err)) - - @staticmethod - @keyword(name="HTTP Get") - def get(node, path, headers=None, timeout=15, enable_logging=True): - """Sends a GET request and returns the response and status code. - - :param node: Honeycomb node. - :param path: URL path, e.g. /index.html. - :param headers: Dictionary of HTTP Headers to send with the Request. - :param timeout: How long to wait for the server to send data before - giving up, as a float, or a (connect timeout, read timeout) tuple. - :param enable_logging: Used to suppress errors when checking Honeycomb - state during suite setup and teardown. When True, - logging is enabled, otherwise logging is disabled. - :type node: dict - :type path: str - :type headers: dict - :type timeout: float or tuple - :type enable_logging: bool - :returns: Status code and content of response. - :rtype: tuple - """ - - return HTTPRequest._http_request('GET', node, path, - enable_logging=enable_logging, - headers=headers, timeout=timeout) - - @staticmethod - @keyword(name="HTTP Put") - def put(node, path, headers=None, payload=None, json=None, timeout=15): - """Sends a PUT request and returns the response and status code. - - :param node: Honeycomb node. - :param path: URL path, e.g. /index.html. - :param headers: Dictionary of HTTP Headers to send with the Request. - :param payload: Dictionary, bytes, or file-like object to send in - the body of the Request. - :param json: JSON formatted string to send in the body of the Request. - :param timeout: How long to wait for the server to send data before - giving up, as a float, or a (connect timeout, read timeout) tuple. - :type node: dict - :type path: str - :type headers: dict - :type payload: dict, bytes, or file-like object - :type json: str - :type timeout: float or tuple - :returns: Status code and content of response. 
- :rtype: tuple - """ - return HTTPRequest._http_request('PUT', node, path, headers=headers, - data=payload, json=json, - timeout=timeout) - - @staticmethod - @keyword(name="HTTP Post") - def post(node, path, headers=None, payload=None, json=None, timeout=15, - enable_logging=True): - """Sends a POST request and returns the response and status code. - - :param node: Honeycomb node. - :param path: URL path, e.g. /index.html. - :param headers: Dictionary of HTTP Headers to send with the Request. - :param payload: Dictionary, bytes, or file-like object to send in - the body of the Request. - :param json: JSON formatted string to send in the body of the Request. - :param timeout: How long to wait for the server to send data before - giving up, as a float, or a (connect timeout, read timeout) tuple. - :param enable_logging: Used to suppress errors when checking ODL - state during suite setup and teardown. When True, - logging is enabled, otherwise logging is disabled. - :type node: dict - :type path: str - :type headers: dict - :type payload: dict, bytes, or file-like object - :type json: str - :type timeout: float or tuple - :type enable_logging: bool - :returns: Status code and content of response. - :rtype: tuple - """ - return HTTPRequest._http_request('POST', node, path, - enable_logging=enable_logging, - headers=headers, data=payload, - json=json, timeout=timeout) - - @staticmethod - @keyword(name="HTTP Delete") - def delete(node, path, timeout=15): - """Sends a DELETE request and returns the response and status code. - - :param node: Honeycomb node. - :param path: URL path, e.g. /index.html. - :param timeout: How long to wait for the server to send data before - giving up, as a float, or a (connect timeout, read timeout) tuple. - :type node: dict - :type path: str - :type timeout: float or tuple - :returns: Status code and content of response. - :rtype: tuple - """ - return HTTPRequest._http_request('DELETE', node, path, timeout=timeout) diff --git a/resources/libraries/python/IPUtil.py b/resources/libraries/python/IPUtil.py index de018b9565..f99deb1e08 100644 --- a/resources/libraries/python/IPUtil.py +++ b/resources/libraries/python/IPUtil.py @@ -16,6 +16,7 @@ import re from enum import IntEnum + from ipaddress import ip_address from resources.libraries.python.Constants import Constants @@ -55,7 +56,7 @@ class FibPathType(IntEnum): class FibPathFlags(IntEnum): """FIB path flags.""" FIB_PATH_FLAG_NONE = 0 - FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED = 1 #pylint: disable=invalid-name + FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED = 1 # pylint: disable=invalid-name FIB_PATH_FLAG_RESOLVE_VIA_HOST = 2 @@ -68,7 +69,7 @@ class FibPathNhProto(IntEnum): FIB_PATH_NH_PROTO_BIER = 4 -class IPUtil(object): +class IPUtil: """Common IP utilities""" @staticmethod @@ -81,7 +82,7 @@ class IPUtil(object): :returns: Integer representation of IP address. 
:rtype: int """ - return int(ip_address(unicode(ip_str))) + return int(ip_address(ip_str)) @staticmethod def int_to_ip(ip_int): @@ -115,13 +116,14 @@ class IPUtil(object): if not sw_if_index: return list() - is_ipv6 = 1 if ip_version == 'ipv6' else 0 + is_ipv6 = 1 if ip_version == u"ipv6" else 0 - cmd = 'ip_address_dump' - args = dict(sw_if_index=sw_if_index, - is_ipv6=is_ipv6) - err_msg = 'Failed to get L2FIB dump on host {host}'.format( - host=node['host']) + cmd = u"ip_address_dump" + args = dict( + sw_if_index=sw_if_index, + is_ipv6=is_ipv6 + ) + err_msg = f"Failed to get L2FIB dump on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) @@ -138,11 +140,10 @@ class IPUtil(object): :param node: VPP node. :type node: dict """ - - PapiSocketExecutor.run_cli_cmd(node, 'show ip fib') - PapiSocketExecutor.run_cli_cmd(node, 'show ip fib summary') - PapiSocketExecutor.run_cli_cmd(node, 'show ip6 fib') - PapiSocketExecutor.run_cli_cmd(node, 'show ip6 fib summary') + PapiSocketExecutor.run_cli_cmd(node, u"show ip fib") + PapiSocketExecutor.run_cli_cmd(node, u"show ip fib summary") + PapiSocketExecutor.run_cli_cmd(node, u"show ip6 fib") + PapiSocketExecutor.run_cli_cmd(node, u"show ip6 fib summary") @staticmethod def vpp_get_ip_tables_prefix(node, address): @@ -153,13 +154,12 @@ class IPUtil(object): :type node: dict :type address: str """ - addr = ip_address(unicode(address)) + addr = ip_address(address) + ip_ver = u"ip6" if addr.version == 6 else u"ip" PapiSocketExecutor.run_cli_cmd( - node, 'show {ip_ver} fib {addr}/{addr_len}'.format( - ip_ver='ip6' if addr.version == 6 else 'ip', - addr=addr, - addr_len=addr.max_prefixlen)) + node, f"show {ip_ver} fib {addr}/{addr.max_prefixlen}" + ) @staticmethod def get_interface_vrf_table(node, interface, ip_version='ipv4'): @@ -176,18 +176,17 @@ class IPUtil(object): """ sw_if_index = InterfaceUtil.get_interface_index(node, interface) - cmd = 'sw_interface_get_table' + cmd = u"sw_interface_get_table" args = dict( sw_if_index=sw_if_index, - is_ipv6=True if ip_version == 'ipv6' else False + is_ipv6=bool(ip_version == u"ipv6") ) - err_msg = 'Failed to get VRF id assigned to interface {ifc}'.format( - ifc=interface) + err_msg = f"Failed to get VRF id assigned to interface {interface}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd, **args).get_reply(err_msg) - return reply['vrf_id'] + return reply[u"vrf_id"] @staticmethod def vpp_ip_source_check_setup(node, if_name): @@ -198,14 +197,13 @@ class IPUtil(object): :type node: dict :type if_name: str """ - cmd = 'ip_source_check_interface_add_del' + cmd = u"ip_source_check_interface_add_del" args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, if_name), is_add=1, loose=0 ) - err_msg = 'Failed to enable source check on interface {ifc}'.format( - ifc=if_name) + err_msg = f"Failed to enable source check on interface {if_name}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -220,12 +218,12 @@ class IPUtil(object): :type interface: str :type addr: str """ - cmd = 'ip_probe_neighbor' + cmd = u"ip_probe_neighbor" args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), - dst=str(addr)) - err_msg = 'VPP ip probe {dev} {ip} failed on {h}'.format( - dev=interface, ip=addr, h=node['host']) + dst=str(addr) + ) + err_msg = f"VPP ip probe {interface} {addr} failed on {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, 
@@ -239,16 +237,15 @@ class IPUtil(object): :type ip1: str :type ip2: str """ - addr1 = ip_address(unicode(ip1)) - addr2 = ip_address(unicode(ip2)) + addr1 = ip_address(ip1) + addr2 = ip_address(ip2) if addr1 != addr2: - raise AssertionError('IP addresses are not equal: {0} != {1}'. - format(ip1, ip2)) + raise AssertionError(f"IP addresses are not equal: {ip1} != {ip2}") @staticmethod - def setup_network_namespace(node, namespace_name, interface_name, - ip_addr, prefix): + def setup_network_namespace( + node, namespace_name, interface_name, ip_addr, prefix): """Setup namespace on given node and attach interface and IP to this namespace. Applicable also on TG node. @@ -263,19 +260,18 @@ class IPUtil(object): :type ip_addr: str :type prefix: int """ - cmd = ('ip netns add {0}'.format(namespace_name)) + cmd = f"ip netns add {namespace_name}" exec_cmd_no_error(node, cmd, sudo=True) - cmd = ('ip link set dev {0} up netns {1}'.format(interface_name, - namespace_name)) + cmd = f"ip link set dev {interface_name} up netns {namespace_name}" exec_cmd_no_error(node, cmd, sudo=True) - cmd = ('ip netns exec {0} ip addr add {1}/{2} dev {3}'.format( - namespace_name, ip_addr, prefix, interface_name)) + cmd = f"ip netns exec {namespace_name} ip addr add {ip_addr}/{prefix}" \ + f" dev {interface_name}" exec_cmd_no_error(node, cmd, sudo=True) @staticmethod - def linux_enable_forwarding(node, ip_ver='ipv4'): + def linux_enable_forwarding(node, ip_ver=u"ipv4"): """Enable forwarding on a Linux node, e.g. VM. :param node: VPP node. @@ -283,7 +279,7 @@ class IPUtil(object): :type node: dict :type ip_ver: str """ - cmd = 'sysctl -w net.{0}.ip_forward=1'.format(ip_ver) + cmd = f"sysctl -w net.{ip_ver}.ip_forward=1" exec_cmd_no_error(node, cmd, sudo=True) @staticmethod @@ -298,15 +294,16 @@ class IPUtil(object): :rtype: str :raises RuntimeError: If cannot get the information about interfaces. """ - regex_intf_info = r"pci@" \ - r"([0-9a-f]{4}:[0-9a-f]{2}:[0-9a-f]{2}.[0-9a-f])\s*" \ - r"([a-zA-Z0-9]*)\s*network" + regex_intf_info = \ + r"pci@([0-9a-f]{4}:[0-9a-f]{2}:[0-9a-f]{2}.[0-9a-f])\s*" \ + r"([a-zA-Z0-9]*)\s*network" - cmd = "lshw -class network -businfo" + cmd = u"lshw -class network -businfo" ret_code, stdout, stderr = exec_cmd(node, cmd, timeout=30, sudo=True) if ret_code != 0: - raise RuntimeError('Could not get information about interfaces:\n' - '{err}'.format(err=stderr)) + raise RuntimeError( + f"Could not get information about interfaces:\n{stderr}" + ) for line in stdout.splitlines()[2:]: try: @@ -326,12 +323,12 @@ class IPUtil(object): :type interface: str :raises RuntimeError: If the interface could not be set up. """ - cmd = "ip link set {0} up".format(interface) + cmd = f"ip link set {interface} up" exec_cmd_no_error(node, cmd, timeout=30, sudo=True) @staticmethod - def set_linux_interface_ip(node, interface, ip_addr, prefix, - namespace=None): + def set_linux_interface_ip( + node, interface, ip_addr, prefix, namespace=None): """Set IP address to interface in linux. :param node: VPP/TG node. @@ -347,11 +344,10 @@ class IPUtil(object): :raises RuntimeError: IP could not be set.
""" if namespace is not None: - cmd = 'ip netns exec {ns} ip addr add {ip}/{p} dev {dev}'.format( - ns=namespace, ip=ip_addr, p=prefix, dev=interface) + cmd = f"ip netns exec {namespace} ip addr add {ip_addr}/{prefix}" \ + f" dev {interface}" else: - cmd = 'ip addr add {ip}/{p} dev {dev}'.format( - ip=ip_addr, p=prefix, dev=interface) + cmd = f"ip addr add {ip_addr}/{prefix} dev {interface}" exec_cmd_no_error(node, cmd, timeout=5, sudo=True) @@ -371,15 +367,16 @@ class IPUtil(object): :type namespace: str """ if namespace is not None: - cmd = 'ip netns exec {} ip route add {}/{} via {}'.format( - namespace, ip_addr, prefix, gateway) + cmd = f"ip netns exec {namespace} ip route add {ip_addr}/{prefix}" \ + f" via {gateway}" else: - cmd = 'ip route add {}/{} via {}'.format(ip_addr, prefix, gateway) + cmd = f"ip route add {ip_addr}/{prefix} via {gateway}" + exec_cmd_no_error(node, cmd, sudo=True) @staticmethod - def vpp_interface_set_ip_address(node, interface, address, - prefix_length=None): + def vpp_interface_set_ip_address( + node, interface, address, prefix_length=None): """Set IP address to VPP interface. :param node: VPP node. @@ -391,9 +388,9 @@ class IPUtil(object): :type address: str :type prefix_length: int """ - ip_addr = ip_address(unicode(address)) + ip_addr = ip_address(address) - cmd = 'sw_interface_add_del_address' + cmd = u"sw_interface_add_del_address" args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), is_add=True, @@ -401,10 +398,11 @@ class IPUtil(object): prefix=IPUtil.create_prefix_object( ip_addr, prefix_length if prefix_length else 128 - if ip_addr.version == 6 else 32) + if ip_addr.version == 6 else 32 + ) ) - err_msg = 'Failed to add IP address on interface {ifc}'.format( - ifc=interface) + err_msg = f"Failed to add IP address on interface {interface}" + with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -421,19 +419,21 @@ class IPUtil(object): :type ip_addr: str :type mac_address: str """ - dst_ip = ip_address(unicode(ip_addr)) + dst_ip = ip_address(ip_addr) neighbor = dict( sw_if_index=Topology.get_interface_sw_index(node, iface_key), flags=0, mac_address=str(mac_address), - ip_address=str(dst_ip)) - cmd = 'ip_neighbor_add_del' + ip_address=str(dst_ip) + ) + cmd = u"ip_neighbor_add_del" args = dict( is_add=1, - neighbor=neighbor) - err_msg = 'Failed to add IP neighbor on interface {ifc}'.format( - ifc=iface_key) + neighbor=neighbor + ) + err_msg = f"Failed to add IP neighbor on interface {iface_key}" + with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -460,9 +460,11 @@ class IPUtil(object): """ return dict( af=getattr( - AddressFamily, 'ADDRESS_IP6' if ip_addr.version == 6 - else 'ADDRESS_IP4').value, - un=IPUtil.union_addr(ip_addr)) + AddressFamily, u"ADDRESS_IP6" if ip_addr.version == 6 + else u"ADDRESS_IP4" + ).value, + un=IPUtil.union_addr(ip_addr) + ) @staticmethod def create_prefix_object(ip_addr, addr_len): @@ -508,36 +510,37 @@ class IPUtil(object): :returns: route parameter basic structure :rtype: dict """ - interface = kwargs.get('interface', '') - gateway = kwargs.get('gateway', '') + interface = kwargs.get(u"interface", u"") + gateway = kwargs.get(u"gateway", u"") - net_addr = ip_address(unicode(network)) + net_addr = ip_address(network) prefix = IPUtil.create_prefix_object(net_addr, prefix_len) paths = list() n_hop = dict( - address=IPUtil.union_addr(ip_address(unicode(gateway))) if gateway - else 0, + 
@@ -508,36 +510,37 @@ class IPUtil(object): :returns: route parameter basic structure :rtype: dict """ - interface = kwargs.get('interface', '') - gateway = kwargs.get('gateway', '') + interface = kwargs.get(u"interface", u"") + gateway = kwargs.get(u"gateway", u"") - net_addr = ip_address(unicode(network)) + net_addr = ip_address(network) prefix = IPUtil.create_prefix_object(net_addr, prefix_len) paths = list() n_hop = dict( - address=IPUtil.union_addr(ip_address(unicode(gateway))) if gateway - else 0, + address=IPUtil.union_addr(ip_address(gateway)) if gateway else 0, via_label=MPLS_LABEL_INVALID, obj_id=Constants.BITWISE_NON_ZERO ) path = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface) if interface else Constants.BITWISE_NON_ZERO, - table_id=int(kwargs.get('lookup_vrf', 0)), + table_id=int(kwargs.get(u"lookup_vrf", 0)), rpf_id=Constants.BITWISE_NON_ZERO, - weight=int(kwargs.get('weight', 1)), + weight=int(kwargs.get(u"weight", 1)), preference=1, type=getattr( - FibPathType, 'FIB_PATH_TYPE_LOCAL' - if kwargs.get('local', False) - else 'FIB_PATH_TYPE_NORMAL').value, - flags=getattr(FibPathFlags, 'FIB_PATH_FLAG_NONE').value, + FibPathType, u"FIB_PATH_TYPE_LOCAL" + if kwargs.get(u"local", False) + else u"FIB_PATH_TYPE_NORMAL" + ).value, + flags=getattr(FibPathFlags, u"FIB_PATH_FLAG_NONE").value, proto=getattr( - FibPathNhProto, 'FIB_PATH_NH_PROTO_IP6' + FibPathNhProto, u"FIB_PATH_NH_PROTO_IP6" if net_addr.version == 6 - else 'FIB_PATH_NH_PROTO_IP4').value, + else u"FIB_PATH_NH_PROTO_IP4" + ).value, nh=n_hop, n_labels=0, label_stack=list(0 for _ in range(16)) @@ -545,7 +548,7 @@ class IPUtil(object): paths.append(path) route = dict( - table_id=int(kwargs.get('vrf', 0)), + table_id=int(kwargs.get(u"vrf", 0)), prefix=prefix, n_paths=len(paths), paths=paths @@ -576,43 +579,45 @@ class IPUtil(object): :type prefix_len: int :type kwargs: dict """ - count = kwargs.get("count", 1) + count = kwargs.get(u"count", 1) if count > 100: - gateway = kwargs.get("gateway", '') - interface = kwargs.get("interface", '') - vrf = kwargs.get("vrf", None) - multipath = kwargs.get("multipath", False) + gateway = kwargs.get(u"gateway", u"") + interface = kwargs.get(u"interface", u"") + vrf = kwargs.get(u"vrf", None) + multipath = kwargs.get(u"multipath", False) with VatTerminal(node, json_param=False) as vat: + vat.vat_terminal_exec_cmd_from_template( - 'vpp_route_add.vat', + u"vpp_route_add.vat", network=network, prefix_length=prefix_len, - via='via {}'.format(gateway) if gateway else '', - sw_if_index='sw_if_index {}'.format( - InterfaceUtil.get_interface_index(node, interface)) - if interface else '', - vrf='vrf {}'.format(vrf) if vrf else '', - count='count {}'.format(count) if count else '', - multipath='multipath' if multipath else '') + via=f"via {gateway}" if gateway else u"", + sw_if_index=f"sw_if_index " + f"{InterfaceUtil.get_interface_index(node, interface)}" + if interface else u"", + vrf=f"vrf {vrf}" if vrf else u"", + count=f"count {count}" if count else u"", + multipath=u"multipath" if multipath else u"" + ) return - net_addr = ip_address(unicode(network)) - cmd = 'ip_route_add_del' + net_addr = ip_address(network) + cmd = u"ip_route_add_del" args = dict( is_add=1, - is_multipath=int(kwargs.get('multipath', False)), + is_multipath=int(kwargs.get(u"multipath", False)), route=None ) + err_msg = f"Failed to add route(s) on host {node[u'host']}" - err_msg = 'Failed to add route(s) on host {host}'.format( - host=node['host']) with PapiSocketExecutor(node) as papi_exec: - for i in xrange(kwargs.get('count', 1)): - args['route'] = IPUtil.compose_vpp_route_structure( - node, net_addr + i, prefix_len, **kwargs) - history = False if 1 < i < kwargs.get('count', 1) else True + for i in range(kwargs.get(u"count", 1)): + args[u"route"] = IPUtil.compose_vpp_route_structure( + node, net_addr + i, prefix_len, **kwargs + ) + history = bool(not 1 < i < kwargs.get(u"count", 1)) papi_exec.add(cmd, history=history, **args) papi_exec.get_replies(err_msg) @@ -625,14 +630,14 @@ class IPUtil(object): :type
node: dict :type interface: str """ - cmd = 'sw_interface_add_del_address' + cmd = u"sw_interface_add_del_address" args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), is_add=False, del_all=True ) - err_msg = 'Failed to flush IP address on interface {ifc}'.format( - ifc=interface) + err_msg = f"Failed to flush IP address on interface {interface}" + with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -647,14 +652,16 @@ class IPUtil(object): :type table_id: int :type ipv6: bool """ - cmd = 'ip_table_add_del' + cmd = u"ip_table_add_del" table = dict( table_id=int(table_id), - is_ip6=int(ipv6)) + is_ip6=int(ipv6) + ) args = dict( table=table, - is_add=1) - err_msg = 'Failed to add FIB table on host {host}'.format( - host=node['host']) + is_add=1 + ) + err_msg = f"Failed to add FIB table on host {node[u'host']}" + with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) diff --git a/resources/libraries/python/IPsecUtil.py b/resources/libraries/python/IPsecUtil.py index 4578b43987..9237769d45 100644 --- a/resources/libraries/python/IPsecUtil.py +++ b/resources/libraries/python/IPsecUtil.py @@ -15,10 +15,11 @@ import os +from enum import Enum, IntEnum +from io import open from random import choice -from string import letters +from string import ascii_letters -from enum import Enum, IntEnum from ipaddress import ip_network, ip_address from resources.libraries.python.IPUtil import IPUtil @@ -35,16 +36,18 @@ def gen_key(length): :param length: Length of generated payload. :type length: int :returns: The generated payload. - :rtype: str + :rtype: bytes """ - return ''.join(choice(letters) for _ in range(length)) + return u"".join( + choice(ascii_letters) for _ in range(length) + ).encode(encoding=u"utf-8") class PolicyAction(Enum): """Policy actions.""" - BYPASS = ('bypass', 0) - DISCARD = ('discard', 1) - PROTECT = ('protect', 3) + BYPASS = (u"bypass", 0) + DISCARD = (u"discard", 1) + PROTECT = (u"protect", 3) def __init__(self, policy_name, policy_int_repr): self.policy_name = policy_name @@ -53,10 +56,10 @@ class PolicyAction(Enum): class CryptoAlg(Enum): """Encryption algorithms.""" - AES_CBC_128 = ('aes-cbc-128', 1, 'AES-CBC', 16) - AES_CBC_256 = ('aes-cbc-256', 3, 'AES-CBC', 32) - AES_GCM_128 = ('aes-gcm-128', 7, 'AES-GCM', 16) - AES_GCM_256 = ('aes-gcm-256', 9, 'AES-GCM', 32) + AES_CBC_128 = (u"aes-cbc-128", 1, u"AES-CBC", 16) + AES_CBC_256 = (u"aes-cbc-256", 3, u"AES-CBC", 32) + AES_GCM_128 = (u"aes-gcm-128", 7, u"AES-GCM", 16) + AES_GCM_256 = (u"aes-gcm-256", 9, u"AES-GCM", 32) def __init__(self, alg_name, alg_int_repr, scapy_name, key_len): self.alg_name = alg_name @@ -67,8 +70,8 @@ class CryptoAlg(Enum): class IntegAlg(Enum): """Integrity algorithm.""" - SHA_256_128 = ('sha-256-128', 4, 'SHA2-256-128', 32) - SHA_512_256 = ('sha-512-256', 6, 'SHA2-512-256', 64) + SHA_256_128 = (u"sha-256-128", 4, u"SHA2-256-128", 32) + SHA_512_256 = (u"sha-512-256", 6, u"SHA2-512-256", 64) def __init__(self, alg_name, alg_int_repr, scapy_name, key_len): self.alg_name = alg_name @@ -90,7 +93,7 @@ class IPsecSadFlags(IntEnum): IPSEC_API_SAD_FLAG_IS_TUNNEL_V6 = 8 -class IPsecUtil(object): +class IPsecUtil: """IPsec utilities.""" @staticmethod @@ -249,9 +252,8 @@ class IPsecUtil(object): :raises RuntimeError: If failed to select IPsec backend or if no API reply received. 
""" - cmd = 'ipsec_select_backend' - err_msg = 'Failed to select IPsec backend on host {host}'.format( - host=node['host']) + cmd = u"ipsec_select_backend" + err_msg = f"Failed to select IPsec backend on host {node[u'host']}" args = dict( protocol=protocol, index=index @@ -266,15 +268,14 @@ class IPsecUtil(object): :param node: VPP node to dump IPsec backend on. :type node: dict """ - err_msg = 'Failed to dump IPsec backends on host {host}'.format( - host=node['host']) + err_msg = f"Failed to dump IPsec backends on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: - papi_exec.add('ipsec_backend_dump').get_details(err_msg) + papi_exec.add(u"ipsec_backend_dump").get_details(err_msg) @staticmethod def vpp_ipsec_add_sad_entry( node, sad_id, spi, crypto_alg, crypto_key, integ_alg=None, - integ_key='', tunnel_src=None, tunnel_dst=None): + integ_key=u"", tunnel_src=None, tunnel_dst=None): """Create Security Association Database entry on the VPP node. :param node: VPP node to add SAD entry on. @@ -298,6 +299,10 @@ class IPsecUtil(object): :type tunnel_src: str :type tunnel_dst: str """ + if isinstance(crypto_key, str): + crypto_key = crypto_key.encode(encoding=u"utf-8") + if isinstance(integ_key, str): + integ_key = integ_key.encode(encoding=u"utf-8") ckey = dict( length=len(crypto_key), data=crypto_key @@ -310,18 +315,18 @@ class IPsecUtil(object): flags = int(IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE) if tunnel_src and tunnel_dst: flags = flags | int(IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL) - src_addr = ip_address(unicode(tunnel_src)) - dst_addr = ip_address(unicode(tunnel_dst)) + src_addr = ip_address(tunnel_src) + dst_addr = ip_address(tunnel_dst) if src_addr.version == 6: flags = \ flags | int(IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6) else: - src_addr = '' - dst_addr = '' + src_addr = u"" + dst_addr = u"" - cmd = 'ipsec_sad_entry_add_del' - err_msg = 'Failed to add Security Association Database entry on ' \ - 'host {host}'.format(host=node['host']) + cmd = u"ipsec_sad_entry_add_del" + err_msg = f"Failed to add Security Association Database entry " \ + f"on host {node[u'host']}" sad_entry = dict( sad_id=int(sad_id), spi=int(spi), @@ -344,7 +349,7 @@ class IPsecUtil(object): @staticmethod def vpp_ipsec_add_sad_entries( node, n_entries, sad_id, spi, crypto_alg, crypto_key, - integ_alg=None, integ_key='', tunnel_src=None, tunnel_dst=None): + integ_alg=None, integ_key=u"", tunnel_src=None, tunnel_dst=None): """Create multiple Security Association Database entries on VPP node. :param node: VPP node to add SAD entry on. 
@@ -372,46 +377,41 @@ class IPsecUtil(object): :type tunnel_src: str :type tunnel_dst: str """ + if isinstance(crypto_key, str): + crypto_key = crypto_key.encode(encoding=u"utf-8") + if isinstance(integ_key, str): + integ_key = integ_key.encode(encoding=u"utf-8") if tunnel_src and tunnel_dst: - src_addr = ip_address(unicode(tunnel_src)) - dst_addr = ip_address(unicode(tunnel_dst)) + src_addr = ip_address(tunnel_src) + dst_addr = ip_address(tunnel_dst) else: - src_addr = '' - dst_addr = '' + src_addr = u"" + dst_addr = u"" addr_incr = 1 << (128 - 96) if src_addr.version == 6 \ else 1 << (32 - 24) if int(n_entries) > 10: - tmp_filename = '/tmp/ipsec_sad_{0}_add_del_entry.script'.\ - format(sad_id) + tmp_filename = f"/tmp/ipsec_sad_{sad_id}_add_del_entry.script" with open(tmp_filename, 'w') as tmp_file: - for i in xrange(n_entries): - integ = ( - 'integ-alg {integ_alg} integ-key {integ_key}'.format( - integ_alg=integ_alg.alg_name, - integ_key=integ_key.encode('hex')) - if integ_alg else '') - tunnel = ( - 'tunnel-src {laddr} tunnel-dst {raddr}'.format( - laddr=src_addr + i * addr_incr, - raddr=dst_addr + i * addr_incr) - if tunnel_src and tunnel_dst else '') - conf = ( - 'exec ipsec sa add {sad_id} esp spi {spi} ' - 'crypto-alg {crypto_alg} crypto-key {crypto_key} ' - '{integ} {tunnel}\n'.format( - sad_id=sad_id + i, - spi=spi + i, - crypto_alg=crypto_alg.alg_name, - crypto_key=crypto_key.encode('hex'), - integ=integ, - tunnel=tunnel)) + for i in range(n_entries): + integ = f"integ-alg {integ_alg.alg_name} " \ + f"integ-key {integ_key.hex()}" \ + if integ_alg else u"" + tunnel = f"tunnel-src {src_addr + i * addr_incr} " \ + f"tunnel-dst {dst_addr + i * addr_incr}" \ + if tunnel_src and tunnel_dst else u"" + conf = f"exec ipsec sa add {sad_id + i} esp spi {spi + i} "\ + f"crypto-alg {crypto_alg.alg_name} " \ + f"crypto-key {crypto_key.hex()} " \ + f"{integ} {tunnel}\n" tmp_file.write(conf) vat = VatExecutor() - vat.execute_script(tmp_filename, node, timeout=300, json_out=False, - copy_on_execute=True) + vat.execute_script( + tmp_filename, node, timeout=300, json_out=False, + copy_on_execute=True + ) os.remove(tmp_filename) return @@ -429,11 +429,12 @@ class IPsecUtil(object): flags = flags | int(IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL) if src_addr.version == 6: flags = flags | int( - IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6) + IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6 + ) - cmd = 'ipsec_sad_entry_add_del' - err_msg = 'Failed to add Security Association Database entry on ' \ - 'host {host}'.format(host=node['host']) + cmd = u"ipsec_sad_entry_add_del" + err_msg = f"Failed to add Security Association Database entry " \ + f"on host {node[u'host']}" sad_entry = dict( sad_id=int(sad_id), @@ -452,14 +453,14 @@ class IPsecUtil(object): entry=sad_entry ) with PapiSocketExecutor(node) as papi_exec: - for i in xrange(n_entries): - args['entry']['sad_id'] = int(sad_id) + i - args['entry']['spi'] = int(spi) + i - args['entry']['tunnel_src'] = str(src_addr + i * addr_incr) \ + for i in range(n_entries): + args[u"entry"][u"sad_id"] = int(sad_id) + i + args[u"entry"][u"spi"] = int(spi) + i + args[u"entry"][u"tunnel_src"] = str(src_addr + i * addr_incr) \ if tunnel_src and tunnel_dst else src_addr - args['entry']['tunnel_dst'] = str(dst_addr + i * addr_incr) \ + args[u"entry"][u"tunnel_dst"] = str(dst_addr + i * addr_incr) \ if tunnel_src and tunnel_dst else dst_addr - history = False if 1 < i < n_entries - 1 else True + history = bool(not 1 < i < n_entries - 2) papi_exec.add(cmd, history=history, 
**args) papi_exec.get_replies(err_msg) @@ -486,65 +487,59 @@ class IPsecUtil(object): :type interface: str :type raddr_range: int """ - laddr = ip_address(unicode(tunnel_src)) - raddr = ip_address(unicode(tunnel_dst)) - taddr = ip_address(unicode(traffic_addr)) + laddr = ip_address(tunnel_src) + raddr = ip_address(tunnel_dst) + taddr = ip_address(traffic_addr) addr_incr = 1 << (128 - raddr_range) if laddr.version == 6 \ else 1 << (32 - raddr_range) if int(n_tunnels) > 10: - tmp_filename = '/tmp/ipsec_set_ip.script' + tmp_filename = u"/tmp/ipsec_set_ip.script" with open(tmp_filename, 'w') as tmp_file: - for i in xrange(n_tunnels): - conf = ( - 'exec set interface ip address {interface} ' - '{laddr}/{laddr_l}\n' - 'exec ip route add {taddr}/{taddr_l} via {raddr} ' - '{interface}\n'.format( - interface=Topology.get_interface_name( - node, interface), - laddr=laddr + i * addr_incr, - laddr_l=raddr_range, - raddr=raddr + i * addr_incr, - taddr=taddr + i, - taddr_l=128 if taddr.version == 6 else 32)) + if_name = Topology.get_interface_name(node, interface) + for i in range(n_tunnels): + conf = f"exec set interface ip address {if_name} " \ + f"{laddr + i * addr_incr}/{raddr_range}\n" \ + f"exec ip route add {taddr + i}/" \ + f"{128 if taddr.version == 6 else 32} " \ + f"via {raddr + i * addr_incr} {if_name}\n" tmp_file.write(conf) vat = VatExecutor() - vat.execute_script(tmp_filename, node, timeout=300, json_out=False, - copy_on_execute=True) + vat.execute_script( + tmp_filename, node, timeout=300, json_out=False, + copy_on_execute=True + ) os.remove(tmp_filename) return - cmd1 = 'sw_interface_add_del_address' + cmd1 = u"sw_interface_add_del_address" args1 = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), is_add=True, del_all=False, prefix=None ) - cmd2 = 'ip_route_add_del' + cmd2 = u"ip_route_add_del" args2 = dict( is_add=1, is_multipath=0, route=None ) - err_msg = 'Failed to configure IP addresses and IP routes on ' \ - 'interface {ifc} on host {host}'.\ - format(ifc=interface, host=node['host']) + err_msg = f"Failed to configure IP addresses and IP routes " \ + f"on interface {interface} on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: - for i in xrange(n_tunnels): - args1['prefix'] = IPUtil.create_prefix_object( - laddr + i * addr_incr, raddr_range) - args2['route'] = IPUtil.compose_vpp_route_structure( - node, - taddr + i, + for i in range(n_tunnels): + args1[u"prefix"] = IPUtil.create_prefix_object( + laddr + i * addr_incr, raddr_range + ) + args2[u"route"] = IPUtil.compose_vpp_route_structure( + node, taddr + i, prefix_len=128 if taddr.version == 6 else 32, - interface=interface, - gateway=raddr + i * addr_incr + interface=interface, gateway=raddr + i * addr_incr ) - history = False if 1 < i < n_tunnels - 1 else True + history = bool(not 1 < i < n_tunnels - 2) papi_exec.add(cmd1, history=history, **args1).\ add(cmd2, history=history, **args2) papi_exec.get_replies(err_msg) @@ -558,9 +553,9 @@ class IPsecUtil(object): :type node: dict :type spd_id: int """ - cmd = 'ipsec_spd_add_del' - err_msg = 'Failed to add Security Policy Database on host {host}'.\ - format(host=node['host']) + cmd = u"ipsec_spd_add_del" + err_msg = f"Failed to add Security Policy Database " \ + f"on host {node[u'host']}" args = dict( is_add=1, spd_id=int(spd_id) @@ -579,10 +574,9 @@ class IPsecUtil(object): :type spd_id: int :type interface: str or int """ - cmd = 'ipsec_interface_add_del_spd' - err_msg = 'Failed to add interface {ifc} to Security Policy Database ' \ - '{spd} 
on host {host}'.\ - format(ifc=interface, spd=spd_id, host=node['host']) + cmd = u"ipsec_interface_add_del_spd" + err_msg = f"Failed to add interface {interface} to Security Policy " \ + f"Database {spd_id} on host {node[u'host']}" args = dict( is_add=1, sw_if_index=InterfaceUtil.get_interface_index(node, interface), @@ -631,16 +625,16 @@ class IPsecUtil(object): :type rport_range: string :type is_ipv6: bool """ if laddr_range is None: - laddr_range = '::/0' if is_ipv6 else '0.0.0.0/0' + laddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0" if raddr_range is None: - raddr_range = '::/0' if is_ipv6 else '0.0.0.0/0' + raddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0" - cmd = 'ipsec_spd_entry_add_del' - err_msg = 'Failed to add entry to Security Policy Database ' \ - '{spd} on host {host}'.format(spd=spd_id, host=node['host']) + cmd = u"ipsec_spd_entry_add_del" + err_msg = f"Failed to add entry to Security Policy Database {spd_id} " \ + f"on host {node[u'host']}" spd_entry = dict( spd_id=int(spd_id), @@ -650,23 +643,24 @@ class IPsecUtil(object): policy=action.policy_int_repr, protocol=int(proto) if proto else 0, remote_address_start=IPUtil.create_ip_address_object( - ip_network(unicode(raddr_range), strict=False).network_address), + ip_network(raddr_range, strict=False).network_address + ), remote_address_stop=IPUtil.create_ip_address_object( - ip_network( - unicode(raddr_range), strict=False).broadcast_address), + ip_network(raddr_range, strict=False).broadcast_address + ), local_address_start=IPUtil.create_ip_address_object( - ip_network( - unicode(laddr_range), strict=False).network_address), + ip_network(laddr_range, strict=False).network_address + ), local_address_stop=IPUtil.create_ip_address_object( - ip_network( - unicode(laddr_range), strict=False).broadcast_address), - remote_port_start=int(rport_range.split('-')[0]) if rport_range + ip_network(laddr_range, strict=False).broadcast_address + ), + remote_port_start=int(rport_range.split(u"-")[0]) if rport_range else 0, - remote_port_stop=int(rport_range.split('-')[1]) if rport_range + remote_port_stop=int(rport_range.split(u"-")[1]) if rport_range else 65535, - local_port_start=int(lport_range.split('-')[0]) if lport_range + local_port_start=int(lport_range.split(u"-")[0]) if lport_range else 0, - local_port_stop=int(lport_range.split('-')[1]) if rport_range + local_port_stop=int(lport_range.split(u"-")[1]) if lport_range else 65535 ) args = dict( @@ -678,7 +672,8 @@ class IPsecUtil(object): @staticmethod def vpp_ipsec_spd_add_entries( - node, n_entries, spd_id, priority, inbound, sa_id, raddr_ip): + node, n_entries, spd_id, priority, inbound, sa_id, raddr_ip, + raddr_range=0): """Create multiple Security Policy Database entries on the VPP node. :param node: VPP node to add SPD entries on. :param n_entries: Number of SPD entries to be added. :param spd_id: SPD ID to add entries on. :param priority: SPD entries priority, higher number = higher priority. :param inbound: If True policy is for inbound traffic, otherwise outbound. :param sa_id: SAD entry ID for first entry. Each subsequent entry will SAD entry ID incremented by 1. :param raddr_ip: Policy selector remote IPv4 start address for the first entry. Remote IPv4 end address will be calculated depending on raddr_range parameter. Each subsequent entry will have start address next after IPv4 end address of previous entry. + :param raddr_range: Required IP address range.
:type node: dict :type n_entries: int :type spd_id: int :type priority: int :type inbound: bool :type sa_id: int - :type raddr_ip: string + :type raddr_ip: str + :type raddr_range: int """ + raddr_ip = ip_address(raddr_ip) if int(n_entries) > 10: - tmp_filename = '/tmp/ipsec_spd_{0}_add_del_entry.script'.\ - format(sa_id) + tmp_filename = f"/tmp/ipsec_spd_{sa_id}_add_del_entry.script" with open(tmp_filename, 'w') as tmp_file: - for i in xrange(n_entries): - raddr_s = ip_address(unicode(raddr_ip)) + i - raddr_e = ip_address(unicode(raddr_ip)) + (i + 1) - 1 - tunnel = ( - 'exec ipsec policy add spd {spd_id} ' - 'priority {priority} {direction} action protect ' - 'sa {sa_id} remote-ip-range {raddr_s} - {raddr_e} ' - 'local-ip-range 0.0.0.0 - 255.255.255.255\n'. - format( - spd_id=spd_id, - priority=priority, - direction='inbound' if inbound else 'outbound', - sa_id=sa_id+i, - raddr_s=raddr_s, - raddr_e=raddr_e)) + for i in range(n_entries): + direction = u"inbound" if inbound else u"outbound" + tunnel = f"exec ipsec policy add spd {spd_id} " \ + f"priority {priority} {direction} " \ + f"action protect sa {sa_id+i} " \ + f"remote-ip-range {raddr_ip + i * (raddr_range + 1)} " \ + f"- {raddr_ip + (i + 1) * raddr_range + i} " \ + f"local-ip-range 0.0.0.0 - 255.255.255.255\n" tmp_file.write(tunnel) VatExecutor().execute_script( tmp_filename, node, timeout=300, json_out=False, - copy_on_execute=True) + copy_on_execute=True + ) os.remove(tmp_filename) return - raddr_ip = ip_address(unicode(raddr_ip)) - laddr_range = '::/0' if raddr_ip.version == 6 else '0.0.0.0/0' + laddr_range = u"::/0" if raddr_ip.version == 6 else u"0.0.0.0/0" - cmd = 'ipsec_spd_entry_add_del' - err_msg = 'Failed to add entry to Security Policy Database ' \ - '{spd} on host {host}'.format(spd=spd_id, host=node['host']) + cmd = u"ipsec_spd_entry_add_del" + err_msg = f"Failed to add entry to Security Policy Database {spd_id} " \ + f"on host {node[u'host']}" spd_entry = dict( spd_id=int(spd_id), @@ -745,10 +735,11 @@ class IPsecUtil(object): remote_address_start=IPUtil.create_ip_address_object(raddr_ip), remote_address_stop=IPUtil.create_ip_address_object(raddr_ip), local_address_start=IPUtil.create_ip_address_object( - ip_network(unicode(laddr_range), strict=False).network_address), + ip_network(laddr_range, strict=False).network_address + ), local_address_stop=IPUtil.create_ip_address_object( - ip_network( - unicode(laddr_range), strict=False).broadcast_address), + ip_network(laddr_range, strict=False).broadcast_address + ), remote_port_start=0, remote_port_stop=65535, local_port_start=0, @@ -760,12 +751,12 @@ class IPsecUtil(object): ) with PapiSocketExecutor(node) as papi_exec: - for i in xrange(n_entries): - args['entry']['remote_address_start']['un'] = \ + for i in range(n_entries): + args[u"entry"][u"remote_address_start"][u"un"] = \ IPUtil.union_addr(raddr_ip + i) - args['entry']['remote_address_stop']['un'] = \ + args[u"entry"][u"remote_address_stop"][u"un"] = \ IPUtil.union_addr(raddr_ip + i) - history = False if 1 < i < n_entries - 1 else True + history = bool(not 1 < i < n_entries - 2) papi_exec.add(cmd, history=history, **args) papi_exec.get_replies(err_msg) @@ -805,187 +796,158 @@ class IPsecUtil(object): n_tunnels = int(n_tunnels) spi_1 = 100000 spi_2 = 200000 - if1_ip = ip_address(unicode(if1_ip_addr)) - if2_ip = ip_address(unicode(if2_ip_addr)) - raddr_ip1 = ip_address(unicode(raddr_ip1)) - raddr_ip2 = ip_address(unicode(raddr_ip2)) + if1_ip = ip_address(if1_ip_addr) + if2_ip = ip_address(if2_ip_addr) + raddr_ip1 =
ip_address(raddr_ip1) + raddr_ip2 = ip_address(raddr_ip2) addr_incr = 1 << (128 - raddr_range) if if1_ip.version == 6 \ else 1 << (32 - raddr_range) if n_tunnels > 10: - tmp_fn1 = '/tmp/ipsec_create_tunnel_dut1.config' - tmp_fn2 = '/tmp/ipsec_create_tunnel_dut2.config' + tmp_fn1 = u"/tmp/ipsec_create_tunnel_dut1.config" + tmp_fn2 = u"/tmp/ipsec_create_tunnel_dut2.config" + if1_n = Topology.get_interface_name(nodes[u"DUT1"], if1_key) + if2_n = Topology.get_interface_name(nodes[u"DUT2"], if2_key) + mask = 96 if if2_ip.version == 6 else 24 + mask2 = 128 if if2_ip.version == 6 else 32 vat = VatExecutor() with open(tmp_fn1, 'w') as tmp_f1, open(tmp_fn2, 'w') as tmp_f2: + rmac = Topology.get_interface_mac(nodes[u"DUT2"], if2_key) tmp_f1.write( - 'exec create loopback interface\n' - 'exec set interface state loop0 up\n' - 'exec set interface ip address {uifc} {iaddr}/{mask}\n' - 'exec set ip arp {uifc} {raddr}/32 {rmac} static\n' - .format( - iaddr=if2_ip - 1, - raddr=if2_ip, - uifc=Topology.get_interface_name( - nodes['DUT1'], if1_key), - rmac=Topology.get_interface_mac( - nodes['DUT2'], if2_key), - mask=96 if if2_ip.version == 6 else 24)) + f"exec create loopback interface\n" + f"exec set interface state loop0 up\n" + f"exec set interface ip address " + f"{if1_n} {if2_ip - 1}/{mask}\n" + f"exec set ip arp {if1_n} {if2_ip}/{mask2} {rmac} static\n" + ) tmp_f2.write( - 'exec set interface ip address {uifc} {iaddr}/{mask}\n' - .format( - iaddr=if2_ip, - uifc=Topology.get_interface_name( - nodes['DUT2'], if2_key), - mask=96 if if2_ip.version == 6 else 24)) - for i in xrange(n_tunnels): - ckey = gen_key(IPsecUtil.get_crypto_alg_key_len( - crypto_alg)).encode('hex') + f"exec set interface ip address {if2_n} {if2_ip}/{mask}\n" + ) + for i in range(n_tunnels): + ckey = gen_key( + IPsecUtil.get_crypto_alg_key_len(crypto_alg) + ).hex() if integ_alg: - ikey = gen_key(IPsecUtil.get_integ_alg_key_len( - integ_alg)).encode('hex') - integ = ( - 'integ_alg {integ_alg} ' - 'local_integ_key {local_integ_key} ' - 'remote_integ_key {remote_integ_key} ' - .format( - integ_alg=integ_alg.alg_name, - local_integ_key=ikey, - remote_integ_key=ikey)) + ikey = gen_key( + IPsecUtil.get_integ_alg_key_len(integ_alg) + ).hex() + integ = f"integ_alg {integ_alg.alg_name} " \ + f"local_integ_key {ikey} remote_integ_key {ikey} " else: - integ = '' + integ = u"" tmp_f1.write( - 'exec set interface ip address loop0 {laddr}/32\n' - 'ipsec_tunnel_if_add_del ' - 'local_spi {local_spi} ' - 'remote_spi {remote_spi} ' - 'crypto_alg {crypto_alg} ' - 'local_crypto_key {local_crypto_key} ' - 'remote_crypto_key {remote_crypto_key} ' - '{integ} ' - 'local_ip {laddr} ' - 'remote_ip {raddr}\n' - .format( - local_spi=spi_1 + i, - remote_spi=spi_2 + i, - crypto_alg=crypto_alg.alg_name, - local_crypto_key=ckey, - remote_crypto_key=ckey, - integ=integ, - laddr=if1_ip + i * addr_incr, - raddr=if2_ip)) + f"exec set interface ip address loop0 " + f"{if1_ip + i * addr_incr}/32\n" + f"ipsec_tunnel_if_add_del " + f"local_spi {spi_1 + i} remote_spi {spi_2 + i} " + f"crypto_alg {crypto_alg.alg_name} " + f"local_crypto_key {ckey} remote_crypto_key {ckey} " + f"{integ} " + f"local_ip {if1_ip + i * addr_incr} " + f"remote_ip {if2_ip}\n" + ) tmp_f2.write( - 'ipsec_tunnel_if_add_del ' - 'local_spi {local_spi} ' - 'remote_spi {remote_spi} ' - 'crypto_alg {crypto_alg} ' - 'local_crypto_key {local_crypto_key} ' - 'remote_crypto_key {remote_crypto_key} ' - '{integ} ' - 'local_ip {laddr} ' - 'remote_ip {raddr}\n' - .format( - local_spi=spi_2 + i, - remote_spi=spi_1 + 
i, - crypto_alg=crypto_alg.alg_name, - local_crypto_key=ckey, - remote_crypto_key=ckey, - integ=integ, - laddr=if2_ip, - raddr=if1_ip + i * addr_incr)) + f"ipsec_tunnel_if_add_del " + f"local_spi {spi_2 + i} remote_spi {spi_1 + i} " + f"crypto_alg {crypto_alg.alg_name} " + f"local_crypto_key {ckey} remote_crypto_key {ckey} " + f"{integ} " + f"local_ip {if2_ip} " + f"remote_ip {if1_ip + i * addr_incr}\n" + ) vat.execute_script( - tmp_fn1, nodes['DUT1'], timeout=1800, json_out=False, + tmp_fn1, nodes[u"DUT1"], timeout=1800, json_out=False, copy_on_execute=True, - history=False if n_tunnels > 100 else True) + history=bool(n_tunnels < 100) + ) vat.execute_script( - tmp_fn2, nodes['DUT2'], timeout=1800, json_out=False, + tmp_fn2, nodes[u"DUT2"], timeout=1800, json_out=False, copy_on_execute=True, - history=False if n_tunnels > 100 else True) + history=bool(n_tunnels < 100) + ) os.remove(tmp_fn1) os.remove(tmp_fn2) with open(tmp_fn1, 'w') as tmp_f1, open(tmp_fn2, 'w') as tmp_f2: + raddr = ip_network(if1_ip_addr + u"/8", False) tmp_f2.write( - 'exec ip route add {raddr} via {uifc} {iaddr}\n' - .format( - raddr=ip_network(unicode(if1_ip_addr+'/8'), False), - iaddr=if2_ip - 1, - uifc=Topology.get_interface_name( - nodes['DUT2'], if2_key))) - for i in xrange(n_tunnels): + f"exec ip route add {raddr} via {if2_n} {if2_ip - 1}\n" + ) + for i in range(n_tunnels): tmp_f1.write( - 'exec set interface unnumbered ipsec{i} use {uifc}\n' - 'exec set interface state ipsec{i} up\n' - 'exec ip route add {taddr}/{mask} via ipsec{i}\n' - .format( - taddr=raddr_ip2 + i, - i=i, - uifc=Topology.get_interface_name(nodes['DUT1'], - if1_key), - mask=128 if if2_ip.version == 6 else 32)) + f"exec set interface unnumbered ipsec{i} use {if1_n}\n" + f"exec set interface state ipsec{i} up\n" + f"exec ip route add {raddr_ip2 + i}/{mask2} " + f"via ipsec{i}\n" + ) tmp_f2.write( - 'exec set interface unnumbered ipsec{i} use {uifc}\n' - 'exec set interface state ipsec{i} up\n' - 'exec ip route add {taddr}/{mask} via ipsec{i}\n' - .format( - taddr=raddr_ip1 + i, - i=i, - uifc=Topology.get_interface_name(nodes['DUT2'], - if2_key), - mask=128 if if2_ip.version == 6 else 32)) + f"exec set interface unnumbered ipsec{i} use {if2_n}\n" + f"exec set interface state ipsec{i} up\n" + f"exec ip route add {raddr_ip1 + i}/{mask2} " + f"via ipsec{i}\n" + ) vat.execute_script( - tmp_fn1, nodes['DUT1'], timeout=1800, json_out=False, + tmp_fn1, nodes[u"DUT1"], timeout=1800, json_out=False, copy_on_execute=True, - history=False if n_tunnels > 100 else True) + history=bool(n_tunnels < 100) + ) vat.execute_script( - tmp_fn2, nodes['DUT2'], timeout=1800, json_out=False, + tmp_fn2, nodes[u"DUT2"], timeout=1800, json_out=False, copy_on_execute=True, - history=False if n_tunnels > 100 else True) + history=bool(n_tunnels < 100) + ) os.remove(tmp_fn1) os.remove(tmp_fn2) return - with PapiSocketExecutor(nodes['DUT1']) as papi_exec: + with PapiSocketExecutor(nodes[u"DUT1"]) as papi_exec: # Create loopback interface on DUT1, set it to up state - cmd1 = 'create_loopback' - args1 = dict(mac_address=0) - err_msg = 'Failed to create loopback interface on host {host}'.\ - format(host=nodes['DUT1']['host']) + cmd1 = u"create_loopback" + args1 = dict( + mac_address=0 + ) + err_msg = f"Failed to create loopback interface " \ + f"on host {nodes[u'DUT1'][u'host']}" loop_sw_if_idx = papi_exec.add(cmd1, **args1).\ get_sw_if_index(err_msg) - cmd1 = 'sw_interface_set_flags' + cmd1 = u"sw_interface_set_flags" args1 = dict( sw_if_index=loop_sw_if_idx, - 
flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value) - err_msg = 'Failed to set loopback interface state up on host ' \ - '{host}'.format(host=nodes['DUT1']['host']) + flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value + ) + err_msg = f"Failed to set loopback interface state up " \ + f"on host {nodes[u'DUT1'][u'host']}" papi_exec.add(cmd1, **args1).get_reply(err_msg) # Set IP address on VPP node 1 interface - cmd1 = 'sw_interface_add_del_address' + cmd1 = u"sw_interface_add_del_address" args1 = dict( sw_if_index=InterfaceUtil.get_interface_index( - nodes['DUT1'], if1_key), + nodes[u"DUT1"], if1_key + ), is_add=True, del_all=False, prefix=IPUtil.create_prefix_object( - if2_ip - 1, 96 if if2_ip.version == 6 else 24) + if2_ip - 1, 96 if if2_ip.version == 6 else 24 + ) ) - err_msg = 'Failed to set IP address on interface {ifc} on host ' \ - '{host}'.format(ifc=if1_key, host=nodes['DUT1']['host']) + err_msg = f"Failed to set IP address on interface {if1_key} " \ + f"on host {nodes[u'DUT1'][u'host']}" papi_exec.add(cmd1, **args1).get_reply(err_msg) - cmd4 = 'ip_neighbor_add_del' + cmd4 = u"ip_neighbor_add_del" args4 = dict( is_add=1, neighbor=dict( sw_if_index=Topology.get_interface_sw_index( - nodes['DUT1'], if1_key), + nodes[u"DUT1"], if1_key + ), flags=1, mac_address=str( - Topology.get_interface_mac(nodes['DUT2'], if2_key)), - ip_address=str(ip_address(unicode(if2_ip_addr))) + Topology.get_interface_mac(nodes[u"DUT2"], if2_key) + ), + ip_address=str(ip_address(if2_ip_addr)) ) ) - err_msg = 'Failed to add IP neighbor on interface {ifc}'.format( - ifc=if1_key) + err_msg = f"Failed to add IP neighbor on interface {if1_key}" papi_exec.add(cmd4, **args4).get_reply(err_msg) # Configure IPsec tunnel interfaces args1 = dict( @@ -994,7 +956,7 @@ class IPsecUtil(object): del_all=False, prefix=None ) - cmd2 = 'ipsec_tunnel_if_add_del' + cmd2 = u"ipsec_tunnel_if_add_del" args2 = dict( is_add=1, local_ip=None, @@ -1013,91 +975,98 @@ class IPsecUtil(object): remote_integ_key=None, tx_table_id=0 ) - err_msg = 'Failed to add IPsec tunnel interfaces on host {host}'.\ - format(host=nodes['DUT1']['host']) + err_msg = f"Failed to add IPsec tunnel interfaces " \ + f"on host {nodes[u'DUT1'][u'host']}" ipsec_tunnels = list() ckeys = list() ikeys = list() - for i in xrange(n_tunnels): + for i in range(n_tunnels): ckeys.append( - gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg))) + gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg)) + ) if integ_alg: ikeys.append( - gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg))) - args1['prefix'] = IPUtil.create_prefix_object( - if1_ip + i * addr_incr, 128 if if1_ip.version == 6 else 32) - args2['local_spi'] = spi_1 + i - args2['remote_spi'] = spi_2 + i - args2['local_ip'] = IPUtil.create_ip_address_object( - if1_ip + i * addr_incr) - args2['remote_ip'] = IPUtil.create_ip_address_object(if2_ip) - args2['local_crypto_key_len'] = len(ckeys[i]) - args2['local_crypto_key'] = ckeys[i] - args2['remote_crypto_key_len'] = len(ckeys[i]) - args2['remote_crypto_key'] = ckeys[i] + gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg)) + ) + args1[u"prefix"] = IPUtil.create_prefix_object( + if1_ip + i * addr_incr, 128 if if1_ip.version == 6 else 32 + ) + args2[u"local_spi"] = spi_1 + i + args2[u"remote_spi"] = spi_2 + i + args2[u"local_ip"] = IPUtil.create_ip_address_object( + if1_ip + i * addr_incr + ) + args2[u"remote_ip"] = IPUtil.create_ip_address_object(if2_ip) + args2[u"local_crypto_key_len"] = len(ckeys[i]) + args2[u"local_crypto_key"] = ckeys[i] + 
args2[u"remote_crypto_key_len"] = len(ckeys[i]) + args2[u"remote_crypto_key"] = ckeys[i] if integ_alg: - args2['local_integ_key_len'] = len(ikeys[i]) - args2['local_integ_key'] = ikeys[i] - args2['remote_integ_key_len'] = len(ikeys[i]) - args2['remote_integ_key'] = ikeys[i] - history = False if 1 < i < n_tunnels - 1 else True + args2[u"local_integ_key_len"] = len(ikeys[i]) + args2[u"local_integ_key"] = ikeys[i] + args2[u"remote_integ_key_len"] = len(ikeys[i]) + args2[u"remote_integ_key"] = ikeys[i] + history = bool(not 1 < i < n_tunnels - 2) papi_exec.add(cmd1, history=history, **args1).\ add(cmd2, history=history, **args2) replies = papi_exec.get_replies(err_msg) for reply in replies: - if 'sw_if_index' in reply: - ipsec_tunnels.append(reply["sw_if_index"]) + if u"sw_if_index" in reply: + ipsec_tunnels.append(reply[u"sw_if_index"]) # Configure IP routes - cmd1 = 'sw_interface_set_unnumbered' + cmd1 = u"sw_interface_set_unnumbered" args1 = dict( is_add=True, sw_if_index=InterfaceUtil.get_interface_index( - nodes['DUT1'], if1_key), + nodes[u"DUT1"], if1_key + ), unnumbered_sw_if_index=0 ) - cmd2 = 'sw_interface_set_flags' + cmd2 = u"sw_interface_set_flags" args2 = dict( sw_if_index=0, - flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value) - cmd3 = 'ip_route_add_del' + flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value + ) + cmd3 = u"ip_route_add_del" args3 = dict( is_add=1, is_multipath=0, route=None ) - err_msg = 'Failed to add IP routes on host {host}'.format( - host=nodes['DUT1']['host']) - for i in xrange(n_tunnels): - args1['unnumbered_sw_if_index'] = ipsec_tunnels[i] - args2['sw_if_index'] = ipsec_tunnels[i] - args3['route'] = IPUtil.compose_vpp_route_structure( - nodes['DUT1'], - (raddr_ip2 + i).compressed, + err_msg = f"Failed to add IP routes " \ + f"on host {nodes[u'DUT1'][u'host']}" + for i in range(n_tunnels): + args1[u"unnumbered_sw_if_index"] = ipsec_tunnels[i] + args2[u"sw_if_index"] = ipsec_tunnels[i] + args3[u"route"] = IPUtil.compose_vpp_route_structure( + nodes[u"DUT1"], (raddr_ip2 + i).compressed, prefix_len=128 if raddr_ip2.version == 6 else 32, interface=ipsec_tunnels[i] ) - history = False if 1 < i < n_tunnels - 1 else True + history = bool(not 1 < i < n_tunnels - 2) papi_exec.add(cmd1, history=history, **args1).\ add(cmd2, history=history, **args2).\ add(cmd3, history=history, **args3) papi_exec.get_replies(err_msg) - with PapiSocketExecutor(nodes['DUT2']) as papi_exec: + with PapiSocketExecutor(nodes[u"DUT2"]) as papi_exec: # Set IP address on VPP node 2 interface - cmd1 = 'sw_interface_add_del_address' + cmd1 = u"sw_interface_add_del_address" args1 = dict( sw_if_index=InterfaceUtil.get_interface_index( - nodes['DUT2'], if2_key), + nodes[u"DUT2"], if2_key + ), is_add=True, del_all=False, prefix=IPUtil.create_prefix_object( - if2_ip, 96 if if2_ip.version == 6 else 24) + if2_ip, 96 if if2_ip.version == 6 else 24 + ) ) - err_msg = 'Failed to set IP address on interface {ifc} on host ' \ - '{host}'.format(ifc=if2_key, host=nodes['DUT2']['host']) + err_msg = f"Failed to set IP address on interface {if2_key} " \ + f"on host {nodes[u'DUT2'][u'host']}" papi_exec.add(cmd1, **args1).get_reply(err_msg) # Configure IPsec tunnel interfaces - cmd2 = 'ipsec_tunnel_if_add_del' + cmd2 = u"ipsec_tunnel_if_add_del" args2 = dict( is_add=1, local_ip=IPUtil.create_ip_address_object(if2_ip), @@ -1116,34 +1085,34 @@ class IPsecUtil(object): remote_integ_key=None, tx_table_id=0 ) - err_msg = 'Failed to add IPsec tunnel interfaces on host {host}'. 
\ - format(host=nodes['DUT2']['host']) + err_msg = f"Failed to add IPsec tunnel interfaces " \ + f"on host {nodes[u'DUT2'][u'host']}" ipsec_tunnels = list() - for i in xrange(n_tunnels): - args2['local_spi'] = spi_2 + i - args2['remote_spi'] = spi_1 + i - args2['local_ip'] = IPUtil.create_ip_address_object(if2_ip) - args2['remote_ip'] = IPUtil.create_ip_address_object( + for i in range(n_tunnels): + args2[u"local_spi"] = spi_2 + i + args2[u"remote_spi"] = spi_1 + i + args2[u"local_ip"] = IPUtil.create_ip_address_object(if2_ip) + args2[u"remote_ip"] = IPUtil.create_ip_address_object( if1_ip + i * addr_incr) - args2['local_crypto_key_len'] = len(ckeys[i]) - args2['local_crypto_key'] = ckeys[i] - args2['remote_crypto_key_len'] = len(ckeys[i]) - args2['remote_crypto_key'] = ckeys[i] + args2[u"local_crypto_key_len"] = len(ckeys[i]) + args2[u"local_crypto_key"] = ckeys[i] + args2[u"remote_crypto_key_len"] = len(ckeys[i]) + args2[u"remote_crypto_key"] = ckeys[i] if integ_alg: - args2['local_integ_key_len'] = len(ikeys[i]) - args2['local_integ_key'] = ikeys[i] - args2['remote_integ_key_len'] = len(ikeys[i]) - args2['remote_integ_key'] = ikeys[i] - history = False if 1 < i < n_tunnels - 1 else True + args2[u"local_integ_key_len"] = len(ikeys[i]) + args2[u"local_integ_key"] = ikeys[i] + args2[u"remote_integ_key_len"] = len(ikeys[i]) + args2[u"remote_integ_key"] = ikeys[i] + history = bool(not 1 < i < n_tunnels - 2) papi_exec.add(cmd2, history=history, **args2) replies = papi_exec.get_replies(err_msg) for reply in replies: - if 'sw_if_index' in reply: - ipsec_tunnels.append(reply["sw_if_index"]) + if u"sw_if_index" in reply: + ipsec_tunnels.append(reply[u"sw_if_index"]) # Configure IP routes - cmd1 = 'ip_route_add_del' + cmd1 = u"ip_route_add_del" route = IPUtil.compose_vpp_route_structure( - nodes['DUT2'], if1_ip.compressed, + nodes[u"DUT2"], if1_ip.compressed, prefix_len=32 if if1_ip.version == 6 else 8, interface=if2_key, gateway=(if2_ip - 1).compressed @@ -1154,35 +1123,36 @@ class IPsecUtil(object): route=route ) papi_exec.add(cmd1, **args1) - cmd1 = 'sw_interface_set_unnumbered' + cmd1 = u"sw_interface_set_unnumbered" args1 = dict( is_add=True, sw_if_index=InterfaceUtil.get_interface_index( - nodes['DUT2'], if2_key), + nodes[u"DUT2"], if2_key + ), unnumbered_sw_if_index=0 ) - cmd2 = 'sw_interface_set_flags' + cmd2 = u"sw_interface_set_flags" args2 = dict( sw_if_index=0, - flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value) - cmd3 = 'ip_route_add_del' + flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value + ) + cmd3 = u"ip_route_add_del" args3 = dict( is_add=1, is_multipath=0, route=None ) - err_msg = 'Failed to add IP routes on host {host}'.format( - host=nodes['DUT2']['host']) - for i in xrange(n_tunnels): - args1['unnumbered_sw_if_index'] = ipsec_tunnels[i] - args2['sw_if_index'] = ipsec_tunnels[i] - args3['route'] = IPUtil.compose_vpp_route_structure( - nodes['DUT1'], - (raddr_ip1 + i).compressed, + err_msg = f"Failed to add IP routes " \ + f"on host {nodes[u'DUT2'][u'host']}" + for i in range(n_tunnels): + args1[u"unnumbered_sw_if_index"] = ipsec_tunnels[i] + args2[u"sw_if_index"] = ipsec_tunnels[i] + args3[u"route"] = IPUtil.compose_vpp_route_structure( + nodes[u"DUT1"], (raddr_ip1 + i).compressed, prefix_len=128 if raddr_ip1.version == 6 else 32, interface=ipsec_tunnels[i] ) - history = False if 1 < i < n_tunnels - 1 else True + history = bool(not 1 < i < n_tunnels - 2) papi_exec.add(cmd1, history=history, **args1). \ add(cmd2, history=history, **args2). 
\ add(cmd3, history=history, **args3) @@ -1228,66 +1198,75 @@ class IPsecUtil(object): spi_1 = 300000 spi_2 = 400000 - crypto_key = gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg)) - integ_key = gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg)) \ - if integ_alg else '' + crypto_key = gen_key( + IPsecUtil.get_crypto_alg_key_len(crypto_alg) + ).decode() + integ_key = gen_key( + IPsecUtil.get_integ_alg_key_len(integ_alg) + ).decode() if integ_alg else u"" IPsecUtil.vpp_ipsec_set_ip_route( - nodes['DUT1'], n_tunnels, tunnel_ip1, raddr_ip2, tunnel_ip2, + nodes[u"DUT1"], n_tunnels, tunnel_ip1, raddr_ip2, tunnel_ip2, interface1, raddr_range) IPsecUtil.vpp_ipsec_set_ip_route( - nodes['DUT2'], n_tunnels, tunnel_ip2, raddr_ip1, tunnel_ip1, + nodes[u"DUT2"], n_tunnels, tunnel_ip2, raddr_ip1, tunnel_ip1, interface2, raddr_range) - IPsecUtil.vpp_ipsec_add_spd( - nodes['DUT1'], spd_id) - IPsecUtil.vpp_ipsec_spd_add_if( - nodes['DUT1'], spd_id, interface1) + IPsecUtil.vpp_ipsec_add_spd(nodes[u"DUT1"], spd_id) + IPsecUtil.vpp_ipsec_spd_add_if(nodes[u"DUT1"], spd_id, interface1) IPsecUtil.vpp_ipsec_policy_add( - nodes['DUT1'], spd_id, p_hi, PolicyAction.BYPASS, inbound=False, - proto=50, laddr_range='100.0.0.0/8', raddr_range='100.0.0.0/8') + nodes[u"DUT1"], spd_id, p_hi, PolicyAction.BYPASS, inbound=False, + proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8" + ) IPsecUtil.vpp_ipsec_policy_add( - nodes['DUT1'], spd_id, p_hi, PolicyAction.BYPASS, inbound=True, - proto=50, laddr_range='100.0.0.0/8', raddr_range='100.0.0.0/8') + nodes[u"DUT1"], spd_id, p_hi, PolicyAction.BYPASS, inbound=True, + proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8" + ) - IPsecUtil.vpp_ipsec_add_spd( - nodes['DUT2'], spd_id) - IPsecUtil.vpp_ipsec_spd_add_if( - nodes['DUT2'], spd_id, interface2) + IPsecUtil.vpp_ipsec_add_spd(nodes[u"DUT2"], spd_id) + IPsecUtil.vpp_ipsec_spd_add_if(nodes[u"DUT2"], spd_id, interface2) IPsecUtil.vpp_ipsec_policy_add( - nodes['DUT2'], spd_id, p_hi, PolicyAction.BYPASS, inbound=False, - proto=50, laddr_range='100.0.0.0/8', raddr_range='100.0.0.0/8') + nodes[u"DUT2"], spd_id, p_hi, PolicyAction.BYPASS, inbound=False, + proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8" + ) IPsecUtil.vpp_ipsec_policy_add( - nodes['DUT2'], spd_id, p_hi, PolicyAction.BYPASS, inbound=True, - proto=50, laddr_range='100.0.0.0/8', raddr_range='100.0.0.0/8') + nodes[u"DUT2"], spd_id, p_hi, PolicyAction.BYPASS, inbound=True, + proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8" + ) IPsecUtil.vpp_ipsec_add_sad_entries( - nodes['DUT1'], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key, - integ_alg, integ_key, tunnel_ip1, tunnel_ip2) - + nodes[u"DUT1"], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key, + integ_alg, integ_key, tunnel_ip1, tunnel_ip2 + ) IPsecUtil.vpp_ipsec_spd_add_entries( - nodes['DUT1'], n_tunnels, spd_id, p_lo, False, sa_id_1, raddr_ip2) + nodes[u"DUT1"], n_tunnels, spd_id, p_lo, False, sa_id_1, raddr_ip2 + ) IPsecUtil.vpp_ipsec_add_sad_entries( - nodes['DUT2'], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key, - integ_alg, integ_key, tunnel_ip1, tunnel_ip2) - + nodes[u"DUT2"], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key, + integ_alg, integ_key, tunnel_ip1, tunnel_ip2 + ) IPsecUtil.vpp_ipsec_spd_add_entries( - nodes['DUT2'], n_tunnels, spd_id, p_lo, True, sa_id_1, raddr_ip2) + nodes[u"DUT2"], n_tunnels, spd_id, p_lo, True, sa_id_1, raddr_ip2 + ) IPsecUtil.vpp_ipsec_add_sad_entries( - nodes['DUT2'], n_tunnels, sa_id_2, spi_2, crypto_alg, 
crypto_key, - integ_alg, integ_key, tunnel_ip2, tunnel_ip1) + nodes[u"DUT2"], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key, + integ_alg, integ_key, tunnel_ip2, tunnel_ip1 + ) IPsecUtil.vpp_ipsec_spd_add_entries( - nodes['DUT2'], n_tunnels, spd_id, p_lo, False, sa_id_2, raddr_ip1) + nodes[u"DUT2"], n_tunnels, spd_id, p_lo, False, sa_id_2, raddr_ip1 + ) IPsecUtil.vpp_ipsec_add_sad_entries( - nodes['DUT1'], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key, - integ_alg, integ_key, tunnel_ip2, tunnel_ip1) + nodes[u"DUT1"], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key, + integ_alg, integ_key, tunnel_ip2, tunnel_ip1 + ) IPsecUtil.vpp_ipsec_spd_add_entries( - nodes['DUT1'], n_tunnels, spd_id, p_lo, True, sa_id_2, raddr_ip1) + nodes[u"DUT1"], n_tunnels, spd_id, p_lo, True, sa_id_2, raddr_ip1 + ) @staticmethod def vpp_ipsec_show(node): @@ -1296,4 +1275,4 @@ class IPsecUtil(object): :param node: Node to run command on. :type node: dict """ - PapiSocketExecutor.run_cli_cmd(node, 'show ipsec') + PapiSocketExecutor.run_cli_cmd(node, u"show ipsec") diff --git a/resources/libraries/python/IPv6Util.py b/resources/libraries/python/IPv6Util.py index aacf0fb5f7..683f892f62 100644 --- a/resources/libraries/python/IPv6Util.py +++ b/resources/libraries/python/IPv6Util.py @@ -19,7 +19,7 @@ from resources.libraries.python.PapiExecutor import PapiSocketExecutor from resources.libraries.python.topology import NodeType -class IPv6Util(object): +class IPv6Util: """IPv6 utilities""" @staticmethod @@ -31,12 +31,13 @@ class IPv6Util(object): :type node: dict :type interface: str """ - cmd = 'sw_interface_ip6nd_ra_config' + cmd = u"sw_interface_ip6nd_ra_config" args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), - suppress=1) - err_msg = 'Failed to suppress ICMPv6 router advertisement message on ' \ - 'interface {ifc}'.format(ifc=interface) + suppress=1 + ) + err_msg = f"Failed to suppress ICMPv6 router advertisement message " \ + f"on interface {interface}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -53,12 +54,13 @@ class IPv6Util(object): :type interface: str :type interval: int """ - cmd = 'sw_interface_ip6nd_ra_config' + cmd = u"sw_interface_ip6nd_ra_config" args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), - initial_interval=int(interval)) - err_msg = 'Failed to set router advertisement interval on ' \ - 'interface {ifc}'.format(ifc=interface) + initial_interval=int(interval) + ) + err_msg = f"Failed to set router advertisement interval " \ + f"on interface {interface}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -72,10 +74,11 @@ class IPv6Util(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.TG: + if node[u"type"] == NodeType.TG: continue - for port_k in node['interfaces'].keys(): + for port_k in node[u"interfaces"].keys(): ip6_addr_list = IPUtil.vpp_get_interface_ip_addresses( - node, port_k, 'ipv6') + node, port_k, u"ipv6" + ) if ip6_addr_list: IPv6Util.vpp_ra_suppress_link_layer(node, port_k) diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py index a15507454a..fa6a33197c 100644 --- a/resources/libraries/python/InterfaceUtil.py +++ b/resources/libraries/python/InterfaceUtil.py @@ -14,8 +14,8 @@ """Interface util library.""" from time import sleep - from enum import IntEnum + from ipaddress import ip_address from robot.api import logger @@ -103,10 +103,10 @@ class 
LinkBondMode(IntEnum): BOND_API_MODE_LACP = 5 -class InterfaceUtil(object): +class InterfaceUtil: """General utilities for managing interfaces""" - __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules' + __UDEV_IF_RULES_FILE = u"/etc/udev/rules.d/10-network.rules" @staticmethod def pci_to_int(pci_str): @@ -118,28 +118,28 @@ class InterfaceUtil(object): :returns: Integer representation of PCI address. :rtype: int """ - pci = list(pci_str.split(':')[0:2]) - pci.extend(pci_str.split(':')[2].split('.')) + pci = list(pci_str.split(u":")[0:2]) + pci.extend(pci_str.split(u":")[2].split(u".")) return (int(pci[0], 16) | int(pci[1], 16) << 16 | int(pci[2], 16) << 24 | int(pci[3], 16) << 29) @staticmethod def pci_to_eth(node, pci_str): - """Convert PCI address to Linux ethernet name. + """Convert PCI address on DUT to Linux ethernet name. + :param node: DUT node. :param pci_str: PCI address. + :type node: dict :type pci_str: str :returns: Ethernet name. :rtype: str """ - cmd = ('basename /sys/bus/pci/devices/{pci_str}/net/*'. - format(pci_str=pci_str)) + cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*" try: stdout, _ = exec_cmd_no_error(node, cmd) except RuntimeError: - raise RuntimeError("Cannot convert {pci_str} to ethernet name!". - format(pci_str=pci_str)) + raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!") return stdout.strip() @@ -162,13 +162,12 @@ class InterfaceUtil(object): sw_if_index = \ Topology.get_interface_sw_index_by_name(node, interface) except TypeError as err: - raise TypeError('Wrong interface format {ifc}: {err}'.format( - ifc=interface, err=err.message)) + raise TypeError(f"Wrong interface format {interface}") from err return sw_if_index @staticmethod - def set_interface_state(node, interface, state, if_type='key'): + def set_interface_state(node, interface, state, if_type=u"key"): """Set interface state on a node. Function can be used for DUTs as well as for TGs.
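A worked example of the pci_to_int() packing shown above: the PCI domain stays in the low bits, the bus shifts left by 16, the device by 24 and the function by 29 (the address below is illustrative):

    pci_str = u"0000:00:06.0"
    pci = list(pci_str.split(u":")[0:2])  # ["0000", "00"]
    pci.extend(pci_str.split(u":")[2].split(u"."))  # + ["06", "0"]
    value = (int(pci[0], 16) | int(pci[1], 16) << 16
             | int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
    # value == 0x06000000 (100663296) for this address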
""" - if if_type == 'key': - if isinstance(interface, basestring): + if if_type == u"key": + if isinstance(interface, str): sw_if_index = Topology.get_interface_sw_index(node, interface) iface_name = Topology.get_interface_name(node, interface) else: sw_if_index = interface - elif if_type == 'name': + elif if_type == u"name": iface_key = Topology.get_interface_by_name(node, interface) if iface_key is not None: sw_if_index = Topology.get_interface_sw_index(node, iface_key) iface_name = interface else: - raise ValueError('Unknown if_type: {type}'.format(type=if_type)) + raise ValueError(f"Unknown if_type: {if_type}") - if node['type'] == NodeType.DUT: - if state == 'up': + if node[u"type"] == NodeType.DUT: + if state == u"up": flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value - elif state == 'down': + elif state == u"down": flags = 0 else: - raise ValueError('Unexpected interface state: {state}'.format( - state=state)) - cmd = 'sw_interface_set_flags' - err_msg = 'Failed to set interface state on host {host}'.format( - host=node['host']) + raise ValueError(f"Unexpected interface state: {state}") + cmd = u"sw_interface_set_flags" + err_msg = f"Failed to set interface state on host {node[u'host']}" args = dict( sw_if_index=int(sw_if_index), - flags=flags) + flags=flags + ) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) - elif node['type'] == NodeType.TG or node['type'] == NodeType.VM: - cmd = 'ip link set {ifc} {state}'.format( - ifc=iface_name, state=state) + elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM: + cmd = f"ip link set {iface_name} {state}" exec_cmd_no_error(node, cmd, sudo=True) else: - raise ValueError('Node {} has unknown NodeType: "{}"' - .format(node['host'], node['type'])) + raise ValueError( + f"Node {node[u'host']} has unknown NodeType: {node[u'type']}" + ) @staticmethod def set_interface_ethernet_mtu(node, iface_key, mtu): @@ -240,16 +238,17 @@ class InterfaceUtil(object): :raises ValueError: If the node type is "DUT". :raises ValueError: If the node has an unknown node type. """ - if node['type'] == NodeType.DUT: - raise ValueError('Node {}: Setting Ethernet MTU for interface ' - 'on DUT nodes not supported', node['host']) - elif node['type'] == NodeType.TG: + if node[u"type"] == NodeType.DUT: + msg = f"Node {node[u'host']}: Setting Ethernet MTU for interface " \ + f"on DUT nodes not supported" + elif node[u"type"] != NodeType.TG: + msg = f"Node {node[u'host']} has unknown NodeType: {node[u'type']}" + else: iface_name = Topology.get_interface_name(node, iface_key) - cmd = 'ip link set {} mtu {}'.format(iface_name, mtu) + cmd = f"ip link set {iface_name} mtu {mtu}" exec_cmd_no_error(node, cmd, sudo=True) - else: - raise ValueError('Node {} has unknown NodeType: "{}"' - .format(node['host'], node['type'])) + return + raise ValueError(msg) @staticmethod def set_default_ethernet_mtu_on_all_interfaces_on_node(node): @@ -261,7 +260,7 @@ class InterfaceUtil(object): :type node: dict :returns: Nothing. 
""" - for ifc in node['interfaces']: + for ifc in node[u"interfaces"]: InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500) @staticmethod @@ -275,23 +274,23 @@ class InterfaceUtil(object): :type interface: str or int :type mtu: int """ - if isinstance(interface, basestring): + if isinstance(interface, str): sw_if_index = Topology.get_interface_sw_index(node, interface) else: sw_if_index = interface - cmd = 'hw_interface_set_mtu' - err_msg = 'Failed to set interface MTU on host {host}'.format( - host=node['host']) - args = dict(sw_if_index=sw_if_index, - mtu=int(mtu)) + cmd = u"hw_interface_set_mtu" + err_msg = f"Failed to set interface MTU on host {node[u'host']}" + args = dict( + sw_if_index=sw_if_index, + mtu=int(mtu) + ) try: with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) except AssertionError as err: # TODO: Make failure tolerance optional. - logger.debug("Setting MTU failed. Expected?\n{err}".format( - err=err)) + logger.debug(f"Setting MTU failed. Expected?\n{err}") @staticmethod def vpp_set_interfaces_mtu_on_node(node, mtu=9200): @@ -302,7 +301,7 @@ class InterfaceUtil(object): :type node: dict :type mtu: int """ - for interface in node['interfaces']: + for interface in node[u"interfaces"]: InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu) @staticmethod @@ -315,7 +314,7 @@ class InterfaceUtil(object): :type mtu: int """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu) @staticmethod @@ -331,21 +330,22 @@ class InterfaceUtil(object): :raises RuntimeError: If any interface is not in link-up state after defined number of retries. """ - for _ in xrange(0, retries): + for _ in range(0, retries): not_ready = list() out = InterfaceUtil.vpp_get_interface_data(node) for interface in out: - if interface.get('flags') == 1: - not_ready.append(interface.get('interface_name')) - if not not_ready: - break - else: - logger.debug('Interfaces still not in link-up state:\n{ifs} ' - '\nWaiting...'.format(ifs=not_ready)) + if interface.get(u"flags") == 1: + not_ready.append(interface.get(u"interface_name")) + if not_ready: + logger.debug( + f"Interfaces still not in link-up state:\n{not_ready}" + ) sleep(1) + else: + break else: - err = 'Timeout, interfaces not up:\n{ifs}'.format(ifs=not_ready) \ - if 'not_ready' in locals() else 'No check executed!' + err = f"Timeout, interfaces not up:\n{not_ready}" \ + if u"not_ready" in locals() else u"No check executed!" raise RuntimeError(err) @staticmethod @@ -361,7 +361,7 @@ class InterfaceUtil(object): :returns: Nothing. """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries) @staticmethod @@ -388,49 +388,48 @@ class InterfaceUtil(object): :returns: Processed interface dump. 
:rtype: dict """ - if_dump['l2_address'] = str(if_dump['l2_address']) - if_dump['b_dmac'] = str(if_dump['b_dmac']) - if_dump['b_smac'] = str(if_dump['b_smac']) - if_dump['flags'] = if_dump['flags'].value - if_dump['type'] = if_dump['type'].value - if_dump['link_duplex'] = if_dump['link_duplex'].value - if_dump['sub_if_flags'] = if_dump['sub_if_flags'].value \ - if hasattr(if_dump['sub_if_flags'], 'value') \ - else int(if_dump['sub_if_flags']) + if_dump[u"l2_address"] = str(if_dump[u"l2_address"]) + if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"]) + if_dump[u"b_smac"] = str(if_dump[u"b_smac"]) + if_dump[u"flags"] = if_dump[u"flags"].value + if_dump[u"type"] = if_dump[u"type"].value + if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value + if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \ + if hasattr(if_dump[u"sub_if_flags"], u"value") \ + else int(if_dump[u"sub_if_flags"]) return if_dump if interface is not None: - if isinstance(interface, basestring): - param = 'interface_name' + if isinstance(interface, str): + param = u"interface_name" elif isinstance(interface, int): - param = 'sw_if_index' + param = u"sw_if_index" else: - raise TypeError('Wrong interface format {ifc}'.format( - ifc=interface)) + raise TypeError(f"Wrong interface format {interface}") else: - param = '' + param = u"" - cmd = 'sw_interface_dump' + cmd = u"sw_interface_dump" args = dict( name_filter_valid=False, - name_filter='' + name_filter=u"" ) - err_msg = 'Failed to get interface dump on host {host}'.format( - host=node['host']) + err_msg = f"Failed to get interface dump on host {node[u'host']}" + with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) - logger.debug('Received data:\n{d!r}'.format(d=details)) + logger.debug(f"Received data:\n{details!r}") data = list() if interface is None else dict() for dump in details: if interface is None: data.append(process_if_dump(dump)) - elif str(dump.get(param)).rstrip('\x00') == str(interface): + elif str(dump.get(param)).rstrip(u"\x00") == str(interface): data = process_if_dump(dump) break - logger.debug('Interface data:\n{if_data}'.format(if_data=data)) + logger.debug(f"Interface data:\n{data}") return data @staticmethod @@ -446,11 +445,12 @@ class InterfaceUtil(object): :rtype: str """ if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index) - if if_data['sup_sw_if_index'] != if_data['sw_if_index']: + if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]: if_data = InterfaceUtil.vpp_get_interface_data( - node, if_data['sup_sw_if_index']) + node, if_data[u"sup_sw_if_index"] + ) - return if_data.get('interface_name') + return if_data.get(u"interface_name") @staticmethod def vpp_get_interface_sw_index(node, interface_name): @@ -466,7 +466,7 @@ class InterfaceUtil(object): """ if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name) - return if_data.get('sw_if_index') + return if_data.get(u"sw_if_index") @staticmethod def vpp_get_interface_mac(node, interface): @@ -480,11 +480,11 @@ class InterfaceUtil(object): :rtype: str """ if_data = InterfaceUtil.vpp_get_interface_data(node, interface) - if if_data['sup_sw_if_index'] != if_data['sw_if_index']: + if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]: if_data = InterfaceUtil.vpp_get_interface_data( - node, if_data['sup_sw_if_index']) + node, if_data[u"sup_sw_if_index"]) - return if_data.get('l2_address') + return if_data.get(u"l2_address") @staticmethod def tg_set_interface_driver(node, pci_addr, driver): @@ -508,20 +508,17 @@ class 
InterfaceUtil(object): # Unbind from current driver if old_driver is not None: - cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\ - .format(pci_addr, old_driver) - (ret_code, _, _) = ssh.exec_command_sudo(cmd) + cmd = f"sh -c \"echo {pci_addr} > " \ + f"/sys/bus/pci/drivers/{old_driver}/unbind\"" + ret_code, _, _ = ssh.exec_command_sudo(cmd) if int(ret_code) != 0: - raise RuntimeError("'{0}' failed on '{1}'" - .format(cmd, node['host'])) + raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'") # Bind to the new driver - cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\ - .format(pci_addr, driver) - (ret_code, _, _) = ssh.exec_command_sudo(cmd) + cmd = f"sh -c \"echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind\"" + ret_code, _, _ = ssh.exec_command_sudo(cmd) if int(ret_code) != 0: - raise RuntimeError("'{0}' failed on '{1}'" - .format(cmd, node['host'])) + raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'") @staticmethod def tg_get_interface_driver(node, pci_addr): @@ -557,24 +554,23 @@ class InterfaceUtil(object): ssh = SSH() ssh.connect(node) - cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE) - (ret_code, _, _) = ssh.exec_command_sudo(cmd) + cmd = f"rm -f {InterfaceUtil.__UDEV_IF_RULES_FILE}" + ret_code, _, _ = ssh.exec_command_sudo(cmd) if int(ret_code) != 0: - raise RuntimeError("'{0}' failed on '{1}'" - .format(cmd, node['host'])) - - for interface in node['interfaces'].values(): - rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \ - '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \ - interface['name'] + '\\"' - cmd = 'sh -c "echo \'{0}\' >> {1}"'.format( - rule, InterfaceUtil.__UDEV_IF_RULES_FILE) - (ret_code, _, _) = ssh.exec_command_sudo(cmd) + raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'") + + for interface in node[u"interfaces"].values(): + rule = u'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \ + u'==\\"' + interface[u"mac_address"] + u'\\", NAME=\\"' + \ + interface[u"name"] + u'\\"' + cmd = f"sh -c \"echo '{rule}' >> " \ + f"{InterfaceUtil.__UDEV_IF_RULES_FILE}\"" + + ret_code, _, _ = ssh.exec_command_sudo(cmd) if int(ret_code) != 0: - raise RuntimeError("'{0}' failed on '{1}'" - .format(cmd, node['host'])) + raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'") - cmd = '/etc/init.d/udev restart' + cmd = u"/etc/init.d/udev restart" ssh.exec_command_sudo(cmd) @staticmethod @@ -584,10 +580,10 @@ class InterfaceUtil(object): :param node: Node to setup interfaces driver on (must be TG node).
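# Illustration of the udev rule assembled above, with made-up MAC and
# name values; the doubled backslashes survive one level of shell
# quoting, so the rules file ends up containing plain double quotes.
mac, name = "08:00:27:aa:bb:cc", "eth1"
rule = ('SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}'
        + '==\\"' + mac + '\\", NAME=\\"' + name + '\\"')
cmd = f"sh -c \"echo '{rule}' >> /etc/udev/rules.d/10-network.rules\""
print(cmd)
# After sudo sh -c runs, the rules file gains one line per interface:
#   SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="08:00:27:aa:bb:cc", NAME="eth1"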
:type node: dict """ - for interface in node['interfaces'].values(): - InterfaceUtil.tg_set_interface_driver(node, - interface['pci_address'], - interface['driver']) + for interface in node[u"interfaces"].values(): + InterfaceUtil.tg_set_interface_driver( + node, interface[u"pci_address"], interface[u"driver"] + ) @staticmethod def update_vpp_interface_data_on_node(node): @@ -605,20 +601,24 @@ class InterfaceUtil(object): interface_list = InterfaceUtil.vpp_get_interface_data(node) interface_dict = dict() for ifc in interface_list: - interface_dict[ifc['l2_address']] = ifc + interface_dict[ifc[u"l2_address"]] = ifc - for if_name, if_data in node['interfaces'].items(): - ifc_dict = interface_dict.get(if_data['mac_address']) + for if_name, if_data in node[u"interfaces"].items(): + ifc_dict = interface_dict.get(if_data[u"mac_address"]) if ifc_dict is not None: - if_data['name'] = ifc_dict['interface_name'] - if_data['vpp_sw_index'] = ifc_dict['sw_if_index'] - if_data['mtu'] = ifc_dict['mtu'][0] - logger.trace('Interface {ifc} found by MAC {mac}'.format( - ifc=if_name, mac=if_data['mac_address'])) + if_data[u"name"] = ifc_dict[u"interface_name"] + if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"] + if_data[u"mtu"] = ifc_dict[u"mtu"][0] + logger.trace( + f"Interface {if_name} found by MAC " + f"{if_data[u'mac_address']}" + ) else: - logger.trace('Interface {ifc} not found by MAC {mac}'.format( - ifc=if_name, mac=if_data['mac_address'])) - if_data['vpp_sw_index'] = None + logger.trace( + f"Interface {if_name} not found by MAC " + f"{if_data[u'mac_address']}" + ) + if_data[u"vpp_sw_index"] = None @staticmethod def update_nic_interface_names(node): @@ -629,24 +629,22 @@ class InterfaceUtil(object): :param node: Node dictionary. :type node: dict """ - for ifc in node['interfaces'].values(): - if_pci = ifc['pci_address'].replace('.', ':').split(':') - bus = '{:x}'.format(int(if_pci[1], 16)) - dev = '{:x}'.format(int(if_pci[2], 16)) - fun = '{:x}'.format(int(if_pci[3], 16)) - loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun) - if ifc['model'] == 'Intel-XL710': - ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc) - elif ifc['model'] == 'Intel-X710': - ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc) - elif ifc['model'] == 'Intel-X520-DA2': - ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc) - elif ifc['model'] == 'Cisco-VIC-1385': - ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc) - elif ifc['model'] == 'Cisco-VIC-1227': - ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc) + for ifc in node[u"interfaces"].values(): + if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":") + loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \ + f"{int(if_pci[3], 16):x}" + if ifc[u"model"] == u"Intel-XL710": + ifc[u"name"] = f"FortyGigabitEthernet{loc}" + elif ifc[u"model"] == u"Intel-X710": + ifc[u"name"] = f"TenGigabitEthernet{loc}" + elif ifc[u"model"] == u"Intel-X520-DA2": + ifc[u"name"] = f"TenGigabitEthernet{loc}" + elif ifc[u"model"] == u"Cisco-VIC-1385": + ifc[u"name"] = f"FortyGigabitEthernet{loc}" + elif ifc[u"model"] == u"Cisco-VIC-1227": + ifc[u"name"] = f"TenGigabitEthernet{loc}" else: - ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc) + ifc[u"name"] = f"UnknownEthernet{loc}" @staticmethod def update_nic_interface_names_on_all_duts(nodes): @@ -658,7 +656,7 @@ class InterfaceUtil(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: 
InterfaceUtil.update_nic_interface_names(node) @staticmethod @@ -686,19 +684,20 @@ class InterfaceUtil(object): ssh = SSH() ssh.connect(node) - cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat ' - '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;') + cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \ + u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;' - (ret_code, stdout, _) = ssh.exec_command(cmd) + ret_code, stdout, _ = ssh.exec_command(cmd) if int(ret_code) != 0: - raise RuntimeError('Get interface name and MAC failed') - tmp = "{" + stdout.rstrip().replace('\n', ',') + "}" + raise RuntimeError(u"Get interface name and MAC failed") + tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}" + interfaces = JsonParser().parse_data(tmp) - for interface in node['interfaces'].values(): - name = interfaces.get(interface['mac_address']) + for interface in node[u"interfaces"].values(): + name = interfaces.get(interface[u"mac_address"]) if name is None: continue - interface['name'] = name + interface[u"name"] = name # Set udev rules for interfaces if not skip_tg_udev: @@ -713,33 +712,37 @@ class InterfaceUtil(object): :type node: dict :returns: Nothing. :raises ValueError: If numa node is less than 0. - :raises RuntimeError: If update of numa node failes. + :raises RuntimeError: If update of numa node failed. """ + def check_cpu_node_count(node_n, val): + val = int(val) + if val < 0: + if CpuUtils.cpu_node_count(node_n) == 1: + val = 0 + else: + raise ValueError + return val ssh = SSH() for if_key in Topology.get_node_interfaces(node): if_pci = Topology.get_interface_pci_addr(node, if_key) ssh.connect(node) - cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci) + cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node" for _ in range(3): - (ret, out, _) = ssh.exec_command(cmd) + ret, out, _ = ssh.exec_command(cmd) if ret == 0: try: - numa_node = int(out) - if numa_node < 0: - if CpuUtils.cpu_node_count(node) == 1: - numa_node = 0 - else: - raise ValueError + numa_node = check_cpu_node_count(node, out) except ValueError: - logger.trace('Reading numa location failed for: {0}' - .format(if_pci)) + logger.trace( + f"Reading numa location failed for: {if_pci}" + ) else: - Topology.set_interface_numa_node(node, if_key, - numa_node) + Topology.set_interface_numa_node( + node, if_key, numa_node + ) break else: - raise RuntimeError('Update numa node failed for: {0}' - .format(if_pci)) + raise RuntimeError(f"Update numa node failed for: {if_pci}") @staticmethod def update_all_numa_nodes(nodes, skip_tg=False): @@ -753,15 +756,14 @@ class InterfaceUtil(object): :returns: Nothing. """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: InterfaceUtil.iface_update_numa_node(node) - elif node['type'] == NodeType.TG and not skip_tg: + elif node[u"type"] == NodeType.TG and not skip_tg: InterfaceUtil.iface_update_numa_node(node) @staticmethod - def update_all_interface_data_on_all_nodes(nodes, skip_tg=False, - skip_tg_udev=False, - numa_node=False): + def update_all_interface_data_on_all_nodes( + nodes, skip_tg=False, skip_tg_udev=False, numa_node=False): """Update interface names on all nodes in DICT__nodes.
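# Standalone sketch of the check_cpu_node_count() fallback above:
# sysfs reports numa_node == -1 on hosts without NUMA information,
# which is folded to node 0 when only one CPU node exists.
# cpu_node_count here stands in for CpuUtils.cpu_node_count(node).
def normalize_numa(raw, cpu_node_count):
    val = int(raw)            # int() tolerates the trailing newline
    if val < 0:
        if cpu_node_count == 1:
            return 0          # single-node host: -1 really means node 0
        raise ValueError(f"Unexpected numa_node value: {raw!r}")
    return val

assert normalize_numa("-1\n", cpu_node_count=1) == 0
assert normalize_numa("1\n", cpu_node_count=2) == 1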
This method updates the topology dictionary by querying interface lists @@ -777,16 +779,16 @@ class InterfaceUtil(object): :type numa_node: bool """ for node_data in nodes.values(): - if node_data['type'] == NodeType.DUT: + if node_data[u"type"] == NodeType.DUT: InterfaceUtil.update_vpp_interface_data_on_node(node_data) - elif node_data['type'] == NodeType.TG and not skip_tg: + elif node_data[u"type"] == NodeType.TG and not skip_tg: InterfaceUtil.update_tg_interface_data_on_node( node_data, skip_tg_udev) if numa_node: - if node_data['type'] == NodeType.DUT: + if node_data[u"type"] == NodeType.DUT: InterfaceUtil.iface_update_numa_node(node_data) - elif node_data['type'] == NodeType.TG and not skip_tg: + elif node_data[u"type"] == NodeType.TG and not skip_tg: InterfaceUtil.iface_update_numa_node(node_data) @staticmethod @@ -807,22 +809,22 @@ class InterfaceUtil(object): """ sw_if_index = InterfaceUtil.get_interface_index(node, interface) - cmd = 'create_vlan_subif' + cmd = u"create_vlan_subif" args = dict( sw_if_index=sw_if_index, vlan_id=int(vlan) ) - err_msg = 'Failed to create VLAN sub-interface on host {host}'.format( - host=node['host']) + err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}" + with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) - if_key = Topology.add_new_port(node, 'vlan_subif') + if_key = Topology.add_new_port(node, u"vlan_subif") Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) Topology.update_interface_name(node, if_key, ifc_name) - return '{ifc}.{vlan}'.format(ifc=interface, vlan=vlan), sw_if_index + return f"{interface}.{vlan}", sw_if_index @staticmethod def create_vxlan_interface(node, vni, source_ip, destination_ip): @@ -841,25 +843,27 @@ class InterfaceUtil(object): :raises RuntimeError: if it is unable to create VxLAN interface on the node. 
""" - src_address = ip_address(unicode(source_ip)) - dst_address = ip_address(unicode(destination_ip)) - - cmd = 'vxlan_add_del_tunnel' - args = dict(is_add=1, - is_ipv6=1 if src_address.version == 6 else 0, - instance=Constants.BITWISE_NON_ZERO, - src_address=src_address.packed, - dst_address=dst_address.packed, - mcast_sw_if_index=Constants.BITWISE_NON_ZERO, - encap_vrf_id=0, - decap_next_index=Constants.BITWISE_NON_ZERO, - vni=int(vni)) - err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\ - format(host=node['host']) + src_address = ip_address(source_ip) + dst_address = ip_address(destination_ip) + + cmd = u"vxlan_add_del_tunnel" + args = dict( + is_add=1, + is_ipv6=1 if src_address.version == 6 else 0, + instance=Constants.BITWISE_NON_ZERO, + src_address=src_address.packed, + dst_address=dst_address.packed, + mcast_sw_if_index=Constants.BITWISE_NON_ZERO, + encap_vrf_id=0, + decap_next_index=Constants.BITWISE_NON_ZERO, + vni=int(vni) + ) + err_msg = f"Failed to create VXLAN tunnel interface " \ + f"on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) - if_key = Topology.add_new_port(node, 'vxlan_tunnel') + if_key = Topology.add_new_port(node, u"vxlan_tunnel") Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) Topology.update_interface_name(node, if_key, ifc_name) @@ -884,12 +888,14 @@ class InterfaceUtil(object): """ sw_if_index = InterfaceUtil.get_interface_index(node, interface) - cmd = 'sw_interface_set_vxlan_bypass' - args = dict(is_ipv6=0, - sw_if_index=sw_if_index, - enable=1) - err_msg = 'Failed to set VXLAN bypass on interface on host {host}'.\ - format(host=node['host']) + cmd = u"sw_interface_set_vxlan_bypass" + args = dict( + is_ipv6=0, + sw_if_index=sw_if_index, + enable=1 + ) + err_msg = f"Failed to set VXLAN bypass on interface " \ + f"on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_replies(err_msg) @@ -916,16 +922,16 @@ class InterfaceUtil(object): :returns: Processed vxlan interface dump. 
:rtype: dict """ - if vxlan_dump['is_ipv6']: - vxlan_dump['src_address'] = \ - ip_address(unicode(vxlan_dump['src_address'])) - vxlan_dump['dst_address'] = \ - ip_address(unicode(vxlan_dump['dst_address'])) + if vxlan_dump[u"is_ipv6"]: + vxlan_dump[u"src_address"] = \ + ip_address(vxlan_dump[u"src_address"]) + vxlan_dump[u"dst_address"] = \ + ip_address(vxlan_dump[u"dst_address"]) else: - vxlan_dump['src_address'] = \ - ip_address(unicode(vxlan_dump['src_address'][0:4])) - vxlan_dump['dst_address'] = \ - ip_address(unicode(vxlan_dump['dst_address'][0:4])) + vxlan_dump[u"src_address"] = \ + ip_address(vxlan_dump[u"src_address"][0:4]) + vxlan_dump[u"dst_address"] = \ + ip_address(vxlan_dump[u"dst_address"][0:4]) return vxlan_dump if interface is not None: @@ -933,10 +939,12 @@ class InterfaceUtil(object): else: sw_if_index = int(Constants.BITWISE_NON_ZERO) - cmd = 'vxlan_tunnel_dump' - args = dict(sw_if_index=sw_if_index) - err_msg = 'Failed to get VXLAN dump on host {host}'.format( - host=node['host']) + cmd = u"vxlan_tunnel_dump" + args = dict( + sw_if_index=sw_if_index + ) + err_msg = f"Failed to get VXLAN dump on host {node[u'host']}" + with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) @@ -944,55 +952,17 @@ class InterfaceUtil(object): for dump in details: if interface is None: data.append(process_vxlan_dump(dump)) - elif dump['sw_if_index'] == sw_if_index: + elif dump[u"sw_if_index"] == sw_if_index: data = process_vxlan_dump(dump) break - logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data)) + logger.debug(f"VXLAN data:\n{data}") return data @staticmethod - def vhost_user_dump(node): - """Get vhost-user data for the given node. - - TODO: Move to VhostUser.py - - :param node: VPP node to get interface data from. - :type node: dict - :returns: List of dictionaries with all vhost-user interfaces. - :rtype: list - """ - def process_vhost_dump(vhost_dump): - """Process vhost dump. - - :param vhost_dump: Vhost interface dump. - :type vhost_dump: dict - :returns: Processed vhost interface dump. - :rtype: dict - """ - vhost_dump['interface_name'] = \ - vhost_dump['interface_name'].rstrip('\x00') - vhost_dump['sock_filename'] = \ - vhost_dump['sock_filename'].rstrip('\x00') - return vhost_dump - - cmd = 'sw_interface_vhost_user_dump' - err_msg = 'Failed to get vhost-user dump on host {host}'.format( - host=node['host']) - with PapiSocketExecutor(node) as papi_exec: - details = papi_exec.add(cmd).get_details(err_msg) - - for dump in details: - # In-place edits. - process_vhost_dump(dump) - - logger.debug('Vhost-user details:\n{vhost_details}'.format( - vhost_details=details)) - return details - - @staticmethod - def create_subinterface(node, interface, sub_id, outer_vlan_id=None, - inner_vlan_id=None, type_subif=None): + def create_subinterface( + node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None, + type_subif=None): """Create sub-interface on node. It is possible to set required sub-interface type and VLAN tag(s). 
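# Why process_vxlan_dump() slices [0:4] above: the dump returns a
# fixed 16-byte address buffer, and for IPv4 only the first 4 bytes
# are meaningful. Sketch with an illustrative buffer:
from ipaddress import ip_address

raw = b"\xc0\x00\x02\x01" + b"\x00" * 12    # 16-byte field from PAPI
assert ip_address(raw[0:4]) == ip_address("192.0.2.1")
assert ip_address(raw).version == 6         # all 16 bytes parse as IPv6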
@@ -1017,41 +987,41 @@ class InterfaceUtil(object): subif_types = type_subif.split() flags = 0 - if 'no_tags' in subif_types: + if u"no_tags" in subif_types: flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS - if 'one_tag' in subif_types: + if u"one_tag" in subif_types: flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG - if 'two_tags' in subif_types: + if u"two_tags" in subif_types: flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS - if 'dot1ad' in subif_types: + if u"dot1ad" in subif_types: flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD - if 'exact_match' in subif_types: + if u"exact_match" in subif_types: flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH - if 'default_sub' in subif_types: + if u"default_sub" in subif_types: flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT - if type_subif == 'default_sub': + if type_subif == u"default_sub": flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\ | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY - cmd = 'create_subif' + cmd = u"create_subif" args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), sub_id=int(sub_id), - sub_if_flags=flags.value if hasattr(flags, 'value') else int(flags), + sub_if_flags=flags.value if hasattr(flags, u"value") + else int(flags), outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0, inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0 ) - err_msg = 'Failed to create sub-interface on host {host}'.format( - host=node['host']) + err_msg = f"Failed to create sub-interface on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) - if_key = Topology.add_new_port(node, 'subinterface') + if_key = Topology.add_new_port(node, u"subinterface") Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) Topology.update_interface_name(node, if_key, ifc_name) - return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_if_index + return f"{interface}.{sub_id}", sw_if_index @staticmethod def create_gre_tunnel_interface(node, source_ip, destination_ip): @@ -1067,21 +1037,25 @@ class InterfaceUtil(object): :rtype: tuple :raises RuntimeError: If unable to create GRE tunnel interface. """ - cmd = 'gre_tunnel_add_del' - tunnel = dict(type=0, - instance=Constants.BITWISE_NON_ZERO, - src=str(source_ip), - dst=str(destination_ip), - outer_fib_id=0, - session_id=0) - args = dict(is_add=1, - tunnel=tunnel) - err_msg = 'Failed to create GRE tunnel interface on host {host}'.format( - host=node['host']) + cmd = u"gre_tunnel_add_del" + tunnel = dict( + type=0, + instance=Constants.BITWISE_NON_ZERO, + src=str(source_ip), + dst=str(destination_ip), + outer_fib_id=0, + session_id=0 + ) + args = dict( + is_add=1, + tunnel=tunnel + ) + err_msg = f"Failed to create GRE tunnel interface " \ + f"on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) - if_key = Topology.add_new_port(node, 'gre_tunnel') + if_key = Topology.add_new_port(node, u"gre_tunnel") Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) Topology.update_interface_name(node, if_key, ifc_name) @@ -1101,14 +1075,15 @@ class InterfaceUtil(object): :raises RuntimeError: If it is not possible to create loopback on the node. 
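# Sketch of the flag combination used by create_subif above; SubIfFlag
# is a stand-in for the real SubInterfaceFlags enum. With IntFlag the
# OR of two members keeps a .value attribute, while OR-ing plain ints
# or IntEnum members can decay to int, which is exactly what the
# hasattr(flags, u"value") guard above accommodates.
from enum import IntFlag

class SubIfFlag(IntFlag):
    SUB_IF_API_FLAG_ONE_TAG = 2
    SUB_IF_API_FLAG_EXACT_MATCH = 16

flags = SubIfFlag.SUB_IF_API_FLAG_ONE_TAG \
    | SubIfFlag.SUB_IF_API_FLAG_EXACT_MATCH
sub_if_flags = flags.value if hasattr(flags, "value") else int(flags)
assert sub_if_flags == 18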
""" - cmd = 'create_loopback' - args = dict(mac_address=L2Util.mac_to_bin(mac) if mac else 0) - err_msg = 'Failed to create loopback interface on host {host}'.format( - host=node['host']) + cmd = u"create_loopback" + args = dict( + mac_address=L2Util.mac_to_bin(mac) if mac else 0 + ) + err_msg = f"Failed to create loopback interface on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) - if_key = Topology.add_new_port(node, 'loopback') + if_key = Topology.add_new_port(node, u"loopback") Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) Topology.update_interface_name(node, if_key, ifc_name) @@ -1136,25 +1111,28 @@ class InterfaceUtil(object): :raises RuntimeError: If it is not possible to create bond interface on the node. """ - cmd = 'bond_create' + cmd = u"bond_create" args = dict( id=int(Constants.BITWISE_NON_ZERO), - use_custom_mac=False if mac is None else True, + use_custom_mac=bool(mac is not None), mac_address=L2Util.mac_to_bin(mac) if mac else None, - mode=getattr(LinkBondMode, 'BOND_API_MODE_{md}'.format( - md=mode.replace('-', '_').upper())).value, + mode=getattr( + LinkBondMode, + f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}" + ).value, lb=0 if load_balance is None else getattr( - LinkBondLoadBalanceAlgo, 'BOND_API_LB_ALGO_{lb}'.format( - lb=load_balance.upper())).value, + LinkBondLoadBalanceAlgo, + f"BOND_API_LB_ALGO_{load_balance.upper()}" + ).value, numa_only=False ) - err_msg = 'Failed to create bond interface on host {host}'.format( - host=node['host']) + err_msg = f"Failed to create bond interface on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) InterfaceUtil.add_eth_interface( - node, sw_if_index=sw_if_index, ifc_pfx='eth_bond') + node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond" + ) if_key = Topology.get_interface_by_sw_index(node, sw_if_index) return if_key @@ -1200,21 +1178,24 @@ class InterfaceUtil(object): the node. """ PapiSocketExecutor.run_cli_cmd( - node, 'set logging class avf level debug') - - cmd = 'avf_create' - args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr), - enable_elog=0, - rxq_num=int(num_rx_queues) if num_rx_queues else 0, - rxq_size=0, - txq_size=0) - err_msg = 'Failed to create AVF interface on host {host}'.format( - host=node['host']) + node, u"set logging class avf level debug" + ) + + cmd = u"avf_create" + args = dict( + pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr), + enable_elog=0, + rxq_num=int(num_rx_queues) if num_rx_queues else 0, + rxq_size=0, + txq_size=0 + ) + err_msg = f"Failed to create AVF interface on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) InterfaceUtil.add_eth_interface( - node, sw_if_index=sw_if_index, ifc_pfx='eth_avf') + node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf" + ) if_key = Topology.get_interface_by_sw_index(node, sw_if_index) return if_key @@ -1234,19 +1215,21 @@ class InterfaceUtil(object): :raises RuntimeError: If it is not possible to create RDMA interface on the node. 
""" - cmd = 'rdma_create' - args = dict(name=InterfaceUtil.pci_to_eth(node, pci_addr), - host_if=InterfaceUtil.pci_to_eth(node, pci_addr), - rxq_num=int(num_rx_queues) if num_rx_queues else 0, - rxq_size=0, - txq_size=0) - err_msg = 'Failed to create RDMA interface on host {host}'.format( - host=node['host']) + cmd = u"rdma_create" + args = dict( + name=InterfaceUtil.pci_to_eth(node, pci_addr), + host_if=InterfaceUtil.pci_to_eth(node, pci_addr), + rxq_num=int(num_rx_queues) if num_rx_queues else 0, + rxq_size=0, + txq_size=0 + ) + err_msg = f"Failed to create RDMA interface on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) InterfaceUtil.add_eth_interface( - node, sw_if_index=sw_if_index, ifc_pfx='eth_rdma') + node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma" + ) if_key = Topology.get_interface_by_sw_index(node, sw_if_index) return if_key @@ -1264,17 +1247,15 @@ class InterfaceUtil(object): :raises RuntimeError: If it is not possible to enslave physical interface to bond interface on the node. """ - cmd = 'bond_enslave' + cmd = u"bond_enslave" args = dict( sw_if_index=Topology.get_interface_sw_index(node, interface), bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if), is_passive=False, is_long_timeout=False ) - err_msg = 'Failed to enslave physical interface {ifc} to bond ' \ - 'interface {bond} on host {host}'.format(ifc=interface, - bond=bond_if, - host=node['host']) + err_msg = f"Failed to enslave physical interface {interface} to bond " \ + f"interface {bond_if} on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -1287,35 +1268,37 @@ class InterfaceUtil(object): :type node: dict :type verbose: bool """ - cmd = 'sw_interface_bond_dump' - err_msg = 'Failed to get bond interface dump on host {host}'.format( - host=node['host']) + cmd = u"sw_interface_bond_dump" + err_msg = f"Failed to get bond interface dump on host {node[u'host']}" - data = ('Bond data on node {host}:\n'.format(host=node['host'])) + data = f"Bond data on node {node[u'host']}:\n" with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd).get_details(err_msg) for bond in details: - data += ('{b}\n'.format(b=bond['interface_name'])) - data += (' mode: {m}\n'.format( - m=bond['mode'].name.replace('BOND_API_MODE_', '').lower())) - data += (' load balance: {lb}\n'.format( - lb=bond['lb'].name.replace('BOND_API_LB_ALGO_', '').lower())) - data += (' number of active slaves: {n}\n'.format( - n=bond['active_slaves'])) + data += f"{bond[u'interface_name']}\n" + data += u" mode: {m}\n".format( + m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower() + ) + data += u" load balance: {lb}\n".format( + lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower() + ) + data += f" number of active slaves: {bond[u'active_slaves']}\n" if verbose: slave_data = InterfaceUtil.vpp_bond_slave_dump( node, Topology.get_interface_by_sw_index( - node, bond['sw_if_index'])) + node, bond[u"sw_if_index"] + ) + ) for slave in slave_data: - if not slave['is_passive']: - data += (' {s}\n'.format(s=slave['interface_name'])) - data += (' number of slaves: {n}\n'.format(n=bond['slaves'])) + if not slave[u"is_passive"]: + data += f" {slave[u'interface_name']}\n" + data += f" number of slaves: {bond[u'slaves']}\n" if verbose: for slave in slave_data: - data += (' {s}\n'.format(s=slave['interface_name'])) - data += (' interface id: {i}\n'.format(i=bond['id'])) - data += (' 
sw_if_index: {i}\n'.format(i=bond['sw_if_index'])) + data += f" {slave[u'interface_name']}\n" + data += f" interface id: {bond[u'id']}\n" + data += f" sw_if_index: {bond[u'sw_if_index']}\n" logger.info(data) @staticmethod @@ -1329,16 +1312,16 @@ class InterfaceUtil(object): :returns: Bond slave interface data. :rtype: dict """ - cmd = 'sw_interface_slave_dump' - args = dict(sw_if_index=Topology.get_interface_sw_index( - node, interface)) - err_msg = 'Failed to get slave dump on host {host}'.format( - host=node['host']) + cmd = u"sw_interface_slave_dump" + args = dict( + sw_if_index=Topology.get_interface_sw_index(node, interface) + ) + err_msg = f"Failed to get slave dump on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) - logger.debug('Slave data:\n{slave_data}'.format(slave_data=details)) + logger.debug(f"Slave data:\n{details}") return details @staticmethod @@ -1351,12 +1334,12 @@ class InterfaceUtil(object): :type verbose: bool """ for node_data in nodes.values(): - if node_data['type'] == NodeType.DUT: + if node_data[u"type"] == NodeType.DUT: InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose) @staticmethod - def vpp_enable_input_acl_interface(node, interface, ip_version, - table_index): + def vpp_enable_input_acl_interface( + node, interface, ip_version, table_index): """Enable input acl on interface. :param node: VPP node to setup interface for input acl. @@ -1368,18 +1351,17 @@ class InterfaceUtil(object): :type ip_version: str :type table_index: int """ - cmd = 'input_acl_set_interface' + cmd = u"input_acl_set_interface" args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), - ip4_table_index=table_index if ip_version == 'ip4' + ip4_table_index=table_index if ip_version == u"ip4" else Constants.BITWISE_NON_ZERO, - ip6_table_index=table_index if ip_version == 'ip6' + ip6_table_index=table_index if ip_version == u"ip6" else Constants.BITWISE_NON_ZERO, - l2_table_index=table_index if ip_version == 'l2' + l2_table_index=table_index if ip_version == u"l2" else Constants.BITWISE_NON_ZERO, is_add=1) - err_msg = 'Failed to enable input acl on interface {ifc}'.format( - ifc=interface) + err_msg = f"Failed to enable input acl on interface {interface}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -1396,15 +1378,16 @@ class InterfaceUtil(object): :returns: Classify table name. :rtype: str """ - if isinstance(interface, basestring): + if isinstance(interface, str): sw_if_index = InterfaceUtil.get_sw_if_index(node, interface) else: sw_if_index = interface - cmd = 'classify_table_by_interface' - args = dict(sw_if_index=sw_if_index) - err_msg = 'Failed to get classify table name by interface {ifc}'.format( - ifc=interface) + cmd = u"classify_table_by_interface" + args = dict( + sw_if_index=sw_if_index + ) + err_msg = f"Failed to get classify table name by interface {interface}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd, **args).get_reply(err_msg) @@ -1422,8 +1405,9 @@ class InterfaceUtil(object): :rtype: str """ interface_data = InterfaceUtil.vpp_get_interface_data( - node, interface=interface_name) - return interface_data.get('sw_if_index') + node, interface=interface_name + ) + return interface_data.get(u"sw_if_index") @staticmethod def vxlan_gpe_dump(node, interface_name=None): @@ -1447,28 +1431,26 @@ class InterfaceUtil(object): :returns: Processed vxlan_gpe interface dump. 
:rtype: dict """ - if vxlan_dump['is_ipv6']: - vxlan_dump['local'] = \ - ip_address(unicode(vxlan_dump['local'])) - vxlan_dump['remote'] = \ - ip_address(unicode(vxlan_dump['remote'])) + if vxlan_dump[u"is_ipv6"]: + vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"]) + vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"]) else: - vxlan_dump['local'] = \ - ip_address(unicode(vxlan_dump['local'][0:4])) - vxlan_dump['remote'] = \ - ip_address(unicode(vxlan_dump['remote'][0:4])) + vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4]) + vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4]) return vxlan_dump if interface_name is not None: sw_if_index = InterfaceUtil.get_interface_index( - node, interface_name) + node, interface_name + ) else: sw_if_index = int(Constants.BITWISE_NON_ZERO) - cmd = 'vxlan_gpe_tunnel_dump' - args = dict(sw_if_index=sw_if_index) - err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format( - host=node['host']) + cmd = u"vxlan_gpe_tunnel_dump" + args = dict( + sw_if_index=sw_if_index + ) + err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) @@ -1476,12 +1458,11 @@ class InterfaceUtil(object): for dump in details: if interface_name is None: data.append(process_vxlan_gpe_dump(dump)) - elif dump['sw_if_index'] == sw_if_index: + elif dump[u"sw_if_index"] == sw_if_index: data = process_vxlan_gpe_dump(dump) break - logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format( - vxlan_gpe_data=data)) + logger.debug(f"VXLAN-GPE data:\n{data}") return data @staticmethod @@ -1497,19 +1478,19 @@ class InterfaceUtil(object): :type table_id: int :type ipv6: bool """ - cmd = 'sw_interface_set_table' + cmd = u"sw_interface_set_table" args = dict( sw_if_index=InterfaceUtil.get_interface_index(node, interface), is_ipv6=ipv6, - vrf_id=int(table_id)) - err_msg = 'Failed to assign interface {ifc} to FIB table'.format( - ifc=interface) + vrf_id=int(table_id) + ) + err_msg = f"Failed to assign interface {interface} to FIB table" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod - def set_linux_interface_mac(node, interface, mac, namespace=None, - vf_id=None): + def set_linux_interface_mac( + node, interface, mac, namespace=None, vf_id=None): """Set MAC address for interface in linux. :param node: Node where to execute command. @@ -1523,17 +1504,16 @@ class InterfaceUtil(object): :type namespace: str :type vf_id: int """ - mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \ - if vf_id is not None else 'address {mac}'.format(mac=mac) - ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else '' + mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \ + else f"address {mac}" + ns_str = f"ip netns exec {namespace}" if namespace else u"" - cmd = ('{ns} ip link set {interface} {mac}'. - format(ns=ns_str, interface=interface, mac=mac_str)) + cmd = f"{ns_str} ip link set {interface} {mac_str}" exec_cmd_no_error(node, cmd, sudo=True) @staticmethod - def set_linux_interface_trust_on(node, interface, namespace=None, - vf_id=None): + def set_linux_interface_trust_on( + node, interface, namespace=None, vf_id=None): """Set trust on (promisc) for interface in linux. :param node: Node where to execute command. 
@@ -1545,17 +1525,15 @@ class InterfaceUtil(object): :type namespace: str :type vf_id: int """ - trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \ - if vf_id is not None else 'trust on' - ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else '' + trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on" + ns_str = f"ip netns exec {namespace}" if namespace else u"" - cmd = ('{ns} ip link set dev {interface} {trust}'. - format(ns=ns_str, interface=interface, trust=trust_str)) + cmd = f"{ns_str} ip link set dev {interface} {trust_str}" exec_cmd_no_error(node, cmd, sudo=True) @staticmethod - def set_linux_interface_spoof_off(node, interface, namespace=None, - vf_id=None): + def set_linux_interface_spoof_off( + node, interface, namespace=None, vf_id=None): """Set spoof off for interface in linux. :param node: Node where to execute command. @@ -1567,16 +1545,15 @@ class InterfaceUtil(object): :type namespace: str :type vf_id: int """ - spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \ - if vf_id is not None else 'spoof off' - ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else '' + spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \ + else u"spoof off" + ns_str = f"ip netns exec {namespace}" if namespace else u"" - cmd = ('{ns} ip link set dev {interface} {spoof}'. - format(ns=ns_str, interface=interface, spoof=spoof_str)) + cmd = f"{ns_str} ip link set dev {interface} {spoof_str}" exec_cmd_no_error(node, cmd, sudo=True) @staticmethod - def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'): + def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"): """Init PCI device by creating VIFs and bind them to vfio-pci for AVF driver testing on DUT. @@ -1598,13 +1575,13 @@ class InterfaceUtil(object): pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":") uio_driver = Topology.get_uio_driver(node) kernel_driver = Topology.get_interface_driver(node, ifc_key) - if kernel_driver not in ("i40e", "i40evf"): + if kernel_driver not in (u"i40e", u"i40evf"): raise RuntimeError( - "AVF needs i40e-compatible driver, not {driver} at node {host}" - " ifc {ifc}".format( - driver=kernel_driver, host=node["host"], ifc=ifc_key)) + f"AVF needs i40e-compatible driver, not {kernel_driver} " + f"at node {node[u'host']} ifc {ifc_key}" + ) current_driver = DUTSetup.get_pci_dev_driver( - node, pf_pci_addr.replace(':', r'\:')) + node, pf_pci_addr.replace(u":", r"\:")) VPPUtil.stop_vpp_service(node) if current_driver != kernel_driver: @@ -1622,29 +1599,33 @@ class InterfaceUtil(object): vf_ifc_keys = [] # Set MAC address and bind each virtual function to uio driver. 
for vf_id in range(numvfs): - vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2], - pf_mac_addr[3], pf_mac_addr[4], - pf_mac_addr[5], "{:02x}".format(vf_id)]) + vf_mac_addr = u":".join( + [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4], + pf_mac_addr[5], f"{vf_id:02x}" + ] + ) - pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\ - format(pci=pf_pci_addr) + pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`" InterfaceUtil.set_linux_interface_trust_on(node, pf_dev, vf_id=vf_id) - if osi_layer == 'L2': - InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev, - vf_id=vf_id) - InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr, - vf_id=vf_id) + if osi_layer == u"L2": + InterfaceUtil.set_linux_interface_spoof_off( + node, pf_dev, vf_id=vf_id + ) + InterfaceUtil.set_linux_interface_mac( + node, pf_dev, vf_mac_addr, vf_id=vf_id + ) DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id) DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver) # Add newly created ports into topology file - vf_ifc_name = '{pf_if_key}_vif'.format(pf_if_key=ifc_key) + vf_ifc_name = f"{ifc_key}_vif" vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id) vf_ifc_key = Topology.add_new_port(node, vf_ifc_name) - Topology.update_interface_name(node, vf_ifc_key, - vf_ifc_name+str(vf_id+1)) + Topology.update_interface_name( + node, vf_ifc_key, vf_ifc_name+str(vf_id+1) + ) Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr) Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr) vf_ifc_keys.append(vf_ifc_key) @@ -1660,19 +1641,18 @@ class InterfaceUtil(object): :returns: Thread mapping information as a list of dictionaries. :rtype: list """ - cmd = 'sw_interface_rx_placement_dump' - err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format( - cmd=cmd, host=node['host']) + cmd = u"sw_interface_rx_placement_dump" + err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!" with PapiSocketExecutor(node) as papi_exec: - for ifc in node['interfaces'].values(): - if ifc['vpp_sw_index'] is not None: - papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index']) + for ifc in node[u"interfaces"].values(): + if ifc[u"vpp_sw_index"] is not None: + papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"]) details = papi_exec.get_details(err_msg) - return sorted(details, key=lambda k: k['sw_if_index']) + return sorted(details, key=lambda k: k[u"sw_if_index"]) @staticmethod - def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id, - worker_id): + def vpp_sw_interface_set_rx_placement( + node, sw_if_index, queue_id, worker_id): """Set interface RX placement to worker on node. :param node: Node to run command on. @@ -1686,9 +1666,9 @@ class InterfaceUtil(object): :raises RuntimeError: If failed to run command on host or if no API reply received. """ - cmd = 'sw_interface_set_rx_placement' - err_msg = "Failed to set interface RX placement to worker on host " \ - "{host}!".format(host=node['host']) + cmd = u"sw_interface_set_rx_placement" + err_msg = f"Failed to set interface RX placement to worker " \ + f"on host {node[u'host']}!" 
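# Worked example of the VF MAC derivation above, with an illustrative
# PF MAC: octet 1 of the PF address is dropped, the remaining five
# octets shift left, and the VF id becomes the final octet (matching
# the join over indices 0, 2, 3, 4, 5 in the code above).
pf_mac_addr = "3c:fd:fe:9c:ee:d8".split(":")
vf_id = 1
vf_mac_addr = ":".join(
    [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3],
     pf_mac_addr[4], pf_mac_addr[5], f"{vf_id:02x}"]
)
assert vf_mac_addr == "3c:fe:9c:ee:d8:01"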
args = dict( sw_if_index=sw_if_index, queue_id=queue_id, @@ -1713,12 +1693,13 @@ class InterfaceUtil(object): if not worker_cnt: return for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node): - for interface in node['interfaces'].values(): - if placement['sw_if_index'] == interface['vpp_sw_index'] \ - and prefix in interface['name']: + for interface in node[u"interfaces"].values(): + if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \ + and prefix in interface[u"name"]: InterfaceUtil.vpp_sw_interface_set_rx_placement( - node, placement['sw_if_index'], placement['queue_id'], - worker_id % worker_cnt) + node, placement[u"sw_if_index"], placement[u"queue_id"], + worker_id % worker_cnt + ) worker_id += 1 @staticmethod @@ -1732,5 +1713,5 @@ class InterfaceUtil(object): :type prefix: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: InterfaceUtil.vpp_round_robin_rx_placement(node, prefix) diff --git a/resources/libraries/python/KubernetesUtils.py b/resources/libraries/python/KubernetesUtils.py index 029d635c72..d0d72a39a1 100644 --- a/resources/libraries/python/KubernetesUtils.py +++ b/resources/libraries/python/KubernetesUtils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -13,25 +13,27 @@ """Library to control Kubernetes kubectl.""" +from functools import reduce +from io import open from time import sleep from resources.libraries.python.Constants import Constants -from resources.libraries.python.topology import NodeType -from resources.libraries.python.ssh import SSH, exec_cmd_no_error from resources.libraries.python.CpuUtils import CpuUtils +from resources.libraries.python.ssh import SSH, exec_cmd_no_error +from resources.libraries.python.topology import NodeType from resources.libraries.python.VppConfigGenerator import VppConfigGenerator -__all__ = ["KubernetesUtils"] +__all__ = [u"KubernetesUtils"] # Maximum number of retries to check if PODs are running or deleted. MAX_RETRY = 48 -class KubernetesUtils(object): + +class KubernetesUtils: """Kubernetes utilities class.""" def __init__(self): """Initialize KubernetesUtils class.""" - pass @staticmethod def load_docker_image_on_node(node, image_path): @@ -43,20 +45,18 @@ class KubernetesUtils(object): :type image_path: str :raises RuntimeError: If loading image failed on node. """ - command = 'docker load -i {image_path}'.\ - format(image_path=image_path) - message = 'Failed to load Docker image on {node}.'.\ - format(node=node['host']) - exec_cmd_no_error(node, command, timeout=240, sudo=True, - message=message) - - command = "docker rmi $(sudo docker images -f 'dangling=true' -q)".\ - format(image_path=image_path) - message = 'Failed to clean Docker images on {node}.'.\ - format(node=node['host']) + command = f"docker load -i {image_path}" + message = f"Failed to load Docker image on {node[u'host']}." + exec_cmd_no_error( + node, command, timeout=240, sudo=True, message=message + ) + + command = u"docker rmi $(sudo docker images -f 'dangling=true' -q)" + message = f"Failed to clean Docker images on {node[u'host']}." 
try: - exec_cmd_no_error(node, command, timeout=240, sudo=True, - message=message) + exec_cmd_no_error( + node, command, timeout=240, sudo=True, message=message + ) except RuntimeError: pass @@ -70,7 +70,7 @@ class KubernetesUtils(object): :type image_path: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: KubernetesUtils.load_docker_image_on_node(node, image_path) @staticmethod @@ -84,16 +84,17 @@ class KubernetesUtils(object): ssh = SSH() ssh.connect(node) - cmd = '{dir}/{lib}/k8s_setup.sh deploy_calico'\ - .format(dir=Constants.REMOTE_FW_DIR, - lib=Constants.RESOURCES_LIB_SH) - (ret_code, _, _) = ssh.exec_command(cmd, timeout=240) + cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}/" \ + f"k8s_setup.sh deploy_calico" + ret_code, _, _ = ssh.exec_command(cmd, timeout=240) if int(ret_code) != 0: - raise RuntimeError('Failed to setup Kubernetes on {node}.' - .format(node=node['host'])) + raise RuntimeError( + f"Failed to setup Kubernetes on {node[u'host']}." + ) - KubernetesUtils.wait_for_kubernetes_pods_on_node(node, - nspace='kube-system') + KubernetesUtils.wait_for_kubernetes_pods_on_node( + node, nspace=u"kube-system" + ) @staticmethod def setup_kubernetes_on_all_duts(nodes): @@ -103,7 +104,7 @@ class KubernetesUtils(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: KubernetesUtils.setup_kubernetes_on_node(node) @staticmethod @@ -117,13 +118,14 @@ class KubernetesUtils(object): ssh = SSH() ssh.connect(node) - cmd = '{dir}/{lib}/k8s_setup.sh destroy'\ - .format(dir=Constants.REMOTE_FW_DIR, - lib=Constants.RESOURCES_LIB_SH) - (ret_code, _, _) = ssh.exec_command(cmd, timeout=120) + cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}/" \ + f"k8s_setup.sh destroy" + + ret_code, _, _ = ssh.exec_command(cmd, timeout=120) if int(ret_code) != 0: - raise RuntimeError('Failed to destroy Kubernetes on {node}.' - .format(node=node['host'])) + raise RuntimeError( + f"Failed to destroy Kubernetes on {node[u'host']}." + ) @staticmethod def destroy_kubernetes_on_all_duts(nodes): @@ -133,7 +135,7 @@ class KubernetesUtils(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: KubernetesUtils.destroy_kubernetes_on_node(node) @staticmethod @@ -151,19 +153,20 @@ class KubernetesUtils(object): ssh = SSH() ssh.connect(node) - fqn_file = '{tpl}/{yaml}'.format(tpl=Constants.RESOURCES_TPL_K8S, - yaml=yaml_file) + fqn_file = f"{Constants.RESOURCES_TPL_K8S}/{yaml_file}" with open(fqn_file, 'r') as src_file: stream = src_file.read() - data = reduce(lambda a, kv: a.replace(*kv), kwargs.iteritems(), - stream) - cmd = 'cat <<EOF | kubectl apply -f - \n{data}\nEOF'.format( - data=data) - (ret_code, _, _) = ssh.exec_command_sudo(cmd) + data = reduce( + lambda a, kv: a.replace(*kv), list(kwargs.items()), stream + ) + cmd = f"cat <<EOF | kubectl apply -f - \n{data}\nEOF" + + ret_code, _, _ = ssh.exec_command_sudo(cmd) if int(ret_code) != 0: - raise RuntimeError('Failed to apply Kubernetes template {yaml} ' 'on {node}.'.format(yaml=yaml_file, node=node['host'])) + raise RuntimeError( + f"Failed to apply Kubernetes template {yaml_file} " + f"on {node[u'host']}."
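# Sketch of the reduce()-based template substitution used by
# apply_kubernetes_resource_on_node above: each (placeholder, value)
# pair becomes one str.replace() pass over the YAML text. The template
# and placeholder names here are illustrative.
from functools import reduce

stream = "image: $$VPP_IMAGE$$\ncpus: $$CPUS$$\n"
kwargs = {"$$VPP_IMAGE$$": "fdio/vpp:latest", "$$CPUS$$": "2"}
data = reduce(lambda a, kv: a.replace(*kv), list(kwargs.items()), stream)
assert data == "image: fdio/vpp:latest\ncpus: 2\n"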
+ ) @staticmethod def apply_kubernetes_resource_on_all_duts(nodes, yaml_file, **kwargs): @@ -177,10 +180,10 @@ class KubernetesUtils(object): :type kwargs: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: - KubernetesUtils.apply_kubernetes_resource_on_node(node, - yaml_file, - **kwargs) + if node[u"type"] == NodeType.DUT: + KubernetesUtils.apply_kubernetes_resource_on_node( + node, yaml_file, **kwargs + ) @staticmethod def create_kubernetes_cm_from_file_on_node(node, nspace, name, **kwargs): @@ -199,21 +202,21 @@ class KubernetesUtils(object): ssh = SSH() ssh.connect(node) - nspace = '-n {nspace}'.format(nspace=nspace) if nspace else '' - - from_file = '{0}'.format(' '.join('--from-file={0}={1} '\ - .format(key, kwargs[key]) for key in kwargs)) + nspace = f"-n {nspace}" if nspace else u"" + from_file = u" ".join( + f"--from-file={key}={kwargs[key]} " for key in kwargs + ) + cmd = f"kubectl create {nspace} configmap {name} {from_file}" - cmd = 'kubectl create {nspace} configmap {name} {from_file}'\ - .format(nspace=nspace, name=name, from_file=from_file) - (ret_code, _, _) = ssh.exec_command_sudo(cmd) + ret_code, _, _ = ssh.exec_command_sudo(cmd) if int(ret_code) != 0: - raise RuntimeError('Failed to create Kubernetes ConfigMap ' - 'on {node}.'.format(node=node['host'])) + raise RuntimeError( + f"Failed to create Kubernetes ConfigMap on {node[u'host']}." + ) @staticmethod - def create_kubernetes_cm_from_file_on_all_duts(nodes, nspace, name, - **kwargs): + def create_kubernetes_cm_from_file_on_all_duts( + nodes, nspace, name, **kwargs): """Create Kubernetes ConfigMap from file on all DUTs. :param nodes: Topology nodes. @@ -226,15 +229,14 @@ class KubernetesUtils(object): :param kwargs: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: - KubernetesUtils.create_kubernetes_cm_from_file_on_node(node, - nspace, - name, - **kwargs) + if node[u"type"] == NodeType.DUT: + KubernetesUtils.create_kubernetes_cm_from_file_on_node( + node, nspace, name, **kwargs + ) @staticmethod - def delete_kubernetes_resource_on_node(node, nspace, name=None, - rtype='po,cm,deploy,rs,rc,svc'): + def delete_kubernetes_resource_on_node( + node, nspace, name=None, rtype=u"po,cm,deploy,rs,rc,svc"): """Delete Kubernetes resource on node. :param node: DUT node. @@ -251,27 +253,28 @@ class KubernetesUtils(object): ssh = SSH() ssh.connect(node) - name = '{name}'.format(name=name) if name else '--all' - nspace = '-n {nspace}'.format(nspace=nspace) if nspace else '' + name = f"{name}" if name else u"--all" + nspace = f"-n {nspace}" if nspace else u"" + cmd = f"kubectl delete {nspace} {rtype} {name}" - cmd = 'kubectl delete {nspace} {rtype} {name}'\ - .format(nspace=nspace, rtype=rtype, name=name) - (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120) + ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=120) if int(ret_code) != 0: - raise RuntimeError('Failed to delete Kubernetes resources ' - 'on {node}.'.format(node=node['host'])) + raise RuntimeError( + f"Failed to delete Kubernetes resources on {node[u'host']}." 
+ ) - cmd = 'kubectl get {nspace} pods --no-headers'\ - .format(nspace=nspace) + cmd = f"kubectl get {nspace} pods --no-headers" for _ in range(MAX_RETRY): - (ret_code, stdout, stderr) = ssh.exec_command_sudo(cmd) + ret_code, stdout, stderr = ssh.exec_command_sudo(cmd) if int(ret_code) != 0: - raise RuntimeError('Failed to retrieve Kubernetes resources on ' - '{node}.'.format(node=node['host'])) - if name == '--all': + raise RuntimeError( + f"Failed to retrieve Kubernetes resources " + f"on {node[u'host']}." + ) + if name == u"--all": ready = False for line in stderr.splitlines(): - if 'No resources found.' in line: + if u"No resources found." in line: ready = True if ready: break @@ -279,9 +282,10 @@ class KubernetesUtils(object): ready = False for line in stdout.splitlines(): try: - state = line.split()[1].split('/') - ready = True if 'Running' in line and\ - state == state[::-1] else False + state = line.split()[1].split(u"/") + ready = bool( + u"Running" in line and state == state[::-1] + ) if not ready: break except (ValueError, IndexError): @@ -290,12 +294,13 @@ class KubernetesUtils(object): break sleep(5) else: - raise RuntimeError('Failed to delete Kubernetes resources on ' - '{node}.'.format(node=node['host'])) + raise RuntimeError( + f"Failed to delete Kubernetes resources on {node[u'host']}." + ) @staticmethod - def delete_kubernetes_resource_on_all_duts(nodes, nspace, name=None, - rtype='po,cm,deploy,rs,rc,svc'): + def delete_kubernetes_resource_on_all_duts( + nodes, nspace, name=None, rtype=u"po,cm,deploy,rs,rc,svc"): """Delete all Kubernetes resource on all DUTs. :param nodes: Topology nodes. @@ -308,9 +313,10 @@ class KubernetesUtils(object): :type name: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: - KubernetesUtils.delete_kubernetes_resource_on_node(node, nspace, - name, rtype) + if node[u"type"] == NodeType.DUT: + KubernetesUtils.delete_kubernetes_resource_on_node( + node, nspace, name, rtype + ) @staticmethod def describe_kubernetes_resource_on_node(node, nspace): @@ -324,9 +330,9 @@ class KubernetesUtils(object): ssh = SSH() ssh.connect(node) - nspace = '-n {nspace}'.format(nspace=nspace) if nspace else '' + nspace = f"-n {nspace}" if nspace else u"" + cmd = f"kubectl describe {nspace} all" - cmd = 'kubectl describe {nspace} all'.format(nspace=nspace) ssh.exec_command_sudo(cmd) @staticmethod @@ -339,9 +345,10 @@ class KubernetesUtils(object): :type nspace: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: - KubernetesUtils.describe_kubernetes_resource_on_node(node, - nspace) + if node[u"type"] == NodeType.DUT: + KubernetesUtils.describe_kubernetes_resource_on_node( + node, nspace + ) @staticmethod def get_kubernetes_logs_on_node(node, nspace): @@ -355,15 +362,16 @@ class KubernetesUtils(object): ssh = SSH() ssh.connect(node) - nspace = '-n {nspace}'.format(nspace=nspace) if nspace else '' + nspace = f"-n {nspace}" if nspace else u"" + cmd = f"for p in $(kubectl get pods {nspace} " \ + f"-o jsonpath='{{.items[*].metadata.name}}'); do echo $p; " \ + f"kubectl logs {nspace} $p; done" - cmd = "for p in $(kubectl get pods {nspace} -o jsonpath="\ - "'{{.items[*].metadata.name}}'); do echo $p; kubectl logs "\ - "{nspace} $p; done".format(nspace=nspace) ssh.exec_command(cmd) - cmd = "kubectl exec {nspace} etcdv3 -- etcdctl --endpoints "\ - "\"localhost:22379\" get \"/\" --prefix=true".format(nspace=nspace) + cmd = f"kubectl exec {nspace} etcdv3 -- etcdctl " \ + f"--endpoints \"localhost:22379\" get \"/\" --prefix=true" + 
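# The state == state[::-1] test above is a compact check that the
# kubectl READY column (e.g. "2/2") has matching ready/desired counts:
# splitting on "/" gives a two-element list that equals its own
# reverse only when both entries agree. Sketch with illustrative
# kubectl output lines:
def pod_ready(line):
    state = line.split()[1].split("/")
    return "Running" in line and state == state[::-1]

assert pod_ready("etcdv3    1/1   Running   0   1m")
assert not pod_ready("vswitch   1/2   Running   0   1m")
assert not pod_ready("vswitch   2/2   Pending   0   1m")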
ssh.exec_command(cmd) @staticmethod @@ -376,7 +384,7 @@ class KubernetesUtils(object): :type nspace: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: KubernetesUtils.get_kubernetes_logs_on_node(node, nspace) @staticmethod @@ -392,20 +400,19 @@ class KubernetesUtils(object): ssh = SSH() ssh.connect(node) - nspace = '-n {nspace}'.format(nspace=nspace) if nspace \ - else '--all-namespaces' + nspace = f"-n {nspace}" if nspace else u"--all-namespaces" + cmd = f"kubectl get {nspace} pods --no-headers" - cmd = 'kubectl get {nspace} pods --no-headers' \ - .format(nspace=nspace) for _ in range(MAX_RETRY): - (ret_code, stdout, _) = ssh.exec_command_sudo(cmd) + ret_code, stdout, _ = ssh.exec_command_sudo(cmd) if int(ret_code) == 0: ready = False for line in stdout.splitlines(): try: - state = line.split()[1].split('/') - ready = True if 'Running' in line and \ - state == state[::-1] else False + state = line.split()[1].split(u"/") + ready = bool( + u"Running" in line and state == state[::-1] + ) if not ready: break except (ValueError, IndexError): @@ -414,8 +421,9 @@ class KubernetesUtils(object): break sleep(5) else: - raise RuntimeError('Kubernetes PODs are not running on {node}.' - .format(node=node['host'])) + raise RuntimeError( + f"Kubernetes PODs are not running on {node[u'host']}." + ) @staticmethod def wait_for_kubernetes_pods_on_all_duts(nodes, nspace): @@ -427,7 +435,7 @@ class KubernetesUtils(object): :type nspace: str """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: KubernetesUtils.wait_for_kubernetes_pods_on_node(node, nspace) @staticmethod @@ -440,9 +448,9 @@ class KubernetesUtils(object): ssh = SSH() ssh.connect(node) - cmd = '{dir}/{lib}/k8s_setup.sh affinity_non_vpp'\ - .format(dir=Constants.REMOTE_FW_DIR, - lib=Constants.RESOURCES_LIB_SH) + cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}/" \ + f"k8s_setup.sh affinity_non_vpp" + ssh.exec_command(cmd) @staticmethod @@ -453,7 +461,7 @@ class KubernetesUtils(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: KubernetesUtils.set_kubernetes_pods_affinity_on_node(node) @staticmethod @@ -463,44 +471,40 @@ class KubernetesUtils(object): :param kwargs: Key-value pairs used to create configuration. 
:type kwargs: dict """ - smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo']) - - cpuset_cpus = \ - CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'], - cpu_node=kwargs['cpu_node'], - skip_cnt=2, - cpu_cnt=kwargs['phy_cores'], - smt_used=smt_used) - cpuset_main = \ - CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'], - cpu_node=kwargs['cpu_node'], - skip_cnt=1, - cpu_cnt=1, - smt_used=smt_used) + smt_used = CpuUtils.is_smt_enabled(kwargs[u"node"][u"cpuinfo"]) + + cpuset_cpus = CpuUtils.cpu_slice_of_list_per_node( + node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], skip_cnt=2, + cpu_cnt=kwargs[u"phy_cores"], smt_used=smt_used + ) + cpuset_main = CpuUtils.cpu_slice_of_list_per_node( + node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], skip_cnt=1, + cpu_cnt=1, smt_used=smt_used + ) # Create config instance vpp_config = VppConfigGenerator() - vpp_config.set_node(kwargs['node']) - vpp_config.add_unix_cli_listen(value='0.0.0.0:5002') + vpp_config.set_node(kwargs[u"node"]) + vpp_config.add_unix_cli_listen(value=u"0.0.0.0:5002") vpp_config.add_unix_nodaemon() vpp_config.add_socksvr() - vpp_config.add_heapsize('4G') - vpp_config.add_ip_heap_size('4G') - vpp_config.add_ip6_heap_size('4G') - vpp_config.add_ip6_hash_buckets('2000000') - if not kwargs['jumbo']: + vpp_config.add_heapsize(u"4G") + vpp_config.add_ip_heap_size(u"4G") + vpp_config.add_ip6_heap_size(u"4G") + vpp_config.add_ip6_hash_buckets(u"2000000") + if not kwargs[u"jumbo"]: vpp_config.add_dpdk_no_multi_seg() vpp_config.add_dpdk_no_tx_checksum_offload() - vpp_config.add_dpdk_dev_default_rxq(kwargs['rxq_count_int']) - vpp_config.add_dpdk_dev(kwargs['if1'], kwargs['if2']) - vpp_config.add_buffers_per_numa(kwargs['buffers_per_numa']) + vpp_config.add_dpdk_dev_default_rxq(kwargs[u"rxq_count_int"]) + vpp_config.add_dpdk_dev(kwargs[u"if1"], kwargs[u"if2"]) + vpp_config.add_buffers_per_numa(kwargs[u"buffers_per_numa"]) # We will pop first core from list to be main core vpp_config.add_cpu_main_core(str(cpuset_main.pop(0))) # if this is not only core in list, the rest will be used as workers. if cpuset_cpus: - corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus) + corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus) vpp_config.add_cpu_corelist_workers(corelist_workers) - vpp_config.write_config(filename=kwargs['filename']) + vpp_config.write_config(filename=kwargs[u"filename"]) @staticmethod def create_kubernetes_vnf_startup_config(**kwargs): """Create Kubernetes VNF startup configuration. @@ -509,32 +513,28 @@ :param kwargs: Key-value pairs used to create configuration.
:type kwargs: dict """ - smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo']) - skip_cnt = kwargs['cpu_skip'] + (kwargs['i'] - 1) * \ - (kwargs['phy_cores'] - 1) - cpuset_cpus = \ - CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'], - cpu_node=kwargs['cpu_node'], - skip_cnt=skip_cnt, - cpu_cnt=kwargs['phy_cores']-1, - smt_used=smt_used) - cpuset_main = \ - CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'], - cpu_node=kwargs['cpu_node'], - skip_cnt=1, - cpu_cnt=1, - smt_used=smt_used) + smt_used = CpuUtils.is_smt_enabled(kwargs[u"node"][u"cpuinfo"]) + skip_cnt = kwargs[u"cpu_skip"] + (kwargs[u"i"] - 1) * \ + (kwargs[u"phy_cores"] - 1) + cpuset_cpus = CpuUtils.cpu_slice_of_list_per_node( + node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], + skip_cnt=skip_cnt, cpu_cnt=kwargs[u"phy_cores"]-1, smt_used=smt_used + ) + cpuset_main = CpuUtils.cpu_slice_of_list_per_node( + node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], skip_cnt=1, + cpu_cnt=1, smt_used=smt_used + ) # Create config instance vpp_config = VppConfigGenerator() - vpp_config.set_node(kwargs['node']) - vpp_config.add_unix_cli_listen(value='0.0.0.0:5002') + vpp_config.set_node(kwargs[u"node"]) + vpp_config.add_unix_cli_listen(value=u"0.0.0.0:5002") vpp_config.add_unix_nodaemon() vpp_config.add_socksvr() # We will pop first core from list to be main core vpp_config.add_cpu_main_core(str(cpuset_main.pop(0))) # if this is not only core in list, the rest will be used as workers. if cpuset_cpus: - corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus) + corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus) vpp_config.add_cpu_corelist_workers(corelist_workers) - vpp_config.add_plugin('disable', 'dpdk_plugin.so') - vpp_config.write_config(filename=kwargs['filename']) + vpp_config.add_plugin(u"disable", [u"dpdk_plugin.so"]) + vpp_config.write_config(filename=kwargs[u"filename"]) diff --git a/resources/libraries/python/L2Util.py b/resources/libraries/python/L2Util.py index 4ca0c47308..a49c556653 100644 --- a/resources/libraries/python/L2Util.py +++ b/resources/libraries/python/L2Util.py @@ -13,9 +13,6 @@ """L2 Utilities Library.""" -import binascii -from textwrap import wrap - from enum import IntEnum from resources.libraries.python.Constants import Constants @@ -37,7 +34,7 @@ class L2VtrOp(IntEnum): L2_VTR_TRANSLATE_2_2 = 8 -class L2Util(object): +class L2Util: """Utilities for l2 configuration.""" @staticmethod @@ -50,7 +47,7 @@ class L2Util(object): :returns: Integer representation of MAC address. :rtype: int """ - return int(mac_str.replace(':', ''), 16) + return int(mac_str.replace(u":", u""), 16) @staticmethod def int_to_mac(mac_int): @@ -62,7 +59,9 @@ class L2Util(object): :returns: String representation of MAC address. :rtype: str """ - return ':'.join(wrap("{:012x}".format(mac_int), width=2)) + return u":".join( + f"{hex(mac_int)[2:]:0>12}"[i:i+2] for i in range(0, 12, 2) + ) @staticmethod def mac_to_bin(mac_str): @@ -72,9 +71,9 @@ class L2Util(object): :param mac_str: MAC address in string representation. :type mac_str: str :returns: Binary representation of MAC address. - :rtype: binary + :rtype: bytes """ - return binascii.unhexlify(mac_str.replace(':', '')) + return bytes.fromhex(mac_str.replace(u":", u"")) @staticmethod def bin_to_mac(mac_bin): """Convert MAC address from binary format (\x01\x02\x03\x04\x05\x06) to string format (e.g. 01:02:03:04:05:06). :param mac_bin: MAC address in binary representation.
- :type mac_bin: binary + :type mac_bin: bytes :returns: String representation of MAC address. :rtype: str """ - mac_str = ':'.join(binascii.hexlify(mac_bin)[i:i + 2] - for i in range(0, 12, 2)) - return str(mac_str.decode('ascii')) + return u":".join(mac_bin.hex()[i:i + 2] for i in range(0, 12, 2)) @staticmethod - def vpp_add_l2fib_entry(node, mac, interface, bd_id, static_mac=1, - filter_mac=0, bvi_mac=0): + def vpp_add_l2fib_entry( + node, mac, interface, bd_id, static_mac=1, filter_mac=0, bvi_mac=0): """ Create a static L2FIB entry on a VPP node. :param node: Node to add L2FIB entry on. @@ -113,28 +110,29 @@ class L2Util(object): :type filter_mac: int or str :type bvi_mac: int or str """ - - if isinstance(interface, basestring): + if isinstance(interface, str): sw_if_index = Topology.get_interface_sw_index(node, interface) else: sw_if_index = interface - cmd = 'l2fib_add_del' - err_msg = 'Failed to add L2FIB entry on host {host}'.format( - host=node['host']) - args = dict(mac=L2Util.mac_to_bin(mac), - bd_id=int(bd_id), - sw_if_index=sw_if_index, - is_add=1, - static_mac=int(static_mac), - filter_mac=int(filter_mac), - bvi_mac=int(bvi_mac)) + cmd = u"l2fib_add_del" + err_msg = f"Failed to add L2FIB entry on host {node[u'host']}" + args = dict( + mac=L2Util.mac_to_bin(mac), + bd_id=int(bd_id), + sw_if_index=sw_if_index, + is_add=1, + static_mac=int(static_mac), + filter_mac=int(filter_mac), + bvi_mac=int(bvi_mac) + ) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod - def create_l2_bd(node, bd_id, flood=1, uu_flood=1, forward=1, learn=1, - arp_term=0): + def create_l2_bd( + node, bd_id, flood=1, uu_flood=1, forward=1, learn=1, arp_term=0): """Create an L2 bridge domain on a VPP node. :param node: Node where we wish to create the L2 bridge domain.
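# A quick self-contained check of the bytes-based MAC helpers the hunks
# above switch to (bytes.fromhex()/bytes.hex() replacing binascii and
# textwrap.wrap). Illustrative sketch only: it mirrors, rather than
# imports, the L2Util code, and the example address is arbitrary.
def mac_to_bin(mac_str):
    # "01:02:03:04:05:06" -> b"\x01\x02\x03\x04\x05\x06"
    return bytes.fromhex(mac_str.replace(u":", u""))

def bin_to_mac(mac_bin):
    # b"\x01\x02\x03\x04\x05\x06" -> "01:02:03:04:05:06"
    return u":".join(mac_bin.hex()[i:i + 2] for i in range(0, 12, 2))

assert mac_to_bin(u"01:02:03:04:05:06") == b"\x01\x02\x03\x04\x05\x06"
assert bin_to_mac(mac_to_bin(u"01:02:03:04:05:06")) == u"01:02:03:04:05:06"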
@@ -157,17 +155,17 @@ class L2Util(object): :type learn: int or str :type arp_term: int or str """ - - cmd = 'bridge_domain_add_del' - err_msg = 'Failed to create L2 bridge domain on host {host}'.format( - host=node['host']) - args = dict(bd_id=int(bd_id), - flood=int(flood), - uu_flood=int(uu_flood), - forward=int(forward), - learn=int(learn), - arp_term=int(arp_term), - is_add=1) + cmd = u"bridge_domain_add_del" + err_msg = f"Failed to create L2 bridge domain on host {node[u'host']}" + args = dict( + bd_id=int(bd_id), + flood=int(flood), + uu_flood=int(uu_flood), + forward=int(forward), + learn=int(learn), + arp_term=int(arp_term), + is_add=1 + ) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -189,17 +187,19 @@ class L2Util(object): :type shg: int or str :type port_type: int or str """ - sw_if_index = Topology.get_interface_sw_index(node, interface) - cmd = 'sw_interface_set_l2_bridge' - err_msg = 'Failed to add interface {ifc} to L2 bridge domain on host ' \ - '{host}'.format(ifc=interface, host=node['host']) - args = dict(rx_sw_if_index=sw_if_index, - bd_id=int(bd_id), - shg=int(shg), - port_type=int(port_type), - enable=1) + cmd = u"sw_interface_set_l2_bridge" + err_msg = f"Failed to add interface {interface} to L2 bridge domain " \ + f"on host {node[u'host']}" + args = dict( + rx_sw_if_index=sw_if_index, + bd_id=int(bd_id), + shg=int(shg), + port_type=int(port_type), + enable=1 + ) + with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -218,35 +218,40 @@ class L2Util(object): :type port_2: str :type learn: bool """ - sw_if_index1 = Topology.get_interface_sw_index(node, port_1) sw_if_index2 = Topology.get_interface_sw_index(node, port_2) learn_int = 1 if learn else 0 - cmd1 = 'bridge_domain_add_del' - args1 = dict(bd_id=int(bd_id), - flood=1, - uu_flood=1, - forward=1, - learn=learn_int, - arp_term=0, - is_add=1) - - cmd2 = 'sw_interface_set_l2_bridge' - args2 = dict(rx_sw_if_index=sw_if_index1, - bd_id=int(bd_id), - shg=0, - port_type=0, - enable=1) - - args3 = dict(rx_sw_if_index=sw_if_index2, - bd_id=int(bd_id), - shg=0, - port_type=0, - enable=1) - - err_msg = 'Failed to add L2 bridge domain with 2 interfaces on host' \ - ' {host}'.format(host=node['host']) + cmd1 = u"bridge_domain_add_del" + args1 = dict( + bd_id=int(bd_id), + flood=1, + uu_flood=1, + forward=1, + learn=learn_int, + arp_term=0, + is_add=1 + ) + + cmd2 = u"sw_interface_set_l2_bridge" + args2 = dict( + rx_sw_if_index=sw_if_index1, + bd_id=int(bd_id), + shg=0, + port_type=0, + enable=1 + ) + + args3 = dict( + rx_sw_if_index=sw_if_index2, + bd_id=int(bd_id), + shg=0, + port_type=0, + enable=1 + ) + + err_msg = f"Failed to add L2 bridge domain with 2 interfaces " \ + f"on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd1, **args1).add(cmd2, **args2).add(cmd2, **args3) @@ -263,27 +268,29 @@ class L2Util(object): :type interface1: str or int :type interface2: str or int """ - - if isinstance(interface1, basestring): + if isinstance(interface1, str): sw_iface1 = Topology().get_interface_sw_index(node, interface1) else: sw_iface1 = interface1 - if isinstance(interface2, basestring): + if isinstance(interface2, str): sw_iface2 = Topology().get_interface_sw_index(node, interface2) else: sw_iface2 = interface2 - cmd = 'sw_interface_set_l2_xconnect' - args1 = dict(rx_sw_if_index=sw_iface1, - tx_sw_if_index=sw_iface2, - enable=1) - args2 = dict(rx_sw_if_index=sw_iface2, - tx_sw_if_index=sw_iface1, - 
enable=1) - - err_msg = 'Failed to add L2 cross-connect between two interfaces on' \ - ' host {host}'.format(host=node['host']) + cmd = u"sw_interface_set_l2_xconnect" + args1 = dict( + rx_sw_if_index=sw_iface1, + tx_sw_if_index=sw_iface2, + enable=1 + ) + args2 = dict( + rx_sw_if_index=sw_iface2, + tx_sw_if_index=sw_iface1, + enable=1 + ) + err_msg = f"Failed to add L2 cross-connect between two interfaces " \ + f"on host {node['host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg) @@ -299,27 +306,29 @@ class L2Util(object): :type interface1: str or int :type interface2: str or int """ - - if isinstance(interface1, basestring): + if isinstance(interface1, str): sw_iface1 = Topology().get_interface_sw_index(node, interface1) else: sw_iface1 = interface1 - if isinstance(interface2, basestring): + if isinstance(interface2, str): sw_iface2 = Topology().get_interface_sw_index(node, interface2) else: sw_iface2 = interface2 - cmd = 'l2_patch_add_del' - args1 = dict(rx_sw_if_index=sw_iface1, - tx_sw_if_index=sw_iface2, - is_add=1) - args2 = dict(rx_sw_if_index=sw_iface2, - tx_sw_if_index=sw_iface1, - is_add=1) - - err_msg = 'Failed to add L2 patch between two interfaces on' \ - ' host {host}'.format(host=node['host']) + cmd = u"l2_patch_add_del" + args1 = dict( + rx_sw_if_index=sw_iface1, + tx_sw_if_index=sw_iface2, + is_add=1 + ) + args2 = dict( + rx_sw_if_index=sw_iface2, + tx_sw_if_index=sw_iface1, + is_add=1 + ) + err_msg = f"Failed to add L2 patch between two interfaces " \ + f"on host {node['host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg) @@ -340,15 +349,17 @@ class L2Util(object): :type if_2: str :type set_up: bool """ - - cmd = 'brctl addbr {0}'.format(br_name) + cmd = f"brctl addbr {br_name}" exec_cmd_no_error(node, cmd, sudo=True) - cmd = 'brctl addif {0} {1}'.format(br_name, if_1) + + cmd = f"brctl addif {br_name} {if_1}" exec_cmd_no_error(node, cmd, sudo=True) - cmd = 'brctl addif {0} {1}'.format(br_name, if_2) + + cmd = f"brctl addif {br_name} {if_2}" exec_cmd_no_error(node, cmd, sudo=True) + if set_up: - cmd = 'ip link set dev {0} up'.format(br_name) + cmd = f"ip link set dev {br_name} up" exec_cmd_no_error(node, cmd, sudo=True) @staticmethod @@ -366,15 +377,15 @@ class L2Util(object): :type br_name: str :type set_down: bool """ - if set_down: - cmd = 'ip link set dev {0} down'.format(br_name) + cmd = f"ip link set dev {br_name} down" exec_cmd_no_error(node, cmd, sudo=True) - cmd = 'brctl delbr {0}'.format(br_name) + + cmd = f"brctl delbr {br_name}" exec_cmd_no_error(node, cmd, sudo=True) @staticmethod - def vpp_get_bridge_domain_data(node, bd_id=0xffffffff): + def vpp_get_bridge_domain_data(node, bd_id=Constants.BITWISE_NON_ZERO): """Get all bridge domain data from a VPP node. If a domain ID number is provided, return only data for the matching bridge domain. @@ -386,23 +397,27 @@ class L2Util(object): or a single dictionary for the specified bridge domain. 
:rtype: list or dict """ + cmd = u"bridge_domain_dump" + args = dict( + bd_id=int(bd_id) + ) + err_msg = f"Failed to get L2FIB dump on host {node[u'host']}" - cmd = 'bridge_domain_dump' - args = dict(bd_id=int(bd_id)) - err_msg = 'Failed to get L2FIB dump on host {host}'.format( - host=node['host']) with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) - if bd_id == Constants.BITWISE_NON_ZERO: - return details + retval = details if bd_id == Constants.BITWISE_NON_ZERO else None + for bridge_domain in details: - if bridge_domain['bd_id'] == bd_id: - return bridge_domain + if bridge_domain[u"bd_id"] == bd_id: + retval = bridge_domain + + return retval @staticmethod - def l2_vlan_tag_rewrite(node, interface, tag_rewrite_method, - push_dot1q=True, tag1_id=None, tag2_id=None): + def l2_vlan_tag_rewrite( + node, interface, tag_rewrite_method, push_dot1q=True, tag1_id=None, + tag2_id=None): """Rewrite tags in ethernet frame. :param node: Node to rewrite tags. @@ -419,27 +434,29 @@ class L2Util(object): :type tag1_id: int :type tag2_id: int """ - tag1_id = int(tag1_id) if tag1_id else 0 tag2_id = int(tag2_id) if tag2_id else 0 - vtr_oper = getattr(L2VtrOp, 'L2_VTR_{}'.format( - tag_rewrite_method.replace('-', '_').upper())) + vtr_oper = getattr( + L2VtrOp, f"L2_VTR_{tag_rewrite_method.replace(u'-', u'_').upper()}" + ) - if isinstance(interface, basestring): + if isinstance(interface, str): iface_key = Topology.get_interface_by_name(node, interface) sw_if_index = Topology.get_interface_sw_index(node, iface_key) else: sw_if_index = interface - cmd = 'l2_interface_vlan_tag_rewrite' - args = dict(sw_if_index=sw_if_index, - vtr_op=int(vtr_oper), - push_dot1q=int(push_dot1q), - tag1=tag1_id, - tag2=tag2_id) - err_msg = 'Failed to set VLAN TAG rewrite on host {host}'.format( - host=node['host']) + cmd = u"l2_interface_vlan_tag_rewrite" + args = dict( + sw_if_index=sw_if_index, + vtr_op=int(vtr_oper), + push_dot1q=int(push_dot1q), + tag1=tag1_id, + tag2=tag2_id + ) + err_msg = f"Failed to set VLAN TAG rewrite on host {node['host']}" + with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -454,16 +471,17 @@ class L2Util(object): :returns: L2 FIB table. 
:rtype: list """ + cmd = u"l2_fib_table_dump" + args = dict( + bd_id=int(bd_id) + ) + err_msg = f"Failed to get L2FIB dump on host {node['host']}" - cmd = 'l2_fib_table_dump' - args = dict(bd_id=int(bd_id)) - err_msg = 'Failed to get L2FIB dump on host {host}'.format( - host=node['host']) with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) for fib_item in details: - fib_item['mac'] = L2Util.bin_to_mac(fib_item['mac']) + fib_item[u"mac"] = L2Util.bin_to_mac(fib_item[u"mac"]) return details @@ -480,13 +498,12 @@ class L2Util(object): :returns: L2 FIB entry :rtype: dict """ - bd_data = L2Util.vpp_get_bridge_domain_data(node) - bd_id = bd_data[bd_index-1]['bd_id'] + bd_id = bd_data[bd_index-1][u"bd_id"] table = L2Util.get_l2_fib_table(node, bd_id) for entry in table: - if entry['mac'] == mac: + if entry[u"mac"] == mac: return entry return {} diff --git a/resources/libraries/python/LimitUtil.py b/resources/libraries/python/LimitUtil.py index 45aeaff5a0..c34109c2b7 100644 --- a/resources/libraries/python/LimitUtil.py +++ b/resources/libraries/python/LimitUtil.py @@ -15,10 +15,10 @@ from resources.libraries.python.ssh import exec_cmd_no_error -__all__ = ["LimitUtil"] +__all__ = [u"LimitUtil"] -class LimitUtil(object): +class LimitUtil: """Class contains methods for getting or setting process resource limits.""" @staticmethod @@ -30,10 +30,8 @@ class LimitUtil(object): :type node: dict :type pid: int """ - command = 'prlimit --noheadings --pid={pid}'.format(pid=pid) - - message = 'Node {host} failed to run: {command}'.\ - format(host=node['host'], command=command) + command = f"prlimit --noheadings --pid={pid}" + message = f"Node {node[u'host']} failed to run: {command}" exec_cmd_no_error(node, command, sudo=True, message=message) @@ -50,11 +48,7 @@ class LimitUtil(object): :type resource: str :type limit: str """ - command = 'prlimit --{resource}={limit} --pid={pid}'.format( - resource=resource, limit=limit, pid=pid) - - message = 'Node {host} failed to run: {command}'.\ - format(host=node['host'], command=command) + command = f"prlimit --{resource}={limit} --pid={pid}" + message = f"Node {node[u'host']} failed to run: {command}" exec_cmd_no_error(node, command, sudo=True, message=message) - diff --git a/resources/libraries/python/LispSetup.py b/resources/libraries/python/LispSetup.py index 5fee5db87d..666b6e636d 100644 --- a/resources/libraries/python/LispSetup.py +++ b/resources/libraries/python/LispSetup.py @@ -15,15 +15,13 @@ from ipaddress import ip_address -from resources.libraries.python.topology import NodeType -from resources.libraries.python.PapiExecutor import PapiSocketExecutor from resources.libraries.python.L2Util import L2Util +from resources.libraries.python.PapiExecutor import PapiSocketExecutor +from resources.libraries.python.topology import NodeType -class LispStatus(object): - """Class for lisp API.""" - def __init__(self): - pass +class LispStatus: + """Class for lisp API.""" @staticmethod def vpp_lisp_enable_disable(node, state): @@ -34,25 +32,21 @@ class LispStatus(object): :type node: dict :type state: str """ + args = dict(is_en=0 if state == u"disable" else 1) - args = dict(is_en=0 if state == 'disable' else 1) - - cmd = 'lisp_enable_disable' - err_msg = "Failed to set LISP status on host {host}".format( - host=node['host']) + cmd = u"lisp_enable_disable" + err_msg = f"Failed to set LISP status on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) -class 
LispRemoteMapping(object): +class LispRemoteMapping: """Class for lisp remote mapping API.""" - def __init__(self): - pass - @staticmethod - def vpp_add_lisp_remote_mapping(node, vni, deid, deid_prefix, seid, - seid_prefix, rloc, is_mac=False): + def vpp_add_lisp_remote_mapping( + node, vni, deid, deid_prefix, seid, seid_prefix, rloc, + is_mac=False): """Add lisp remote mapping on the VPP node in topology. :param node: VPP node. @@ -72,11 +66,10 @@ class LispRemoteMapping(object): :type rloc: str :type is_mac: bool """ - if not is_mac: - eid_type = 0 if ip_address(unicode(deid)).version == 4 else 1 - eid_packed = ip_address(unicode(deid)).packed - seid_packed = ip_address(unicode(seid)).packed + eid_type = 0 if ip_address(deid).version == 4 else 1 + eid_packed = ip_address(deid).packed + seid_packed = ip_address(seid).packed eid_len = deid_prefix seid_len = seid_prefix else: @@ -86,30 +79,35 @@ class LispRemoteMapping(object): eid_len = 0 seid_len = 0 - rlocs = [dict(is_ip4=1 if ip_address(unicode(rloc)).version == 4 else 0, - addr=ip_address(unicode(rloc)).packed)] - - args = dict(is_add=1, - is_src_dst=1, - vni=int(vni), - eid_type=eid_type, - eid=eid_packed, - eid_len=eid_len, - seid=seid_packed, - seid_len=seid_len, - rloc_num=1, - rlocs=rlocs) - - cmd = 'lisp_add_del_remote_mapping' - err_msg = "Failed to add remote mapping on host {host}".format( - host=node['host']) + rlocs = [ + dict( + is_ip4=1 if ip_address(rloc).version == 4 else 0, + addr=ip_address(rloc).packed + ) + ] + + args = dict( + is_add=1, + is_src_dst=1, + vni=int(vni), + eid_type=eid_type, + eid=eid_packed, + eid_len=eid_len, + seid=seid_packed, + seid_len=seid_len, + rloc_num=1, + rlocs=rlocs + ) + + cmd = u"lisp_add_del_remote_mapping" + err_msg = f"Failed to add remote mapping on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod - def vpp_del_lisp_remote_mapping(node, vni, deid, deid_prefix, seid, - seid_prefix, rloc): + def vpp_del_lisp_remote_mapping( + node, vni, deid, deid_prefix, seid, seid_prefix, rloc): """Delete lisp remote mapping on the VPP node in topology. :param node: VPP node. 
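# The eid packing convention used by the remote-mapping and adjacency
# methods in this file: eid_type 0 = IPv4, 1 = IPv6, 2 = MAC, with the
# payload packed accordingly. A standalone sketch of that triage; the
# helper name and example values are illustrative only.
from ipaddress import ip_address

def eid_type_and_payload(eid, is_mac=False):
    if is_mac:
        # MAC eids carry no prefix length, matching the else branches above.
        return 2, bytes.fromhex(eid.replace(u":", u""))
    addr = ip_address(eid)
    return (0 if addr.version == 4 else 1), addr.packed

assert eid_type_and_payload(u"192.168.0.1") == (0, b"\xc0\xa8\x00\x01")
assert eid_type_and_payload(u"2001:db8::1")[0] == 1
assert eid_type_and_payload(u"aa:bb:cc:dd:ee:ff", is_mac=True)[0] == 2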
@@ -127,14 +125,13 @@ class LispRemoteMapping(object): :type seid_prefix: int :type rloc: str """ - # used only with IPs is_mac = False if not is_mac: - eid_type = 0 if ip_address(unicode(deid)).version == 4 else 1 - eid_packed = ip_address(unicode(deid)).packed - seid_packed = ip_address(unicode(seid)).packed + eid_type = 0 if ip_address(deid).version == 4 else 1 + eid_packed = ip_address(deid).packed + seid_packed = ip_address(seid).packed eid_len = deid_prefix seid_len = seid_prefix else: @@ -144,36 +141,38 @@ class LispRemoteMapping(object): eid_len = 0 seid_len = 0 - rlocs = [dict(is_ip4=1 if ip_address(unicode(rloc)).version == 4 else 0, - addr=ip_address(unicode(rloc)).packed)] - - args = dict(is_add=0, - is_src_dst=1, - vni=int(vni), - eid_type=eid_type, - eid=eid_packed, - eid_len=eid_len, - seid=seid_packed, - seid_len=seid_len, - rloc_num=1, - rlocs=rlocs) - - cmd = 'lisp_add_del_remote_mapping' - err_msg = "Failed to delete remote mapping on host {host}".format( - host=node['host']) + rlocs = [ + dict( + is_ip4=1 if ip_address(str(rloc)).version == 4 else 0, + addr=ip_address(str(rloc)).packed + ) + ] + + args = dict( + is_add=0, + is_src_dst=1, + vni=int(vni), + eid_type=eid_type, + eid=eid_packed, + eid_len=eid_len, + seid=seid_packed, + seid_len=seid_len, + rloc_num=1, + rlocs=rlocs + ) + + cmd = u"lisp_add_del_remote_mapping" + err_msg = f"Failed to delete remote mapping on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) -class LispAdjacency(object): +class LispAdjacency: """Class for lisp adjacency API.""" - def __init__(self): - pass - @staticmethod - def vpp_add_lisp_adjacency(node, vni, deid, deid_prefix, seid, - seid_prefix, is_mac=False): + def vpp_add_lisp_adjacency( + node, vni, deid, deid_prefix, seid, seid_prefix, is_mac=False): """Add lisp adjacency on the VPP node in topology. :param node: VPP node. @@ -191,11 +190,10 @@ class LispAdjacency(object): :type seid_prefix: int :type is_mac: bool """ - if not is_mac: - eid_type = 0 if ip_address(unicode(deid)).version == 4 else 1 - reid = ip_address(unicode(deid)).packed - leid = ip_address(unicode(seid)).packed + eid_type = 0 if ip_address(deid).version == 4 else 1 + reid = ip_address(deid).packed + leid = ip_address(seid).packed reid_len = deid_prefix leid_len = seid_prefix else: @@ -205,24 +203,25 @@ class LispAdjacency(object): reid_len = 0 leid_len = 0 - args = dict(is_add=1, - vni=int(vni), - eid_type=eid_type, - reid=reid, - reid_len=reid_len, - leid=leid, - leid_len=leid_len) + args = dict( + is_add=1, + vni=int(vni), + eid_type=eid_type, + reid=reid, + reid_len=reid_len, + leid=leid, + leid_len=leid_len + ) - cmd = 'lisp_add_del_adjacency' - err_msg = "Failed to add lisp adjacency on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_adjacency" + err_msg = f"Failed to add lisp adjacency on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod - def vpp_del_lisp_adjacency(node, vni, deid, deid_prefix, seid, - seid_prefix): + def vpp_del_lisp_adjacency( + node, vni, deid, deid_prefix, seid, seid_prefix): """Delete lisp adjacency on the VPP node in topology. :param node: VPP node. 
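# The PAPI calling pattern shared by the methods in this file, shown once
# in isolation: .add() queues a request, then .get_reply() executes it and
# raises with err_msg on failure. The node dict below is a hypothetical
# stand-in; real nodes come from the parsed topology file.
from resources.libraries.python.PapiExecutor import PapiSocketExecutor

node = {u"type": u"DUT", u"host": u"10.0.0.1"}  # hypothetical example node
cmd = u"lisp_enable_disable"
args = dict(is_en=1)
err_msg = f"Failed to set LISP status on host {node[u'host']}"

with PapiSocketExecutor(node) as papi_exec:
    papi_exec.add(cmd, **args).get_reply(err_msg)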
@@ -238,14 +237,13 @@ class LispAdjacency(object): :type seid: str :type seid_prefix: int """ - # used only with IPs is_mac = False if not is_mac: - eid_type = 0 if ip_address(unicode(deid)).version == 4 else 1 - reid = ip_address(unicode(deid)).packed - leid = ip_address(unicode(seid)).packed + eid_type = 0 if ip_address(deid).version == 4 else 1 + reid = ip_address(deid).packed + leid = ip_address(seid).packed reid_len = deid_prefix leid_len = seid_prefix else: @@ -255,27 +253,25 @@ reid_len = 0 leid_len = 0 - args = dict(is_add=0, - vni=int(vni), - eid_type=eid_type, - reid=reid, - reid_len=reid_len, - leid=leid, - leid_len=leid_len) + args = dict( + is_add=0, + vni=int(vni), + eid_type=eid_type, + reid=reid, + reid_len=reid_len, + leid=leid, + leid_len=leid_len + ) - cmd = 'lisp_add_del_adjacency' - err_msg = "Failed to delete lisp adjacency on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_adjacency" + err_msg = f"Failed to delete lisp adjacency on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) -class LispGpeStatus(object): +class LispGpeStatus: """Class for LISP GPE status manipulation.""" - def __init__(self): - pass - @staticmethod def vpp_lisp_gpe_enable_disable(node, state): """Change the state of LISP GPE - enable or disable. @@ -285,45 +281,35 @@ :type node: dict :type state: str """ + args = dict(is_en=0 if state == u"disable" else 1) - args = dict(is_en=0 if state == 'disable' else 1) - - cmd = 'gpe_enable_disable' - err_msg = "Failed to set LISP GPE status on host {host}".format( - host=node['host']) + cmd = u"gpe_enable_disable" + err_msg = f"Failed to set LISP GPE status on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) -class LispGpeForwardEntry(object): +class LispGpeForwardEntry: """The functionality needed for these methods is not implemented in VPP (VAT). Bug https://jira.fd.io/browse/VPP-334 was open to cover this issue. TODO: Implement when VPP-334 is fixed. """ - def __init__(self): - pass - @staticmethod def add_lisp_gpe_forward_entry(node, *args): """Not implemented""" # TODO: Implement when VPP-334 is fixed. - pass @staticmethod def del_lisp_gpe_forward_entry(node, *args): """Not implemented""" # TODO: Implement when VPP-334 is fixed. - pass -class LispMapResolver(object): +class LispMapResolver: """Class for Lisp map resolver API.""" - def __init__(self): - pass - @staticmethod def vpp_add_map_resolver(node, map_resolver_ip): """Set lisp map resolver on the VPP node in topology.
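# The class-declaration cleanup applied above (LispGpeStatus and the other
# helper classes): in Python 3 a plain namespace of @staticmethods needs
# neither an (object) base nor an empty __init__. Minimal sketch of the
# idiom, reusing the is_en flag computation from vpp_lisp_gpe_enable_disable;
# the class name here is illustrative.
class StaticHelper:
    """Static helpers only; no instance state, so no __init__."""

    @staticmethod
    def enable_flag(state):
        return 0 if state == u"disable" else 1

assert StaticHelper.enable_flag(u"disable") == 0
assert StaticHelper.enable_flag(u"enable") == 1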
@@ -333,15 +319,14 @@ class LispMapResolver(object): :type node: dict :type map_resolver_ip: str """ + args = dict( + is_add=1, + is_ipv6=0 if ip_address(map_resolver_ip).version == 4 else 1, + ip_address=ip_address(map_resolver_ip).packed + ) - args = dict(is_add=1, - is_ipv6=0 if ip_address(unicode(map_resolver_ip)).version \ - == 4 else 1, - ip_address=ip_address(unicode(map_resolver_ip)).packed) - - cmd = 'lisp_add_del_map_resolver' - err_msg = "Failed to add map resolver on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_map_resolver" + err_msg = f"Failed to add map resolver on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -355,28 +340,24 @@ :type node: dict :type map_resolver_ip: str """ + args = dict( + is_add=0, + is_ipv6=0 if ip_address(map_resolver_ip).version == 4 else 1, + ip_address=ip_address(map_resolver_ip).packed + ) - args = dict(is_add=0, - is_ipv6=0 if ip_address(unicode(map_resolver_ip)).version \ - == 4 else 1, - ip_address=ip_address(unicode(map_resolver_ip)).packed) - - cmd = 'lisp_add_del_map_resolver' - err_msg = "Failed to delete map resolver on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_map_resolver" + err_msg = f"Failed to delete map resolver on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) -class LispLocalEid(object): +class LispLocalEid: """Class for Lisp local eid API.""" - def __init__(self): - pass - @staticmethod - def vpp_add_lisp_local_eid(node, locator_set_name, vni, eid, - prefix_len=None): + def vpp_add_lisp_local_eid( + node, locator_set_name, vni, eid, prefix_len=None): """Set lisp eid address on the VPP node in topology. :param node: VPP node. @@ -390,31 +371,31 @@ :type eid: str :type prefix_len: int """ - if prefix_len: - eid_type = 0 if ip_address(unicode(eid)).version == 4 else 1 - eid_packed = ip_address(unicode(eid)).packed + eid_type = 0 if ip_address(eid).version == 4 else 1 + eid_packed = ip_address(eid).packed else: eid_type = 2 eid_packed = L2Util.mac_to_bin(eid) - args = dict(is_add=1, - eid_type=eid_type, - eid=eid_packed, - prefix_len=prefix_len, - locator_set_name=locator_set_name, - vni=int(vni)) + args = dict( + is_add=1, + eid_type=eid_type, + eid=eid_packed, + prefix_len=prefix_len, + locator_set_name=locator_set_name, + vni=int(vni) + ) - cmd = 'lisp_add_del_local_eid' - err_msg = "Failed to add local eid on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_local_eid" + err_msg = f"Failed to add local eid on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @staticmethod - def vpp_del_lisp_local_eid(node, locator_set_name, vni, eid, - prefix_len=None): + def vpp_del_lisp_local_eid( + node, locator_set_name, vni, eid, prefix_len=None): """Set lisp eid address on the VPP node in topology.
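# Argument construction for lisp_add_del_map_resolver once the Python 2
# unicode() wrappers are gone: ip_address() accepts str directly. The
# resolver address below is an arbitrary example value.
from ipaddress import ip_address

map_resolver_ip = u"6.0.3.0"
args = dict(
    is_add=1,
    is_ipv6=0 if ip_address(map_resolver_ip).version == 4 else 1,
    ip_address=ip_address(map_resolver_ip).packed
)
assert args[u"is_ipv6"] == 0
assert args[u"ip_address"] == b"\x06\x00\x03\x00"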
@@ -428,34 +409,31 @@ class LispLocalEid(object): :type eid: str :type prefix_len: int """ - if prefix_len: - eid_type = 0 if ip_address(unicode(eid)).version == 4 else 1 - eid_packed = ip_address(unicode(eid)).packed + eid_type = 0 if ip_address(eid).version == 4 else 1 + eid_packed = ip_address(eid).packed else: eid_type = 2 eid_packed = L2Util.mac_to_bin(eid) - args = dict(is_add=0, - eid_type=eid_type, - eid=eid_packed, - prefix_len=prefix_len, - locator_set_name=locator_set_name, - vni=int(vni)) + args = dict( + is_add=0, + eid_type=eid_type, + eid=eid_packed, + prefix_len=prefix_len, + locator_set_name=locator_set_name, + vni=int(vni) + ) - cmd = 'lisp_add_del_local_eid' - err_msg = "Failed to delete local eid on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_local_eid" + err_msg = f"Failed to delete local eid on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) -class LispLocator(object): +class LispLocator: """Class for the Lisp Locator API.""" - def __init__(self): - pass - @staticmethod def vpp_add_lisp_locator(node, locator_name, sw_if_index, priority, weight): """Set lisp locator on the VPP node in topology. @@ -472,15 +450,16 @@ class LispLocator(object): :type weight: int """ - args = dict(is_add=1, - locator_set_name=locator_name, - sw_if_index=sw_if_index, - priority=priority, - weight=weight) + args = dict( + is_add=1, + locator_set_name=locator_name, + sw_if_index=sw_if_index, + priority=priority, + weight=weight + ) - cmd = 'lisp_add_del_locator' - err_msg = "Failed to add locator on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_locator" + err_msg = f"Failed to add locator on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -500,26 +479,23 @@ class LispLocator(object): :type priority: int :type weight: int """ + args = dict( + is_add=0, + locator_set_name=locator_name, + sw_if_index=sw_if_index, + priority=priority, + weight=weight + ) - args = dict(is_add=0, - locator_set_name=locator_name, - sw_if_index=sw_if_index, - priority=priority, - weight=weight) - - cmd = 'lisp_add_del_locator' - err_msg = "Failed to delete locator on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_locator" + err_msg = f"Failed to delete locator on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) -class LispLocatorSet(object): +class LispLocatorSet: """Class for Lisp Locator Set API.""" - def __init__(self): - pass - @staticmethod def vpp_add_lisp_locator_set(node, name): """Add lisp locator_set on VPP. 
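# Shape of the locator_set_list structure consumed by
# LispSetup.vpp_set_lisp_locator_set() later in this file; the keys mirror
# the .get() calls there, while the values are illustrative only.
locator_set_list = [
    {
        u"locator-set": u"ls0",
        u"locator": [
            {u"locator-index": 1, u"priority": 1, u"weight": 1}
        ]
    }
]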
@@ -529,15 +505,15 @@ class LispLocatorSet(object): :type node: dict :type name: str """ + args = dict( + is_add=1, + locator_set_name=name, + locator_num=0, + locators=[] + ) - args = dict(is_add=1, - locator_set_name=name, - locator_num=0, - locators=[]) - - cmd = 'lisp_add_del_locator_set' - err_msg = "Failed to add locator set on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_locator_set" + err_msg = f"Failed to add locator set on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -551,25 +527,22 @@ class LispLocatorSet(object): :type node: dict :type name: str """ + args = dict( + is_add=0, + locator_set_name=name, + locator_num=0, + locators=[] + ) - args = dict(is_add=0, - locator_set_name=name, - locator_num=0, - locators=[]) - - cmd = 'lisp_add_del_locator_set' - err_msg = "Failed to delete locator set on host {host}".format( - host=node['host']) + cmd = u"lisp_add_del_locator_set" + err_msg = f"Failed to delete locator set on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) -class LispSetup(object): +class LispSetup: """Lisp setup in topology.""" - def __init__(self): - pass - @staticmethod def vpp_set_lisp_locator_set(node, locator_set_list): """Set lisp locator_sets on VPP node in topology. @@ -580,25 +553,22 @@ class LispSetup(object): :type locator_set_list: list """ - if node['type'] != NodeType.DUT: - raise ValueError('Node is not DUT') + if node[u"type"] != NodeType.DUT: + raise ValueError(u"Node is not DUT") lisp_locator = LispLocator() lisp_locator_set = LispLocatorSet() for locator_set in locator_set_list: - locator_set_name = locator_set.get('locator-set') - locator_list = locator_set.get('locator') - lisp_locator_set.vpp_add_lisp_locator_set(node, - locator_set_name) + locator_set_name = locator_set.get(u"locator-set") + locator_list = locator_set.get(u"locator") + lisp_locator_set.vpp_add_lisp_locator_set(node, locator_set_name) for locator in locator_list: - sw_if_index = locator.get('locator-index') - priority = locator.get('priority') - weight = locator.get('weight') - lisp_locator.vpp_add_lisp_locator(node, - locator_set_name, - sw_if_index, - priority, - weight) + sw_if_index = locator.get(u"locator-index") + priority = locator.get(u"priority") + weight = locator.get(u"weight") + lisp_locator.vpp_add_lisp_locator( + node, locator_set_name, sw_if_index, priority, weight + ) @staticmethod def vpp_unset_lisp_locator_set(node, locator_set_list): @@ -609,27 +579,23 @@ class LispSetup(object): :type node: dict :type locator_set_list: list """ - - if node['type'] != NodeType.DUT: - raise ValueError('Lisp locator set, node is not DUT') + if node[u"type"] != NodeType.DUT: + raise ValueError(u"Lisp locator set, node is not DUT") lisp_locator = LispLocator() lisp_locator_set = LispLocatorSet() for locator_set in locator_set_list: - locator_set_name = locator_set.get('locator-set') - locator_list = locator_set.get('locator') + locator_set_name = locator_set.get(u"locator-set") + locator_list = locator_set.get(u"locator") for locator in locator_list: - sw_if_index = locator.get('locator-index') - priority = locator.get('priority') - weight = locator.get('weight') - lisp_locator.vpp_del_lisp_locator(node, - locator_set_name, - sw_if_index, - priority, - weight) - - lisp_locator_set.vpp_del_lisp_locator_set(node, - locator_set_name) + sw_if_index = locator.get(u"locator-index") + priority = locator.get(u"priority") + weight = 
locator.get(u"weight") + lisp_locator.vpp_del_lisp_locator( + node, locator_set_name, sw_if_index, priority, weight + ) + + lisp_locator_set.vpp_del_lisp_locator_set(node, locator_set_name) @staticmethod def vpp_set_lisp_eid_table(node, eid_table): @@ -640,23 +606,20 @@ class LispSetup(object): :type node: dict :type eid_table: dict """ - - if node['type'] != NodeType.DUT: - raise ValueError('Node is not DUT') + if node[u"type"] != NodeType.DUT: + raise ValueError(u"Node is not DUT") lisp_locator_set = LispLocatorSet() lisp_eid = LispLocalEid() for eid in eid_table: - vni = eid.get('vni') - eid_address = eid.get('eid') - eid_prefix_len = eid.get('eid-prefix-len') - locator_set_name = eid.get('locator-set') + vni = eid.get(u"vni") + eid_address = eid.get(u"eid") + eid_prefix_len = eid.get(u"eid-prefix-len") + locator_set_name = eid.get(u"locator-set") lisp_locator_set.vpp_add_lisp_locator_set(node, locator_set_name) - lisp_eid.vpp_add_lisp_local_eid(node, - locator_set_name, - vni, - eid_address, - eid_prefix_len) + lisp_eid.vpp_add_lisp_local_eid( + node, locator_set_name, vni, eid_address, eid_prefix_len + ) @staticmethod def vpp_unset_lisp_eid_table(node, eid_table): @@ -667,26 +630,23 @@ class LispSetup(object): :type node: dict :type eid_table: dict """ - - if node['type'] != NodeType.DUT: - raise ValueError('Node is not DUT') + if node[u"type"] != NodeType.DUT: + raise ValueError(u"Node is not DUT") locator_set_list = [] lisp_locator_set = LispLocatorSet() lisp_eid = LispLocalEid() for eid in eid_table: - vni = eid.get('vni') - eid_address = eid.get('eid') - eid_prefix_len = eid.get('eid-prefix-len') - locator_set_name = eid.get('locator-set') + vni = eid.get(u"vni") + eid_address = eid.get(u"eid") + eid_prefix_len = eid.get(u"eid-prefix-len") + locator_set_name = eid.get(u"locator-set") if locator_set_name not in locator_set_list: locator_set_list.append(locator_set_name) - lisp_eid.vpp_del_lisp_local_eid(node, - locator_set_name, - vni, - eid_address, - eid_prefix_len) + lisp_eid.vpp_del_lisp_local_eid( + node, locator_set_name, vni, eid_address, eid_prefix_len + ) for locator_set_name in locator_set_list: lisp_locator_set.vpp_del_lisp_locator_set(node, locator_set_name) @@ -700,10 +660,9 @@ class LispSetup(object): :type node: dict :type map_resolver: dict """ - lisp_map_res = LispMapResolver() for map_ip in map_resolver: - lisp_map_res.vpp_add_map_resolver(node, map_ip.get('map resolver')) + lisp_map_res.vpp_add_map_resolver(node, map_ip.get(u"map resolver")) @staticmethod def vpp_unset_lisp_map_resolver(node, map_resolver): @@ -714,12 +673,11 @@ class LispSetup(object): :type node: dict :type map_resolver: dict """ - lisp_map_res = LispMapResolver() for map_ip in map_resolver: - lisp_map_res.vpp_del_map_resolver(node, map_ip.get('map resolver')) + lisp_map_res.vpp_del_map_resolver(node, map_ip.get(u"map resolver")) -class LispEidTableMap(object): +class LispEidTableMap: """ Class for EID table map. 
""" @@ -738,7 +696,6 @@ class LispEidTableMap(object): :type bd_id: int :type vrf: int """ - # adding default mapping vni=0, vrf=0 needs to be skipped skip = False @@ -752,14 +709,15 @@ class LispEidTableMap(object): if (int(vrf) == 0) and (int(vni) == 0): skip = True - args = dict(is_add=1, - vni=int(vni), - dp_table=int(dp_table), - is_l2=is_l2) + args = dict( + is_add=1, + vni=int(vni), + dp_table=int(dp_table), + is_l2=is_l2 + ) - cmd = 'lisp_eid_table_add_del_map' - err_msg = "Failed to add eid table map on host {host}".format( - host=node['host']) + cmd = u"lisp_eid_table_add_del_map" + err_msg = f"Failed to add eid table map on host {node[u'host']}" if not skip: with PapiSocketExecutor(node) as papi_exec: diff --git a/resources/libraries/python/LispUtil.py b/resources/libraries/python/LispUtil.py index 114cd72202..a77f4ad854 100644 --- a/resources/libraries/python/LispUtil.py +++ b/resources/libraries/python/LispUtil.py @@ -13,19 +13,16 @@ """Lisp utilities library.""" -from robot.api import logger from ipaddress import IPv4Address, IPv6Address +from robot.api import logger -from resources.libraries.python.topology import Topology -from resources.libraries.python.PapiExecutor import PapiSocketExecutor from resources.libraries.python.L2Util import L2Util +from resources.libraries.python.PapiExecutor import PapiSocketExecutor +from resources.libraries.python.topology import Topology -class LispUtil(object): +class LispUtil: """Implements keywords for Lisp tests.""" - def __init__(self): - pass - @staticmethod def vpp_show_lisp_state(node): """Get lisp state from VPP node. @@ -35,17 +32,17 @@ class LispUtil(object): :returns: Lisp gpe state. :rtype: dict """ - cmd = 'show_lisp_status' - err_msg = "Failed to get LISP status on host {host}".format( - host=node['host']) + cmd = u"show_lisp_status" + err_msg = f"Failed to get LISP status on host {node['host']}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply(err_msg) data = dict() - data["feature_status"] = "enabled" if reply["feature_status"] else \ - "disabled" - data["gpe_status"] = "enabled" if reply["gpe_status"] else "disabled" + data[u"feature_status"] = u"enabled" if reply[u"feature_status"] \ + else u"disabled" + data[u"gpe_status"] = u"enabled" if reply[u"gpe_status"] \ + else u"disabled" return data @staticmethod @@ -60,24 +57,27 @@ class LispUtil(object): :returns: Lisp locator_set data as python list. 
:rtype: list """ + ifilter = {u"_": 0, u"_local": 1, u"_remote": 2} + args = dict( + filter=ifilter[u"_" + items_filter] + ) - ifilter = {"_": 0, "_local": 1, "_remote": 2} - args = dict(filter=ifilter["_" + items_filter]) - - cmd = 'lisp_locator_set_dump' - err_msg = "Failed to get LISP locator set on host {host}".format( - host=node['host']) + cmd = u"lisp_locator_set_dump" + err_msg = f"Failed to get LISP locator set on host {node['host']}" try: with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) - data = [] + data = list() for locator in details: - data.append({"ls_name": locator["ls_name"].rstrip('\x00'), - "ls_index": locator["ls_index"]}) + data.append( + {u"ls_name": locator[u"ls_name"].rstrip(b'\0'), + u"ls_index": locator[u"ls_index"]} + ) return data - except (ValueError, LookupError): - return [] + except (ValueError, LookupError) as err: + logger.warn(f"Failed to get LISP locator set {err}") + return list() @staticmethod def vpp_show_lisp_eid_table(node): @@ -88,31 +88,34 @@ class LispUtil(object): :returns: Lisp eid table as python list. :rtype: list """ - - cmd = 'lisp_eid_table_dump' - err_msg = "Failed to get LISP eid table on host {host}".format( - host=node['host']) + cmd = u"lisp_eid_table_dump" + err_msg = f"Failed to get LISP eid table on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd).get_details(err_msg) - data = [] + data = list() for eid_details in details: - eid = 'Bad eid type' - if eid_details["eid_type"] == 0: - prefix = str(eid_details["eid_prefix_len"]) - eid = str(IPv4Address(eid_details["eid"][0:4])) + "/" + prefix - elif eid_details["eid_type"] == 1: - prefix = str(eid_details["eid_prefix_len"]) - eid = str(IPv6Address(eid_details["eid"])) + "/" + prefix - elif eid_details["eid_type"] == 2: - eid = str(L2Util.bin_to_mac(eid_details["eid"][0:6])) - data.append({"action": eid_details["action"], - "is_local": eid_details["is_local"], - "eid": eid, - "vni": eid_details["vni"], - "ttl": eid_details["ttl"], - "authoritative": eid_details["authoritative"]}) + eid = u"Bad eid type" + if eid_details[u"eid_type"] == 0: + prefix = str(eid_details[u"eid_prefix_len"]) + eid = str(IPv4Address(eid_details[u"eid"][0:4])) + u"/" + \ + prefix + elif eid_details[u"eid_type"] == 1: + prefix = str(eid_details[u"eid_prefix_len"]) + eid = str(IPv6Address(eid_details[u"eid"])) + u"/" + prefix + elif eid_details[u"eid_type"] == 2: + eid = str(L2Util.bin_to_mac(eid_details[u"eid"][0:6])) + data.append( + { + u"action": eid_details[u"action"], + u"is_local": eid_details[u"is_local"], + u"eid": eid, + u"vni": eid_details[u"vni"], + u"ttl": eid_details[u"ttl"], + u"authoritative": eid_details[u"authoritative"] + } + ) return data @staticmethod @@ -124,22 +127,20 @@ class LispUtil(object): :returns: Lisp map resolver as python list. 
:rtype: list """ - - cmd = 'lisp_map_resolver_dump' - err_msg = "Failed to get LISP map resolver on host {host}".format( - host=node['host']) + cmd = u"lisp_map_resolver_dump" + err_msg = f"Failed to get LISP map resolver on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd).get_details(err_msg) - data = [] + data = list() for resolver in details: - address = 'Bad is_ipv6 flag' - if resolver["is_ipv6"] == 0: - address = str(IPv4Address(resolver["ip_address"][0:4])) - elif resolver["is_ipv6"] == 1: - address = str(IPv6Address(resolver["ip_address"])) - data.append({"map resolver": address}) + address = u"Bad is_ipv6 flag" + if resolver[u"is_ipv6"] == 0: + address = str(IPv4Address(resolver[u"ip_address"][0:4])) + elif resolver[u"is_ipv6"] == 1: + address = str(IPv6Address(resolver[u"ip_address"])) + data.append({u"map resolver": address}) return data @staticmethod @@ -151,16 +152,15 @@ class LispUtil(object): :returns: LISP Map Register as python dict. :rtype: dict """ - - cmd = 'show_lisp_map_register_state' - err_msg = "Failed to get LISP map register state on host {host}".format( - host=node['host']) + cmd = u"show_lisp_map_register_state" + err_msg = f"Failed to get LISP map register state on host " \ + f"{node[u'host']}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply(err_msg) data = dict() - data["state"] = "enabled" if reply["is_enabled"] else "disabled" + data[u"state"] = u"enabled" if reply[u"is_enabled"] else u"disabled" logger.info(data) return data @@ -173,16 +173,15 @@ class LispUtil(object): :returns: LISP Map Request mode as python dict. :rtype: dict """ - - cmd = 'show_lisp_map_request_mode' - err_msg = "Failed to get LISP map request mode on host {host}".format( - host=node['host']) + cmd = u"show_lisp_map_request_mode" + err_msg = f"Failed to get LISP map request mode on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply(err_msg) data = dict() - data["map_request_mode"] = "src-dst" if reply["mode"] else "dst-only" + data[u"map_request_mode"] = u"src-dst" if reply[u"mode"] \ + else u"dst-only" logger.info(data) return data @@ -195,22 +194,20 @@ class LispUtil(object): :returns: LISP Map Server as python list. :rtype: list """ - - cmd = 'lisp_map_server_dump' - err_msg = "Failed to get LISP map server on host {host}".format( - host=node['host']) + cmd = u"lisp_map_server_dump" + err_msg = f"Failed to get LISP map server on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd).get_details(err_msg) - data = [] + data = list() for server in details: - address = 'Bad is_ipv6 flag' - if server["is_ipv6"] == 0: - address = str(IPv4Address(server["ip_address"][0:4])) - elif server["is_ipv6"] == 1: - address = str(IPv6Address(server["ip_address"])) - data.append({"map-server": address}) + address = u"Bad is_ipv6 flag" + if server[u"is_ipv6"] == 0: + address = str(IPv4Address(server[u"ip_address"][0:4])) + elif server[u"is_ipv6"] == 1: + address = str(IPv6Address(server[u"ip_address"])) + data.append({u"map-server": address}) logger.info(data) return data @@ -223,24 +220,21 @@ class LispUtil(object): :returns: LISP PETR configuration as python dict. 
:rtype: dict """ - -# Note: VAT is returning ipv6 address instead of ipv4 - - cmd = 'show_lisp_use_petr' - err_msg = "Failed to get LISP petr config on host {host}".format( - host=node['host']) + # Note: VAT is returning ipv6 address instead of ipv4 + cmd = u"show_lisp_use_petr" + err_msg = f"Failed to get LISP petr config on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply(err_msg) data = dict() - data["status"] = "enabled" if reply["status"] else "disabled" - address = 'Bad is_ip4 flag' - if reply["is_ip4"] == 0: - address = str(IPv6Address(reply["address"])) - elif reply["is_ip4"] == 1: - address = str(IPv4Address(reply["address"][0:4])) - data["address"] = address + data[u"status"] = u"enabled" if reply[u"status"] else u"disabled" + address = u"Bad is_ip4 flag" + if reply[u"is_ip4"] == 0: + address = str(IPv6Address(reply[u"address"])) + elif reply[u"is_ip4"] == 1: + address = str(IPv4Address(reply[u"address"][0:4])) + data[u"address"] = address logger.info(data) return data @@ -253,16 +247,14 @@ class LispUtil(object): :returns: LISP RLOC configuration as python dict. :rtype: dict """ - - cmd = 'show_lisp_rloc_probe_state' - err_msg = "Failed to get LISP rloc config on host {host}".format( - host=node['host']) + cmd = u"show_lisp_rloc_probe_state" + err_msg = f"Failed to get LISP rloc config on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply(err_msg) data = dict() - data["state"] = "enabled" if reply["is_enabled"] else "disabled" + data[u"state"] = u"enabled" if reply[u"is_enabled"] else u"disabled" logger.info(data) return data @@ -275,16 +267,14 @@ class LispUtil(object): :returns: Lisp PITR config data. :rtype: dict """ - - cmd = 'show_lisp_pitr' - err_msg = "Failed to get LISP pitr on host {host}".format( - host=node['host']) + cmd = u"show_lisp_pitr" + err_msg = f"Failed to get LISP pitr on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply(err_msg) data = dict() - data["status"] = "enabled" if reply["status"] else "disabled" + data[u"status"] = u"enabled" if reply[u"status"] else u"disabled" return data @staticmethod @@ -296,19 +286,20 @@ class LispUtil(object): :type lisp_val1: list :type lisp_val2: list """ - len1 = len(lisp_val1) len2 = len(lisp_val2) + if len1 != len2: - raise RuntimeError('Values are not same. ' - 'Value 1 {} \n' - 'Value 2 {}.'.format(lisp_val1, - lisp_val2)) + raise RuntimeError( + f"Values are not same. Value 1 {lisp_val1} \n" + f"Value 2 {lisp_val2}." + ) for tmp in lisp_val1: if tmp not in lisp_val2: - raise RuntimeError('Value {} not found in vpp:\n' - '{}'.format(tmp, lisp_val2)) + raise RuntimeError( + f"Value {tmp} not found in vpp:\n{lisp_val2}" + ) def lisp_locator_s_should_be_equal(self, locator_set1, locator_set2): """Fail if the lisp values are not equal. @@ -318,11 +309,12 @@ class LispUtil(object): :type locator_set1: list :type locator_set2: list """ + locator_set_list = list() - locator_set_list = [] for item in locator_set1: if item not in locator_set_list: locator_set_list.append(item) + self.lisp_should_be_equal(locator_set_list, locator_set2) @staticmethod @@ -338,16 +330,15 @@ class LispUtil(object): from VAT. 
:rtype: tuple """ - topo = Topology() + locator_set_list = list() + locator_set_list_vat = list() - locator_set_list = [] - locator_set_list_vat = [] i = 0 for num in range(0, int(locator_set_number)): - locator_list = [] - for interface in node['interfaces'].values(): - link = interface.get('link') + locator_list = list() + for interface in list(node[u"interfaces"].values()): + link = interface.get(u"link") i += 1 if link is None: continue @@ -355,18 +346,24 @@ class LispUtil(object): if_name = topo.get_interface_by_link_name(node, link) sw_if_index = topo.get_interface_sw_index(node, if_name) if if_name is not None: - locator = {'locator-index': sw_if_index, - 'priority': i, - 'weight': i} + locator = { + u"locator-index": sw_if_index, + u"priority": i, + u"weight": i + } locator_list.append(locator) - l_name = 'ls{0}'.format(num) - locator_set = {'locator-set': l_name, - 'locator': locator_list} + l_name = f"ls{num}" + locator_set = { + u"locator-set": l_name, + u"locator": locator_list + } locator_set_list.append(locator_set) - locator_set_vat = {"ls_name": l_name, - "ls_index": num} + locator_set_vat = { + u"ls_name": l_name, + u"ls_index": num + } locator_set_list_vat.append(locator_set_vat) return locator_set_list, locator_set_list_vat @@ -384,15 +381,15 @@ class LispUtil(object): from VAT. :rtype: tuple """ - topo = Topology() - locator_set_list = [] - locator_set_list_vat = [] + locator_set_list = list() + locator_set_list_vat = list() + i = 0 for num in range(0, int(locator_set_number)): locator_list = [] - for interface in node['interfaces'].values(): - link = interface.get('link') + for interface in list(node[u"interfaces"].values()): + link = interface.get(u"link") i += 1 if link is None: continue @@ -400,17 +397,23 @@ class LispUtil(object): if_name = topo.get_interface_by_link_name(node, link) sw_if_index = topo.get_interface_sw_index(node, if_name) if if_name is not None: - l_name = 'ls{0}'.format(num) - locator = {'locator-index': sw_if_index, - 'priority': i, - 'weight': i} + l_name = f"ls{num}" + locator = { + u"locator-index": sw_if_index, + u"priority": i, + u"weight": i + } locator_list.append(locator) - locator_set = {'locator-set': l_name, - 'locator': locator_list} + locator_set = { + u"locator-set": l_name, + u"locator": locator_list + } locator_set_list.append(locator_set) - locator_set_vat = {"ls_name": l_name, - "ls_index": num} + locator_set_vat = { + u"ls_name": l_name, + u"ls_index": num + } locator_set_list_vat.append(locator_set_vat) return locator_set_list, locator_set_list_vat @@ -421,5 +424,4 @@ class LispUtil(object): :param lisp_params: Should be empty list. :type lisp_params: list """ - self.lisp_should_be_equal([], lisp_params) diff --git a/resources/libraries/python/LoadBalancerUtil.py b/resources/libraries/python/LoadBalancerUtil.py index 77f6412973..c8e6c0ddd7 100644 --- a/resources/libraries/python/LoadBalancerUtil.py +++ b/resources/libraries/python/LoadBalancerUtil.py @@ -13,12 +13,14 @@ """Loadbalancer util library.""" -from socket import htonl from ipaddress import ip_address +from socket import htonl + from resources.libraries.python.topology import NodeType, Topology from resources.libraries.python.PapiExecutor import PapiSocketExecutor -class LoadBalancerUtil(object): + +class LoadBalancerUtil: """Basic Loadbalancer parameter configuration.""" @staticmethod @@ -43,28 +45,33 @@ class LoadBalancerUtil(object): :returns: Nothing. :raises ValueError: If the node has an unknown node type. 
""" - if node['type'] == NodeType.DUT: - ip4_src_addr = ip_address(unicode(kwargs.pop('ip4_src_addr', - '255.255.255.255'))) - ip6_src_addr = ip_address(unicode(kwargs.pop('ip6_src_addr',\ - 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'))) - flow_timeout = kwargs.pop('flow_timeout', 40) - sticky_buckets_per_core = kwargs.pop('buckets_per_core', 1024) - - cmd = 'lb_conf' - err_msg = 'Failed to set lb conf on host {host}'.format( - host=node['host']) - - args = dict(ip4_src_address=str(ip4_src_addr), - ip6_src_address=str(ip6_src_addr), - sticky_buckets_per_core=sticky_buckets_per_core, - flow_timeout=flow_timeout) + if node[u"type"] == NodeType.DUT: + ip4_src_addr = ip_address( + kwargs.pop(u"ip4_src_addr", u"255.255.255.255") + ) + ip6_src_addr = ip_address( + kwargs.pop( + u"ip6_src_addr", u"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" + ) + ) + flow_timeout = kwargs.pop(u"flow_timeout", 40) + sticky_buckets_per_core = kwargs.pop(u"buckets_per_core", 1024) + + cmd = u"lb_conf" + err_msg = f"Failed to set lb conf on host {node[u'host']}" + args = dict( + ip4_src_address=str(ip4_src_addr), + ip6_src_address=str(ip6_src_addr), + sticky_buckets_per_core=sticky_buckets_per_core, + flow_timeout=flow_timeout + ) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) else: - raise ValueError('Node {host} has unknown NodeType: "{type}"' - .format(host=node['host'], type=node['type'])) + raise ValueError( + f"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'" + ) @staticmethod def vpp_lb_add_del_vip(node, **kwargs): @@ -91,40 +98,44 @@ class LoadBalancerUtil(object): :returns: Nothing. :raises ValueError: If the node has an unknown node type. """ - if node['type'] == NodeType.DUT: - vip_addr = kwargs.pop('vip_addr', '0.0.0.0') - protocol = kwargs.pop('protocol', 255) - port = kwargs.pop('port', 0) - encap = kwargs.pop('encap', 0) - dscp = kwargs.pop('dscp', 0) - srv_type = kwargs.pop('srv_type', 0) - target_port = kwargs.pop('target_port', 0) - node_port = kwargs.pop('node_port', 0) - new_len = kwargs.pop('new_len', 1024) - is_del = kwargs.pop('is_del', 0) - - cmd = 'lb_add_del_vip' - err_msg = 'Failed to add vip on host {host}'.format( - host=node['host']) - - vip_addr = ip_address(unicode(vip_addr)).packed - args = dict(pfx={'len': 128, - 'address': {'un': {'ip4': vip_addr}, 'af': 0}}, - protocol=protocol, - port=port, - encap=htonl(encap), - dscp=dscp, - type=srv_type, - target_port=target_port, - node_port=node_port, - new_flows_table_length=int(new_len), - is_del=is_del) + if node[u"type"] == NodeType.DUT: + vip_addr = kwargs.pop(u"vip_addr", "0.0.0.0") + protocol = kwargs.pop(u"protocol", 255) + port = kwargs.pop(u"port", 0) + encap = kwargs.pop(u"encap", 0) + dscp = kwargs.pop(u"dscp", 0) + srv_type = kwargs.pop(u"srv_type", 0) + target_port = kwargs.pop(u"target_port", 0) + node_port = kwargs.pop(u"node_port", 0) + new_len = kwargs.pop(u"new_len", 1024) + is_del = kwargs.pop(u"is_del", 0) + + cmd = u"lb_add_del_vip" + err_msg = f"Failed to add vip on host {node[u'host']}" + + vip_addr = ip_address(vip_addr).packed + args = dict( + pfx={ + u"len": 128, + u"address": {u"un": {u"ip": vip_addr}, u"af": 0} + }, + protocol=protocol, + port=port, + encap=htonl(encap), + dscp=dscp, + type=srv_type, + target_port=target_port, + node_port=node_port, + new_flows_table_length=int(new_len), + is_del=is_del + ) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) else: - raise ValueError('Node {host} has unknown NodeType: 
"{type}"' - .format(host=node['host'], type=node['type'])) + raise ValueError( + f"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'" + ) @staticmethod def vpp_lb_add_del_as(node, **kwargs): @@ -146,34 +157,38 @@ class LoadBalancerUtil(object): :returns: Nothing. :raises ValueError: If the node has an unknown node type. """ - if node['type'] == NodeType.DUT: - cmd = 'lb_add_del_as' - err_msg = 'Failed to add lb as on host {host}'.format( - host=node['host']) - - vip_addr = kwargs.pop('vip_addr', '0.0.0.0') - protocol = kwargs.pop('protocol', 255) - port = kwargs.pop('port', 0) - as_addr = kwargs.pop('as_addr', '0.0.0.0') - is_del = kwargs.pop('is_del', 0) - is_flush = kwargs.pop('is_flush', 0) - - vip_addr = ip_address(unicode(vip_addr)).packed - as_addr = ip_address(unicode(as_addr)).packed - - args = dict(pfx={'len': 128, - 'address': {'un': {'ip4': vip_addr}, 'af': 0}}, - protocol=protocol, - port=port, - as_address={'un': {'ip4': as_addr}, 'af': 0}, - is_del=is_del, - is_flush=is_flush) + if node[u"type"] == NodeType.DUT: + cmd = u"lb_add_del_as" + err_msg = f"Failed to add lb as on host {node[u'host']}" + + vip_addr = kwargs.pop(u"vip_addr", "0.0.0.0") + protocol = kwargs.pop(u"protocol", 255) + port = kwargs.pop(u"port", 0) + as_addr = kwargs.pop(u"as_addr", u"0.0.0.0") + is_del = kwargs.pop(u"is_del", 0) + is_flush = kwargs.pop(u"is_flush", 0) + + vip_addr = ip_address(vip_addr).packed + as_addr = ip_address(as_addr).packed + + args = dict( + pfx={ + u"len": 128, + u"address": {u"un": {u"ip": vip_addr}, u"af": 0} + }, + protocol=protocol, + port=port, + as_address={u"un": {u"ip": as_addr}, u"af": 0}, + is_del=is_del, + is_flush=is_flush + ) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) else: - raise ValueError('Node {host} has unknown NodeType: "{type}"' - .format(host=node['host'], type=node['type'])) + raise ValueError( + f"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'" + ) @staticmethod def vpp_lb_add_del_intf_nat4(node, **kwargs): @@ -190,18 +205,21 @@ class LoadBalancerUtil(object): :returns: Nothing. :raises ValueError: If the node has an unknown node type. 
""" - if node['type'] == NodeType.DUT: - cmd = 'lb_add_del_intf_nat4' - err_msg = 'Failed to add interface nat4 on host {host}'.format( - host=node['host']) + if node[u"type"] == NodeType.DUT: + cmd = u"lb_add_del_intf_nat4" + err_msg = f"Failed to add interface nat4 on host {node[u'host']}" - is_add = kwargs.pop('is_add', True) - interface = kwargs.pop('interface', 0) + is_add = kwargs.pop(u"is_add", True) + interface = kwargs.pop(u"interface", 0) sw_if_index = Topology.get_interface_sw_index(node, interface) - args = dict(is_add=is_add, sw_if_index=sw_if_index) + args = dict( + is_add=is_add, + sw_if_index=sw_if_index + ) with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) else: - raise ValueError('Node {host} has unknown NodeType: "{type}"' - .format(host=node['host'], type=node['type'])) + raise ValueError( + f"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'" + ) diff --git a/resources/libraries/python/LocalExecution.py b/resources/libraries/python/LocalExecution.py index f9a7b94d8e..ea40156404 100644 --- a/resources/libraries/python/LocalExecution.py +++ b/resources/libraries/python/LocalExecution.py @@ -34,13 +34,13 @@ from robot.api import logger from resources.libraries.python.OptionString import OptionString -__all__ = ["run"] +__all__ = [u"run"] -MESSAGE_TEMPLATE = "Command {com} ended with RC {ret} and output:\n{out}" +MESSAGE_TEMPLATE = u"Command {com} ended with RC {ret} and output:\n{out}" -def run(command, msg="", check=True, log=False, console=False): +def run(command, msg=u"", check=True, log=False, console=False): """Wrapper around subprocess.check_output that can tolerates nonzero RCs. Stderr is redirected to stdout, so it is part of output @@ -73,24 +73,24 @@ def run(command, msg="", check=True, log=False, console=False): """ if isinstance(command, OptionString): command = command.parts - if not hasattr(command, "__iter__"): + if not hasattr(command, u"__iter__"): # Strings are indexable, but turning into iterator is not supported. - raise TypeError("Command {cmd!r} is not an iterable.".format( - cmd=command)) + raise TypeError(f"Command {command!r} is not an iterable.") ret_code = 0 - output = "" + output = u"" try: output = subprocess.check_output(command, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as err: output = err.output ret_code = err.returncode if check: - raise RuntimeError(MESSAGE_TEMPLATE.format( - com=err.cmd, ret=ret_code, out=output)) + raise RuntimeError( + MESSAGE_TEMPLATE.format(com=err.cmd, ret=ret_code, out=output) + ) if log: message = MESSAGE_TEMPLATE.format(com=command, ret=ret_code, out=output) if msg: - message = msg + ": " + message + message = f"{msg}: {message}" if console: logger.console(message) else: diff --git a/resources/libraries/python/MLRsearch/AbstractMeasurer.py b/resources/libraries/python/MLRsearch/AbstractMeasurer.py index c9b5987124..622b8fdba6 100644 --- a/resources/libraries/python/MLRsearch/AbstractMeasurer.py +++ b/resources/libraries/python/MLRsearch/AbstractMeasurer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at: @@ -16,11 +16,9 @@ from abc import ABCMeta, abstractmethod -class AbstractMeasurer(object): +class AbstractMeasurer(metaclass=ABCMeta): """Abstract class defining common API for measurement providers.""" - __metaclass__ = ABCMeta - @abstractmethod def measure(self, duration, transmit_rate): """Perform trial measurement and return the result. @@ -32,4 +30,3 @@ class AbstractMeasurer(object): :returns: Structure containing the result of the measurement. :rtype: ReceiveRateMeasurement.ReceiveRateMeasurement """ - pass diff --git a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py b/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py index 08f8b7e0a9..f4f2d3f096 100644 --- a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py +++ b/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -16,11 +16,9 @@ from abc import ABCMeta, abstractmethod -class AbstractSearchAlgorithm(object): +class AbstractSearchAlgorithm(metaclass=ABCMeta): """Abstract class defining common API for search algorithms.""" - __metaclass__ = ABCMeta - def __init__(self, measurer): """Store the rate provider. @@ -48,4 +46,3 @@ class AbstractSearchAlgorithm(object): :rtype: NdrPdrResult.NdrPdrResult """ # TODO: Do we agree on arguments related to precision or trial duration? - pass diff --git a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py b/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py index 1ecd42e7dd..29b72505de 100644 --- a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py +++ b/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -79,7 +79,7 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): TODO: Support configurable number of Packet Loss Ratios. """ - class ProgressState(object): + class ProgressState: """Structure containing data to be passed around in recursion.""" def __init__( @@ -113,9 +113,10 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): self.minimum_transmit_rate = float(minimum_transmit_rate) self.maximum_transmit_rate = float(maximum_transmit_rate) - def __init__(self, measurer, final_relative_width=0.005, - final_trial_duration=30.0, initial_trial_duration=1.0, - number_of_intermediate_phases=2, timeout=600.0, doublings=1): + def __init__( + self, measurer, final_relative_width=0.005, + final_trial_duration=30.0, initial_trial_duration=1.0, + number_of_intermediate_phases=2, timeout=600.0, doublings=1): """Store the measurer object and additional arguments. :param measurer: Rate provider to use by this search object. @@ -147,7 +148,6 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): self.timeout = float(timeout) self.doublings = int(doublings) - @staticmethod def double_relative_width(relative_width): """Return relative width corresponding to double logarithmic width. 
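The conversion pattern repeated in both abstract classes above: under Python 3 an in-body `__metaclass__ = ABCMeta` assignment is inert, so the metaclass has to move into the class header. Minimal form (class name hypothetical, mirroring AbstractMeasurer):

    from abc import ABCMeta, abstractmethod

    class AbstractExample(metaclass=ABCMeta):
        # Python 2 would instead set: __metaclass__ = ABCMeta (inert in Python 3).
        @abstractmethod
        def measure(self, duration, transmit_rate):
            """Subclasses must implement this."""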
@@ -173,8 +173,8 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): :rtype: float """ return current_bound * ( - 1.0 - MultipleLossRatioSearch.double_relative_width( - relative_width)) + 1.0 - MultipleLossRatioSearch.double_relative_width(relative_width) + ) @staticmethod def expand_down(relative_width, doublings, current_bound): @@ -191,7 +191,8 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): """ for _ in range(doublings): relative_width = MultipleLossRatioSearch.double_relative_width( - relative_width) + relative_width + ) return current_bound * (1.0 - relative_width) @staticmethod @@ -206,8 +207,8 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): :rtype: float """ return current_bound / ( - 1.0 - MultipleLossRatioSearch.double_relative_width( - relative_width)) + 1.0 - MultipleLossRatioSearch.double_relative_width(relative_width) + ) @staticmethod def expand_up(relative_width, doublings, current_bound): @@ -224,7 +225,8 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): """ for _ in range(doublings): relative_width = MultipleLossRatioSearch.double_relative_width( - relative_width) + relative_width + ) return current_bound / (1.0 - relative_width) @staticmethod @@ -250,7 +252,8 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): :rtype: float """ return current_bound / ( - 1.0 - MultipleLossRatioSearch.half_relative_width(relative_width)) + 1.0 - MultipleLossRatioSearch.half_relative_width(relative_width) + ) def narrow_down_ndr_and_pdr( self, minimum_transmit_rate, maximum_transmit_rate, @@ -278,31 +281,32 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): initial_width_goal = self.double_relative_width(initial_width_goal) max_lo = maximum_transmit_rate * (1.0 - initial_width_goal) mrr = max( - minimum_transmit_rate, - min(max_lo, line_measurement.receive_rate)) + minimum_transmit_rate, min(max_lo, line_measurement.receive_rate) + ) mrr_measurement = self.measurer.measure( - self.initial_trial_duration, mrr) + self.initial_trial_duration, mrr + ) # Attempt to get narrower width. if mrr_measurement.loss_fraction > 0.0: max2_lo = mrr * (1.0 - initial_width_goal) mrr2 = min(max2_lo, mrr_measurement.receive_rate) else: mrr2 = mrr / (1.0 - initial_width_goal) - if mrr2 > minimum_transmit_rate and mrr2 < maximum_transmit_rate: + if minimum_transmit_rate < mrr2 < maximum_transmit_rate: line_measurement = mrr_measurement mrr_measurement = self.measurer.measure( self.initial_trial_duration, mrr2) if mrr2 > mrr: - buf = line_measurement - line_measurement = mrr_measurement - mrr_measurement = buf + line_measurement, mrr_measurement = \ + (mrr_measurement, line_measurement) starting_interval = ReceiveRateInterval( mrr_measurement, line_measurement) starting_result = NdrPdrResult(starting_interval, starting_interval) state = self.ProgressState( starting_result, self.number_of_intermediate_phases, self.final_trial_duration, self.final_relative_width, - packet_loss_ratio, minimum_transmit_rate, maximum_transmit_rate) + packet_loss_ratio, minimum_transmit_rate, maximum_transmit_rate + ) state = self.ndrpdr(state) return state.result @@ -318,15 +322,18 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): """ # TODO: Implement https://stackoverflow.com/a/24683360 # to avoid the string manipulation if log verbosity is too low. 
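The widths here are relative: an interval [lo, hi] has w = (hi - lo) / hi, so one step down multiplies the bound by (1 - w). Doubling a width "logarithmically" composes two such steps, giving 1 - w2 = (1 - w)**2, i.e. w2 = 2*w - w*w. A worked check (a sketch; the real implementation may add a rounding guard):

    w = 0.1
    w2 = 2 * w - w * w          # 0.19
    hi = 10_000_000.0
    one_step = hi * (1 - w)     # 9_000_000.0
    two_steps = hi * (1 - w2)   # 8_100_000.0 == hi * (1 - w) ** 2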
- logging.info("result before update: %s", state.result) + logging.info(f"result before update: {state.result}") logging.debug( - "relative widths in goals: %s", state.result.width_in_goals( - self.final_relative_width)) + f"relative widths in goals: " + f"{state.result.width_in_goals(self.final_relative_width)}" + ) measurement = self.measurer.measure(state.duration, transmit_rate) ndr_interval = self._new_interval( - state.result.ndr_interval, measurement, 0.0) + state.result.ndr_interval, measurement, 0.0 + ) pdr_interval = self._new_interval( - state.result.pdr_interval, measurement, state.packet_loss_ratio) + state.result.pdr_interval, measurement, state.packet_loss_ratio + ) state.result = NdrPdrResult(ndr_interval, pdr_interval) return state @@ -387,11 +394,13 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): # "invalid upper bound at maximal rate" case. new_lo = measurement - return ReceiveRateInterval(old_lo if new_lo is None else new_lo, - old_hi if new_hi is None else new_hi) + return ReceiveRateInterval( + old_lo if new_lo is None else new_lo, + old_hi if new_hi is None else new_hi + ) def ndrpdr(self, state): - """Pefrom trials for this phase. Return the new state when done. + """Perform trials for this phase. Return the new state when done. :param state: State before this phase. :type state: ProgressState @@ -409,7 +418,8 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): duration_multiplier = state.duration / self.initial_trial_duration phase_exponent = float(state.phases) / saved_phases state.duration = self.initial_trial_duration * math.pow( - duration_multiplier, phase_exponent) + duration_multiplier, phase_exponent + ) # Shorter durations do not need that narrow widths. saved_width = state.width_goal state.width_goal = self.double_relative_width(state.width_goal) @@ -421,11 +431,12 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): state.phases = saved_phases # Not needed, but just in case. logging.info( - "starting iterations with duration %s and relative width goal %s", - state.duration, state.width_goal) + f"starting iterations with duration {state.duration} and relative " + f"width goal {state.width_goal}" + ) while 1: if time.time() > start_time + self.timeout: - raise RuntimeError("Optimized search takes too long.") + raise RuntimeError(u"Optimized search takes too long.") # Order of priorities: invalid bounds (nl, pl, nh, ph), # then narrowing relative Tr widths. # Durations are not priorities yet, @@ -435,14 +446,17 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): pdr_lo = state.result.pdr_interval.measured_low pdr_hi = state.result.pdr_interval.measured_high ndr_rel_width = max( - state.width_goal, state.result.ndr_interval.rel_tr_width) + state.width_goal, state.result.ndr_interval.rel_tr_width + ) pdr_rel_width = max( - state.width_goal, state.result.pdr_interval.rel_tr_width) + state.width_goal, state.result.pdr_interval.rel_tr_width + ) # If we are hitting maximal or minimal rate, we cannot shift, # but we can re-measure. 
- new_tr = self._ndrpdr_loss_fraction(state, - ndr_lo, ndr_hi, pdr_lo, pdr_hi, - ndr_rel_width, pdr_rel_width) + new_tr = self._ndrpdr_loss_fraction( + state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width, + pdr_rel_width + ) if new_tr is not None: state = self._measure_and_update_state(state, new_tr) @@ -461,8 +475,9 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): and pdr_lo.loss_fraction > state.packet_loss_ratio): pdr_rel_width = 0.0 - new_tr = self._ndrpdr_width_goal(state, ndr_lo, pdr_lo, - ndr_rel_width, pdr_rel_width) + new_tr = self._ndrpdr_width_goal( + state, ndr_lo, pdr_lo, ndr_rel_width, pdr_rel_width + ) if new_tr is not None: state = self._measure_and_update_state(state, new_tr) @@ -470,9 +485,10 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): # We do not need to improve width, but there still might be # some measurements with smaller duration. - new_tr = self._ndrpdr_duration(state, - ndr_lo, ndr_hi, pdr_lo, pdr_hi, - ndr_rel_width, pdr_rel_width) + new_tr = self._ndrpdr_duration( + state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width, + pdr_rel_width + ) if new_tr is not None: state = self._measure_and_update_state(state, new_tr) @@ -480,12 +496,13 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): # Widths are narrow (or lower bound minimal), bound measurements # are long enough, we can return. - logging.info("phase done") + logging.info(u"phase done") break return state - def _ndrpdr_loss_fraction(self, state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, - ndr_rel_width, pdr_rel_width): + def _ndrpdr_loss_fraction( + self, state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width, + pdr_rel_width): """Perform loss_fraction-based trials within a ndrpdr phase :param state: current state @@ -509,50 +526,54 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): if ndr_lo.loss_fraction > 0.0: if ndr_lo.target_tr > state.minimum_transmit_rate: result = max( - state.minimum_transmit_rate, - self.expand_down( - ndr_rel_width, self.doublings, ndr_lo.target_tr)) - logging.info("ndr lo external %s", result) + state.minimum_transmit_rate, self.expand_down( + ndr_rel_width, self.doublings, ndr_lo.target_tr + ) + ) + logging.info(f"ndr lo external {result}") elif ndr_lo.duration < state.duration: result = state.minimum_transmit_rate - logging.info("ndr lo minimal re-measure") + logging.info(u"ndr lo minimal re-measure") if result is None and pdr_lo.loss_fraction > state.packet_loss_ratio: if pdr_lo.target_tr > state.minimum_transmit_rate: result = max( - state.minimum_transmit_rate, - self.expand_down( - pdr_rel_width, self.doublings, pdr_lo.target_tr)) - logging.info("pdr lo external %s", result) + state.minimum_transmit_rate, self.expand_down( + pdr_rel_width, self.doublings, pdr_lo.target_tr + ) + ) + logging.info(f"pdr lo external {result}") elif pdr_lo.duration < state.duration: result = state.minimum_transmit_rate - logging.info("pdr lo minimal re-measure") + logging.info(u"pdr lo minimal re-measure") if result is None and ndr_hi.loss_fraction <= 0.0: if ndr_hi.target_tr < state.maximum_transmit_rate: result = min( - state.maximum_transmit_rate, - self.expand_up( - ndr_rel_width, self.doublings, ndr_hi.target_tr)) - logging.info("ndr hi external %s", result) + state.maximum_transmit_rate, self.expand_up( + ndr_rel_width, self.doublings, ndr_hi.target_tr + ) + ) + logging.info(f"ndr hi external {result}") elif ndr_hi.duration < state.duration: result = state.maximum_transmit_rate - logging.info("ndr hi maximal re-measure") + logging.info(u"ndr hi maximal 
re-measure") if result is None and pdr_hi.loss_fraction <= state.packet_loss_ratio: if pdr_hi.target_tr < state.maximum_transmit_rate: result = min( - state.maximum_transmit_rate, - self.expand_up( - pdr_rel_width, self.doublings, pdr_hi.target_tr)) - logging.info("pdr hi external %s", result) + state.maximum_transmit_rate, self.expand_up( + pdr_rel_width, self.doublings, pdr_hi.target_tr + ) + ) + logging.info(f"pdr hi external {result}") elif pdr_hi.duration < state.duration: result = state.maximum_transmit_rate - logging.info("ndr hi maximal re-measure") + logging.info(u"ndr hi maximal re-measure") return result - def _ndrpdr_width_goal(self, state, ndr_lo, pdr_lo, - ndr_rel_width, pdr_rel_width): + def _ndrpdr_width_goal( + self, state, ndr_lo, pdr_lo, ndr_rel_width, pdr_rel_width): """Perform width_goal-based trials within a ndrpdr phase :param state: current state @@ -573,18 +594,19 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): # We have to narrow NDR width first, as NDR internal search # can invalidate PDR (but not vice versa). result = self.half_step_up(ndr_rel_width, ndr_lo.target_tr) - logging.info("Bisecting for NDR at %s", result) + logging.info(f"Bisecting for NDR at {result}") elif pdr_rel_width > state.width_goal: - # PDR iternal search. + # PDR internal search. result = self.half_step_up(pdr_rel_width, pdr_lo.target_tr) - logging.info("Bisecting for PDR at %s", result) + logging.info(f"Bisecting for PDR at {result}") else: result = None return result @staticmethod - def _ndrpdr_duration(state, ndr_lo, pdr_lo, ndr_hi, pdr_hi, - ndr_rel_width, pdr_rel_width): + def _ndrpdr_duration( + state, ndr_lo, pdr_lo, ndr_hi, pdr_hi, ndr_rel_width, + pdr_rel_width): """Perform duration-based trials within a ndrpdr phase :param state: current state @@ -608,18 +630,18 @@ class MultipleLossRatioSearch(AbstractSearchAlgorithm): # creating invalid bounds to resolve (thus broadening width). if ndr_lo.duration < state.duration: result = ndr_lo.target_tr - logging.info("re-measuring NDR lower bound") + logging.info(u"re-measuring NDR lower bound") elif pdr_lo.duration < state.duration: result = pdr_lo.target_tr - logging.info("re-measuring PDR lower bound") + logging.info(u"re-measuring PDR lower bound") # Except when lower bounds have high loss fraction, in that case # we do not need to re-measure _upper_ bounds. elif ndr_hi.duration < state.duration and ndr_rel_width > 0.0: result = ndr_hi.target_tr - logging.info("re-measuring NDR upper bound") + logging.info(u"re-measuring NDR upper bound") elif pdr_hi.duration < state.duration and pdr_rel_width > 0.0: result = pdr_hi.target_tr - logging.info("re-measuring PDR upper bound") + logging.info(u"re-measuring PDR upper bound") else: result = None return result diff --git a/resources/libraries/python/MLRsearch/NdrPdrResult.py b/resources/libraries/python/MLRsearch/NdrPdrResult.py index 7b8cfd6449..3454ef1957 100644 --- a/resources/libraries/python/MLRsearch/NdrPdrResult.py +++ b/resources/libraries/python/MLRsearch/NdrPdrResult.py @@ -13,11 +13,10 @@ """Module defining NdrPdrResult class.""" -from resources.libraries.python.MLRsearch.ReceiveRateInterval \ - import ReceiveRateInterval +from .ReceiveRateInterval import ReceiveRateInterval -class NdrPdrResult(object): +class NdrPdrResult: """Two measurement intervals, return value of search algorithms. Partial fraction is NOT part of the result. 
Pdr interval should be valid @@ -34,11 +33,13 @@ class NdrPdrResult(object): # TODO: Type checking is not very pythonic, # perhaps users can fix wrong usage without it? if not isinstance(ndr_interval, ReceiveRateInterval): - raise TypeError("ndr_interval, is not a ReceiveRateInterval: " - "{ndr!r}".format(ndr=ndr_interval)) + raise TypeError( + f"ndr_interval, is not a ReceiveRateInterval: {ndr_interval!r}" + ) if not isinstance(pdr_interval, ReceiveRateInterval): - raise TypeError("pdr_interval, is not a ReceiveRateInterval: " - "{pdr!r}".format(pdr=pdr_interval)) + raise TypeError( + f"pdr_interval, is not a ReceiveRateInterval: {pdr_interval!r}" + ) self.ndr_interval = ndr_interval self.pdr_interval = pdr_interval @@ -51,16 +52,14 @@ class NdrPdrResult(object): :returns: Message containing NDR and PDR widths in goals. :rtype: str """ - return "ndr {ndr_in_goals}; pdr {pdr_in_goals}".format( - ndr_in_goals=self.ndr_interval.width_in_goals(relative_width_goal), - pdr_in_goals=self.pdr_interval.width_in_goals(relative_width_goal)) + return f"ndr {self.ndr_interval.width_in_goals(relative_width_goal)};" \ + f" pdr {self.pdr_interval.width_in_goals(relative_width_goal)}" def __str__(self): """Return string as tuple of named values.""" - return "NDR={ndr!s};PDR={pdr!s}".format( - ndr=self.ndr_interval, pdr=self.pdr_interval) + return f"NDR={self.ndr_interval!s};PDR={self.pdr_interval!s}" def __repr__(self): """Return string evaluable as a constructor call.""" - return "NdrPdrResult(ndr_interval={ndr!r},pdr_interval={pdr!r})".format( - ndr=self.ndr_interval, pdr=self.pdr_interval) + return f"NdrPdrResult(ndr_interval={self.ndr_interval!r}," \ + f"pdr_interval={self.pdr_interval!r})" diff --git a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py b/resources/libraries/python/MLRsearch/ReceiveRateInterval.py index ec3cbb7462..eff23e8bcc 100644 --- a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py +++ b/resources/libraries/python/MLRsearch/ReceiveRateInterval.py @@ -15,11 +15,10 @@ import math -from resources.libraries.python.MLRsearch.ReceiveRateMeasurement \ - import ReceiveRateMeasurement +from .ReceiveRateMeasurement import ReceiveRateMeasurement -class ReceiveRateInterval(object): +class ReceiveRateInterval: """Structure defining two Rr measurements, and their relation.""" def __init__(self, measured_low, measured_high): @@ -33,11 +32,15 @@ class ReceiveRateInterval(object): # TODO: Type checking is not very pythonic, # perhaps users can fix wrong usage without it? if not isinstance(measured_low, ReceiveRateMeasurement): - raise TypeError("measured_low is not a ReceiveRateMeasurement: " - "{low!r}".format(low=measured_low)) + raise TypeError( + f"measured_low is not a ReceiveRateMeasurement: " + f"{measured_low!r}" + ) if not isinstance(measured_high, ReceiveRateMeasurement): - raise TypeError("measured_high is not a ReceiveRateMeasurement: " - "{high!r}".format(high=measured_high)) + raise TypeError( + f"measured_high is not a ReceiveRateMeasurement: " + f"{measured_high!r}" + ) self.measured_low = measured_low self.measured_high = measured_high # Declare secondary quantities to appease pylint. 
@@ -51,9 +54,11 @@ class ReceiveRateInterval(object): """Sort bounds by target Tr, compute secondary quantities.""" if self.measured_low.target_tr > self.measured_high.target_tr: self.measured_low, self.measured_high = ( - self.measured_high, self.measured_low) + self.measured_high, self.measured_low + ) self.abs_tr_width = ( - self.measured_high.target_tr - self.measured_low.target_tr) + self.measured_high.target_tr - self.measured_low.target_tr + ) self.rel_tr_width = self.abs_tr_width / self.measured_high.target_tr def width_in_goals(self, relative_width_goal): @@ -75,11 +80,9 @@ class ReceiveRateInterval(object): def __str__(self): """Return string as half-open interval.""" - return "[{low!s};{high!s})".format( - low=self.measured_low, high=self.measured_high) + return f"[{self.measured_low!s};{self.measured_high!s})" def __repr__(self): """Return string evaluable as a constructor call.""" - return ("ReceiveRateInterval(measured_low={low!r}" - ",measured_high={high!r})".format( - low=self.measured_low, high=self.measured_high)) + return f"ReceiveRateInterval(measured_low={self.measured_low!r}," \ + f"measured_high={self.measured_high!r})" diff --git a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py b/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py index d052ebd3bf..31a6f8202e 100644 --- a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py +++ b/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -14,7 +14,7 @@ """Module defining ReceiveRateMeasurement class.""" -class ReceiveRateMeasurement(object): +class ReceiveRateMeasurement: """Structure defining the result of single Rr measurement.""" def __init__(self, duration, target_tr, transmit_count, loss_count): @@ -43,12 +43,12 @@ class ReceiveRateMeasurement(object): def __str__(self): """Return string reporting input and loss fraction.""" - return "d={dur!s},Tr={rate!s},Df={frac!s}".format( - dur=self.duration, rate=self.target_tr, frac=self.loss_fraction) + return f"d={self.duration!s},Tr={self.target_tr!s}," \ + f"Df={self.loss_fraction!s}" def __repr__(self): """Return string evaluable as a constructor call.""" - return ("ReceiveRateMeasurement(duration={dur!r},target_tr={rate!r}" - ",transmit_count={trans!r},loss_count={loss!r})".format( - dur=self.duration, rate=self.target_tr, - trans=self.transmit_count, loss=self.loss_count)) + return f"ReceiveRateMeasurement(duration={self.duration!r}," \ + f"target_tr={self.target_tr!r}," \ + f"transmit_count={self.transmit_count!r}," \ + f"loss_count={self.loss_count!r})" diff --git a/resources/libraries/python/Memif.py b/resources/libraries/python/Memif.py index 24fda52677..2128d30428 100644 --- a/resources/libraries/python/Memif.py +++ b/resources/libraries/python/Memif.py @@ -15,6 +15,7 @@ from enum import IntEnum + from robot.api import logger from resources.libraries.python.topology import NodeType, Topology @@ -27,7 +28,7 @@ class MemifRole(IntEnum): SLAVE = 1 -class Memif(object): +class Memif: """Memif interface class""" def __init__(self): @@ -42,18 +43,18 @@ class Memif(object): :returns: List of memif interfaces extracted from Papi response. 
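The two data classes compose as follows (a sketch: the Df field in the string output suggests loss_fraction = loss_count / transmit_count, and the constructor is expected to sort the bounds and derive the widths, as the code above suggests):

    low = ReceiveRateMeasurement(1.0, 1_000_000.0, 1_000_000, 0)
    high = ReceiveRateMeasurement(1.0, 2_000_000.0, 2_000_000, 1_000)
    interval = ReceiveRateInterval(low, high)
    print(interval.rel_tr_width)   # (2e6 - 1e6) / 2e6 = 0.5
    print(str(high))               # d=1.0,Tr=2000000.0,Df=0.0005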
:rtype: list """ - cmd = "memif_dump" + cmd = u"memif_dump" with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd).get_details() for memif in details: - memif["hw_addr"] = str(memif["hw_addr"]) - memif["role"] = memif["role"].value - memif["mode"] = memif["mode"].value - memif["flags"] = memif["flags"].value \ - if hasattr(memif["flags"], 'value') else int(memif["flags"]) + memif[u"hw_addr"] = str(memif[u"hw_addr"]) + memif[u"role"] = memif[u"role"].value + memif[u"mode"] = memif[u"mode"].value + memif[u"flags"] = memif[u"flags"].value \ + if hasattr(memif[u"flags"], u"value") else int(memif[u"flags"]) - logger.debug("MEMIF details:\n{details}".format(details=details)) + logger.debug(f"MEMIF details:\n{details}") return details @@ -73,13 +74,12 @@ class Memif(object): includes only retval. :rtype: dict """ - cmd = 'memif_socket_filename_add_del' - err_msg = 'Failed to create memif socket on host {host}'.format( - host=node['host']) + cmd = u"memif_socket_filename_add_del" + err_msg = f"Failed to create memif socket on host {node[u'host']}" args = dict( is_add=is_add, socket_id=int(sid), - socket_filename=str('/tmp/' + filename) + socket_filename=str(u"/tmp/" + filename) ) with PapiSocketExecutor(node) as papi_exec: return papi_exec.add(cmd, **args).get_reply(err_msg) @@ -103,23 +103,23 @@ class Memif(object): :returns: sw_if_index :rtype: int """ - cmd = 'memif_create' - err_msg = 'Failed to create memif interface on host {host}'.format( - host=node['host']) + cmd = u"memif_create" + err_msg = f"Failed to create memif interface on host {node[u'host']}" args = dict( role=role, rx_queues=int(rxq), tx_queues=int(txq), socket_id=int(sid), id=int(mid), - secret="" + secret=u"" ) + with PapiSocketExecutor(node) as papi_exec: return papi_exec.add(cmd, **args).get_sw_if_index(err_msg) @staticmethod - def create_memif_interface(node, filename, mid, sid, rxq=1, txq=1, - role="SLAVE"): + def create_memif_interface( + node, filename, mid, sid, rxq=1, txq=1, role=u"SLAVE"): """Create Memif interface on the given node. :param node: Given node to create Memif interface on. @@ -140,7 +140,6 @@ class Memif(object): :rtype: int :raises ValueError: If command 'create memif' fails. """ - role = getattr(MemifRole, role.upper()).value # Create socket @@ -148,10 +147,11 @@ class Memif(object): # Create memif sw_if_index = Memif._memif_create( - node, mid, sid, rxq=rxq, txq=txq, role=role) + node, mid, sid, rxq=rxq, txq=txq, role=role + ) # Update Topology - if_key = Topology.add_new_port(node, 'memif') + if_key = Topology.add_new_port(node, u"memif") Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = Memif.vpp_get_memif_interface_name(node, sw_if_index) @@ -160,7 +160,9 @@ class Memif(object): ifc_mac = Memif.vpp_get_memif_interface_mac(node, sw_if_index) Topology.update_interface_mac_address(node, if_key, ifc_mac) - Topology.update_interface_memif_socket(node, if_key, '/tmp/' + filename) + Topology.update_interface_memif_socket( + node, if_key, u"/tmp/" + filename + ) Topology.update_interface_memif_id(node, if_key, mid) Topology.update_interface_memif_role(node, if_key, str(role)) @@ -173,7 +175,6 @@ class Memif(object): :param node: Given node to show Memif data on. 
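A usage sketch of the creation flow above, on a DUT node dictionary from the topology (socket name, IDs and queue counts are illustrative):

    sw_if_index = Memif.create_memif_interface(
        node, filename=u"memif-dut1", mid=1, sid=1, rxq=1, txq=1, role=u"SLAVE"
    )
    # Creates the /tmp/memif-dut1 socket, creates the interface over it,
    # and registers a new "memif" port (name, MAC, socket, id, role)
    # in the topology for later keywords to look up.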
:type node: dict """ - Memif._memif_details(node) @staticmethod @@ -184,7 +185,7 @@ class Memif(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: Memif.show_memif(node) @staticmethod @@ -198,12 +199,11 @@ class Memif(object): :returns: Memif interface name, or None if not found. :rtype: str """ - details = Memif._memif_details(node) for memif in details: - if memif["sw_if_index"] == sw_if_index: - return memif["if_name"] + if memif[u"sw_if_index"] == sw_if_index: + return memif[u"if_name"] return None @staticmethod @@ -217,10 +217,9 @@ class Memif(object): :returns: Memif interface MAC address, or None if not found. :rtype: str """ - details = Memif._memif_details(node) for memif in details: - if memif["sw_if_index"] == sw_if_index: - return memif["hw_addr"] + if memif[u"sw_if_index"] == sw_if_index: + return memif[u"hw_addr"] return None diff --git a/resources/libraries/python/NATUtil.py b/resources/libraries/python/NATUtil.py index f018d38335..2d5c1c7b76 100644 --- a/resources/libraries/python/NATUtil.py +++ b/resources/libraries/python/NATUtil.py @@ -15,7 +15,6 @@ from pprint import pformat from socket import AF_INET, inet_pton - from enum import IntEnum from robot.api import logger @@ -37,7 +36,7 @@ class NATConfigFlags(IntEnum): NAT_IS_EXT_HOST_VALID = 0x80 -class NATUtil(object): +class NATUtil: """This class defines the methods to set NAT.""" def __init__(self): @@ -54,28 +53,29 @@ class NATUtil(object): :type int_in: str :type int_out: str """ - - cmd = 'nat44_interface_add_del_feature' + cmd = u"nat44_interface_add_del_feature" int_in_idx = InterfaceUtil.get_sw_if_index(node, int_in) - err_msg = 'Failed to set inside interface {int} for NAT44 on host ' \ - '{host}'.format(int=int_in, host=node['host']) + err_msg = f"Failed to set inside interface {int_in} for NAT44 " \ + f"on host {node[u'host']}" args_in = dict( sw_if_index=int_in_idx, is_add=1, - flags=getattr(NATConfigFlags, "NAT_IS_INSIDE").value + flags=getattr(NATConfigFlags, u"NAT_IS_INSIDE").value ) + with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args_in).get_reply(err_msg) int_out_idx = InterfaceUtil.get_sw_if_index(node, int_out) - err_msg = 'Failed to set outside interface {int} for NAT44 on host ' \ - '{host}'.format(int=int_out, host=node['host']) + err_msg = f"Failed to set outside interface {int_out} for NAT44 " \ + f"on host {node[u'host']}" args_in = dict( sw_if_index=int_out_idx, is_add=1, - flags=getattr(NATConfigFlags, "NAT_IS_OUTSIDE").value + flags=getattr(NATConfigFlags, u"NAT_IS_OUTSIDE").value ) + with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args_in).get_reply(err_msg) @@ -94,10 +94,9 @@ class NATUtil(object): :type ip_out: str :type subnet_out: str or int """ - - cmd = 'nat_det_add_del_map' - err_msg = 'Failed to set deterministic behaviour of NAT on host ' \ - '{host}'.format(host=node['host']) + cmd = u"nat_det_add_del_map" + err_msg = f"Failed to set deterministic behaviour of NAT " \ + f"on host {node[u'host']}" args_in = dict( is_add=True, in_addr=inet_pton(AF_INET, str(ip_in)), @@ -105,6 +104,7 @@ class NATUtil(object): out_addr=inet_pton(AF_INET, str(ip_out)), out_plen=int(subnet_out) ) + with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args_in).get_reply(err_msg) @@ -127,22 +127,22 @@ class NATUtil(object): :param node: DUT node. 
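NATConfigFlags is an IntEnum of single-bit values, and the keyword looks flags up by name so Robot test data can stay string-based. The same pattern, condensed:

    inside = getattr(NATConfigFlags, u"NAT_IS_INSIDE").value
    # IntEnum members also combine bitwise, should an API field ever
    # need more than one flag at once (illustrative):
    combined = NATConfigFlags.NAT_IS_INSIDE | NATConfigFlags.NAT_IS_EXT_HOST_VALID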
:type node: dict """ + cmd = u"nat_show_config" + err_msg = f"Failed to get NAT configuration on host {node[u'host']}" - cmd = 'nat_show_config' - err_msg = 'Failed to get NAT configuration on host {host}'.\ - format(host=node['host']) with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply(err_msg) - logger.debug("NAT Configuration:\n{reply}".format(reply=pformat(reply))) + + logger.debug(f"NAT Configuration:\n{pformat(reply)}") cmds = [ - "nat_worker_dump", - "nat44_interface_addr_dump", - "nat44_address_dump", - "nat44_static_mapping_dump", - "nat44_user_dump", - "nat44_interface_dump", - "nat44_user_session_dump", - "nat_det_map_dump" + u"nat_worker_dump", + u"nat44_interface_addr_dump", + u"nat44_address_dump", + u"nat44_static_mapping_dump", + u"nat44_user_dump", + u"nat44_interface_dump", + u"nat44_user_session_dump", + u"nat_det_map_dump" ] PapiSocketExecutor.dump_and_log(node, cmds) diff --git a/resources/libraries/python/Namespaces.py b/resources/libraries/python/Namespaces.py index 00d615350e..2618f3d19b 100644 --- a/resources/libraries/python/Namespaces.py +++ b/resources/libraries/python/Namespaces.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -16,7 +16,7 @@ from resources.libraries.python.ssh import exec_cmd_no_error, exec_cmd -class Namespaces(object): +class Namespaces: """Linux namespace utilities.""" def __init__(self): self._namespaces = [] @@ -29,7 +29,8 @@ class Namespaces(object): :type node: dict :type namespace_name: str """ - cmd = ('ip netns add {0}'.format(namespace_name)) + cmd = f"ip netns add {namespace_name}" + exec_cmd_no_error(node, cmd, sudo=True) self._namespaces.append(namespace_name) @@ -45,17 +46,19 @@ class Namespaces(object): :type interface: str :raises RuntimeError: Interface could not be attached. 
""" - cmd = 'ip link set {0} netns {1}'.format(interface, namespace) - (ret_code, _, stderr) = exec_cmd(node, cmd, timeout=5, sudo=True) + cmd = f"ip link set {interface} netns {namespace}" + + ret_code, _, stderr = exec_cmd(node, cmd, timeout=5, sudo=True) if ret_code != 0: - raise RuntimeError( - 'Could not attach interface, reason:{}'.format(stderr)) - cmd = 'ip netns exec {} ip link set {} up'.format( - namespace, interface) - (ret_code, _, stderr) = exec_cmd(node, cmd, timeout=5, sudo=True) + raise RuntimeError(f"Could not attach interface, reason:\n{stderr}") + + cmd = f"ip netns exec {namespace} ip link set {interface} up" + + ret_code, _, stderr = exec_cmd(node, cmd, timeout=5, sudo=True) if ret_code != 0: raise RuntimeError( - 'Could not set interface state, reason:{}'.format(stderr)) + f"Could not set interface state, reason:\n{stderr}" + ) @staticmethod def create_bridge_for_int_in_namespace( @@ -71,14 +74,15 @@ class Namespaces(object): :type bridge_name: str :type interfaces: list """ - cmd = 'ip netns exec {} brctl addbr {}'.format(namespace, bridge_name) + cmd = f"ip netns exec {namespace} brctl addbr {bridge_name}" exec_cmd_no_error(node, cmd, sudo=True) + for interface in interfaces: - cmd = 'ip netns exec {} brctl addif {} {}'.format( - namespace, bridge_name, interface) + cmd = f"ip netns exec {namespace} brctl addif {bridge_name} " \ + f"{interface}" exec_cmd_no_error(node, cmd, sudo=True) - cmd = 'ip netns exec {} ip link set dev {} up'.format( - namespace, bridge_name) + + cmd = f"ip netns exec {namespace} ip link set dev {bridge_name} up" exec_cmd_no_error(node, cmd, sudo=True) def clean_up_namespaces(self, node): @@ -89,8 +93,8 @@ class Namespaces(object): :raises RuntimeError: Namespaces could not be cleaned properly. """ for namespace in self._namespaces: - print "Cleaning namespace {}".format(namespace) - cmd = 'ip netns delete {}'.format(namespace) - (ret_code, _, _) = exec_cmd(node, cmd, timeout=5, sudo=True) + print(f"Cleaning namespace {namespace}") + cmd = f"ip netns delete {namespace}" + ret_code, _, _ = exec_cmd(node, cmd, timeout=5, sudo=True) if ret_code != 0: - raise RuntimeError('Could not delete namespace') + raise RuntimeError(u"Could not delete namespace") diff --git a/resources/libraries/python/NodePath.py b/resources/libraries/python/NodePath.py index ec84a8b08b..e97bde87ad 100644 --- a/resources/libraries/python/NodePath.py +++ b/resources/libraries/python/NodePath.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -16,7 +16,7 @@ from resources.libraries.python.topology import Topology -class NodePath(object): +class NodePath: """Path utilities for nodes in the topology. 
:Example: @@ -103,7 +103,7 @@ class NodePath(object): """ nodes = self._nodes if len(nodes) < 2: - raise RuntimeError('Not enough nodes to compute path') + raise RuntimeError(u"Not enough nodes to compute path") for idx in range(0, len(nodes) - 1): topo = Topology() @@ -111,12 +111,14 @@ class NodePath(object): node2 = nodes[idx + 1] n1_list = self._nodes_filter[idx] n2_list = self._nodes_filter[idx + 1] - links = topo.get_active_connecting_links(node1, node2, - filter_list_node1=n1_list, - filter_list_node2=n2_list) + links = topo.get_active_connecting_links( + node1, node2, filter_list_node1=n1_list, + filter_list_node2=n2_list + ) if not links: - raise RuntimeError('No link between {0} and {1}'.format( - node1['host'], node2['host'])) + raise RuntimeError( + f"No link between {node1[u'host']} and {node2[u'host']}" + ) if always_same_link: l_set = set(links).intersection(self._links) @@ -124,8 +126,9 @@ class NodePath(object): l_set = set(links).difference(self._links) if not l_set: raise RuntimeError( - 'No free link between {0} and {1}, all links already ' - 'used'.format(node1['host'], node2['host'])) + f"No free link between {node1[u'host']} and " + f"{node2[u'host']}, all links already used" + ) if not l_set: link = links.pop() @@ -162,7 +165,7 @@ class NodePath(object): .. note:: Call compute_path before. """ if not self._path: - raise RuntimeError('No path for topology') + raise RuntimeError(u"No path for topology") return self._path[0] def last_interface(self): @@ -174,7 +177,7 @@ class NodePath(object): .. note:: Call compute_path before. """ if not self._path: - raise RuntimeError('No path for topology') + raise RuntimeError(u"No path for topology") return self._path[-1] def first_ingress_interface(self): @@ -186,7 +189,7 @@ class NodePath(object): .. note:: Call compute_path before. """ if not self._path: - raise RuntimeError('No path for topology') + raise RuntimeError(u"No path for topology") return self._path[1] def last_egress_interface(self): @@ -198,5 +201,5 @@ class NodePath(object): .. note:: Call compute_path before. """ if not self._path: - raise RuntimeError('No path for topology') + raise RuntimeError(u"No path for topology") return self._path[-2] diff --git a/resources/libraries/python/OptionString.py b/resources/libraries/python/OptionString.py index 7163d057ec..9a30d37b9a 100644 --- a/resources/libraries/python/OptionString.py +++ b/resources/libraries/python/OptionString.py @@ -14,10 +14,10 @@ """Utility function for handling options without doubled or trailing spaces.""" -class OptionString(object): +class OptionString: """Class serving as a builder for option strings. - Motivation: Both manual contatenation and .join() methods + Motivation: Both manual concatenation and .join() methods are prone to leaving superfluous spaces if some parts of options are optional (missing, empty). @@ -36,7 +36,7 @@ class OptionString(object): the best fitting one, without much logic near the call site. """ - def __init__(self, parts=tuple(), prefix=""): + def __init__(self, parts=tuple(), prefix=u""): """Create instance with listed strings as parts to use. Prefix will be converted to string and stripped. @@ -44,8 +44,8 @@ class OptionString(object): TODO: Support users calling with parts being a string? - :param parts: List of of stringifiable objects to become parts. - :param prefix: Subtring to prepend to every parameter (not value). + :param parts: List of stringifiable objects to become parts. + :param prefix: Substring to prepend to every parameter (not value). 
:type parts: Iterable of object :type prefix: object """ @@ -58,8 +58,7 @@ class OptionString(object): :returns: Executable constructor call as string. :rtype: str """ - return "OptionString(parts={parts!r},prefix={prefix!r})".format( - parts=self.parts, prefix=self.prefix) + return f"OptionString(parts={self.parts!r},prefix={self.prefix!r})" # TODO: Would we ever need a copy() method? # Currently, superstring "master" is mutable but unique, @@ -106,7 +105,7 @@ class OptionString(object): :returns: The converted part without prefix, empty means not added. :rtype: str """ - part = "" if part is None else str(part).strip() + part = u"" if part is None else str(part).strip() if part: prefixed_part = self.prefix + part if prefixed else part self.parts.append(prefixed_part) @@ -120,7 +119,7 @@ class OptionString(object): Parameter is prefixed before adding. :param parameter: Parameter object, usually a word starting with dash. - :type variable: object + :type parameter: object :returns: Self, to enable method chaining. :rtype: OptionString """ @@ -137,7 +136,7 @@ class OptionString(object): :param parameter: Parameter object, usually a word starting with dash. :param condition: Do not add if truth value of this is false. - :type variable: object + :type parameter: object :type condition: object :returns: Self, to enable method chaining. :rtype: OptionString @@ -155,7 +154,7 @@ class OptionString(object): :param parameter: Parameter object, usually a word starting with dash. :param value: Value object. Prefix is never added. - :type variable: object + :type parameter: object :type value: object :returns: Self, to enable method chaining. :rtype: OptionString @@ -178,7 +177,7 @@ class OptionString(object): :param parameter: Parameter object, usually a word starting with dash. :param value: Value object. Prefix is never added. - :type variable: object + :type parameter: object :type value: object :returns: Self, to enable method chaining. :rtype: OptionString @@ -187,7 +186,7 @@ class OptionString(object): # pylint: disable=protected-access if temp._check_and_add(parameter, prefixed=True): if temp._check_and_add(value, prefixed=False): - self.parts.append("=".join(temp.parts)) + self.parts.append(u"=".join(temp.parts)) return self def add_with_value_if(self, parameter, value, condition): @@ -201,7 +200,7 @@ class OptionString(object): :param parameter: Parameter object, usually a word starting with dash. :param value: Value object. Prefix is never added. :param condition: Do not add if truth value of this is false. - :type variable: object + :type parameter: object :type value: object :type condition: object :returns: Self, to enable method chaining. @@ -222,7 +221,7 @@ class OptionString(object): :param parameter: Parameter object, usually a word starting with dash. :param value: Value object. Prefix is never added. :param condition: Do not add if truth value of this is false. - :type variable: object + :type parameter: object :type value: object :type condition: object :returns: Self, to enable method chaining. @@ -232,7 +231,7 @@ class OptionString(object): self.add_equals(parameter, value) return self - def add_with_value_from_dict(self, parameter, key, mapping, default=""): + def add_with_value_from_dict(self, parameter, key, mapping, default=u""): """Add parameter with value from dict under key, or default. If key is missing, default is used as value. 
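The chaining enables compact option building at the call site; with a dash prefix the result reads like a command line (values illustrative):

    opts = OptionString(prefix=u"--")
    opts.add(u"daemon") \
        .add_with_value_if(u"log", u"/tmp/vpp.log", condition=True) \
        .add_equals(u"api-segment-gid", 1000)
    assert str(opts) == u"--daemon --log /tmp/vpp.log --api-segment-gid=1000"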
@@ -254,7 +253,7 @@ class OptionString(object): value = mapping.get(key, default) return self.add_with_value(parameter, value) - def add_equals_from_dict(self, parameter, key, mapping, default=""): + def add_equals_from_dict(self, parameter, key, mapping, default=u""): """Add parameter=value to options where value is from dict. If key is missing, default is used as value. @@ -276,7 +275,7 @@ class OptionString(object): value = mapping.get(key, default) return self.add_equals(parameter, value) - def add_if_from_dict(self, parameter, key, mapping, default="False"): + def add_if_from_dict(self, parameter, key, mapping, default=u"False"): """Add parameter based on if the condition in dict is true. If key is missing, default is used as condition. @@ -300,7 +299,7 @@ class OptionString(object): return self.add_if(parameter, condition) def add_with_value_if_from_dict( - self, parameter, value, key, mapping, default="False"): + self, parameter, value, key, mapping, default=u"False"): """Add parameter and value based on condition in dict. If key is missing, default is used as condition. @@ -326,7 +325,7 @@ class OptionString(object): return self.add_with_value_if(parameter, value, condition) def add_equals_if_from_dict( - self, parameter, value, key, mapping, default="False"): + self, parameter, value, key, mapping, default=u"False"): """Add parameter=value based on condition in dict. If key is missing, default is used as condition. @@ -361,4 +360,4 @@ class OptionString(object): :returns: Space separated string of options. :rtype: str """ - return " ".join(self.parts) + return u" ".join(self.parts) diff --git a/resources/libraries/python/PLRsearch/Integrator.py b/resources/libraries/python/PLRsearch/Integrator.py index 86181eaa56..331bd8475b 100644 --- a/resources/libraries/python/PLRsearch/Integrator.py +++ b/resources/libraries/python/PLRsearch/Integrator.py @@ -23,6 +23,7 @@ import copy import traceback import dill + from numpy import random # TODO: Teach FD.io CSIT to use multiple dirs in PYTHONPATH, @@ -58,7 +59,7 @@ def try_estimate_nd(communication_pipe, scale_coeff=8.0, trace_enabled=False): # so we have to catch them all. traceback_string = traceback.format_exc() communication_pipe.send(traceback_string) - # After sendig, re-raise, so usages other than "one process per call" + # After sending, re-raise, so usages other than "one process per call" # keep behaving correctly. raise @@ -86,7 +87,8 @@ def generate_sample(averages, covariance_matrix, dimension, scale_coeff): covariance_matrix[first][second] *= scale_coeff while 1: sample_point = random.multivariate_normal( - averages, covariance_matrix, 1)[0].tolist() + averages, covariance_matrix, 1 + )[0].tolist() # Multivariate Gauss can fall outside (-1, 1) interval for first in range(dimension): sample_coordinate = sample_point[first] @@ -187,14 +189,15 @@ def estimate_nd(communication_pipe, scale_coeff=8.0, trace_enabled=False): :raises numpy.linalg.LinAlgError: If the focus shape gets singular (due to rounding errors). Try changing scale_coeff. """ - debug_list = list() trace_list = list() # Block until input object appears. 
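generate_sample above uses plain rejection: keep drawing from the scaled Gaussian until every coordinate lies inside the open unit interval. The core, distilled:

    from numpy import random

    def sample_in_unit_box(averages, covariance_matrix):
        """Return one multivariate normal draw with all coordinates in (-1, 1)."""
        while 1:
            point = random.multivariate_normal(
                averages, covariance_matrix, 1
            )[0].tolist()
            if all(-1.0 < coord < 1.0 for coord in point):
                return point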
dimension, dilled_function, param_focus_tracker, max_samples = ( - communication_pipe.recv()) - debug_list.append("Called with param_focus_tracker {tracker!r}" - .format(tracker=param_focus_tracker)) + communication_pipe.recv() + ) + debug_list.append( + f"Called with param_focus_tracker {param_focus_tracker!r}" + ) def trace(name, value): """ @@ -210,7 +213,7 @@ def estimate_nd(communication_pipe, scale_coeff=8.0, trace_enabled=False): :type value: object """ if trace_enabled: - trace_list.append(name + " " + repr(value)) + trace_list.append(f"{name} {value!r}") value_logweight_function = dill.loads(dilled_function) samples = 0 @@ -235,33 +238,39 @@ def estimate_nd(communication_pipe, scale_coeff=8.0, trace_enabled=False): break sample_point = generate_sample( param_focus_tracker.averages, param_focus_tracker.covariance_matrix, - dimension, scale_coeff) - trace("sample_point", sample_point) + dimension, scale_coeff + ) + trace(u"sample_point", sample_point) samples += 1 - trace("samples", samples) + trace(u"samples", samples) value, log_weight = value_logweight_function(trace, *sample_point) - trace("value", value) - trace("log_weight", log_weight) - trace("focus tracker before adding", param_focus_tracker) + trace(u"value", value) + trace(u"log_weight", log_weight) + trace(u"focus tracker before adding", param_focus_tracker) # Update focus related statistics. param_distance = param_focus_tracker.add_without_dominance_get_distance( - sample_point, log_weight) + sample_point, log_weight + ) # The code above looked at weight (not importance). # The code below looks at importance (not weight). log_rarity = param_distance / 2.0 - trace("log_rarity", log_rarity) + trace(u"log_rarity", log_rarity) log_importance = log_weight + log_rarity - trace("log_importance", log_importance) + trace(u"log_importance", log_importance) value_tracker.add(value, log_importance) # Update sampled statistics. param_sampled_tracker.add_get_shift(sample_point, log_importance) - debug_list.append("integrator used " + str(samples) + " samples") - debug_list.append(" ".join([ - "value_avg", str(value_tracker.average), - "param_sampled_avg", repr(param_sampled_tracker.averages), - "param_sampled_cov", repr(param_sampled_tracker.covariance_matrix), - "value_log_variance", str(value_tracker.log_variance), - "value_log_secondary_variance", - str(value_tracker.secondary.log_variance)])) + debug_list.append(f"integrator used {samples!s} samples") + debug_list.append( + u" ".join([ + u"value_avg", str(value_tracker.average), + u"param_sampled_avg", repr(param_sampled_tracker.averages), + u"param_sampled_cov", repr(param_sampled_tracker.covariance_matrix), + u"value_log_variance", str(value_tracker.log_variance), + u"value_log_secondary_variance", + str(value_tracker.secondary.log_variance) + ]) + ) communication_pipe.send( - (value_tracker, param_focus_tracker, debug_list, trace_list, samples)) + (value_tracker, param_focus_tracker, debug_list, trace_list, samples) + ) diff --git a/resources/libraries/python/PLRsearch/PLRsearch.py b/resources/libraries/python/PLRsearch/PLRsearch.py index b7c9344391..e20d293d3c 100644 --- a/resources/libraries/python/PLRsearch/PLRsearch.py +++ b/resources/libraries/python/PLRsearch/PLRsearch.py @@ -17,20 +17,22 @@ import logging import math import multiprocessing import time + from collections import namedtuple import dill + from scipy.special import erfcx, erfc # TODO: Teach FD.io CSIT to use multiple dirs in PYTHONPATH, # then switch to absolute imports within PLRsearch package. 
# Current usage of relative imports is just a short term workaround. from . import Integrator -from .log_plus import log_plus, log_minus from . import stat_trackers +from .log_plus import log_plus, log_minus -class PLRsearch(object): +class PLRsearch: """A class to encapsulate data relevant for the search method. The context is performance testing of packet processing systems. @@ -41,7 +43,7 @@ class PLRsearch(object): Two constants are stored as class fields for speed. - Method othed than search (and than __init__) + Method other than search (and than __init__) are just internal code structure. TODO: Those method names should start with underscore then. @@ -168,20 +170,23 @@ class PLRsearch(object): stop_time = time.time() + self.timeout min_rate = float(min_rate) max_rate = float(max_rate) - logging.info("Started search with min_rate %(min)r, max_rate %(max)r", - {"min": min_rate, "max": max_rate}) + logging.info( + f"Started search with min_rate {min_rate!r}, " + f"max_rate {max_rate!r}" + ) trial_result_list = list() trial_number = self.trial_number_offset focus_trackers = (None, None) transmit_rate = (min_rate + max_rate) / 2.0 lossy_loads = [max_rate] - zeros = 0 # How many cosecutive zero loss results are happening. + zeros = 0 # How many consecutive zero loss results are happening. while 1: trial_number += 1 - logging.info("Trial %(number)r", {"number": trial_number}) + logging.info(f"Trial {trial_number!r}") results = self.measure_and_compute( self.trial_duration_per_trial * trial_number, transmit_rate, - trial_result_list, min_rate, max_rate, focus_trackers) + trial_result_list, min_rate, max_rate, focus_trackers + ) measurement, average, stdev, avg1, avg2, focus_trackers = results zeros += 1 # TODO: Ratio of fill rate to drain rate seems to have @@ -212,9 +217,10 @@ class PLRsearch(object): # in order to get to usable loses at higher loads. 
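# Aside (not part of this change): the pruning below is plain list
# slicing; dropping the first three recorded lossy loads steers the
# search toward the higher loads the preceding comment mentions, e.g.:
#
#     >>> lossy_loads = [2e6, 3e6, 4e6, 5e6]
#     >>> lossy_loads[3:]
#     [5000000.0]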
if len(lossy_loads) > 3: lossy_loads = lossy_loads[3:] - logging.debug("Zeros %(z)r orig %(o)r next %(n)r loads %(s)r", - {"z": zeros, "o": (avg1 + avg2) / 2.0, - "n": next_load, "s": lossy_loads}) + logging.debug( + f"Zeros {zeros!r} orig {(avg1 + avg2) / 2.0!r} " + f"next {next_load!r} loads {lossy_loads!r}" + ) transmit_rate = min(max_rate, max(min_rate, next_load)) @staticmethod @@ -255,21 +261,22 @@ class PLRsearch(object): # TODO: chi is from https://en.wikipedia.org/wiki/Nondimensionalization chi = (load - mrr) / spread chi0 = -mrr / spread - trace("stretch: load", load) - trace("mrr", mrr) - trace("spread", spread) - trace("chi", chi) - trace("chi0", chi0) + trace(u"stretch: load", load) + trace(u"mrr", mrr) + trace(u"spread", spread) + trace(u"chi", chi) + trace(u"chi0", chi0) if chi > 0: log_lps = math.log( - load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread) - trace("big loss direct log_lps", log_lps) + load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread + ) + trace(u"big loss direct log_lps", log_lps) else: two_positive = log_plus(chi, 2 * chi0 - log_2) two_negative = log_plus(chi0, 2 * chi - log_2) if two_positive <= two_negative: log_lps = log_minus(chi, chi0) + log_spread - trace("small loss crude log_lps", log_lps) + trace(u"small loss crude log_lps", log_lps) return log_lps two = log_minus(two_positive, two_negative) three_positive = log_plus(two_positive, 3 * chi - log_3) @@ -277,11 +284,11 @@ class PLRsearch(object): three = log_minus(three_positive, three_negative) if two == three: log_lps = two + log_spread - trace("small loss approx log_lps", log_lps) + trace(u"small loss approx log_lps", log_lps) else: log_lps = math.log(log_plus(0, chi) - log_plus(0, chi0)) log_lps += log_spread - trace("small loss direct log_lps", log_lps) + trace(u"small loss direct log_lps", log_lps) return log_lps @staticmethod @@ -320,26 +327,26 @@ class PLRsearch(object): # TODO: The stretch sign is just to have less minuses. Worth changing? 
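# Aside (not part of this change): mathematically, lfit_stretch above
# returns log(spread * (softplus(chi) - softplus(chi0))), where
# softplus(x) = log(1 + exp(x)) = log_plus(0, x); its branches exist
# only to avoid overflow. A naive, overflow-prone reference for the
# same quantity, with ad-hoc names, useful for checking small inputs:

import math

def stretch_lps_naive(load, mrr, spread):
    """Average loss per second under the stretch model, done directly."""
    chi = (load - mrr) / spread
    chi0 = -mrr / spread
    def softplus(arg):
        """log(1 + exp(arg)); overflows for large arg."""
        return math.log1p(math.exp(arg))
    return spread * (softplus(chi) - softplus(chi0))

# Well above mrr almost every extra packet is lost: lps ~ load - mrr.
print(stretch_lps_naive(load=2e6, mrr=1e6, spread=1e5))  # ~1.0e6

# The erf-based fitting function resumes below.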
chi = (mrr - load) / spread chi0 = mrr / spread - trace("Erf: load", load) - trace("mrr", mrr) - trace("spread", spread) - trace("chi", chi) - trace("chi0", chi0) + trace(u"Erf: load", load) + trace(u"mrr", mrr) + trace(u"spread", spread) + trace(u"chi", chi) + trace(u"chi0", chi0) if chi >= -1.0: - trace("positive, b roughly bigger than m", None) + trace(u"positive, b roughly bigger than m", None) if chi > math.exp(10): first = PLRsearch.log_xerfcx_10 + 2 * (math.log(chi) - 10) - trace("approximated first", first) + trace(u"approximated first", first) else: first = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi)) - trace("exact first", first) + trace(u"exact first", first) first -= chi * chi second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0)) second -= chi0 * chi0 intermediate = log_minus(first, second) - trace("first", first) + trace(u"first", first) else: - trace("negative, b roughly smaller than m", None) + trace(u"negative, b roughly smaller than m", None) exp_first = PLRsearch.xerfcx_limit + chi * erfcx(-chi) exp_first *= math.exp(-chi * chi) exp_first -= 2 * chi @@ -350,11 +357,11 @@ class PLRsearch(object): second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0)) second -= chi0 * chi0 intermediate = math.log(exp_first - math.exp(second)) - trace("exp_first", exp_first) - trace("second", second) - trace("intermediate", intermediate) + trace(u"exp_first", exp_first) + trace(u"second", second) + trace(u"intermediate", intermediate) result = intermediate + math.log(spread) - math.log(erfc(-chi0)) - trace("result", result) + trace(u"result", result) return result @staticmethod @@ -385,7 +392,7 @@ class PLRsearch(object): :type lfit_func: Function from 3 floats to float. :type min_rate: float :type max_rate: float - :type log_lps_target: float + :type loss_ratio_target: float :type mrr: float :type spread: float :returns: Load [pps] which achieves the target with given parameters. @@ -397,17 +404,17 @@ class PLRsearch(object): loss_ratio = -1 while loss_ratio != loss_ratio_target: rate = (rate_hi + rate_lo) / 2.0 - if rate == rate_hi or rate == rate_lo: + if rate in (rate_hi, rate_lo): break loss_rate = math.exp(lfit_func(trace, rate, mrr, spread)) loss_ratio = loss_rate / rate if loss_ratio > loss_ratio_target: - trace("halving down", rate) + trace(u"halving down", rate) rate_hi = rate elif loss_ratio < loss_ratio_target: - trace("halving up", rate) + trace(u"halving up", rate) rate_lo = rate - trace("found", rate) + trace(u"found", rate) return rate @staticmethod @@ -428,36 +435,39 @@ class PLRsearch(object): :param trace: A multiprocessing-friendly logging function (closure). :param lfit_func: Fitting function, typically lfit_spread or lfit_erf. - :param result_list: List of trial measurement results. + :param trial_result_list: List of trial measurement results. :param mrr: The mrr parameter for the fitting function. :param spread: The spread parameter for the fittinmg function. :type trace: function (str, object) -> None :type lfit_func: Function from 3 floats to float. - :type result_list: list of MLRsearch.ReceiveRateMeasurement + :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement :type mrr: float :type spread: float :returns: Logarithm of result weight for given function and parameters. 
:rtype: float """ log_likelihood = 0.0 - trace("log_weight for mrr", mrr) - trace("spread", spread) + trace(u"log_weight for mrr", mrr) + trace(u"spread", spread) for result in trial_result_list: - trace("for tr", result.target_tr) - trace("lc", result.loss_count) - trace("d", result.duration) + trace(u"for tr", result.target_tr) + trace(u"lc", result.loss_count) + trace(u"d", result.duration) log_avg_loss_per_second = lfit_func( - trace, result.target_tr, mrr, spread) + trace, result.target_tr, mrr, spread + ) log_avg_loss_per_trial = ( - log_avg_loss_per_second + math.log(result.duration)) + log_avg_loss_per_second + math.log(result.duration) + ) # Poisson probability computation works nice for logarithms. log_trial_likelihood = ( result.loss_count * log_avg_loss_per_trial - - math.exp(log_avg_loss_per_trial)) + - math.exp(log_avg_loss_per_trial) + ) log_trial_likelihood -= math.lgamma(1 + result.loss_count) log_likelihood += log_trial_likelihood - trace("avg_loss_per_trial", math.exp(log_avg_loss_per_trial)) - trace("log_trial_likelihood", log_trial_likelihood) + trace(u"avg_loss_per_trial", math.exp(log_avg_loss_per_trial)) + trace(u"log_trial_likelihood", log_trial_likelihood) return log_likelihood def measure_and_compute( @@ -512,12 +522,11 @@ class PLRsearch(object): :rtype: _ComputeResult """ logging.debug( - "measure_and_compute started with self %(self)r, trial_duration " - "%(dur)r, transmit_rate %(tr)r, trial_result_list %(trl)r, " - "max_rate %(mr)r, focus_trackers %(track)r, max_samples %(ms)r", - {"self": self, "dur": trial_duration, "tr": transmit_rate, - "trl": trial_result_list, "mr": max_rate, "track": focus_trackers, - "ms": max_samples}) + f"measure_and_compute started with self {self!r}, trial_duration " + f"{trial_duration!r}, transmit_rate {transmit_rate!r}, " + f"trial_result_list {trial_result_list!r}, max_rate {max_rate!r}, " + f"focus_trackers {focus_trackers!r}, max_samples {max_samples!r}" + ) # Preparation phase. dimension = 2 stretch_focus_tracker, erf_focus_tracker = focus_trackers @@ -536,11 +545,10 @@ class PLRsearch(object): start computation, return the boss pipe end. :param fitting_function: lfit_erf or lfit_stretch. - :param bias_avg: Tuple of floats to start searching around. - :param bias_cov: Covariance matrix defining initial focus shape. + :param focus_tracker: Tracker initialized to speed up the numeric + computation. :type fitting_function: Function from 3 floats to float. - :type bias_avg: 2-tuple of floats - :type bias_cov: 2-tuple of 2-tuples of floats + :type focus_tracker: None or stat_trackers.VectorStatTracker :returns: Boss end of communication pipe. 
:rtype: multiprocessing.Connection """ @@ -579,27 +587,31 @@ class PLRsearch(object): mrr = max_rate * (1.0 / (x_mrr + 1.0) - 0.5) + 1.0 spread = math.exp((x_spread + 1.0) / 2.0 * math.log(mrr)) logweight = self.log_weight( - trace, fitting_function, trial_result_list, mrr, spread) - value = math.log(self.find_critical_rate( - trace, fitting_function, min_rate, max_rate, - self.packet_loss_ratio_target, mrr, spread)) + trace, fitting_function, trial_result_list, mrr, spread + ) + value = math.log( + self.find_critical_rate( + trace, fitting_function, min_rate, max_rate, + self.packet_loss_ratio_target, mrr, spread + ) + ) return value, logweight dilled_function = dill.dumps(value_logweight_func) boss_pipe_end, worker_pipe_end = multiprocessing.Pipe() boss_pipe_end.send( - (dimension, dilled_function, focus_tracker, max_samples)) + (dimension, dilled_function, focus_tracker, max_samples) + ) worker = multiprocessing.Process( - target=Integrator.try_estimate_nd, args=( - worker_pipe_end, 10.0, self.trace_enabled)) + target=Integrator.try_estimate_nd, + args=(worker_pipe_end, 10.0, self.trace_enabled) + ) worker.daemon = True worker.start() return boss_pipe_end - erf_pipe = start_computing( - self.lfit_erf, erf_focus_tracker) - stretch_pipe = start_computing( - self.lfit_stretch, stretch_focus_tracker) + erf_pipe = start_computing(self.lfit_erf, erf_focus_tracker) + stretch_pipe = start_computing(self.lfit_stretch, stretch_focus_tracker) # Measurement phase. measurement = self.measurer.measure(trial_duration, transmit_rate) @@ -623,38 +635,38 @@ class PLRsearch(object): """ pipe.send(None) if not pipe.poll(10.0): - raise RuntimeError( - "Worker {name} did not finish!".format(name=name)) + raise RuntimeError(f"Worker {name} did not finish!") result_or_traceback = pipe.recv() try: value_tracker, focus_tracker, debug_list, trace_list, sampls = ( - result_or_traceback) + result_or_traceback + ) except ValueError: raise RuntimeError( - "Worker {name} failed with the following traceback:\n{tr}" - .format(name=name, tr=result_or_traceback)) - logging.info("Logs from worker %(name)r:", {"name": name}) + f"Worker {name} failed with the following traceback:\n" + f"{result_or_traceback}" + ) + logging.info(f"Logs from worker {name!r}:") for message in debug_list: logging.info(message) for message in trace_list: logging.debug(message) - logging.debug("trackers: value %(val)r focus %(foc)r", { - "val": value_tracker, "foc": focus_tracker}) + logging.debug( + f"trackers: value {value_tracker!r} focus {focus_tracker!r}" + ) return _PartialResult(value_tracker, focus_tracker, sampls) - stretch_result = stop_computing("stretch", stretch_pipe) - erf_result = stop_computing("erf", erf_pipe) + stretch_result = stop_computing(u"stretch", stretch_pipe) + erf_result = stop_computing(u"erf", erf_pipe) result = PLRsearch._get_result(measurement, stretch_result, erf_result) logging.info( - "measure_and_compute finished with trial result %(res)r " - "avg %(avg)r stdev %(stdev)r stretch %(a1)r erf %(a2)r " - "new trackers %(nt)r old trackers %(ot)r stretch samples %(ss)r " - "erf samples %(es)r", - {"res": result.measurement, - "avg": result.avg, "stdev": result.stdev, - "a1": result.stretch_exp_avg, "a2": result.erf_exp_avg, - "nt": result.trackers, "ot": old_trackers, - "ss": stretch_result.samples, "es": erf_result.samples}) + f"measure_and_compute finished with trial result " + f"{result.measurement!r} avg {result.avg!r} stdev {result.stdev!r} " + f"stretch {result.stretch_exp_avg!r} erf {result.erf_exp_avg!r} " + 
f"new trackers {result.trackers!r} old trackers {old_trackers!r} " + f"stretch samples {stretch_result.samples!r} erf samples " + f"{erf_result.samples!r}" + ) return result @staticmethod @@ -692,7 +704,8 @@ class PLRsearch(object): # Named tuples, for multiple local variables to be passed as return value. _PartialResult = namedtuple( - "_PartialResult", "value_tracker focus_tracker samples") + u"_PartialResult", u"value_tracker focus_tracker samples" +) """Two stat trackers and sample counter. :param value_tracker: Tracker for the value (critical load) being integrated. @@ -704,8 +717,9 @@ _PartialResult = namedtuple( """ _ComputeResult = namedtuple( - "_ComputeResult", - "measurement avg stdev stretch_exp_avg erf_exp_avg trackers") + u"_ComputeResult", + u"measurement avg stdev stretch_exp_avg erf_exp_avg trackers" +) """Measurement, 4 computation result values, pair of trackers. :param measurement: The trial measurement result obtained during computation. diff --git a/resources/libraries/python/PLRsearch/log_plus.py b/resources/libraries/python/PLRsearch/log_plus.py index 1c802a5599..62378f6f2c 100644 --- a/resources/libraries/python/PLRsearch/log_plus.py +++ b/resources/libraries/python/PLRsearch/log_plus.py @@ -24,7 +24,7 @@ functions of this module use None as -inf. TODO: Figure out a more performant way of handling -inf. -The functions handle the common task of adding or substracting +The functions handle the common task of adding or subtracting two numbers where both operands and the result is given in logarithm form. There are conditionals to make sure overflow does not happen (if possible) during the computation.""" @@ -33,7 +33,7 @@ import math def log_plus(first, second): - """Return logarithm of the sum of two exponentials. + """Return logarithm of the sum of two exponents. Basically math.log(math.exp(first) + math.exp(second)) which avoids overflow and uses None as math.log(0.0). @@ -47,19 +47,19 @@ def log_plus(first, second): :returns: Logarithm of the sum (or None if zero). :rtype: float """ - if first is None: return second if second is None: return first if second > first: - return second + math.log(1.0 + math.exp(first - second)) + retval = second + math.log(1.0 + math.exp(first - second)) else: - return first + math.log(1.0 + math.exp(second - first)) + retval = first + math.log(1.0 + math.exp(second - first)) + return retval def log_minus(first, second): - """Return logarithm of the difference of two exponentials. + """Return logarithm of the difference of two exponents. Basically math.log(math.exp(first) - math.exp(second)) which avoids overflow and uses None as math.log(0.0). @@ -75,18 +75,18 @@ def log_minus(first, second): :rtype: float :raises RuntimeError: If the difference would be non-positive. 
""" - if first is None: - raise RuntimeError("log_minus: does not suport None first") + raise RuntimeError(u"log_minus: does not support None first") if second is None: return first if second >= first: - raise RuntimeError("log_minus: first has to be bigger than second") + raise RuntimeError(u"log_minus: first has to be bigger than second") factor = -math.expm1(second - first) if factor <= 0.0: - raise RuntimeError("log_minus: non-positive number to log") + msg = u"log_minus: non-positive number to log" else: return first + math.log(factor) + raise RuntimeError(msg) def safe_exp(log_value): diff --git a/resources/libraries/python/PLRsearch/stat_trackers.py b/resources/libraries/python/PLRsearch/stat_trackers.py index 58ad98fd2e..2a7a05cae6 100644 --- a/resources/libraries/python/PLRsearch/stat_trackers.py +++ b/resources/libraries/python/PLRsearch/stat_trackers.py @@ -32,7 +32,7 @@ import numpy from .log_plus import log_plus, safe_exp -class ScalarStatTracker(object): +class ScalarStatTracker: """Class for tracking one-dimensional samples. Variance of one-dimensional data cannot be negative, @@ -61,13 +61,11 @@ class ScalarStatTracker(object): def __repr__(self): """Return string, which interpreted constructs state of self. - :returns: Expression contructing an equivalent instance. + :returns: Expression constructing an equivalent instance. :rtype: str """ - return ("ScalarStatTracker(log_sum_weight={lsw!r},average={a!r}," - "log_variance={lv!r})".format( - lsw=self.log_sum_weight, a=self.average, - lv=self.log_variance)) + return f"ScalarStatTracker(log_sum_weight={self.log_sum_weight!r}," \ + f"average={self.average!r},log_variance={self.log_variance!r})" def copy(self): """Return new ScalarStatTracker instance with the same state as self. @@ -79,7 +77,8 @@ class ScalarStatTracker(object): :rtype: ScalarStatTracker """ return ScalarStatTracker( - self.log_sum_weight, self.average, self.log_variance) + self.log_sum_weight, self.average, self.log_variance + ) def add(self, scalar_value, log_weight=0.0): """Return updated stats corresponding to addition of another sample. @@ -134,7 +133,6 @@ class ScalarDualStatTracker(ScalarStatTracker): One typical use is for Monte Carlo integrator to decide whether the partial sums so far are reliable enough. """ - def __init__( self, log_sum_weight=None, average=0.0, log_variance=None, log_sum_secondary_weight=None, secondary_average=0.0, @@ -168,7 +166,8 @@ class ScalarDualStatTracker(ScalarStatTracker): # so in case of diamond inheritance mismatch would be probable. 
ScalarStatTracker.__init__(self, log_sum_weight, average, log_variance) self.secondary = ScalarStatTracker( - log_sum_secondary_weight, secondary_average, log_secondary_variance) + log_sum_secondary_weight, secondary_average, log_secondary_variance + ) self.max_log_weight = max_log_weight def __repr__(self): @@ -178,14 +177,12 @@ class ScalarDualStatTracker(ScalarStatTracker): :rtype: str """ sec = self.secondary - return ( - "ScalarDualStatTracker(log_sum_weight={lsw!r},average={a!r}," - "log_variance={lv!r},log_sum_secondary_weight={lssw!r}," - "secondary_average={sa!r},log_secondary_variance={lsv!r}," - "max_log_weight={mlw!r})".format( - lsw=self.log_sum_weight, a=self.average, lv=self.log_variance, - lssw=sec.log_sum_weight, sa=sec.average, lsv=sec.log_variance, - mlw=self.max_log_weight)) + return f"ScalarDualStatTracker(log_sum_weight={self.log_sum_weight!r},"\ + f"average={self.average!r},log_variance={self.log_variance!r}," \ + f"log_sum_secondary_weight={sec.log_sum_weight!r}," \ + f"secondary_average={sec.average!r}," \ + f"log_secondary_variance={sec.log_variance!r}," \ + f"max_log_weight={self.max_log_weight!r})" def add(self, scalar_value, log_weight=0.0): """Return updated both stats after addition of another sample. @@ -209,7 +206,6 @@ class ScalarDualStatTracker(ScalarStatTracker): primary.add(scalar_value, log_weight) return self - def get_pessimistic_variance(self): """Return estimate of variance reflecting weight effects. @@ -231,7 +227,7 @@ class ScalarDualStatTracker(ScalarStatTracker): return var_combined -class VectorStatTracker(object): +class VectorStatTracker: """Class for tracking multi-dimensional samples. Contrary to one-dimensional data, multi-dimensional covariance matrix @@ -248,11 +244,11 @@ class VectorStatTracker(object): def __init__( self, dimension=2, log_sum_weight=None, averages=None, covariance_matrix=None): - """Initialize new tracker instance, two-dimenstional empty by default. + """Initialize new tracker instance, two-dimensional empty by default. If any of latter two arguments is None, it means the tracker state is invalid. Use reset method - to create empty tracker of constructed dimentionality. + to create empty tracker of constructed dimensionality. :param dimension: Number of scalar components of samples. :param log_sum_weight: Natural logarithm of sum of weights @@ -273,14 +269,13 @@ class VectorStatTracker(object): def __repr__(self): """Return string, which interpreted constructs state of self. - :returns: Expression contructing an equivalent instance. + :returns: Expression constructing an equivalent instance. :rtype: str """ - return ( - "VectorStatTracker(dimension={d!r},log_sum_weight={lsw!r}," - "averages={a!r},covariance_matrix={cm!r})".format( - d=self.dimension, lsw=self.log_sum_weight, a=self.averages, - cm=self.covariance_matrix)) + return f"VectorStatTracker(dimension={self.dimension!r}," \ + f"log_sum_weight={self.log_sum_weight!r}," \ + f"averages={self.averages!r}," \ + f"covariance_matrix={self.covariance_matrix!r})" def copy(self): """Return new instance with the same state as self. @@ -293,7 +288,8 @@ class VectorStatTracker(object): """ return VectorStatTracker( self.dimension, self.log_sum_weight, self.averages[:], - copy.deepcopy(self.covariance_matrix)) + copy.deepcopy(self.covariance_matrix) + ) def reset(self): """Return state set to empty data of proper dimensionality. 
@@ -303,8 +299,9 @@ class VectorStatTracker(object): """ self.averages = [0.0 for _ in range(self.dimension)] # TODO: Examine whether we can gain speed by tracking triangle only. - self.covariance_matrix = [[0.0 for _ in range(self.dimension)] - for _ in range(self.dimension)] + self.covariance_matrix = [ + [0.0 for _ in range(self.dimension)] for _ in range(self.dimension) + ] # TODO: In Python3, list comprehensions are generators, # so they are not indexable. Put list() when converting. return self @@ -338,10 +335,12 @@ class VectorStatTracker(object): old_log_sum_weight = self.log_sum_weight old_averages = self.averages if not old_averages: - shift = [0.0 for index in range(dimension)] + shift = [0.0 for _ in range(dimension)] else: - shift = [vector_value[index] - old_averages[index] - for index in range(dimension)] + shift = [ + vector_value[index] - old_averages[index] + for index in range(dimension) + ] if old_log_sum_weight is None: # First sample. self.log_sum_weight = log_weight @@ -352,8 +351,10 @@ class VectorStatTracker(object): new_log_sum_weight = log_plus(old_log_sum_weight, log_weight) data_ratio = math.exp(old_log_sum_weight - new_log_sum_weight) sample_ratio = math.exp(log_weight - new_log_sum_weight) - new_averages = [old_averages[index] + shift[index] * sample_ratio - for index in range(dimension)] + new_averages = [ + old_averages[index] + shift[index] * sample_ratio + for index in range(dimension) + ] # It is easier to update covariance matrix in-place. for second in range(dimension): for first in range(dimension): @@ -375,7 +376,7 @@ class VectorStatTracker(object): If the weight of the incoming sample is far bigger than the weight of all the previous data together, - convariance matrix would suffer from underflows. + covariance matrix would suffer from underflow. To avoid that, this method manipulates both weights before calling add(). diff --git a/resources/libraries/python/PacketVerifier.py b/resources/libraries/python/PacketVerifier.py index 978babf7ce..397ce76f49 100644 --- a/resources/libraries/python/PacketVerifier.py +++ b/resources/libraries/python/PacketVerifier.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at: @@ -67,28 +67,33 @@ import os import select -from scapy.config import conf from scapy.all import ETH_P_IP, ETH_P_IPV6, ETH_P_ALL, ETH_P_ARP +from scapy.config import conf from scapy.layers.inet6 import IPv6 from scapy.layers.l2 import Ether, ARP +from scapy.packet import Raw # Enable libpcap's L2listen conf.use_pcap = True import scapy.arch.pcapdnet # pylint: disable=C0413, unused-import -__all__ = ['RxQueue', 'TxQueue', 'Interface', 'create_gratuitous_arp_request', - 'auto_pad', 'checksum_equal'] +__all__ = [ + u"RxQueue", u"TxQueue", u"Interface", u"create_gratuitous_arp_request", + u"auto_pad", u"checksum_equal" +] # TODO: http://stackoverflow.com/questions/320232/ # ensuring-subprocesses-are-dead-on-exiting-python-program -class PacketVerifier(object): +class PacketVerifier: """Base class for TX and RX queue objects for packet verifier.""" def __init__(self, interface_name): - os.system('sudo echo 1 > /proc/sys/net/ipv6/conf/{0}/disable_ipv6' - .format(interface_name)) - os.system('sudo ip link set {0} up promisc on'.format(interface_name)) + os.system( + f"sudo echo 1 > /proc/sys/net/ipv6/conf/{interface_name}/" + f"disable_ipv6" + ) + os.system(f"sudo ip link set {interface_name} up promisc on") self._ifname = interface_name @@ -111,8 +116,7 @@ def extract_one_packet(buf): try: ether_type = Ether(buf[0:14]).type except AttributeError: - raise RuntimeError( - 'No EtherType in packet {0}'.format(buf.__repr__())) + raise RuntimeError(f"No EtherType in packet {buf!r}") if ether_type == ETH_P_IP: # 14 is Ethernet fame header size. @@ -124,17 +128,16 @@ def extract_one_packet(buf): return None elif ether_type == ETH_P_IPV6: if not Ether(buf[0:14+6]).haslayer(IPv6): - raise RuntimeError( - 'Invalid IPv6 packet {0}'.format(buf.__repr__())) + raise RuntimeError(f"Invalid IPv6 packet {buf!r}") # ... to add to the above, 40 bytes is the length of IPV6 header. 
# The ipv6.len only contains length of the payload and not the header - pkt_len = Ether(buf)['IPv6'].plen + 14 + 40 + pkt_len = Ether(buf)[u"IPv6"].plen + 14 + 40 if len(buf) < 60: return None elif ether_type == ETH_P_ARP: pkt = Ether(buf[:20]) if not pkt.haslayer(ARP): - raise RuntimeError('Incomplete ARP packet') + raise RuntimeError(u"Incomplete ARP packet") # len(eth) + arp(2 hw addr type + 2 proto addr type # + 1b len + 1b len + 2b operation) @@ -146,10 +149,10 @@ def extract_one_packet(buf): elif ether_type == 32821: # RARP (Reverse ARP) pkt = Ether(buf[:20]) pkt.type = ETH_P_ARP # Change to ARP so it works with scapy - pkt = Ether(str(pkt)) + pkt = Ether(pkt) if not pkt.haslayer(ARP): pkt.show() - raise RuntimeError('Incomplete RARP packet') + raise RuntimeError(u"Incomplete RARP packet") # len(eth) + arp(2 hw addr type + 2 proto addr type # + 1b len + 1b len + 2b operation) @@ -159,7 +162,7 @@ def extract_one_packet(buf): del pkt else: - raise RuntimeError('Unknown protocol {0}'.format(ether_type)) + raise RuntimeError(f"Unknown protocol {ether_type}") if pkt_len < 60: pkt_len = 60 @@ -222,25 +225,23 @@ class RxQueue(PacketVerifier): if ignore is not None: for ig_pkt in ignore: # Auto pad all packets in ignore list - ignore_list.append(auto_pad(ig_pkt)) + ignore_list.append(str(auto_pad(ig_pkt))) while True: - (rlist, _, _) = select.select([self._sock], [], [], timeout) + rlist, _, _ = select.select([self._sock], [], [], timeout) if self._sock not in rlist: return None pkt = self._sock.recv(0x7fff) - pkt_pad = auto_pad(pkt) - print 'Received packet on {0} of len {1}'\ - .format(self._ifname, len(pkt)) + pkt_pad = str(auto_pad(pkt)) + print(f"Received packet on {self._ifname} of len {len(pkt)}") if verbose: pkt.show2() # pylint: disable=no-member - print + print() if pkt_pad in ignore_list: ignore_list.remove(pkt_pad) - print 'Received packet ignored.' + print(u"Received packet ignored.") continue - else: - return pkt + return pkt class TxQueue(PacketVerifier): @@ -259,21 +260,20 @@ class TxQueue(PacketVerifier): """Send packet out of the bound interface. :param pkt: Packet to send. - :param verbose: Used to supress detailed logging of sent packets. + :param verbose: Used to suppress detailed logging of sent packets. :type pkt: string or scapy Packet derivative. :type verbose: bool """ - print 'Sending packet out of {0} of len {1}'.format(self._ifname, - len(pkt)) + pkt = auto_pad(pkt) + print(f"Sending packet out of {self._ifname} of len {len(pkt)}") if verbose: - Ether(str(pkt)).show2() - print + pkt.show2() + print() - pkt = auto_pad(str(pkt)) self._sock.send(pkt) -class Interface(object): +class Interface: """Class for network interfaces. 
Contains methods for sending and receiving packets.""" def __init__(self, if_name): @@ -305,16 +305,17 @@ class Interface(object): def create_gratuitous_arp_request(src_mac, src_ip): """Creates scapy representation of gratuitous ARP request.""" - return (Ether(src=src_mac, dst='ff:ff:ff:ff:ff:ff') / - ARP(psrc=src_ip, hwsrc=src_mac, pdst=src_ip)) + return (Ether(src=src_mac, dst=u"ff:ff:ff:ff:ff:ff") / + ARP(psrc=src_ip, hwsrc=src_mac, pdst=src_ip) + ) def auto_pad(packet): """Pads zeroes at the end of the packet if the total len < 60 bytes.""" - padded = str(packet) - if len(padded) < 60: - padded += ('\0' * (60 - len(padded))) - return padded + # padded = str(packet) + if len(packet) < 60: + packet[Raw].load += (b"\0" * (60 - len(packet))) + return packet def checksum_equal(chksum1, chksum2): diff --git a/resources/libraries/python/PapiExecutor.py b/resources/libraries/python/PapiExecutor.py index 0e0a9332c4..7f226f4ff3 100644 --- a/resources/libraries/python/PapiExecutor.py +++ b/resources/libraries/python/PapiExecutor.py @@ -14,7 +14,6 @@ """Python API executor library. """ -import binascii import copy import glob import json @@ -31,7 +30,6 @@ from robot.api import logger from resources.libraries.python.Constants import Constants from resources.libraries.python.LocalExecution import run from resources.libraries.python.FilteredLogger import FilteredLogger -from resources.libraries.python.PythonThree import raise_from from resources.libraries.python.PapiHistory import PapiHistory from resources.libraries.python.ssh import ( SSH, SSHTimeout, exec_cmd_no_error, scp_node) @@ -39,7 +37,7 @@ from resources.libraries.python.topology import Topology, SocketType from resources.libraries.python.VppApiCrc import VppApiCrcChecker -__all__ = ["PapiExecutor", "PapiSocketExecutor"] +__all__ = [u"PapiExecutor", u"PapiSocketExecutor"] def dictize(obj): @@ -56,14 +54,14 @@ def dictize(obj): from tuple, including its read-only __getitem__ attribute, so we cannot monkey-patch it. - TODO: Create a proxy for namedtuple to allow that. + TODO: Create a proxy for named tuple to allow that. :param obj: Arbitrary object to dictize. :type obj: object :returns: Dictized object. :rtype: same as obj type or collections.OrderedDict """ - if not hasattr(obj, "_asdict"): + if not hasattr(obj, u"_asdict"): return obj ret = obj._asdict() old_get = ret.__getitem__ @@ -72,7 +70,7 @@ def dictize(obj): return ret -class PapiSocketExecutor(object): +class PapiSocketExecutor: """Methods for executing VPP Python API commands on forwarded socket. The current implementation connects for the duration of resource manager. @@ -171,39 +169,40 @@ class PapiSocketExecutor(object): return cls = self.__class__ # Shorthand for setting class fields. package_path = None - tmp_dir = tempfile.mkdtemp(dir="/tmp") + tmp_dir = tempfile.mkdtemp(dir=u"/tmp") try: # Pack, copy and unpack Python part of VPP installation from _node. # TODO: Use rsync or recursive version of ssh.scp_node instead? node = self._node - exec_cmd_no_error(node, ["rm", "-rf", "/tmp/papi.txz"]) + exec_cmd_no_error(node, [u"rm", u"-rf", u"/tmp/papi.txz"]) # Papi python version depends on OS (and time). # Python 2.7 or 3.4, site-packages or dist-packages. - installed_papi_glob = "/usr/lib/python*/*-packages/vpp_papi" + installed_papi_glob = u"/usr/lib/python3*/*-packages/vpp_papi" # We need to wrap this command in bash, in order to expand globs, # and as ssh does join, the inner command has to be quoted. 
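# For instance (an illustrative sketch, not part of this change): ssh
# joins its argument list with spaces, so the glob must stay quoted all
# the way to the remote shell that is supposed to expand it:
#
#     inner_cmd = u" ".join([u"ls", u"/usr/lib/python3*/*-packages"])
#     remote_cmd = [u"bash", u"-c", u"'" + inner_cmd + u"'"]
#
# The remote side then runs: bash -c 'ls /usr/lib/python3*/*-packages'
# and the remote bash, not the local shell, expands the glob.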
- inner_cmd = " ".join([ - "tar", "cJf", "/tmp/papi.txz", "--exclude=*.pyc", - installed_papi_glob, "/usr/share/vpp/api"]) - exec_cmd_no_error(node, ["bash", "-c", "'" + inner_cmd + "'"]) - scp_node(node, tmp_dir + "/papi.txz", "/tmp/papi.txz", get=True) - run(["tar", "xf", tmp_dir + "/papi.txz", "-C", tmp_dir]) - api_json_directory = tmp_dir + "/usr/share/vpp/api" + inner_cmd = u" ".join([ + u"tar", u"cJf", u"/tmp/papi.txz", u"--exclude=*.pyc", + installed_papi_glob, u"/usr/share/vpp/api" + ]) + exec_cmd_no_error(node, [u"bash", u"-c", u"'" + inner_cmd + u"'"]) + scp_node(node, tmp_dir + u"/papi.txz", u"/tmp/papi.txz", get=True) + run([u"tar", u"xf", tmp_dir + u"/papi.txz", u"-C", tmp_dir]) + api_json_directory = tmp_dir + u"/usr/share/vpp/api" # Perform initial checks before .api.json files are gone, # by creating the checker instance. cls.crc_checker = VppApiCrcChecker(api_json_directory) # When present locally, we finally can find the installation path. package_path = glob.glob(tmp_dir + installed_papi_glob)[0] # Package path has to be one level above the vpp_papi directory. - package_path = package_path.rsplit('/', 1)[0] + package_path = package_path.rsplit(u"/", 1)[0] sys.path.append(package_path) - # pylint: disable=import-error + # pylint: disable=import-outside-toplevel, import-error from vpp_papi.vpp_papi import VPPApiClient as vpp_class vpp_class.apidir = api_json_directory # We need to create instance before removing from sys.path. cls.vpp_instance = vpp_class( - use_socket=True, server_address="TBD", async_thread=False, - read_timeout=14, logger=FilteredLogger(logger, "INFO")) + use_socket=True, server_address=u"TBD", async_thread=False, + read_timeout=14, logger=FilteredLogger(logger, u"INFO")) # Cannot use loglevel parameter, robot.api.logger lacks support. # TODO: Stop overriding read_timeout when VPP-1722 is fixed. finally: @@ -224,53 +223,58 @@ class PapiSocketExecutor(object): # Parsing takes longer than connecting, prepare instance before tunnel. vpp_instance = self.vpp_instance node = self._node - self._temp_dir = tempfile.mkdtemp(dir="/tmp") - self._local_vpp_socket = self._temp_dir + "/vpp-api.sock" - self._ssh_control_socket = self._temp_dir + "/ssh.sock" + self._temp_dir = tempfile.mkdtemp(dir=u"/tmp") + self._local_vpp_socket = self._temp_dir + u"/vpp-api.sock" + self._ssh_control_socket = self._temp_dir + u"/ssh.sock" ssh_socket = self._ssh_control_socket # Cleanup possibilities. - ret_code, _ = run(["ls", ssh_socket], check=False) + ret_code, _ = run([u"ls", ssh_socket], check=False) if ret_code != 2: # This branch never seems to be hit in CI, # but may be useful when testing manually. - run(["ssh", "-S", ssh_socket, "-O", "exit", "0.0.0.0"], - check=False, log=True) + run( + [u"ssh", u"-S", ssh_socket, u"-O", u"exit", u"0.0.0.0"], + check=False, log=True + ) # TODO: Is any sleep necessary? How to prove if not? - run(["sleep", "0.1"]) - run(["rm", "-vrf", ssh_socket]) + run([u"sleep", u"0.1"]) + run([u"rm", u"-vrf", ssh_socket]) # Even if ssh can perhaps reuse this file, # we need to remove it for readiness detection to work correctly. - run(["rm", "-rvf", self._local_vpp_socket]) + run([u"rm", u"-rvf", self._local_vpp_socket]) # On VIRL, the ssh user is not added to "vpp" group, # so we need to change remote socket file access rights. exec_cmd_no_error( - node, "chmod o+rwx " + self._remote_vpp_socket, sudo=True) + node, u"chmod o+rwx " + self._remote_vpp_socket, sudo=True + ) # We use sleep command. 
The ssh command will exit in 10 second, # unless a local socket connection is established, # in which case the ssh command will exit only when # the ssh connection is closed again (via control socket). # The log level is to supress "Warning: Permanently added" messages. ssh_cmd = [ - "ssh", "-S", ssh_socket, "-M", - "-o", "LogLevel=ERROR", "-o", "UserKnownHostsFile=/dev/null", - "-o", "StrictHostKeyChecking=no", "-o", "ExitOnForwardFailure=yes", - "-L", self._local_vpp_socket + ':' + self._remote_vpp_socket, - "-p", str(node['port']), node['username'] + "@" + node['host'], - "sleep", "10"] - priv_key = node.get("priv_key") + u"ssh", u"-S", ssh_socket, u"-M", + u"-o", u"LogLevel=ERROR", u"-o", u"UserKnownHostsFile=/dev/null", + u"-o", u"StrictHostKeyChecking=no", + u"-o", u"ExitOnForwardFailure=yes", + u"-L", self._local_vpp_socket + u":" + self._remote_vpp_socket, + u"-p", str(node[u"port"]), node[u"username"] + u"@" + node[u"host"], + u"sleep", u"10" + ] + priv_key = node.get(u"priv_key") if priv_key: # This is tricky. We need a file to pass the value to ssh command. - # And we need ssh command, because paramiko does not suport sockets + # And we need ssh command, because paramiko does not support sockets # (neither ssh_socket, nor _remote_vpp_socket). key_file = tempfile.NamedTemporaryFile() key_file.write(priv_key) # Make sure the content is written, but do not close yet. key_file.flush() - ssh_cmd[1:1] = ["-i", key_file.name] - password = node.get("password") + ssh_cmd[1:1] = [u"-i", key_file.name] + password = node.get(u"password") if password: # Prepend sshpass command to set password. - ssh_cmd[:0] = ["sshpass", "-p", password] + ssh_cmd[:0] = [u"sshpass", u"-p", password] time_stop = time.time() + 10.0 # subprocess.Popen seems to be the best way to run commands # on background. Other ways (shell=True with "&" and ssh with -f) @@ -280,12 +284,14 @@ class PapiSocketExecutor(object): # Check socket presence on local side. while time.time() < time_stop: # It can take a moment for ssh to create the socket file. - ret_code, _ = run(["ls", "-l", self._local_vpp_socket], check=False) + ret_code, _ = run( + [u"ls", u"-l", self._local_vpp_socket], check=False + ) if not ret_code: break time.sleep(0.1) else: - raise RuntimeError("Local side socket has not appeared.") + raise RuntimeError(u"Local side socket has not appeared.") if priv_key: # Socket up means the key has been read. Delete file by closing it. key_file.close() @@ -293,16 +299,16 @@ class PapiSocketExecutor(object): vpp_instance.transport.server_address = self._local_vpp_socket # It seems we can get read error even if every preceding check passed. # Single retry seems to help. - for _ in xrange(2): + for _ in range(2): try: - vpp_instance.connect_sync("csit_socket") + vpp_instance.connect_sync(u"csit_socket") except (IOError, struct.error) as err: - logger.warn("Got initial connect error {err!r}".format(err=err)) + logger.warn(f"Got initial connect error {err!r}") vpp_instance.disconnect() else: break else: - raise RuntimeError("Failed to connect to VPP over a socket.") + raise RuntimeError(u"Failed to connect to VPP over a socket.") return self def __exit__(self, exc_type, exc_val, exc_tb): @@ -312,8 +318,9 @@ class PapiSocketExecutor(object): Arguments related to possible exception are entirely ignored. 
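Example (an illustrative sketch, not part of this change) of the
enter/exit pair in typical use, where node is a topology node dict
as elsewhere in this module::

    with PapiSocketExecutor(node) as papi_exec:
        reply = papi_exec.add(u"show_version").get_reply()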
""" self.vpp_instance.disconnect() - run(["ssh", "-S", self._ssh_control_socket, "-O", "exit", "0.0.0.0"], - check=False) + run([ + u"ssh", u"-S", self._ssh_control_socket, u"-O", u"exit", u"0.0.0.0" + ], check=False) shutil.rmtree(self._temp_dir) def add(self, csit_papi_command, history=True, **kwargs): @@ -346,10 +353,15 @@ class PapiSocketExecutor(object): self.crc_checker.report_initial_conflicts() if history: PapiHistory.add_to_papi_history( - self._node, csit_papi_command, **kwargs) + self._node, csit_papi_command, **kwargs + ) self.crc_checker.check_api_name(csit_papi_command) self._api_command_list.append( - dict(api_name=csit_papi_command, api_args=copy.deepcopy(kwargs))) + dict( + api_name=csit_papi_command, + api_args=copy.deepcopy(kwargs) + ) + ) return self def get_replies(self, err_msg="Failed to get replies."): @@ -366,7 +378,7 @@ class PapiSocketExecutor(object): """ return self._execute(err_msg=err_msg) - def get_reply(self, err_msg="Failed to get reply."): + def get_reply(self, err_msg=u"Failed to get reply."): """Get reply from VPP Python API. The reply is parsed into dict-like object, @@ -382,11 +394,10 @@ class PapiSocketExecutor(object): """ replies = self.get_replies(err_msg=err_msg) if len(replies) != 1: - raise RuntimeError("Expected single reply, got {replies!r}".format( - replies=replies)) + raise RuntimeError(f"Expected single reply, got {replies!r}") return replies[0] - def get_sw_if_index(self, err_msg="Failed to get reply."): + def get_sw_if_index(self, err_msg=u"Failed to get reply."): """Get sw_if_index from reply from VPP Python API. Frequently, the caller is only interested in sw_if_index field @@ -401,8 +412,8 @@ class PapiSocketExecutor(object): :raises AssertionError: If retval is nonzero, parsing or ssh error. """ reply = self.get_reply(err_msg=err_msg) - logger.trace("Getting index from {reply!r}".format(reply=reply)) - return reply["sw_if_index"] + logger.trace(f"Getting index from {reply!r}") + return reply[u"sw_if_index"] def get_details(self, err_msg="Failed to get dump details."): """Get dump details from VPP Python API. @@ -422,8 +433,8 @@ class PapiSocketExecutor(object): return self._execute(err_msg) @staticmethod - def run_cli_cmd(node, cli_cmd, log=True, - remote_vpp_socket=Constants.SOCKSVR_PATH): + def run_cli_cmd( + node, cli_cmd, log=True, remote_vpp_socket=Constants.SOCKSVR_PATH): """Run a CLI command as cli_inband, return the "reply" field of reply. Optionally, log the field value. @@ -439,17 +450,20 @@ class PapiSocketExecutor(object): :returns: CLI output. :rtype: str """ - cmd = 'cli_inband' - args = dict(cmd=cli_cmd) - err_msg = "Failed to run 'cli_inband {cmd}' PAPI command on host " \ - "{host}".format(host=node['host'], cmd=cli_cmd) + cmd = u"cli_inband" + args = dict( + cmd=cli_cmd + ) + err_msg = f"Failed to run 'cli_inband {cli_cmd}' PAPI command " \ + f"on host {node[u'host']}" + with PapiSocketExecutor(node, remote_vpp_socket) as papi_exec: reply = papi_exec.add(cmd, **args).get_reply(err_msg)["reply"] if log: logger.info( - "{cmd} ({host} - {remote_vpp_socket}):\n{reply}". 
- format(cmd=cmd, reply=reply.encode('utf-8').strip(), - remote_vpp_socket=remote_vpp_socket, host=node['host'])) + f"{cmd} ({node[u'host']} - {remote_vpp_socket}):\n" + f"{reply.strip()}" + ) return reply @staticmethod @@ -467,7 +481,8 @@ class PapiSocketExecutor(object): if sockets: for socket in sockets.values(): PapiSocketExecutor.run_cli_cmd( - node, cli_cmd, log=log, remote_vpp_socket=socket) + node, cli_cmd, log=log, remote_vpp_socket=socket + ) @staticmethod def dump_and_log(node, cmds): @@ -481,10 +496,9 @@ class PapiSocketExecutor(object): with PapiSocketExecutor(node) as papi_exec: for cmd in cmds: dump = papi_exec.add(cmd).get_details() - logger.debug("{cmd}:\n{data}".format( - cmd=cmd, data=pformat(dump))) + logger.debug(f"{cmd}:\n{pformat(dump)}") - def _execute(self, err_msg="Undefined error message"): + def _execute(self, err_msg=u"Undefined error message", exp_rv=0): """Turn internal command list into data and execute; return replies. This method also clears the internal command list. @@ -509,43 +523,42 @@ class PapiSocketExecutor(object): self._api_command_list = list() replies = list() for command in local_list: - api_name = command["api_name"] + api_name = command[u"api_name"] papi_fn = getattr(vpp_instance.api, api_name) try: try: - reply = papi_fn(**command["api_args"]) + reply = papi_fn(**command[u"api_args"]) except (IOError, struct.error) as err: - # Ocassionally an error happens, try reconnect. - logger.warn("Reconnect after error: {err!r}".format( - err=err)) + # Occasionally an error happens, try reconnect. + logger.warn(f"Reconnect after error: {err!r}") self.vpp_instance.disconnect() - # Testing showes immediate reconnect fails. + # Testing shows immediate reconnect fails. time.sleep(1) - self.vpp_instance.connect_sync("csit_socket") - logger.trace("Reconnected.") - reply = papi_fn(**command["api_args"]) + self.vpp_instance.connect_sync(u"csit_socket") + logger.trace(u"Reconnected.") + reply = papi_fn(**command[u"api_args"]) except (AttributeError, IOError, struct.error) as err: - raise_from(AssertionError(err_msg), err, level="INFO") + raise AssertionError(err_msg) from err # *_dump commands return list of objects, convert, ordinary reply. if not isinstance(reply, list): reply = [reply] for item in reply: self.crc_checker.check_api_name(item.__class__.__name__) dict_item = dictize(item) - if "retval" in dict_item.keys(): + if u"retval" in dict_item.keys(): # *_details messages do not contain retval. - retval = dict_item["retval"] - if retval != 0: + retval = dict_item[u"retval"] + if retval != exp_rv: # TODO: What exactly to log and raise here? - err = AssertionError("Retval {rv!r}".format(rv=retval)) - # Lowering log level, some retval!=0 calls are expected. - # TODO: Expose level argument so callers can decide? - raise_from(AssertionError(err_msg), err, level="DEBUG") + raise AssertionError( + f"Retval {retval!r} does not match expected " + f"retval {exp_rv!r}" + ) replies.append(dict_item) return replies -class PapiExecutor(object): +class PapiExecutor: """Contains methods for executing VPP Python API commands on DUTs. TODO: Remove .add step, make get_stats accept paths directly. @@ -589,7 +602,6 @@ class PapiExecutor(object): :param node: Node to run command(s) on. :type node: dict """ - # Node to run command(s) on. self._node = node @@ -602,15 +614,16 @@ class PapiExecutor(object): try: self._ssh.connect(self._node) except IOError: - raise RuntimeError("Cannot open SSH connection to host {host} to " - "execute PAPI command(s)". 
- format(host=self._node["host"])) + raise RuntimeError( + f"Cannot open SSH connection to host {self._node[u'host']} " + f"to execute PAPI command(s)" + ) return self def __exit__(self, exc_type, exc_val, exc_tb): self._ssh.disconnect(self._node) - def add(self, csit_papi_command="vpp-stats", history=True, **kwargs): + def add(self, csit_papi_command=u"vpp-stats", history=True, **kwargs): """Add next command to internal command list; return self. The argument name 'csit_papi_command' must be unique enough as it cannot @@ -629,28 +642,36 @@ class PapiExecutor(object): """ if history: PapiHistory.add_to_papi_history( - self._node, csit_papi_command, **kwargs) - self._api_command_list.append(dict( - api_name=csit_papi_command, api_args=copy.deepcopy(kwargs))) + self._node, csit_papi_command, **kwargs + ) + self._api_command_list.append( + dict( + api_name=csit_papi_command, api_args=copy.deepcopy(kwargs) + ) + ) return self - def get_stats(self, err_msg="Failed to get statistics.", timeout=120, - socket=Constants.SOCKSTAT_PATH): + def get_stats( + self, err_msg=u"Failed to get statistics.", timeout=120, + socket=Constants.SOCKSTAT_PATH): """Get VPP Stats from VPP Python API. :param err_msg: The message used if the PAPI command(s) execution fails. :param timeout: Timeout in seconds. + :param socket: Path to Stats socket to tunnel to. :type err_msg: str :type timeout: int + :type socket: str :returns: Requested VPP statistics. :rtype: list of dict """ - paths = [cmd['api_args']['path'] for cmd in self._api_command_list] + paths = [cmd[u"api_args"][u"path"] for cmd in self._api_command_list] self._api_command_list = list() stdout = self._execute_papi( - paths, method='stats', err_msg=err_msg, timeout=timeout, - socket=socket) + paths, method=u"stats", err_msg=err_msg, timeout=timeout, + socket=socket + ) return json.loads(stdout) @@ -675,27 +696,33 @@ class PapiExecutor(object): :rtype: dict or str or int """ if isinstance(val, dict): - for val_k, val_v in val.iteritems(): + for val_k, val_v in val.items(): val[str(val_k)] = process_value(val_v) - return val + retval = val elif isinstance(val, list): for idx, val_l in enumerate(val): val[idx] = process_value(val_l) - return val + retval = val else: - return binascii.hexlify(val) if isinstance(val, str) else val + retval = val.encode().hex() if isinstance(val, str) else val + return retval api_data_processed = list() for api in api_d: api_args_processed = dict() - for a_k, a_v in api["api_args"].iteritems(): + for a_k, a_v in api[u"api_args"].items(): api_args_processed[str(a_k)] = process_value(a_v) - api_data_processed.append(dict(api_name=api["api_name"], - api_args=api_args_processed)) + api_data_processed.append( + dict( + api_name=api[u"api_name"], + api_args=api_args_processed + ) + ) return api_data_processed - def _execute_papi(self, api_data, method='request', err_msg="", - timeout=120, socket=None): + def _execute_papi( + self, api_data, method=u"request", err_msg=u"", timeout=120, + socket=None): """Execute PAPI command(s) on remote node and store the result. :param api_data: List of APIs with their arguments. @@ -714,32 +741,31 @@ class PapiExecutor(object): :raises AssertionError: If PAPI command(s) execution has failed. 
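Example (illustrative only; angle-bracketed placeholders stand for the
real values taken from Constants, and the path and socket shown are
hypothetical) of a remote command this method constructs::

    <REMOTE_FW_DIR>/<RESOURCES_PAPI_PROVIDER> \
        --method stats --data '["/if/rx"]' --socket /run/vpp/stats.sock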
""" if not api_data: - raise RuntimeError("No API data provided.") + raise RuntimeError(u"No API data provided.") json_data = json.dumps(api_data) \ - if method in ("stats", "stats_request") \ + if method in (u"stats", u"stats_request") \ else json.dumps(self._process_api_data(api_data)) - sock = " --socket {socket}".format(socket=socket) if socket else "" - cmd = ( - "{fw_dir}/{papi_provider} --method {method} --data '{json}'{socket}" - .format(fw_dir=Constants.REMOTE_FW_DIR, - papi_provider=Constants.RESOURCES_PAPI_PROVIDER, - method=method, json=json_data, socket=sock)) + sock = f" --socket {socket}" if socket else u"" + cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_PAPI_PROVIDER}" \ + f" --method {method} --data '{json_data}'{sock}" try: ret_code, stdout, _ = self._ssh.exec_command_sudo( - cmd=cmd, timeout=timeout, log_stdout_err=False) + cmd=cmd, timeout=timeout, log_stdout_err=False + ) # TODO: Fail on non-empty stderr? except SSHTimeout: - logger.error("PAPI command(s) execution timeout on host {host}:" - "\n{apis}".format(host=self._node["host"], - apis=api_data)) + logger.error( + f"PAPI command(s) execution timeout on host " + f"{self._node[u'host']}:\n{api_data}" + ) raise except Exception as exc: - raise_from(RuntimeError( - "PAPI command(s) execution on host {host} " - "failed: {apis}".format( - host=self._node["host"], apis=api_data)), exc) + raise RuntimeError( + f"PAPI command(s) execution on host {self._node[u'host']} " + f"failed: {api_data}" + ) from exc if ret_code != 0: raise AssertionError(err_msg) diff --git a/resources/libraries/python/PapiHistory.py b/resources/libraries/python/PapiHistory.py index c4ee4ee495..cacfbd6b19 100644 --- a/resources/libraries/python/PapiHistory.py +++ b/resources/libraries/python/PapiHistory.py @@ -17,13 +17,13 @@ from robot.api import logger from resources.libraries.python.topology import NodeType, DICT__nodes -__all__ = ["DICT__DUTS_PAPI_HISTORY", "PapiHistory"] +__all__ = [u"DICT__DUTS_PAPI_HISTORY", u"PapiHistory"] DICT__DUTS_PAPI_HISTORY = dict() -class PapiHistory(object): +class PapiHistory: """Contains methods to set up DUT PAPI command history. """ @@ -34,7 +34,7 @@ class PapiHistory(object): :param node: DUT node to reset PAPI command history for. :type node: dict """ - DICT__DUTS_PAPI_HISTORY[node['host']] = list() + DICT__DUTS_PAPI_HISTORY[node[u"host"]] = list() @staticmethod def reset_papi_history_on_all_duts(nodes): @@ -44,7 +44,7 @@ class PapiHistory(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: PapiHistory.reset_papi_history(node) @staticmethod @@ -85,16 +85,15 @@ class PapiHistory(object): """ if papi: args = list() - for key, val in kwargs.iteritems(): - args.append("{key}={val!r}".format(key=key, val=val)) - item = "{cmd}({args})".format(cmd=csit_papi_command, - args=",".join(args)) + for key, val in kwargs.items(): + args.append(f"{key}={val!r}") + item = f"{csit_papi_command}({u','.join(args)})" else: # This else part is here to store VAT commands. # VAT history is not used. # TODO: Remove when VatExecutor is completely removed. - item = "{cmd}".format(cmd=csit_papi_command) - DICT__DUTS_PAPI_HISTORY[node['host']].append(item) + item = f"{csit_papi_command}" + DICT__DUTS_PAPI_HISTORY[node[u"host"]].append(item) @staticmethod def show_papi_history(node): @@ -103,12 +102,11 @@ class PapiHistory(object): :param node: DUT node to show PAPI command history for. 
:type node: dict """ - history_list = DICT__DUTS_PAPI_HISTORY[node['host']] + history_list = DICT__DUTS_PAPI_HISTORY[node[u"host"]] if not history_list: - history_list = ("No PAPI command executed", ) - logger.info( - "{0} PAPI command history:\n{1}\n".format( - node['host'], "\n".join(history_list))) + history_list = (u"No PAPI command executed", ) + history = u'\n'.join(history_list) + logger.info(f"{node[u'host']} PAPI command history:\n{history}\n") @staticmethod def show_papi_history_on_all_duts(nodes): @@ -118,7 +116,7 @@ class PapiHistory(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: PapiHistory.show_papi_history(node) diff --git a/resources/libraries/python/Policer.py b/resources/libraries/python/Policer.py index a51f99609c..f6a852e417 100644 --- a/resources/libraries/python/Policer.py +++ b/resources/libraries/python/Policer.py @@ -81,7 +81,7 @@ class DSCP(IntEnum): D_EF = 46 -class Policer(object): +class Policer: """Policer utilities.""" # pylint: disable=too-many-arguments, too-many-locals @@ -128,50 +128,49 @@ class Policer(object): :type exceed_dscp: str :type violate_dscp: str """ - cmd = 'policer_add_del' + cmd = u"policer_add_del" args = dict( is_add=int(is_add), - name=str(policer_name), + name=str(policer_name).encode(encoding=u"utf-8"), cir=int(cir), eir=int(eir), cb=int(cbs), eb=int(ebs), rate_type=getattr(PolicerRateType, rate_type.upper()).value, round_type=getattr( - PolicerRoundType, 'ROUND_TO_{rt}'.format( - rt=round_type.upper())).value, - type=getattr(PolicerType, 'TYPE_{pt}'.format( - pt=policer_type.upper())).value, + PolicerRoundType, f"ROUND_TO_{round_type.upper()}" + ).value, + type=getattr(PolicerType, f"TYPE_{policer_type.upper()}").value, conform_action_type=getattr( - PolicerAction, conform_action_type.upper()).value, - conform_dscp=getattr(DSCP, 'D_{dscp}'.format( - dscp=conform_dscp.upper())).value + PolicerAction, conform_action_type.upper() + ).value, + conform_dscp=getattr(DSCP, f"D_{conform_dscp.upper()}").value if conform_action_type.upper() == PolicerAction.MARK_AND_TRANSMIT.name else 0, exceed_action_type=getattr( - PolicerAction, exceed_action_type.upper()).value, - exceed_dscp=getattr(DSCP, 'D_{dscp}'.format( - dscp=exceed_dscp.upper())).value + PolicerAction, exceed_action_type.upper() + ).value, + exceed_dscp=getattr(DSCP, f"D_{exceed_dscp.upper()}").value if exceed_action_type.upper() == PolicerAction.MARK_AND_TRANSMIT.name else 0, violate_action_type=getattr( - PolicerAction, violate_action_type.upper()).value, - violate_dscp=getattr(DSCP, 'D_{dscp}'.format( - dscp=violate_dscp.upper())).value + PolicerAction, violate_action_type.upper() + ).value, + violate_dscp=getattr(DSCP, f"D_{violate_dscp.upper()}").value if violate_action_type.upper() == PolicerAction.MARK_AND_TRANSMIT.name else 0, - color_aware=1 if color_aware == "'ca'" else 0 + color_aware=1 if color_aware == u"'ca'" else 0 ) - err_msg = 'Failed to configure policer {pn} on host {host}'.format( - pn=policer_name, host=node['host']) + err_msg = f"Failed to configure policer {policer_name} " \ + f"on host {node['host']}" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd, **args).get_reply(err_msg) - return reply['policer_index'] + return reply[u"policer_index"] @staticmethod def policer_classify_set_interface( @@ -196,13 +195,12 @@ class Policer(object): :type ip6_table_index: int :type l2_table_index: int """ - if isinstance(interface, basestring): + if isinstance(interface, 
str): sw_if_index = Topology.get_interface_sw_index(node, interface) else: sw_if_index = interface - cmd = 'policer_classify_set_interface' - + cmd = u"policer_classify_set_interface" args = dict( is_add=int(is_add), sw_if_index=sw_if_index, @@ -210,8 +208,8 @@ class Policer(object): ip6_table_index=int(ip6_table_index), l2_table_index=int(l2_table_index) ) - err_msg = 'Failed to set/unset policer classify interface {ifc} ' \ - 'on host {host}'.format(ifc=interface, host=node['host']) + err_msg = f"Failed to set/unset policer classify interface " \ + f"{interface} on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -236,4 +234,4 @@ class Policer(object): :returns: DSCP numeric value. :rtype: int """ - return getattr(DSCP, 'D_{dscp}'.format(dscp=dscp.upper())).value + return getattr(DSCP, f"D_{dscp.upper()}").value diff --git a/resources/libraries/python/PythonThree.py b/resources/libraries/python/PythonThree.py deleted file mode 100644 index 6ecea80173..0000000000 --- a/resources/libraries/python/PythonThree.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2019 Cisco and/or its affiliates. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Library holding utility functions to be replaced by later Python builtins.""" - -from robot.api import logger - - -def raise_from(raising, excepted, level="WARN"): - """Function to be replaced by "raise from" in Python 3. - - Neither "six" nor "future" offer good enough implementation right now. - chezsoi.org/lucas/blog/displaying-chained-exceptions-stacktraces-in-python-2 - - Current implementation just logs the excepted error, and raises the new one. - For allower log level values, see: - robot-framework.readthedocs.io/en/latest/autodoc/robot.api.html#log-levels - - :param raising: The exception to raise. - :param excepted: The exception we excepted and want to log. - :param level: Robot logger logging level to log with. - :type raising: BaseException - :type excepted: BaseException - :type level: str - :raises: raising - """ - logger.write("Excepted: {exc!r}\nRaising: {rai!r}".format( - exc=excepted, rai=raising), level) - raise raising diff --git a/resources/libraries/python/QemuManager.py b/resources/libraries/python/QemuManager.py index ad205c87a5..cfc4fbd7c9 100644 --- a/resources/libraries/python/QemuManager.py +++ b/resources/libraries/python/QemuManager.py @@ -20,14 +20,14 @@ from resources.libraries.python.CpuUtils import CpuUtils from resources.libraries.python.QemuUtils import QemuUtils from resources.libraries.python.topology import NodeType, Topology -__all__ = ["QemuManager"] +__all__ = [u"QemuManager"] -class QemuManager(object): +class QemuManager: """QEMU lifecycle management class""" # Use one instance of class per tests. - ROBOT_LIBRARY_SCOPE = 'TEST CASE' + ROBOT_LIBRARY_SCOPE = u"TEST CASE" def __init__(self, nodes): """Init QemuManager object.""" @@ -46,57 +46,62 @@ class QemuManager(object): :param kwargs: Named parameters. 
:type kwargs: dict """ - node = kwargs['node'] - nf_chains = int(kwargs['nf_chains']) - nf_nodes = int(kwargs['nf_nodes']) - queues = kwargs['rxq_count_int'] if kwargs['auto_scale'] else 1 - vs_dtc = kwargs['vs_dtc'] - nf_dtc = kwargs['vs_dtc'] if kwargs['auto_scale'] else kwargs['nf_dtc'] - nf_dtcr = kwargs['nf_dtcr'] if isinstance(kwargs['nf_dtcr'], int) else 2 + node = kwargs[u"node"] + nf_chains = int(kwargs[u"nf_chains"]) + nf_nodes = int(kwargs[u"nf_nodes"]) + queues = kwargs[u"rxq_count_int"] if kwargs[u"auto_scale"] else 1 + vs_dtc = kwargs[u"vs_dtc"] + nf_dtc = kwargs[u"vs_dtc"] if kwargs[u"auto_scale"] \ + else kwargs[u"nf_dtc"] + nf_dtcr = kwargs[u"nf_dtcr"] \ + if isinstance(kwargs[u"nf_dtcr"], int) else 2 img = Constants.QEMU_VM_KERNEL for nf_chain in range(1, nf_chains + 1): for nf_node in range(1, nf_nodes + 1): qemu_id = (nf_chain - 1) * nf_nodes + nf_node - name = '{node}_{qemu_id}'.format(node=node, qemu_id=qemu_id) - sock1 = '/var/run/vpp/sock-{qemu_id}-1'.format(qemu_id=qemu_id) - sock2 = '/var/run/vpp/sock-{qemu_id}-2'.format(qemu_id=qemu_id) + name = f"{node}_{qemu_id}" + sock1 = f"/var/run/vpp/sock-{qemu_id}-1" + sock2 = f"/var/run/vpp/sock-{qemu_id}-2" + idx1 = (nf_chain - 1) * nf_nodes * 2 + nf_node * 2 - 1 vif1_mac = Topology.get_interface_mac( - self.nodes[node], 'vhost{idx}'.format( - idx=(nf_chain - 1) * nf_nodes * 2 + nf_node * 2 - 1)) \ - if kwargs['vnf'] == 'testpmd_mac' \ - else kwargs['tg_if1_mac'] if nf_node == 1 \ - else '52:54:00:00:{id:02x}:02'.format(id=qemu_id - 1) + self.nodes[node], f"vhost{idx1}" + ) if kwargs[u"vnf"] == u"testpmd_mac" \ + else kwargs[u"tg_if1_mac"] if nf_node == 1 \ + else f"52:54:00:00:{(qemu_id - 1):02x}:02" + idx2 = (nf_chain - 1) * nf_nodes * 2 + nf_node * 2 vif2_mac = Topology.get_interface_mac( - self.nodes[node], 'vhost{idx}'.format( - idx=(nf_chain - 1) * nf_nodes * 2 + nf_node * 2)) \ - if kwargs['vnf'] == 'testpmd_mac' \ - else kwargs['tg_if2_mac'] if nf_node == nf_nodes \ - else '52:54:00:00:{id:02x}:01'.format(id=qemu_id + 1) + self.nodes[node], f"vhost{idx2}" + ) if kwargs[u"vnf"] == u"testpmd_mac" \ + else kwargs[u"tg_if2_mac"] if nf_node == nf_nodes \ + else f"52:54:00:00:{(qemu_id + 1):02x}:01" self.machines_affinity[name] = CpuUtils.get_affinity_nf( nodes=self.nodes, node=node, nf_chains=nf_chains, nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node, - vs_dtc=vs_dtc, nf_dtc=nf_dtc, nf_dtcr=nf_dtcr) + vs_dtc=vs_dtc, nf_dtc=nf_dtc, nf_dtcr=nf_dtcr + ) self.machines[name] = QemuUtils( node=self.nodes[node], qemu_id=qemu_id, smp=len(self.machines_affinity[name]), mem=4096, - vnf=kwargs['vnf'], img=img) + vnf=kwargs[u"vnf"], img=img + ) self.machines[name].configure_kernelvm_vnf( - mac1='52:54:00:00:{id:02x}:01'.format(id=qemu_id), - mac2='52:54:00:00:{id:02x}:02'.format(id=qemu_id), - vif1_mac=vif1_mac, - vif2_mac=vif2_mac, - queues=queues, - jumbo_frames=kwargs['jumbo']) + mac1=f"52:54:00:00:{qemu_id:02x}:01", + mac2=f"52:54:00:00:{qemu_id:02x}:02", + vif1_mac=vif1_mac, vif2_mac=vif2_mac, queues=queues, + jumbo_frames=kwargs[u"jumbo"] + ) self.machines[name].qemu_add_vhost_user_if( - sock1, jumbo_frames=kwargs['jumbo'], queues=queues, - queue_size=kwargs['perf_qemu_qsz']) + sock1, jumbo_frames=kwargs[u"jumbo"], queues=queues, + queue_size=kwargs[u"perf_qemu_qsz"] + ) self.machines[name].qemu_add_vhost_user_if( - sock2, jumbo_frames=kwargs['jumbo'], queues=queues, - queue_size=kwargs['perf_qemu_qsz']) + sock2, jumbo_frames=kwargs[u"jumbo"], queues=queues, + queue_size=kwargs[u"perf_qemu_qsz"] + ) def 
construct_vms_on_all_nodes(self, **kwargs): """Construct 1..Mx1..N VMs(s) with specified name on all nodes. @@ -106,7 +111,7 @@ class QemuManager(object): """ self.initialize() for node in self.nodes: - if self.nodes[node]['type'] == NodeType.DUT: + if self.nodes[node][u"type"] == NodeType.DUT: self.construct_vms_on_node(node=node, **kwargs) def start_all_vms(self, pinning=False): @@ -115,8 +120,8 @@ class QemuManager(object): :param pinning: If True, then do also QEMU process pinning. :type pinning: bool """ - for machine, machine_affinity in zip(self.machines.values(), - self.machines_affinity.values()): + for machine, machine_affinity in \ + zip(self.machines.values(), self.machines_affinity.values()): machine.qemu_start() if pinning: machine.qemu_set_affinity(*machine_affinity) diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py index 17b5eabdc4..96b4ebdaf4 100644 --- a/resources/libraries/python/QemuUtils.py +++ b/resources/libraries/python/QemuUtils.py @@ -14,31 +14,34 @@ """QEMU utilities library.""" import json + from re import match from string import Template from time import sleep from robot.api import logger + from resources.libraries.python.Constants import Constants from resources.libraries.python.DpdkUtil import DpdkUtil from resources.libraries.python.DUTSetup import DUTSetup from resources.libraries.python.OptionString import OptionString -from resources.libraries.python.VppConfigGenerator import VppConfigGenerator -from resources.libraries.python.VPPUtil import VPPUtil from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error from resources.libraries.python.topology import NodeType, Topology +from resources.libraries.python.VppConfigGenerator import VppConfigGenerator +from resources.libraries.python.VPPUtil import VPPUtil -__all__ = ["QemuUtils"] +__all__ = [u"QemuUtils"] -class QemuUtils(object): +class QemuUtils: """QEMU utilities.""" # Use one instance of class per tests. - ROBOT_LIBRARY_SCOPE = 'TEST CASE' + ROBOT_LIBRARY_SCOPE = u"TEST CASE" - def __init__(self, node, qemu_id=1, smp=1, mem=512, vnf=None, - img=Constants.QEMU_VM_IMAGE): + def __init__( + self, node, qemu_id=1, smp=1, mem=512, vnf=None, + img=Constants.QEMU_VM_IMAGE): """Initialize QemuUtil class. :param node: Node to run QEMU on. 
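(Editorial aside, not part of the change set: the QemuManager hunks above only swap str.format() for f-strings; the per-VM naming rules are unchanged. As a reading aid, the derivation reduces to the sketch below. Illustrative only; the helper name is invented.)

    def vm_identifiers(node_name, nf_chain, nf_node, nf_nodes):
        """Sketch of the qemu_id/name/socket/MAC scheme used in
        QemuManager.construct_vms_on_node."""
        qemu_id = (nf_chain - 1) * nf_nodes + nf_node
        name = f"{node_name}_{qemu_id}"
        sock1 = f"/var/run/vpp/sock-{qemu_id}-1"
        sock2 = f"/var/run/vpp/sock-{qemu_id}-2"
        # Guest-facing MACs encode qemu_id in the fifth octet.
        mac1 = f"52:54:00:00:{qemu_id:02x}:01"
        mac2 = f"52:54:00:00:{qemu_id:02x}:02"
        return name, sock1, sock2, mac1, mac2

    # vm_identifiers("DUT1", 1, 2, 2) returns
    # ("DUT1_2", "/var/run/vpp/sock-2-1", "/var/run/vpp/sock-2-2",
    #  "52:54:00:00:02:01", "52:54:00:00:02:02")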
@@ -57,138 +60,143 @@ class QemuUtils(object): self._vhost_id = 0 self._node = node self._arch = Topology.get_node_arch(self._node) - dpdk_target = 'arm64-armv8a' if self._arch == 'aarch64' \ - else 'x86_64-native' - self._testpmd_path = '{path}/{dpdk_target}-linuxapp-gcc/app'\ - .format(path=Constants.QEMU_VM_DPDK, dpdk_target=dpdk_target) + dpdk_target = u"arm64-armv8a" if self._arch == u"aarch64" \ + else u"x86_64-native" + self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/" \ + f"{dpdk_target}-linuxapp-gcc/app" self._vm_info = { - 'host': node['host'], - 'type': NodeType.VM, - 'port': 10021 + qemu_id, - 'serial': 4555 + qemu_id, - 'username': 'cisco', - 'password': 'cisco', - 'interfaces': {}, + u"host": node[u"host"], + u"type": NodeType.VM, + u"port": 10021 + qemu_id, + u"serial": 4555 + qemu_id, + u"username": 'cisco', + u"password": 'cisco', + u"interfaces": {}, } - if node['port'] != 22: - self._vm_info['host_port'] = node['port'] - self._vm_info['host_username'] = node['username'] - self._vm_info['host_password'] = node['password'] + if node[u"port"] != 22: + self._vm_info[u"host_port"] = node[u"port"] + self._vm_info[u"host_username"] = node[u"username"] + self._vm_info[u"host_password"] = node[u"password"] # Input Options. self._opt = dict() - self._opt['qemu_id'] = qemu_id - self._opt['mem'] = int(mem) - self._opt['smp'] = int(smp) - self._opt['img'] = img - self._opt['vnf'] = vnf + self._opt[u"qemu_id"] = qemu_id + self._opt[u"mem"] = int(mem) + self._opt[u"smp"] = int(smp) + self._opt[u"img"] = img + self._opt[u"vnf"] = vnf # Temporary files. self._temp = dict() - self._temp['pidfile'] = '/var/run/qemu_{id}.pid'.format(id=qemu_id) + self._temp[u"pidfile"] = f"/var/run/qemu_{qemu_id}.pid" if img == Constants.QEMU_VM_IMAGE: - self._opt['vm_type'] = 'nestedvm' - self._temp['qmp'] = '/var/run/qmp_{id}.sock'.format(id=qemu_id) - self._temp['qga'] = '/var/run/qga_{id}.sock'.format(id=qemu_id) + self._opt[u"vm_type"] = u"nestedvm" + self._temp[u"qmp"] = f"/var/run/qmp_{qemu_id}.sock" + self._temp[u"qga"] = f"/var/run/qga_{qemu_id}.sock" elif img == Constants.QEMU_VM_KERNEL: - self._opt['img'], _ = exec_cmd_no_error( - node, - 'ls -1 {img}* | tail -1'.format(img=Constants.QEMU_VM_KERNEL), - message='Qemu Kernel VM image not found!') - self._opt['vm_type'] = 'kernelvm' - self._temp['log'] = '/tmp/serial_{id}.log'.format(id=qemu_id) - self._temp['ini'] = '/etc/vm_init_{id}.conf'.format(id=qemu_id) - self._opt['initrd'], _ = exec_cmd_no_error( - node, - 'ls -1 {initrd}* | tail -1'.format( - initrd=Constants.QEMU_VM_KERNEL_INITRD), - message='Qemu Kernel initrd image not found!') + self._opt[u"img"], _ = exec_cmd_no_error( + node, f"ls -1 {Constants.QEMU_VM_KERNEL}* | tail -1", + message=u"Qemu Kernel VM image not found!" + ) + self._opt[u"vm_type"] = u"kernelvm" + self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log" + self._temp[u"ini"] = f"/etc/vm_init_{qemu_id}.conf" + self._opt[u"initrd"], _ = exec_cmd_no_error( + node, f"ls -1 {Constants.QEMU_VM_KERNEL_INITRD}* | tail -1", + message=u"Qemu Kernel initrd image not found!" + ) else: - raise RuntimeError('QEMU: Unknown VM image option: {}'.format(img)) + raise RuntimeError(f"QEMU: Unknown VM image option: {img}") # Computed parameters for QEMU command line. 
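# Illustrative aside, not part of the change: a minimal stand-in showing how
# an OptionString created with prefix "-" composes the QEMU command line the
# following lines build up. The real class lives in
# resources/libraries/python/OptionString.py; this sketch only approximates
# its add()/add_with_value() behavior.
class MiniOptionString:
    def __init__(self, prefix=""):
        self.prefix = prefix
        self.parts = []

    def add(self, param):
        # Bare flag, e.g. "-daemonize".
        self.parts.append(f"{self.prefix}{param}")

    def add_with_value(self, param, value):
        # Flag with a value, e.g. "-m 512M".
        self.parts.append(f"{self.prefix}{param} {value}")

    def __str__(self):
        return " ".join(self.parts)

params = MiniOptionString(prefix="-")
params.add("enable-kvm")
params.add_with_value("m", "512M")
assert str(params) == "-enable-kvm -m 512M"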
- self._params = OptionString(prefix='-') + self._params = OptionString(prefix=u"-") self.add_params() def add_params(self): """Set QEMU command line parameters.""" self.add_default_params() - if self._opt.get('vm_type', '') == 'nestedvm': + if self._opt.get(u"vm_type", u"") == u"nestedvm": self.add_nestedvm_params() - elif self._opt.get('vm_type', '') == 'kernelvm': + elif self._opt.get(u"vm_type", u"") == u"kernelvm": self.add_kernelvm_params() else: - raise RuntimeError('QEMU: Unsupported VM type!') + raise RuntimeError(u"QEMU: Unsupported VM type!") def add_default_params(self): """Set default QEMU command line parameters.""" - self._params.add('daemonize') - self._params.add('nodefaults') - self._params.add_with_value('name', 'vnf{qemu},debug-threads=on'.format( - qemu=self._opt.get('qemu_id'))) - self._params.add('no-user-config') - self._params.add_with_value('monitor', 'none') - self._params.add_with_value('display', 'none') - self._params.add_with_value('vga', 'none') - self._params.add('enable-kvm') - self._params.add_with_value('pidfile', self._temp.get('pidfile')) - self._params.add_with_value('cpu', 'host') - - if self._arch == 'aarch64': - machine_args = 'virt,accel=kvm,usb=off,mem-merge=off,gic-version=3' - else: - machine_args = 'pc,accel=kvm,usb=off,mem-merge=off' + self._params.add(u"daemonize") + self._params.add(u"nodefaults") self._params.add_with_value( - 'machine', machine_args) - self._params.add_with_value( - 'smp', '{smp},sockets=1,cores={smp},threads=1'.format( - smp=self._opt.get('smp'))) + u"name", f"vnf{self._opt.get(u'qemu_id')},debug-threads=on" + ) + self._params.add(u"no-user-config") + self._params.add_with_value(u"monitor", u"none") + self._params.add_with_value(u"display", u"none") + self._params.add_with_value(u"vga", u"none") + self._params.add(u"enable-kvm") + self._params.add_with_value(u"pidfile", self._temp.get(u"pidfile")) + self._params.add_with_value(u"cpu", u"host") + + if self._arch == u"aarch64": + machine_args = u"virt,accel=kvm,usb=off,mem-merge=off,gic-version=3" + else: + machine_args = u"pc,accel=kvm,usb=off,mem-merge=off" + self._params.add_with_value(u"machine", machine_args) self._params.add_with_value( - 'object', 'memory-backend-file,id=mem,size={mem}M,' - 'mem-path=/dev/hugepages,share=on'.format(mem=self._opt.get('mem'))) + u"smp", f"{self._opt.get(u'smp')},sockets=1," + f"cores={self._opt.get(u'smp')},threads=1" + ) self._params.add_with_value( - 'm', '{mem}M'.format(mem=self._opt.get('mem'))) - self._params.add_with_value('numa', 'node,memdev=mem') - self._params.add_with_value('balloon', 'none') + u"object", f"memory-backend-file,id=mem," + f"size={self._opt.get(u'mem')}M,mem-path=/dev/hugepages,share=on" + ) + self._params.add_with_value(u"m", f"{self._opt.get(u'mem')}M") + self._params.add_with_value(u"numa", u"node,memdev=mem") + self._params.add_with_value(u"balloon", u"none") def add_nestedvm_params(self): """Set NestedVM QEMU parameters.""" self._params.add_with_value( - 'net', 'nic,macaddr=52:54:00:00:{qemu:02x}:ff'.format( - qemu=self._opt.get('qemu_id'))) + u"net", + f"nic,macaddr=52:54:00:00:{self._opt.get(u'qemu_id'):02x}:ff" + ) self._params.add_with_value( - 'net', 'user,hostfwd=tcp::{info[port]}-:22'.format( - info=self._vm_info)) - locking = ',file.locking=off' + u"net", f"user,hostfwd=tcp::{self._vm_info[u'port']}-:22" + ) + locking = u",file.locking=off" self._params.add_with_value( - 'drive', 'file={img},format=raw,cache=none,if=virtio{locking}'. 
- format(img=self._opt.get('img'), locking=locking)) + u"drive", f"file={self._opt.get(u'img')}," + f"format=raw,cache=none,if=virtio{locking}" + ) self._params.add_with_value( - 'qmp', 'unix:{qmp},server,nowait'.format(qmp=self._temp.get('qmp'))) + u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait" + ) self._params.add_with_value( - 'chardev', 'socket,host=127.0.0.1,port={info[serial]},' - 'id=gnc0,server,nowait'.format(info=self._vm_info)) - self._params.add_with_value('device', 'isa-serial,chardev=gnc0') + u"chardev", f"socket,host=127.0.0.1," + f"port={self._vm_info[u'serial']},id=gnc0,server,nowait") + self._params.add_with_value(u"device", u"isa-serial,chardev=gnc0") self._params.add_with_value( - 'chardev', 'socket,path={qga},server,nowait,id=qga0'.format( - qga=self._temp.get('qga'))) - self._params.add_with_value('device', 'isa-serial,chardev=qga0') + u"chardev", f"socket,path={self._temp.get(u'qga')}," + f"server,nowait,id=qga0" + ) + self._params.add_with_value(u"device", u"isa-serial,chardev=qga0") def add_kernelvm_params(self): """Set KernelVM QEMU parameters.""" - console = 'ttyAMA0' if self._arch == 'aarch64' else 'ttyS0' - self._params.add_with_value('serial', 'file:{log}'.format( - log=self._temp.get('log'))) + console = u"ttyAMA0" if self._arch == u"aarch64" else u"ttyS0" self._params.add_with_value( - 'fsdev', 'local,id=root9p,path=/,security_model=none') + u"serial", f"file:{self._temp.get(u'log')}" + ) self._params.add_with_value( - 'device', 'virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot') + u"fsdev", u"local,id=root9p,path=/,security_model=none" + ) self._params.add_with_value( - 'kernel', '{img}'.format(img=self._opt.get('img'))) + u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot" + ) + self._params.add_with_value(u"kernel", f"{self._opt.get(u'img')}") + self._params.add_with_value(u"initrd", f"{self._opt.get(u'initrd')}") self._params.add_with_value( - 'initrd', '{initrd}'.format(initrd=self._opt.get('initrd'))) - self._params.add_with_value( - 'append', '"ro rootfstype=9p rootflags=trans=virtio ' - 'root=virtioroot console={console} tsc=reliable ' - 'hugepages=256 init={init} fastboot"'.format( - console=console, init=self._temp.get('ini'))) + u"append", f"'ro rootfstype=9p rootflags=trans=virtio " + f"root=virtioroot console={console} tsc=reliable hugepages=256 " + f"init={self._temp.get(u'ini')} fastboot'" + ) def create_kernelvm_config_vpp(self, **kwargs): """Create QEMU VPP config files. @@ -197,15 +205,12 @@ class QemuUtils(object): file. :type kwargs: dict """ - startup = ('/etc/vpp/vm_startup_{id}.conf'. - format(id=self._opt.get('qemu_id'))) - running = ('/etc/vpp/vm_running_{id}.exec'. - format(id=self._opt.get('qemu_id'))) + startup = f"/etc/vpp/vm_startup_{self._opt.get(u'qemu_id')}.conf" + running = f"/etc/vpp/vm_running_{self._opt.get(u'qemu_id')}.exec" - self._temp['startup'] = startup - self._temp['running'] = running - self._opt['vnf_bin'] = ('/usr/bin/vpp -c {startup}'. - format(startup=startup)) + self._temp[u"startup"] = startup + self._temp[u"running"] = running + self._opt[u"vnf_bin"] = f"/usr/bin/vpp -c {startup}" # Create VPP startup configuration. 
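# Illustrative aside, not part of the change: the running config created in
# this function is rendered from a template with string.Template, via the
# safe_substitute() call further below. Minimal demonstration; the $if1/$if2
# placeholders are invented for this sketch.
from string import Template

src = Template(u"set interface state $if1 up\nset interface state $if2 up")
print(src.safe_substitute(if1=u"eth0", if2=u"eth1"))
# safe_substitute() leaves any unknown $placeholder in place instead of
# raising KeyError, so partially filled templates still render.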
vpp_config = VppConfigGenerator() @@ -214,31 +219,29 @@ class QemuUtils(object): vpp_config.add_unix_cli_listen() vpp_config.add_unix_exec(running) vpp_config.add_socksvr() - vpp_config.add_cpu_main_core('0') - if self._opt.get('smp') > 1: - vpp_config.add_cpu_corelist_workers('1-{smp}'.format( - smp=self._opt.get('smp')-1)) - vpp_config.add_dpdk_dev('0000:00:06.0', '0000:00:07.0') - vpp_config.add_dpdk_dev_default_rxq(kwargs['queues']) - vpp_config.add_dpdk_log_level('debug') - if not kwargs['jumbo_frames']: + vpp_config.add_cpu_main_core(u"0") + if self._opt.get(u"smp") > 1: + vpp_config.add_cpu_corelist_workers(f"1-{self._opt.get(u'smp')-1}") + vpp_config.add_dpdk_dev(u"0000:00:06.0", u"0000:00:07.0") + vpp_config.add_dpdk_dev_default_rxq(kwargs[u"queues"]) + vpp_config.add_dpdk_log_level(u"debug") + if not kwargs[u"jumbo_frames"]: vpp_config.add_dpdk_no_multi_seg() vpp_config.add_dpdk_no_tx_checksum_offload() - vpp_config.add_plugin('disable', 'default') - vpp_config.add_plugin('enable', 'dpdk_plugin.so') + vpp_config.add_plugin(u"disable", [u"default"]) + vpp_config.add_plugin(u"enable", [u"dpdk_plugin.so"]) vpp_config.write_config(startup) # Create VPP running configuration. - template = '{res}/{tpl}.exec'.format(res=Constants.RESOURCES_TPL_VM, - tpl=self._opt.get('vnf')) - exec_cmd_no_error(self._node, 'rm -f {running}'.format(running=running), - sudo=True) + template = f"{Constants.RESOURCES_TPL_VM}/{self._opt.get(u'vnf')}.exec" + exec_cmd_no_error(self._node, f"rm -f {running}", sudo=True) - with open(template, 'r') as src_file: + with open(template, "r") as src_file: src = Template(src_file.read()) exec_cmd_no_error( - self._node, "echo '{out}' | sudo tee {running}".format( - out=src.safe_substitute(**kwargs), running=running)) + self._node, f"echo '{src.safe_substitute(**kwargs)}' | " + f"sudo tee {running}" + ) def create_kernelvm_config_testpmd_io(self, **kwargs): """Create QEMU testpmd-io command line. @@ -247,19 +250,18 @@ class QemuUtils(object): :type kwargs: dict """ testpmd_cmd = DpdkUtil.get_testpmd_cmdline( - eal_corelist='0-{smp}'.format(smp=self._opt.get('smp') - 1), + eal_corelist=f"0-{self._opt.get(u'smp') - 1}", eal_driver=False, eal_in_memory=True, pmd_num_mbufs=16384, - pmd_rxq=kwargs['queues'], - pmd_txq=kwargs['queues'], + pmd_rxq=kwargs[u"queues"], + pmd_txq=kwargs[u"queues"], pmd_tx_offloads='0x0', pmd_disable_hw_vlan=False, - pmd_nb_cores=str(self._opt.get('smp') - 1)) + pmd_nb_cores=str(self._opt.get(u"smp") - 1) + ) - self._opt['vnf_bin'] = ('{testpmd_path}/{testpmd_cmd}'. - format(testpmd_path=self._testpmd_path, - testpmd_cmd=testpmd_cmd)) + self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}" def create_kernelvm_config_testpmd_mac(self, **kwargs): """Create QEMU testpmd-mac command line. 
@@ -268,22 +270,21 @@ class QemuUtils(object): :type kwargs: dict """ testpmd_cmd = DpdkUtil.get_testpmd_cmdline( - eal_corelist='0-{smp}'.format(smp=self._opt.get('smp') - 1), + eal_corelist=f"0-{self._opt.get(u'smp') - 1}", eal_driver=False, eal_in_memory=True, pmd_num_mbufs=16384, - pmd_fwd_mode='mac', - pmd_eth_peer_0='0,{mac}'.format(mac=kwargs['vif1_mac']), - pmd_eth_peer_1='1,{mac}'.format(mac=kwargs['vif2_mac']), - pmd_rxq=kwargs['queues'], - pmd_txq=kwargs['queues'], - pmd_tx_offloads='0x0', + pmd_fwd_mode=u"mac", + pmd_eth_peer_0=f"0,{kwargs[u'vif1_mac']}", + pmd_eth_peer_1=f"1,{kwargs[u'vif2_mac']}", + pmd_rxq=kwargs[u"queues"], + pmd_txq=kwargs[u"queues"], + pmd_tx_offloads=u"0x0", pmd_disable_hw_vlan=False, - pmd_nb_cores=str(self._opt.get('smp') - 1)) + pmd_nb_cores=str(self._opt.get(u"smp") - 1) + ) - self._opt['vnf_bin'] = ('{testpmd_path}/{testpmd_cmd}'. - format(testpmd_path=self._testpmd_path, - testpmd_cmd=testpmd_cmd)) + self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}" def create_kernelvm_init(self, **kwargs): """Create QEMU init script. @@ -291,18 +292,17 @@ class QemuUtils(object): :param kwargs: Key-value pairs to replace content of init startup file. :type kwargs: dict """ - template = '{res}/init.sh'.format(res=Constants.RESOURCES_TPL_VM) - init = self._temp.get('ini') - exec_cmd_no_error( - self._node, 'rm -f {init}'.format(init=init), sudo=True) + template = f"{Constants.RESOURCES_TPL_VM}/init.sh" + init = self._temp.get(u"ini") + exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True) - with open(template, 'r') as src_file: + with open(template, "r") as src_file: src = Template(src_file.read()) exec_cmd_no_error( - self._node, "echo '{out}' | sudo tee {init}".format( - out=src.safe_substitute(**kwargs), init=init)) - exec_cmd_no_error( - self._node, "chmod +x {init}".format(init=init), sudo=True) + self._node, f"echo '{src.safe_substitute(**kwargs)}' | " + f"sudo tee {init}" + ) + exec_cmd_no_error(self._node, f"chmod +x {init}", sudo=True) def configure_kernelvm_vnf(self, **kwargs): """Create KernelVM VNF configurations. @@ -310,15 +310,15 @@ class QemuUtils(object): :param kwargs: Key-value pairs for templating configs. :type kwargs: dict """ - if 'vpp' in self._opt.get('vnf'): + if u"vpp" in self._opt.get(u"vnf"): self.create_kernelvm_config_vpp(**kwargs) - elif 'testpmd_io' in self._opt.get('vnf'): + elif u"testpmd_io" in self._opt.get(u"vnf"): self.create_kernelvm_config_testpmd_io(**kwargs) - elif 'testpmd_mac' in self._opt.get('vnf'): + elif u"testpmd_mac" in self._opt.get(u"vnf"): self.create_kernelvm_config_testpmd_mac(**kwargs) else: - raise RuntimeError('QEMU: Unsupported VNF!') - self.create_kernelvm_init(vnf_bin=self._opt['vnf_bin']) + raise RuntimeError(u"QEMU: Unsupported VNF!") + self.create_kernelvm_init(vnf_bin=self._opt[u"vnf_bin"]) def get_qemu_pids(self): """Get QEMU CPU pids. @@ -326,9 +326,9 @@ class QemuUtils(object): :returns: List of QEMU CPU pids. :rtype: list of str """ - command = ("grep -rwl 'CPU' /proc/$(sudo cat {pidfile})/task/*/comm ". 
- format(pidfile=self._temp.get('pidfile'))) - command += (r"| xargs dirname | sed -e 's/\/.*\///g' | uniq") + command = f"grep -rwl 'CPU' /proc/$(sudo cat " \ + f"{self._temp.get(u'pidfile')})/task/*/comm " + command += r"| xargs dirname | sed -e 's/\/.*\///g' | uniq" stdout, _ = exec_cmd_no_error(self._node, command) return stdout.splitlines() @@ -349,19 +349,19 @@ class QemuUtils(object): sleep(1) continue for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus): - command = ('taskset -pc {host_cpu} {thread}'. - format(host_cpu=host_cpu, thread=qemu_cpu)) - message = ('QEMU: Set affinity failed on {host}!'. - format(host=self._node['host'])) - exec_cmd_no_error(self._node, command, sudo=True, - message=message) + command = f"taskset -pc {host_cpu} {qemu_cpu}" + message = f"QEMU: Set affinity failed " \ + f"on {self._node[u'host']}!" + exec_cmd_no_error( + self._node, command, sudo=True, message=message + ) break except (RuntimeError, ValueError): self.qemu_kill_all() raise else: self.qemu_kill_all() - raise RuntimeError('Failed to set Qemu threads affinity!') + raise RuntimeError(u"Failed to set Qemu threads affinity!") def qemu_set_scheduler_policy(self): """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU @@ -373,18 +373,18 @@ class QemuUtils(object): qemu_cpus = self.get_qemu_pids() for qemu_cpu in qemu_cpus: - command = ('chrt -r -p 1 {thread}'. - format(thread=qemu_cpu)) - message = ('QEMU: Set SCHED_RR failed on {host}'. - format(host=self._node['host'])) - exec_cmd_no_error(self._node, command, sudo=True, - message=message) + command = f"chrt -r -p 1 {qemu_cpu}" + message = f"QEMU: Set SCHED_RR failed on {self._node[u'host']}" + exec_cmd_no_error( + self._node, command, sudo=True, message=message + ) except (RuntimeError, ValueError): self.qemu_kill_all() raise - def qemu_add_vhost_user_if(self, socket, server=True, jumbo_frames=False, - queue_size=None, queues=1): + def qemu_add_vhost_user_if( + self, socket, server=True, jumbo_frames=False, queue_size=None, + queues=1): """Add Vhost-user interface. :param socket: Path of the unix socket. @@ -400,30 +400,29 @@ class QemuUtils(object): """ self._vhost_id += 1 self._params.add_with_value( - 'chardev', 'socket,id=char{vhost},path={socket}{server}'.format( - vhost=self._vhost_id, socket=socket, - server=',server' if server is True else '')) + u"chardev", f"socket,id=char{self._vhost_id}," + f"path={socket}{u',server' if server is True else u''}" + ) self._params.add_with_value( - 'netdev', 'vhost-user,id=vhost{vhost},chardev=char{vhost},' - 'queues={queues}'.format(vhost=self._vhost_id, queues=queues)) - mac = ('52:54:00:00:{qemu:02x}:{vhost:02x}'. - format(qemu=self._opt.get('qemu_id'), vhost=self._vhost_id)) - queue_size = ('rx_queue_size={queue_size},tx_queue_size={queue_size}'. 
- format(queue_size=queue_size)) if queue_size else '' - mbuf = 'on,host_mtu=9200' + u"netdev", f"vhost-user,id=vhost{self._vhost_id}," + f"chardev=char{self._vhost_id},queues={queues}" + ) + mac = f"52:54:00:00:{self._opt.get(u'qemu_id'):02x}:" \ + f"{self._vhost_id:02x}" + queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \ + if queue_size else u"" + mbuf = u"on,host_mtu=9200" self._params.add_with_value( - 'device', 'virtio-net-pci,netdev=vhost{vhost},mac={mac},' - 'addr={addr}.0,mq=on,vectors={vectors},csum=off,gso=off,' - 'guest_tso4=off,guest_tso6=off,guest_ecn=off,mrg_rxbuf={mbuf},' - '{queue_size}'.format( - addr=self._vhost_id+5, vhost=self._vhost_id, mac=mac, - mbuf=mbuf if jumbo_frames else 'off', queue_size=queue_size, - vectors=(2 * queues + 2))) + u"device", f"virtio-net-pci,netdev=vhost{self._vhost_id},mac={mac}," + f"addr={self._vhost_id+5}.0,mq=on,vectors={2 * queues + 2}," + f"csum=off,gso=off,guest_tso4=off,guest_tso6=off,guest_ecn=off," + f"mrg_rxbuf={mbuf if jumbo_frames else u'off'},{queue_size}" + ) # Add interface MAC and socket to the node dict. - if_data = {'mac_address': mac, 'socket': socket} - if_name = 'vhost{vhost}'.format(vhost=self._vhost_id) - self._vm_info['interfaces'][if_name] = if_data + if_data = {u"mac_address": mac, u"socket": socket} + if_name = f"vhost{self._vhost_id}" + self._vm_info[u"interfaces"][if_name] = if_data # Add socket to temporary file list. self._temp[if_name] = socket @@ -439,32 +438,32 @@ class QemuUtils(object): response will contain the "error" keyword instead of "return". """ # To enter command mode, the qmp_capabilities command must be issued. - command = ('echo "{{ \\"execute\\": \\"qmp_capabilities\\" }}' - '{{ \\"execute\\": \\"{cmd}\\" }}" | ' - 'sudo -S socat - UNIX-CONNECT:{qmp}'. - format(cmd=cmd, qmp=self._temp.get('qmp'))) - message = ('QMP execute "{cmd}" failed on {host}'. - format(cmd=cmd, host=self._node['host'])) + command = f"echo \"{{{{ \\\"execute\\\": " \ + f"\\\"qmp_capabilities\\\" }}}}" \ + f"{{{{ \\\"execute\\\": \\\"{cmd}\\\" }}}}\" | " \ + f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qmp')}" + message = f"QMP execute '{cmd}' failed on {self._node[u'host']}" + stdout, _ = exec_cmd_no_error( - self._node, command, sudo=False, message=message) + self._node, command, sudo=False, message=message + ) # Skip capabilities negotiation messages. out_list = stdout.splitlines() if len(out_list) < 3: - raise RuntimeError( - 'Invalid QMP output on {host}'.format(host=self._node['host'])) + raise RuntimeError(f"Invalid QMP output on {self._node[u'host']}") return json.loads(out_list[2]) def _qemu_qga_flush(self): """Flush the QGA parser state.""" - command = ('(printf "\xFF"; sleep 1) | ' - 'sudo -S socat - UNIX-CONNECT:{qga}'. - format(qga=self._temp.get('qga'))) - message = ('QGA flush failed on {host}'.format(host=self._node['host'])) + command = f"(printf \"\xFF\"; sleep 1) | sudo -S socat " \ + f"- UNIX-CONNECT:{self._temp.get(u'qga')}" + message = f"QGA flush failed on {self._node[u'host']}" stdout, _ = exec_cmd_no_error( - self._node, command, sudo=False, message=message) + self._node, command, sudo=False, message=message + ) - return json.loads(stdout.split('\n', 1)[0]) if stdout else dict() + return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict() def _qemu_qga_exec(self, cmd): """Execute QGA command. @@ -474,25 +473,25 @@ class QemuUtils(object): :param cmd: QGA command to execute. 
:type cmd: str """ - command = ('(echo "{{ \\"execute\\": \\"{cmd}\\" }}"; sleep 1) | ' - 'sudo -S socat - UNIX-CONNECT:{qga}'. - format(cmd=cmd, qga=self._temp.get('qga'))) - message = ('QGA execute "{cmd}" failed on {host}'. - format(cmd=cmd, host=self._node['host'])) + command = f"(echo \"{{{{ \\\"execute\\\": " \ + f"\\\"{cmd}\\\" }}}}\"; sleep 1) | " \ + f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qga')}" + message = f"QGA execute '{cmd}' failed on {self._node[u'host']}" stdout, _ = exec_cmd_no_error( - self._node, command, sudo=False, message=message) + self._node, command, sudo=False, message=message + ) - return json.loads(stdout.split('\n', 1)[0]) if stdout else dict() + return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict() def _wait_until_vm_boot(self): """Wait until QEMU with NestedVM is booted.""" - if self._opt.get('vm_type') == 'nestedvm': + if self._opt.get(u"vm_type") == u"nestedvm": self._wait_until_nestedvm_boot() self._update_vm_interfaces() - elif self._opt.get('vm_type') == 'kernelvm': + elif self._opt.get(u"vm_type") == u"kernelvm": self._wait_until_kernelvm_boot() else: - raise RuntimeError('QEMU: Unsupported VM type!') + raise RuntimeError(u"QEMU: Unsupported VM type!") def _wait_until_nestedvm_boot(self, retries=12): """Wait until QEMU with NestedVM is booted. @@ -508,40 +507,39 @@ class QemuUtils(object): try: out = self._qemu_qga_flush() except ValueError: - logger.trace('QGA qga flush unexpected output {out}'. - format(out=out)) + logger.trace(f"QGA qga flush unexpected output {out}") # Empty output - VM not booted yet if not out: sleep(5) else: break else: - raise RuntimeError('QEMU: Timeout, VM not booted on {host}!'. - format(host=self._node['host'])) + raise RuntimeError( + f"QEMU: Timeout, VM not booted on {self._node[u'host']}!" + ) for _ in range(retries): out = None try: - out = self._qemu_qga_exec('guest-ping') + out = self._qemu_qga_exec(u"guest-ping") except ValueError: - logger.trace('QGA guest-ping unexpected output {out}'. - format(out=out)) + logger.trace(f"QGA guest-ping unexpected output {out}") # Empty output - VM not booted yet. if not out: sleep(5) # Non-error return - VM booted. - elif out.get('return') is not None: + elif out.get(u"return") is not None: break # Skip error and wait. - elif out.get('error') is not None: + elif out.get(u"error") is not None: sleep(5) else: # If there is an unexpected output from QGA guest-info, try # again until timeout. - logger.trace('QGA guest-ping unexpected output {out}'. - format(out=out)) + logger.trace(f"QGA guest-ping unexpected output {out}") else: - raise RuntimeError('QEMU: Timeout, VM not booted on {host}!'. - format(host=self._node['host'])) + raise RuntimeError( + f"QEMU: Timeout, VM not booted on {self._node[u'host']}!" + ) def _wait_until_kernelvm_boot(self, retries=60): """Wait until QEMU KernelVM is booted. @@ -552,47 +550,49 @@ class QemuUtils(object): vpp_ver = VPPUtil.vpp_show_version(self._node) for _ in range(retries): - command = ('tail -1 {log}'.format(log=self._temp.get('log'))) + command = f"tail -1 {self._temp.get(u'log')}" stdout = None try: stdout, _ = exec_cmd_no_error(self._node, command, sudo=True) sleep(1) except RuntimeError: pass - if vpp_ver in stdout or 'Press enter to exit' in stdout: + if vpp_ver in stdout or u"Press enter to exit" in stdout: break - if 'reboot: Power down' in stdout: - raise RuntimeError('QEMU: NF failed to run on {host}!'. 
- format(host=self._node['host'])) + if u"reboot: Power down" in stdout: + raise RuntimeError( + f"QEMU: NF failed to run on {self._node[u'host']}!" + ) else: - raise RuntimeError('QEMU: Timeout, VM not booted on {host}!'. - format(host=self._node['host'])) + raise RuntimeError( + f"QEMU: Timeout, VM not booted on {self._node[u'host']}!" + ) def _update_vm_interfaces(self): """Update interface names in VM node dict.""" # Send guest-network-get-interfaces command via QGA, output example: # {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"}, # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]}. - out = self._qemu_qga_exec('guest-network-get-interfaces') - interfaces = out.get('return') + out = self._qemu_qga_exec(u"guest-network-get-interfaces") + interfaces = out.get(u"return") mac_name = {} if not interfaces: - raise RuntimeError('Get VM interface list failed on {host}'. - format(host=self._node['host'])) + raise RuntimeError( + f"Get VM interface list failed on {self._node[u'host']}" + ) # Create MAC-name dict. for interface in interfaces: - if 'hardware-address' not in interface: + if u"hardware-address" not in interface: continue - mac_name[interface['hardware-address']] = interface['name'] + mac_name[interface[u"hardware-address"]] = interface[u"name"] # Match interface by MAC and save interface name. - for interface in self._vm_info['interfaces'].values(): - mac = interface.get('mac_address') + for interface in self._vm_info[u"interfaces"].values(): + mac = interface.get(u"mac_address") if_name = mac_name.get(mac) if if_name is None: - logger.trace( - 'Interface name for MAC {mac} not found'.format(mac=mac)) + logger.trace(f"Interface name for MAC {mac} not found") else: - interface['name'] = if_name + interface[u"name"] = if_name def qemu_start(self): """Start QEMU and wait until VM boot. @@ -601,17 +601,16 @@ class QemuUtils(object): :rtype: dict """ cmd_opts = OptionString() - cmd_opts.add('{bin_path}/qemu-system-{arch}'.format( - bin_path=Constants.QEMU_BIN_PATH, arch=self._arch)) + cmd_opts.add(f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch}") cmd_opts.extend(self._params) - message = ('QEMU: Start failed on {host}!'. - format(host=self._node['host'])) + message = f"QEMU: Start failed on {self._node[u'host']}!" try: DUTSetup.check_huge_page( - self._node, '/dev/hugepages', self._opt.get('mem')) + self._node, u"/dev/hugepages", self._opt.get(u"mem")) exec_cmd_no_error( - self._node, cmd_opts, timeout=300, sudo=True, message=message) + self._node, cmd_opts, timeout=300, sudo=True, message=message + ) self._wait_until_vm_boot() except RuntimeError: self.qemu_kill_all() @@ -620,22 +619,25 @@ class QemuUtils(object): def qemu_kill(self): """Kill qemu process.""" - exec_cmd(self._node, 'chmod +r {pidfile}'. - format(pidfile=self._temp.get('pidfile')), sudo=True) - exec_cmd(self._node, 'kill -SIGKILL $(cat {pidfile})'. 
- format(pidfile=self._temp.get('pidfile')), sudo=True) + exec_cmd( + self._node, f"chmod +r {self._temp.get(u'pidfile')}", sudo=True + ) + exec_cmd( + self._node, f"kill -SIGKILL $(cat {self._temp.get(u'pidfile')})", + sudo=True + ) for value in self._temp.values(): - exec_cmd(self._node, 'cat {value}'.format(value=value), sudo=True) - exec_cmd(self._node, 'rm -f {value}'.format(value=value), sudo=True) + exec_cmd(self._node, f"cat {value}", sudo=True) + exec_cmd(self._node, f"rm -f {value}", sudo=True) def qemu_kill_all(self): """Kill all qemu processes on DUT node if specified.""" - exec_cmd(self._node, 'pkill -SIGKILL qemu', sudo=True) + exec_cmd(self._node, u"pkill -SIGKILL qemu", sudo=True) for value in self._temp.values(): - exec_cmd(self._node, 'cat {value}'.format(value=value), sudo=True) - exec_cmd(self._node, 'rm -f {value}'.format(value=value), sudo=True) + exec_cmd(self._node, f"cat {value}", sudo=True) + exec_cmd(self._node, f"rm -f {value}", sudo=True) def qemu_version(self): """Return Qemu version. @@ -643,12 +645,11 @@ class QemuUtils(object): :returns: Qemu version. :rtype: str """ - command = ('{bin_path}/qemu-system-{arch} --version'.format( - bin_path=Constants.QEMU_BIN_PATH, - arch=self._arch)) + command = f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch} " \ + f"--version" try: stdout, _ = exec_cmd_no_error(self._node, command, sudo=True) - return match(r'QEMU emulator version ([\d.]*)', stdout).group(1) + return match(r"QEMU emulator version ([\d.]*)", stdout).group(1) except RuntimeError: self.qemu_kill_all() raise diff --git a/resources/libraries/python/SRv6.py b/resources/libraries/python/SRv6.py index 24ebea7ccf..4d21733c28 100644 --- a/resources/libraries/python/SRv6.py +++ b/resources/libraries/python/SRv6.py @@ -14,6 +14,7 @@ """Segment Routing over IPv6 data plane utilities library.""" from enum import IntEnum + from ipaddress import ip_address, IPv6Address from resources.libraries.python.Constants import Constants @@ -52,12 +53,9 @@ class SRv6PolicySteeringTypes(IntEnum): SR_STEER_IPV6 = 6 -class SRv6(object): +class SRv6: """SRv6 class.""" - def __init__(self): - pass - @staticmethod def create_srv6_sid_object(ip_addr): """Create SRv6 SID object. @@ -67,7 +65,9 @@ class SRv6(object): :returns: SRv6 SID object. :rtype: dict """ - return dict(addr=IPv6Address(unicode(ip_addr)).packed) + return dict( + addr=IPv6Address(ip_addr).packed + ) @staticmethod def create_srv6_sid_list(sids): @@ -78,10 +78,16 @@ class SRv6(object): :returns: SRv6 SID list object. :rtype: list """ - sid_list = list(0 for _ in xrange(16)) - for i in xrange(len(sids)): - sid_list[i] = SRv6.create_srv6_sid_object(sids[i]) - return dict(num_sids=len(sids), weight=1, sids=sid_list) + sid_list = [0] * 16 + + for index, item in enumerate(sids): + sid_list[index] = SRv6.create_srv6_sid_object(item) + + return dict( + num_sids=len(sids), + weight=1, + sids=sid_list + ) @staticmethod def configure_sr_localsid( @@ -123,43 +129,42 @@ class SRv6(object): :type sid_list: list :raises ValueError: If required parameter is missing. """ - beh = behavior.replace('.', '_').upper() + beh = behavior.replace(u".", u"_").upper() # There is no SRv6Behaviour enum defined for functions from SRv6 plugins # so we need to use CLI command to configure it. 
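# Illustrative aside, not part of the change: for the END.AD/END.AS/END.AM
# behaviors the configuration below is issued as a CLI string rather than a
# PAPI call. With invented values it comes out as follows; note the single
# space separating the behavior keyword from its parameters.
local_sid = u"B::1"
behavior = u"end.as"
params = u"nh C::2 oif eth1 iif eth2"
cli_cmd = f"sr localsid address {local_sid} behavior {behavior} {params}"
assert cli_cmd == u"sr localsid address B::1 behavior end.as " \
                  u"nh C::2 oif eth1 iif eth2"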
-        if beh in (getattr(SRv6Behavior, 'END_AD').name,
-                   getattr(SRv6Behavior, 'END_AS').name,
-                   getattr(SRv6Behavior, 'END_AM').name):
-            if beh == getattr(SRv6Behavior, 'END_AS').name:
+        if beh in (getattr(SRv6Behavior, u"END_AD").name,
+                   getattr(SRv6Behavior, u"END_AS").name,
+                   getattr(SRv6Behavior, u"END_AM").name):
+            if beh == getattr(SRv6Behavior, u"END_AS").name:
                 if next_hop is None or out_if is None or in_if is None or \
                         src_addr is None or sid_list is None:
                     raise ValueError(
-                        'Required parameter(s) missing.\n'
-                        'next_hop:{nh}\n'
-                        'out_if:{oif}\n'
-                        'in_if:{iif}\n'
-                        'src_addr:{saddr}\n'
-                        'sid_list:{sids}'.format(
-                            nh=next_hop, oif=out_if, iif=in_if, saddr=src_addr,
-                            sids=sid_list))
-                sid_conf = 'next ' + ' next '.join(sid_list)
-                params = 'nh {nh} oif {oif} iif {iif} src {saddr} {sids}'.\
-                    format(nh=next_hop, oif=out_if, iif=in_if, saddr=src_addr,
-                           sids=sid_conf)
+                        f"Required parameter(s) missing.\n"
+                        f"next_hop:{next_hop}\n"
+                        f"out_if:{out_if}\n"
+                        f"in_if:{in_if}\n"
+                        f"src_addr:{src_addr}\n"
+                        f"sid_list:{sid_list}"
+                    )
+                sid_conf = f"next {u' next '.join(sid_list)}"
+                params = f"nh {next_hop} oif {out_if} iif {in_if} " \
+                    f"src {src_addr} {sid_conf}"
             else:
                 if next_hop is None or out_if is None or in_if is None:
                     raise ValueError(
-                        'Required parameter(s) missing.\nnext_hop:{0}\n'
-                        'out_if:{1}\nin_if:{2}'.format(next_hop, out_if, in_if))
-                params = 'nh {0} oif {1} iif {2}'.format(
-                    next_hop, out_if, in_if)
+                        f"Required parameter(s) missing.\n"
+                        f"next_hop:{next_hop}\n"
+                        f"out_if:{out_if}\n"
+                        f"in_if:{in_if}"
+                    )
+                params = f"nh {next_hop} oif {out_if} iif {in_if}"

-            cli_cmd = 'sr localsid address {l_sid} behavior {beh} {params}'.\
-                format(l_sid=local_sid, beh=behavior, params=params)
+            cli_cmd = f"sr localsid address {local_sid} behavior {behavior} " \
+                f"{params}"

             PapiSocketExecutor.run_cli_cmd(node, cli_cmd)
-
             return

-        cmd = 'sr_localsid_add_del'
+        cmd = u"sr_localsid_add_del"
         args = dict(
             is_del=0,
             localsid=SRv6.create_srv6_sid_object(local_sid),
@@ -171,36 +176,41 @@ class SRv6(object):
             nh_addr6=0,
             nh_addr4=0
         )
-        err_msg = 'Failed to add SR localSID {lsid} on host {host}'.format(
-            lsid=local_sid, host=node['host'])
-
-        if beh in (getattr(SRv6Behavior, 'END_X').name,
-                   getattr(SRv6Behavior, 'END_DX4').name,
-                   getattr(SRv6Behavior, 'END_DX6').name):
+        err_msg = f"Failed to add SR localSID {local_sid} " \
+            f"on host {node[u'host']}"
+        if beh in (getattr(SRv6Behavior, u"END_X").name,
+                   getattr(SRv6Behavior, u"END_DX4").name,
+                   getattr(SRv6Behavior, u"END_DX6").name):
             if interface is None or next_hop is None:
-                raise ValueError('Required parameter(s) missing.\n'
-                                 'interface:{ifc}\n'
-                                 'next_hop:{nh}'.
-                                 format(ifc=interface, nh=next_hop))
-            args['sw_if_index'] = InterfaceUtil.get_interface_index(
-                node, interface)
-            next_hop = ip_address(unicode(next_hop))
+                raise ValueError(
+                    f"Required parameter(s) missing.\n"
+                    f"interface:{interface}\n"
+                    f"next_hop:{next_hop}"
+                )
+            args[u"sw_if_index"] = InterfaceUtil.get_interface_index(
+                node, interface
+            )
+            next_hop = ip_address(next_hop)
             if next_hop.version == 6:
-                args['nh_addr6'] = next_hop.packed
+                args[u"nh_addr6"] = next_hop.packed
             else:
-                args['nh_addr4'] = next_hop.packed
-        elif beh == getattr(SRv6Behavior, 'END_DX2').name:
+                args[u"nh_addr4"] = next_hop.packed
+        elif beh == getattr(SRv6Behavior, u"END_DX2").name:
             if interface is None:
-                raise ValueError('Required parameter missing.\ninterface:{ifc}'.
- format(ifc=interface)) - args['sw_if_index'] = InterfaceUtil.get_interface_index( - node, interface) - elif beh in (getattr(SRv6Behavior, 'END_DT4').name, - getattr(SRv6Behavior, 'END_DT6').name): + raise ValueError( + f"Required parameter missing.\ninterface: {interface}" + ) + args[u"sw_if_index"] = InterfaceUtil.get_interface_index( + node, interface + ) + elif beh in (getattr(SRv6Behavior, u"END_DT4").name, + getattr(SRv6Behavior, u"END_DT6").name): if fib_table is None: - raise ValueError('Required parameter missing.\n' - 'fib_table: {fib}'.format(fib=fib_table)) - args['fib_table'] = fib_table + raise ValueError( + f"Required parameter missing.\n" + f"fib_table: {fib_table}" + ) + args[u"fib_table"] = fib_table with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -212,15 +222,14 @@ class SRv6(object): :param node: Given node to show localSIDs on. :type node: dict """ - cmd = 'sr_localsids_dump' - err_msg = 'Failed to get SR localSID dump on host {host}'.format( - host=node['host']) + cmd = u"sr_localsids_dump" + err_msg = f"Failed to get SR localSID dump on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd).get_details(err_msg) @staticmethod - def configure_sr_policy(node, bsid, sid_list, mode='encap'): + def configure_sr_policy(node, bsid, sid_list, mode=u"encap"): """Create SRv6 policy on the given node. :param node: Given node to create SRv6 policy on. @@ -232,15 +241,15 @@ class SRv6(object): :type sid_list: list :type mode: str """ - cmd = 'sr_policy_add' + cmd = u"sr_policy_add" args = dict( - bsid_addr=IPv6Address(unicode(bsid)).packed, + bsid_addr=IPv6Address(bsid).packed, weight=1, - is_encap=1 if mode == 'encap' else 0, + is_encap=1 if mode == u"encap" else 0, sids=SRv6.create_srv6_sid_list(sid_list) ) - err_msg = 'Failed to add SR policy for BindingSID {bsid} ' \ - 'on host {host}'.format(bsid=bsid, host=node['host']) + err_msg = f"Failed to add SR policy for BindingSID {bsid} " \ + f"on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -252,9 +261,8 @@ class SRv6(object): :param node: Given node to show SRv6 policies on. :type node: dict """ - cmd = 'sr_policies_dump' - err_msg = 'Failed to get SR policies dump on host {host}'.format( - host=node['host']) + cmd = u"sr_policies_dump" + err_msg = f"Failed to get SR policies dump on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd).get_details(err_msg) @@ -284,31 +292,40 @@ class SRv6(object): :raises ValueError: If unsupported mode used or required parameter is missing. """ - if mode.lower() == 'l2': + if mode.lower() == u"l2": if interface is None: - raise ValueError('Required data missing.\ninterface:{ifc}'. 
- format(ifc=interface)) + raise ValueError( + f"Required data missing.\n" + f"interface: {interface}" + ) sw_if_index = InterfaceUtil.get_interface_index(node, interface) mask_width = 0 - prefix_addr = 16 * b'\x00' - traffic_type = getattr(SRv6PolicySteeringTypes, 'SR_STEER_L2').value - elif mode.lower() == 'l3': + prefix_addr = 16 * b"\0" + traffic_type = getattr( + SRv6PolicySteeringTypes, u"SR_STEER_L2" + ).value + elif mode.lower() == u"l3": if ip_addr is None or prefix is None: - raise ValueError('Required data missing.\nIP address:{0}\n' - 'mask:{1}'.format(ip_addr, prefix)) + raise ValueError( + f"Required data missing.\n" + f"IP address:{ip_addr}\n" + f"mask:{prefix}" + ) sw_if_index = Constants.BITWISE_NON_ZERO - ip_addr = ip_address(unicode(ip_addr)) + ip_addr = ip_address(ip_addr) prefix_addr = ip_addr.packed mask_width = int(prefix) if ip_addr.version == 4: - prefix_addr += 12 * b'\x00' + prefix_addr += 12 * b"\0" traffic_type = getattr( - SRv6PolicySteeringTypes, 'SR_STEER_IPV4').value + SRv6PolicySteeringTypes, u"SR_STEER_IPV4" + ).value else: traffic_type = getattr( - SRv6PolicySteeringTypes, 'SR_STEER_IPV6').value + SRv6PolicySteeringTypes, u"SR_STEER_IPV6" + ).value else: - raise ValueError('Unsupported mode: {0}'.format(mode)) + raise ValueError(f"Unsupported mode: {mode}") return sw_if_index, mask_width, prefix_addr, traffic_type @@ -338,12 +355,13 @@ class SRv6(object): """ sw_if_index, mask_width, prefix_addr, traffic_type = \ SRv6._get_sr_steer_policy_args( - node, mode, interface, ip_addr, prefix) + node, mode, interface, ip_addr, prefix + ) - cmd = 'sr_steering_add_del' + cmd = u"sr_steering_add_del" args = dict( is_del=0, - bsid_addr=IPv6Address(unicode(bsid)).packed, + bsid_addr=IPv6Address(str(bsid)).packed, sr_policy_index=0, table_id=0, prefix_addr=prefix_addr, @@ -351,8 +369,8 @@ class SRv6(object): sw_if_index=sw_if_index, traffic_type=traffic_type ) - err_msg = 'Failed to add SRv6 steering policy for BindingSID {bsid} ' \ - 'on host {host}'.format(bsid=bsid, host=node['host']) + err_msg = f"Failed to add SRv6 steering policy for BindingSID {bsid} " \ + f"on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: papi_exec.add(cmd, **args).get_reply(err_msg) @@ -364,9 +382,8 @@ class SRv6(object): :param node: Given node to show SRv6 steering policies on. 
        :type node: dict
        """
-        cmd = 'sr_steering_pol_dump'
-        err_msg = 'Failed to get SR localSID dump on host {host}'.format(
-            host=node['host'])
+        cmd = u"sr_steering_pol_dump"
+        err_msg = f"Failed to get SR localSID dump on host {node[u'host']}"

         with PapiSocketExecutor(node) as papi_exec:
             papi_exec.add(cmd).get_details(err_msg)
@@ -380,10 +397,12 @@ class SRv6(object):
         :type node: dict
         :type ip6_addr: str
         """
-        cmd = 'sr_set_encap_source'
-        args = dict(encaps_source=IPv6Address(unicode(ip6_addr)).packed)
-        err_msg = 'Failed to set SRv6 encapsulation source address {addr} ' \
-            'on host {host}'.format(addr=ip6_addr, host=node['host'])
+        cmd = u"sr_set_encap_source"
+        args = dict(
+            encaps_source=IPv6Address(ip6_addr).packed
+        )
+        err_msg = f"Failed to set SRv6 encapsulation source address " \
+            f"{ip6_addr} on host {node[u'host']}"

         with PapiSocketExecutor(node) as papi_exec:
             papi_exec.add(cmd, **args).get_reply(err_msg)
diff --git a/resources/libraries/python/SchedUtils.py b/resources/libraries/python/SchedUtils.py
index b4c57217bd..bb14c29de8 100644
--- a/resources/libraries/python/SchedUtils.py
+++ b/resources/libraries/python/SchedUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -12,12 +12,13 @@
 # limitations under the License.

 """Linux scheduler util library"""
+
 from resources.libraries.python.ssh import SSH

+__all__ = [u"SchedUtils"]

-__all__ = ["SchedUtils"]

-class SchedUtils(object):
+class SchedUtils:
     """General class for any linux scheduler related methods/functions."""

     @staticmethod
@@ -32,24 +33,26 @@ class SchedUtils(object):
         ssh = SSH()
         ssh.connect(node)

-        cmd = "cat /proc/`pidof vpp`/task/*/stat | grep -i vpp_wk"\
-            " | awk '{print $1}'"
+        cmd = u"cat /proc/`pidof vpp`/task/*/stat | grep -i vpp_wk" \
+            u" | awk '{print $1}'"

         for _ in range(3):
-            (ret, out, _) = ssh.exec_command_sudo(cmd)
+            ret, out, _ = ssh.exec_command_sudo(cmd)
             if ret == 0:
                 try:
                     if not out:
                         raise ValueError
                 except ValueError:
-                    print 'Reading VPP worker thread PID failed.'
+                    print(u"Reading VPP worker thread PID failed.")
                 else:
-                    for pid in out.split("\n"):
-                        if pid and pid[0] != '#':
+                    for pid in out.split(u"\n"):
+                        if pid and pid[0] != u"#":
                             SchedUtils.set_proc_scheduling_rr(node, int(pid))
                     break
         else:
-            raise RuntimeError('Failed to retrieve PID for VPP worker threads.')
+            raise RuntimeError(
+                u"Failed to retrieve PID for VPP worker threads."
+            )

     @staticmethod
     def set_proc_scheduling_rr(node, pid, priority=1):
@@ -69,16 +72,17 @@ class SchedUtils(object):
         ssh.connect(node)

         if pid < 1:
-            raise ValueError("SCHED_RR: PID must be higher then 1.")
+            raise ValueError(u"SCHED_RR: PID must be higher than 1.")

         if 1 <= priority <= 99:
-            cmd = "chrt -r -p {0} {1}".format(priority, pid)
-            (ret, _, _) = ssh.exec_command_sudo(cmd)
+            cmd = f"chrt -r -p {priority} {pid}"
+            ret, _, _ = ssh.exec_command_sudo(cmd)
             if ret != 0:
-                raise RuntimeError("SCHED_RR: Failed to set policy "\
-                    "for PID {0}.".format(pid))
+                raise RuntimeError(
+                    f"SCHED_RR: Failed to set policy for PID {pid}."
+                )
         else:
-            raise ValueError("SCHED_RR: Priority must be in range 1-99.")
+            raise ValueError(u"SCHED_RR: Priority must be in range 1-99.")

     @staticmethod
     def set_proc_scheduling_other(node, pid):
@@ -95,10 +99,11 @@ class SchedUtils(object):
         ssh.connect(node)

         if pid < 1:
-            raise ValueError("SCHED_OTHER: PID must be higher then 1.")
+            raise ValueError(u"SCHED_OTHER: PID must be higher than 1.")

-        cmd = "chrt -o -p 0 {1}".format(pid)
-        (ret, _, _) = ssh.exec_command_sudo(cmd)
+        cmd = f"chrt -o -p 0 {pid}"
+        ret, _, _ = ssh.exec_command_sudo(cmd)
         if ret != 0:
-            raise RuntimeError("SCHED_OTHER: Failed to set policy "\
-                "for PID {0}.".format(pid))
+            raise RuntimeError(
+                f"SCHED_OTHER: Failed to set policy for PID {pid}."
+            )
diff --git a/resources/libraries/python/SetupFramework.py b/resources/libraries/python/SetupFramework.py
index 3f74057832..45447e923b 100644
--- a/resources/libraries/python/SetupFramework.py
+++ b/resources/libraries/python/SetupFramework.py
@@ -17,7 +17,6 @@ supposed to end up here.
 """

 from os import environ, remove
-from os.path import basename
 from tempfile import NamedTemporaryFile
 import threading

@@ -28,7 +27,7 @@ from resources.libraries.python.ssh import exec_cmd_no_error, scp_node
 from resources.libraries.python.LocalExecution import run
 from resources.libraries.python.topology import NodeType

-__all__ = ["SetupFramework"]
+__all__ = [u"SetupFramework"]


 def pack_framework_dir():
@@ -40,21 +39,25 @@ def pack_framework_dir():
     """

     try:
-        directory = environ["TMPDIR"]
+        directory = environ[u"TMPDIR"]
     except KeyError:
         directory = None

     if directory is not None:
-        tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="csit-testing-",
-                                     dir="{0}".format(directory))
+        tmpfile = NamedTemporaryFile(
+            suffix=u".tgz", prefix=u"csit-testing-", dir=f"{directory}"
+        )
     else:
-        tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="csit-testing-")
+        tmpfile = NamedTemporaryFile(suffix=u".tgz", prefix=u"csit-testing-")
     file_name = tmpfile.name
     tmpfile.close()

-    run(["tar", "--sparse", "--exclude-vcs", "--exclude=output*.xml",
-         "--exclude=./tmp", "-zcf", file_name, "."],
-        msg="Could not pack testing framework")
+    run(
+        [
+            u"tar", u"--sparse", u"--exclude-vcs", u"--exclude=output*.xml",
+            u"--exclude=./tmp", u"-zcf", file_name, u"."
+        ], msg=u"Could not pack testing framework"
+    )

     return file_name

@@ -68,10 +71,15 @@ def copy_tarball_to_node(tarball, node):
     :type node: dict
     :returns: nothing
     """
-    host = node['host']
-    logger.console('Copying tarball to {0} starts.'.format(host))
-    scp_node(node, tarball, "/tmp/")
-    logger.console('Copying tarball to {0} done.'.format(host))
+    logger.console(
+        f"Copying tarball to {node[u'type']} host {node[u'host']}, "
+        f"port {node[u'port']} starts."
+    )
+    scp_node(node, tarball, u"/tmp/")
+    logger.console(
+        f"Copying tarball to {node[u'type']} host {node[u'host']}, "
+        f"port {node[u'port']} done."
+    )


 def extract_tarball_at_node(tarball, node):
@@ -86,16 +94,22 @@ def extract_tarball_at_node(tarball, node):
     :returns: nothing
     :raises RuntimeError: When failed to unpack tarball.
     """
-    host = node['host']
-    logger.console('Extracting tarball to {0} on {1} starts.'
-                   .format(con.REMOTE_FW_DIR, host))
+    logger.console(
+        f"Extracting tarball to {con.REMOTE_FW_DIR} on {node[u'type']} "
+        f"host {node[u'host']}, port {node[u'port']} starts."
+ ) + cmd = f"sudo rm -rf {con.REMOTE_FW_DIR}; mkdir {con.REMOTE_FW_DIR}; " \ + f"tar -zxf {tarball} -C {con.REMOTE_FW_DIR}; rm -f {tarball}" exec_cmd_no_error( - node, "sudo rm -rf {1}; mkdir {1}; tar -zxf {0} -C {1};" - " rm -f {0}".format(tarball, con.REMOTE_FW_DIR), - message='Failed to extract {0} at node {1}'.format(tarball, host), - timeout=30, include_reason=True) - logger.console('Extracting tarball to {0} on {1} done.' - .format(con.REMOTE_FW_DIR, host)) + node, cmd, + message=f"Failed to extract {tarball} at node {node[u'type']} " + f"host {node[u'host']}, port {node[u'port']}", + timeout=30, include_reason=True + ) + logger.console( + f"Extracting tarball to {con.REMOTE_FW_DIR} on {node[u'type']} " + f"host {node[u'host']}, port {node[u'port']} done." + ) def create_env_directory_at_node(node): @@ -106,17 +120,22 @@ def create_env_directory_at_node(node): :returns: nothing :raises RuntimeError: When failed to setup virtualenv. """ - host = node['host'] - logger.console('Virtualenv setup including requirements.txt on {0} starts.' - .format(host)) + logger.console( + f"Virtualenv setup including requirements.txt on {node[u'type']} " + f"host {node[u'host']}, port {node[u'port']} starts." + ) + cmd = f"cd {con.REMOTE_FW_DIR} && rm -rf env && virtualenv " \ + f"-p $(which python3) --system-site-packages --never-download env " \ + f"&& source env/bin/activate && pip3 install -r requirements.txt" exec_cmd_no_error( - node, 'cd {0} && rm -rf env' - ' && virtualenv -p $(which python3) ' - '--system-site-packages --never-download env' - ' && source env/bin/activate && pip3 install -r requirements.txt' - .format(con.REMOTE_FW_DIR), timeout=100, include_reason=True, - message="Failed install at node {host}".format(host=host)) - logger.console('Virtualenv setup on {0} done.'.format(host)) + node, cmd, timeout=100, include_reason=True, + message=f"Failed install at node {node[u'type']} host {node[u'host']}, " + f"port {node[u'port']}" + ) + logger.console( + f"Virtualenv setup on {node[u'type']} host {node[u'host']}, " + f"port {node[u'port']} done." + ) def setup_node(node, tarball, remote_tarball, results=None): @@ -133,18 +152,22 @@ def setup_node(node, tarball, remote_tarball, results=None): :returns: True - success, False - error :rtype: bool """ - host = node['host'] try: copy_tarball_to_node(tarball, node) extract_tarball_at_node(remote_tarball, node) - if node['type'] == NodeType.TG: + if node[u"type"] == NodeType.TG: create_env_directory_at_node(node) except RuntimeError as exc: - logger.console("Node {node} setup failed, error: {err!r}".format( - node=host, err=exc)) + logger.console( + f"Node {node[u'type']} host {node[u'host']}, port {node[u'port']} " + f"setup failed, error: {exc!r}" + ) result = False else: - logger.console('Setup of node {ip} done.'.format(ip=host)) + logger.console( + f"Setup of node {node[u'type']} host {node[u'host']}, " + f"port {node[u'port']} done." + ) result = True if isinstance(results, list): @@ -168,15 +191,20 @@ def delete_framework_dir(node): :param node: Node to delete framework directory on. :type node: dict """ - host = node['host'] logger.console( - 'Deleting framework directory on {0} starts.'.format(host)) + f"Deleting framework directory on {node[u'type']} host {node[u'host']}," + f" port {node[u'port']} starts." 
+ ) exec_cmd_no_error( - node, 'sudo rm -rf {0}'.format(con.REMOTE_FW_DIR), - message="Framework delete failed at node {host}".format(host=host), - timeout=100, include_reason=True) + node, f"sudo rm -rf {con.REMOTE_FW_DIR}", + message=f"Framework delete failed at node {node[u'type']} " + f"host {node[u'host']}, port {node[u'port']}", + timeout=100, include_reason=True + ) logger.console( - 'Deleting framework directory on {0} done.'.format(host)) + f"Deleting framework directory on {node[u'type']} host {node[u'host']}," + f" port {node[u'port']} done." + ) def cleanup_node(node, results=None): @@ -189,14 +217,19 @@ def cleanup_node(node, results=None): :returns: True - success, False - error :rtype: bool """ - host = node['host'] try: delete_framework_dir(node) except RuntimeError: - logger.error("Cleanup of node {0} failed.".format(host)) + logger.error( + f"Cleanup of node {node[u'type']} host {node[u'host']}, " + f"port {node[u'port']} failed." + ) result = False else: - logger.console('Cleanup of node {0} done.'.format(host)) + logger.console( + f"Cleanup of node {node[u'type']} host {node[u'host']}, " + f"port {node[u'port']} done." + ) result = True if isinstance(results, list): @@ -204,7 +237,7 @@ def cleanup_node(node, results=None): return result -class SetupFramework(object): +class SetupFramework: """Setup suite run on topology nodes. Many VAT/CLI based tests need the scripts at remote hosts before executing @@ -222,13 +255,13 @@ class SetupFramework(object): """ tarball = pack_framework_dir() - msg = 'Framework packed to {0}'.format(tarball) + msg = f"Framework packed to {tarball}" logger.console(msg) logger.trace(msg) - remote_tarball = "/tmp/{0}".format(basename(tarball)) + remote_tarball = f"{tarball}" - results = [] - threads = [] + results = list() + threads = list() for node in nodes.values(): args = node, tarball, remote_tarball, results @@ -237,24 +270,27 @@ class SetupFramework(object): threads.append(thread) logger.info( - 'Executing node setups in parallel, waiting for threads to end') + f"Executing node setups in parallel, waiting for threads to end" + ) for thread in threads: thread.join() - logger.info('Results: {0}'.format(results)) + logger.info(f"Results: {results}") delete_local_tarball(tarball) if all(results): - logger.console('All nodes are ready.') + logger.console(u"All nodes are ready.") for node in nodes.values(): - logger.info('Setup of {type} node {ip} done.'. - format(type=node['type'], ip=node['host'])) + logger.info( + f"Setup of node {node[u'type']} host {node[u'host']}, " + f"port {node[u'port']} done." + ) else: - raise RuntimeError('Failed to setup framework.') + raise RuntimeError(u"Failed to setup framework.") -class CleanupFramework(object): +class CleanupFramework: """Clean up suite run on topology nodes.""" @staticmethod @@ -266,24 +302,24 @@ class CleanupFramework(object): :raises RuntimeError: If cleanup framework failed. """ - results = [] - threads = [] + results = list() + threads = list() for node in nodes.values(): - thread = threading.Thread(target=cleanup_node, - args=(node, results)) + thread = threading.Thread(target=cleanup_node, args=(node, results)) thread.start() threads.append(thread) logger.info( - 'Executing node cleanups in parallel, waiting for threads to end.') + u"Executing node cleanups in parallel, waiting for threads to end." 
+        )
        for thread in threads:
            thread.join()
-        logger.info('Results: {0}'.format(results))
+        logger.info(f"Results: {results}")
        if all(results):
-            logger.console('All nodes cleaned up.')
+            logger.console(u"All nodes cleaned up.")
        else:
-            raise RuntimeError('Failed to cleaned up framework.')
+            raise RuntimeError(u"Failed to clean up framework.")
diff --git a/resources/libraries/python/SysctlUtil.py b/resources/libraries/python/SysctlUtil.py
index 0db7e2c5e5..f8c169a833 100644
--- a/resources/libraries/python/SysctlUtil.py
+++ b/resources/libraries/python/SysctlUtil.py
@@ -15,10 +15,10 @@
 from resources.libraries.python.ssh import exec_cmd_no_error
-__all__ = ["SysctlUtil"]
+__all__ = [u"SysctlUtil"]
-class SysctlUtil(object):
+class SysctlUtil:
     """Class contains methods for getting or setting sysctl settings."""
     @staticmethod
@@ -30,10 +30,8 @@ class SysctlUtil(object):
         :type node: dict
         :type key: str
         """
-        command = 'sysctl {key}'.format(key=key)
-
-        message = 'Node {host} failed to run: {command}'.\
-            format(host=node['host'], command=command)
+        command = f"sysctl {key}"
+        message = f"Node {node[u'host']} failed to run: {command}"
         exec_cmd_no_error(node, command, sudo=True, message=message)
@@ -48,10 +46,7 @@ class SysctlUtil(object):
         :type key: str
         :type value: str
         """
-        command = 'sysctl -w {key}={value}'.format(key=key, value=value)
-
-        message = 'Node {host} failed to run: {command}'.\
-            format(host=node['host'], command=command)
+        command = f"sysctl -w {key}={value}"
+        message = f"Node {node[u'host']} failed to run: {command}"
         exec_cmd_no_error(node, command, sudo=True, message=message)
-
diff --git a/resources/libraries/python/TGSetup.py b/resources/libraries/python/TGSetup.py
index 7c05e55503..e105921e23 100644
--- a/resources/libraries/python/TGSetup.py
+++ b/resources/libraries/python/TGSetup.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -13,11 +13,11 @@
 """TG Setup library."""
-from resources.libraries.python.topology import NodeType
 from resources.libraries.python.InterfaceUtil import InterfaceUtil
+from resources.libraries.python.topology import NodeType
-class TGSetup(object):
+class TGSetup:
     """TG setup before test."""
     @staticmethod
@@ -28,5 +28,5 @@
         :type nodes: dict
         """
         for node in nodes.values():
-            if node['type'] == NodeType.TG:
+            if node[u"type"] == NodeType.TG:
                 InterfaceUtil.tg_set_interfaces_default_driver(node)
diff --git a/resources/libraries/python/TLDK/SetupTLDKTest.py b/resources/libraries/python/TLDK/SetupTLDKTest.py
index e4d7120022..e9aaec7149 100644
--- a/resources/libraries/python/TLDK/SetupTLDKTest.py
+++ b/resources/libraries/python/TLDK/SetupTLDKTest.py
@@ -206,7 +206,7 @@ def delete_local_tarball(tarball):
     call(split('sh -c "rm {0} > /dev/null 2>&1"'.format(tarball)))
-class SetupTLDKTest(object):
+class SetupTLDKTest:
    """Setup suite run on topology nodes.
Many VAT/CLI based tests need the scripts at remote hosts before executing diff --git a/resources/libraries/python/TLDK/TLDKConstants.py b/resources/libraries/python/TLDK/TLDKConstants.py index 153b36c3ab..57b7fa23a1 100644 --- a/resources/libraries/python/TLDK/TLDKConstants.py +++ b/resources/libraries/python/TLDK/TLDKConstants.py @@ -15,7 +15,7 @@ """This file defines the constants variables for the TLDK test.""" -class TLDKConstants(object): +class TLDKConstants: """Define the directory path for the TLDK test.""" # TLDK testing directory location at topology nodes diff --git a/resources/libraries/python/TLDK/UdpTest.py b/resources/libraries/python/TLDK/UdpTest.py index b3f1b221e6..8973fe45d9 100644 --- a/resources/libraries/python/TLDK/UdpTest.py +++ b/resources/libraries/python/TLDK/UdpTest.py @@ -26,7 +26,7 @@ from resources.libraries.python.ssh import SSH from resources.libraries.python.TLDK.TLDKConstants import TLDKConstants as con from resources.libraries.python.topology import Topology -class UdpTest(object): +class UdpTest: """Test the TLDK UDP function.""" @staticmethod diff --git a/resources/libraries/python/Tap.py b/resources/libraries/python/Tap.py index 103eeffd9e..19f60242cb 100644 --- a/resources/libraries/python/Tap.py +++ b/resources/libraries/python/Tap.py @@ -17,13 +17,13 @@ from ipaddress import ip_address from robot.api import logger from resources.libraries.python.Constants import Constants -from resources.libraries.python.L2Util import L2Util from resources.libraries.python.InterfaceUtil import InterfaceUtil +from resources.libraries.python.L2Util import L2Util from resources.libraries.python.PapiExecutor import PapiSocketExecutor from resources.libraries.python.topology import Topology -class Tap(object): +class Tap: """Tap utilities.""" @staticmethod @@ -34,35 +34,34 @@ class Tap(object): :param tap_name: Tap interface name for linux tap. :param mac: Optional MAC address for VPP tap. :type node: dict - :type tap_name: str or unicode + :type tap_name: str :type mac: str :returns: Returns a interface index. 
:rtype: int """ - if isinstance(tap_name, unicode): - tap_name = str(tap_name) - cmd = 'tap_create_v2' + cmd = u"tap_create_v2" args = dict( id=Constants.BITWISE_NON_ZERO, use_random_mac=0 if mac else 1, - mac_address=L2Util.mac_to_bin(mac) if mac else 6 * b'\x00', - host_namespace=64 * b'\x00', - host_mac_addr=6 * b'\x00', + mac_address=L2Util.mac_to_bin(mac) if mac else 6 * b"\0", + host_namespace=64 * b"\0", + host_mac_addr=6 * b"\0", host_if_name_set=1, - host_if_name=tap_name + (64 - len(tap_name)) * b'\x00', - host_bridge=64 * b'\x00', - host_ip4_addr=4 * b'\x00', - host_ip6_addr=16 * b'\x00', - host_ip4_gw=4 * b'\x00', - host_ip6_gw=16 * b'\x00' + host_if_name=tap_name.encode(encoding=u"utf-8") + + (64 - len(tap_name)) * b"\0", + host_bridge=64 * b"\0", + host_ip4_addr=4 * b"\0", + host_ip6_addr=16 * b"\0", + host_ip4_gw=4 * b"\0", + host_ip6_gw=16 * b"\0" ) - err_msg = 'Failed to create tap interface {tap} on host {host}'.format( - tap=tap_name, host=node['host']) + err_msg = f"Failed to create tap interface {tap_name} " \ + f"on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) - if_key = Topology.add_new_port(node, 'tap') + if_key = Topology.add_new_port(node, u"tap") Topology.update_interface_sw_if_index(node, if_key, sw_if_index) Topology.update_interface_name(node, if_key, tap_name) if mac is None: @@ -84,7 +83,7 @@ class Tap(object): :returns: VPP tap interface dev_name. :rtype: str """ - return Tap.tap_dump(node, host_if_name).get('dev_name') + return Tap.tap_dump(node, host_if_name).get(u"dev_name") @staticmethod def vpp_get_tap_interface_mac(node, interface_name): @@ -120,19 +119,17 @@ class Tap(object): :returns: Processed tap interface dump. :rtype: dict """ - tap_dump['dev_name'] = tap_dump['dev_name'].rstrip('\x00') - tap_dump['host_if_name'] = tap_dump['host_if_name'].rstrip('\x00') - tap_dump['host_namespace'] = \ - tap_dump['host_namespace'].rstrip('\x00') - tap_dump['host_mac_addr'] = \ - L2Util.bin_to_mac(tap_dump['host_mac_addr']) - tap_dump['host_ip4_addr'] = ip_address(tap_dump['host_ip4_addr']) - tap_dump['host_ip6_addr'] = ip_address(tap_dump['host_ip6_addr']) + tap_dump[u"host_mac_addr"] = L2Util.bin_to_mac( + tap_dump[u"host_mac_addr"] + ) + tap_dump[u"host_ip4_addr"] = ip_address(tap_dump[u"host_ip4_addr"]) + tap_dump[u"host_ip6_addr"] = ip_address(tap_dump[u"host_ip6_addr"]) + return tap_dump - cmd = 'sw_interface_tap_v2_dump' - err_msg = 'Failed to get TAP dump on host {host}'.format( - host=node['host']) + cmd = u"sw_interface_tap_v2_dump" + err_msg = f"Failed to get TAP dump on host {node[u'host']}" + with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd).get_details(err_msg) @@ -140,9 +137,9 @@ class Tap(object): for dump in details: if name is None: data.append(process_tap_dump(dump)) - elif dump.get('host_if_name').rstrip('\x00') == name: + elif dump.get(u"host_if_name") == name: data = process_tap_dump(dump) break - logger.debug('TAP data:\n{tap_data}'.format(tap_data=data)) + logger.debug(f"TAP data:\n{data}") return data diff --git a/resources/libraries/python/TestConfig.py b/resources/libraries/python/TestConfig.py index 48f67e5a0f..9f83fbb5fc 100644 --- a/resources/libraries/python/TestConfig.py +++ b/resources/libraries/python/TestConfig.py @@ -25,7 +25,7 @@ from resources.libraries.python.topology import Topology from resources.libraries.python.VatExecutor import VatExecutor -class TestConfig(object): +class TestConfig: """Contains special test 
configurations implemented in python for faster execution.""" @@ -69,17 +69,20 @@ class TestConfig(object): # configure IPs, create VXLAN interfaces and VLAN sub-interfaces vxlan_count = TestConfig.vpp_create_vxlan_and_vlan_interfaces( node, node_vxlan_if, node_vlan_if, n_tunnels, vni_start, - src_ip_start, dst_ip_start, ip_step) + src_ip_start, dst_ip_start, ip_step + ) # update topology with VXLAN interfaces and VLAN sub-interfaces data # and put interfaces up TestConfig.vpp_put_vxlan_and_vlan_interfaces_up( - node, vxlan_count, node_vlan_if) + node, vxlan_count, node_vlan_if + ) # configure bridge domains, ARPs and routes TestConfig.vpp_put_vxlan_and_vlan_interfaces_to_bridge_domain( node, node_vxlan_if, vxlan_count, op_node, op_node_if, dst_ip_start, - ip_step, bd_id_start) + ip_step, bd_id_start + ) @staticmethod def vpp_create_vxlan_and_vlan_interfaces( @@ -110,47 +113,49 @@ class TestConfig(object): :returns: Number of created VXLAN interfaces. :rtype: int """ - src_ip_addr_start = ip_address(unicode(src_ip_start)) - dst_ip_addr_start = ip_address(unicode(dst_ip_start)) + src_ip_start = ip_address(src_ip_start) + dst_ip_start = ip_address(dst_ip_start) if vxlan_count > 10: commands = list() - tmp_fn = '/tmp/create_vxlan_interfaces.config' - for i in xrange(0, vxlan_count): + for i in range(0, vxlan_count): try: - src_ip = src_ip_addr_start + i * ip_step - dst_ip = dst_ip_addr_start + i * ip_step + src_ip = src_ip_start + i * ip_step + dst_ip = dst_ip_start + i * ip_step except AddressValueError: - logger.warn("Can't do more iterations - IP address limit " - "has been reached.") + logger.warn( + u"Can't do more iterations - IP address limit " + u"has been reached." + ) vxlan_count = i break commands.append( - 'sw_interface_add_del_address sw_if_index {sw_idx} ' - '{ip}/{ip_len}\n'.format( - sw_idx=Topology.get_interface_sw_index( - node, node_vxlan_if), - ip=src_ip, - ip_len=128 if src_ip.version == 6 else 32)) + f"sw_interface_add_del_address sw_if_index " + f"{Topology.get_interface_sw_index(node, node_vxlan_if)} " + f"{src_ip}/{128 if src_ip.version == 6 else 32}\n" + ) commands.append( - 'vxlan_add_del_tunnel src {srcip} dst {dstip} vni {vni}\n'\ - .format(srcip=src_ip, dstip=dst_ip, - vni=vni_start + i)) + f"vxlan_add_del_tunnel src {src_ip} dst {dst_ip} " + f"vni {vni_start + i}\n" + ) commands.append( - 'create_vlan_subif sw_if_index {sw_idx} vlan {vlan}\n'\ - .format(sw_idx=Topology.get_interface_sw_index( - node, node_vlan_if), vlan=i + 1)) - VatExecutor().write_and_execute_script(node, tmp_fn, commands) + f"create_vlan_subif sw_if_index " + f"{Topology.get_interface_sw_index(node, node_vlan_if)} " + f"vlan {i + 1}\n" + ) + VatExecutor().write_and_execute_script( + node, u"/tmp/create_vxlan_interfaces.config", commands + ) return vxlan_count - cmd1 = 'sw_interface_add_del_address' + cmd1 = u"sw_interface_add_del_address" args1 = dict( sw_if_index=InterfaceUtil.get_interface_index(node, node_vxlan_if), is_add=True, del_all=False, prefix=None ) - cmd2 = 'vxlan_add_del_tunnel' + cmd2 = u"vxlan_add_del_tunnel" args2 = dict( is_add=1, is_ipv6=0, @@ -162,7 +167,7 @@ class TestConfig(object): decap_next_index=Constants.BITWISE_NON_ZERO, vni=None ) - cmd3 = 'create_vlan_subif' + cmd3 = u"create_vlan_subif" args3 = dict( sw_if_index=InterfaceUtil.get_interface_index( node, node_vlan_if), @@ -170,22 +175,25 @@ class TestConfig(object): ) with PapiSocketExecutor(node) as papi_exec: - for i in xrange(0, vxlan_count): + for i in range(0, vxlan_count): try: - src_ip = src_ip_addr_start + 
i * ip_step - dst_ip = dst_ip_addr_start + i * ip_step + src_ip = src_ip_start + i * ip_step + dst_ip = dst_ip_start + i * ip_step except AddressValueError: - logger.warn("Can't do more iterations - IP address limit " - "has been reached.") + logger.warn( + u"Can't do more iterations - IP address limit " + u"has been reached." + ) vxlan_count = i break - args1['prefix'] = IPUtil.create_prefix_object( - src_ip, 128 if src_ip_addr_start.version == 6 else 32) - args2['src_address'] = getattr(src_ip, 'packed') - args2['dst_address'] = getattr(dst_ip, 'packed') - args2['vni'] = int(vni_start) + i - args3['vlan_id'] = i + 1 - history = False if 1 < i < vxlan_count else True + args1[u"prefix"] = IPUtil.create_prefix_object( + src_ip, 128 if src_ip_start.version == 6 else 32 + ) + args2[u"src_address"] = getattr(src_ip, u"packed") + args2[u"dst_address"] = getattr(dst_ip, u"packed") + args2[u"vni"] = int(vni_start) + i + args3[u"vlan_id"] = i + 1 + history = bool(not 1 < i < vxlan_count - 1) papi_exec.add(cmd1, history=history, **args1).\ add(cmd2, history=history, **args2).\ add(cmd3, history=history, **args3) @@ -207,50 +215,51 @@ class TestConfig(object): :type vxlan_count: int :type node_vlan_if: str """ - if_data = InterfaceUtil.vpp_get_interface_data(node) - vlan_if_name = Topology.get_interface_name(node, node_vlan_if) - if vxlan_count > 10: - tmp_fn = '/tmp/put_subinterfaces_up.config' commands = list() - for i in xrange(0, vxlan_count): - vxlan_subif_key = Topology.add_new_port(node, 'vxlan_tunnel') - vxlan_subif_name = 'vxlan_tunnel{nr}'.format(nr=i) - vxlan_found = False + for i in range(0, vxlan_count): + vxlan_subif_key = Topology.add_new_port(node, u"vxlan_tunnel") + vxlan_subif_name = f"vxlan_tunnel{i}" + founds = dict(vxlan=False, vlan=False) vxlan_subif_idx = None - vlan_subif_key = Topology.add_new_port(node, 'vlan_subif') - vlan_subif_name = '{if_name}.{vlan}'.format( - if_name=vlan_if_name, vlan=i + 1) - vlan_found = False + vlan_subif_key = Topology.add_new_port(node, u"vlan_subif") + vlan_subif_name = \ + f"{Topology.get_interface_name(node, node_vlan_if)}.{i + 1}" vlan_idx = None - for data in if_data: - if_name = data['interface_name'] - if not vxlan_found and if_name == vxlan_subif_name: - vxlan_subif_idx = data['sw_if_index'] - vxlan_found = True - elif not vlan_found and if_name == vlan_subif_name: - vlan_idx = data['sw_if_index'] - vlan_found = True - if vxlan_found and vlan_found: + for data in InterfaceUtil.vpp_get_interface_data(node): + if_name = data[u"interface_name"] + if not founds[u"vxlan"] and if_name == vxlan_subif_name: + vxlan_subif_idx = data[u"sw_if_index"] + founds[u"vxlan"] = True + elif not founds[u"vlan"] and if_name == vlan_subif_name: + vlan_idx = data[u"sw_if_index"] + founds[u"vlan"] = True + if founds[u"vxlan"] and founds[u"vlan"]: break Topology.update_interface_sw_if_index( node, vxlan_subif_key, vxlan_subif_idx) Topology.update_interface_name( node, vxlan_subif_key, vxlan_subif_name) commands.append( - 'sw_interface_set_flags sw_if_index {sw_idx} admin-up ' - 'link-up\n'.format(sw_idx=vxlan_subif_idx)) + f"sw_interface_set_flags sw_if_index {vxlan_subif_idx} " + f"admin-up link-up\n" + ) Topology.update_interface_sw_if_index( - node, vlan_subif_key, vlan_idx) + node, vlan_subif_key, vlan_idx + ) Topology.update_interface_name( - node, vlan_subif_key, vlan_subif_name) + node, vlan_subif_key, vlan_subif_name + ) commands.append( - 'sw_interface_set_flags sw_if_index {sw_idx} admin-up ' - 'link-up\n'.format(sw_idx=vlan_idx)) - 
VatExecutor().write_and_execute_script(node, tmp_fn, commands) + f"sw_interface_set_flags sw_if_index {vlan_idx} admin-up " + f"link-up\n" + ) + VatExecutor().write_and_execute_script( + node, u"/tmp/put_subinterfaces_up.config", commands + ) return - cmd = 'sw_interface_set_flags' + cmd = u"sw_interface_set_flags" args1 = dict( sw_if_index=None, flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value @@ -261,38 +270,41 @@ class TestConfig(object): ) with PapiSocketExecutor(node) as papi_exec: - for i in xrange(0, vxlan_count): - vxlan_subif_key = Topology.add_new_port(node, 'vxlan_tunnel') - vxlan_subif_name = 'vxlan_tunnel{nr}'.format(nr=i) - vxlan_found = False + for i in range(0, vxlan_count): + vxlan_subif_key = Topology.add_new_port(node, u"vxlan_tunnel") + vxlan_subif_name = f"vxlan_tunnel{i}" + founds = dict(vxlan=False, vlan=False) vxlan_subif_idx = None - vlan_subif_key = Topology.add_new_port(node, 'vlan_subif') - vlan_subif_name = '{if_name}.{vlan}'.format( - if_name=vlan_if_name, vlan=i+1) - vlan_found = False + vlan_subif_key = Topology.add_new_port(node, u"vlan_subif") + vlan_subif_name = \ + f"{Topology.get_interface_name(node, node_vlan_if)}.{i+1}" vlan_idx = None - for data in if_data: - if not vxlan_found \ - and data['interface_name'] == vxlan_subif_name: - vxlan_subif_idx = data['sw_if_index'] - vxlan_found = True - elif not vlan_found \ - and data['interface_name'] == vlan_subif_name: - vlan_idx = data['sw_if_index'] - vlan_found = True - if vxlan_found and vlan_found: + for data in InterfaceUtil.vpp_get_interface_data(node): + if not founds[u"vxlan"] \ + and data[u"interface_name"] == vxlan_subif_name: + vxlan_subif_idx = data[u"sw_if_index"] + founds[u"vxlan"] = True + elif not founds[u"vlan"] \ + and data[u"interface_name"] == vlan_subif_name: + vlan_idx = data[u"sw_if_index"] + founds[u"vlan"] = True + if founds[u"vxlan"] and founds[u"vlan"]: break Topology.update_interface_sw_if_index( - node, vxlan_subif_key, vxlan_subif_idx) + node, vxlan_subif_key, vxlan_subif_idx + ) Topology.update_interface_name( - node, vxlan_subif_key, vxlan_subif_name) - args1['sw_if_index'] = vxlan_subif_idx + node, vxlan_subif_key, vxlan_subif_name + ) + args1[u"sw_if_index"] = vxlan_subif_idx Topology.update_interface_sw_if_index( - node, vlan_subif_key, vlan_idx) + node, vlan_subif_key, vlan_idx + ) Topology.update_interface_name( - node, vlan_subif_key, vlan_subif_name) - args2['sw_if_index'] = vlan_idx - history = False if 1 < i < vxlan_count else True + node, vlan_subif_key, vlan_subif_name + ) + args2[u"sw_if_index"] = vlan_idx + history = bool(not 1 < i < vxlan_count - 1) papi_exec.add(cmd, history=history, **args1). 
\ add(cmd, history=history, **args2) papi_exec.add(cmd, **args1).add(cmd, **args2) @@ -325,92 +337,101 @@ class TestConfig(object): :type ip_step: int :type bd_id_start: int """ - dst_ip_addr_start = ip_address(unicode(dst_ip_start)) + dst_ip_start = ip_address(dst_ip_start) if vxlan_count > 1: sw_idx_vxlan = Topology.get_interface_sw_index(node, node_vxlan_if) - tmp_fn = '/tmp/configure_routes_and_bridge_domains.config' commands = list() - for i in xrange(0, vxlan_count): - dst_ip = dst_ip_addr_start + i * ip_step + for i in range(0, vxlan_count): + dst_ip = dst_ip_start + i * ip_step commands.append( - 'ip_neighbor_add_del sw_if_index {sw_idx} dst {ip} ' - 'mac {mac}\n'.format( - sw_idx=sw_idx_vxlan, - ip=dst_ip, - mac=Topology.get_interface_mac(op_node, op_node_if))) + f"ip_neighbor_add_del sw_if_index {sw_idx_vxlan} " + f"dst {dst_ip} " + f"mac {Topology.get_interface_mac(op_node, op_node_if)}\n" + ) commands.append( - 'ip_route_add_del {ip}/{ip_len} count 1 via {ip} ' - 'sw_if_index {sw_idx}\n'.format( - ip=dst_ip, - ip_len=128 if dst_ip.version == 6 else 32, - sw_idx=sw_idx_vxlan)) + f"ip_route_add_del " + f"{dst_ip}/{128 if dst_ip.version == 6 else 32} count 1 " + f"via {dst_ip} sw_if_index {sw_idx_vxlan}\n" + ) + sw_idx_vxlan = Topology.get_interface_sw_index( + node, f"vxlan_tunnel{i + 1}" + ) commands.append( - 'sw_interface_set_l2_bridge sw_if_index {sw_idx} ' - 'bd_id {bd_id} shg 0 enable\n'.format( - sw_idx=Topology.get_interface_sw_index( - node, 'vxlan_tunnel{nr}'.format(nr=i + 1)), - bd_id=bd_id_start + i)) + f"sw_interface_set_l2_bridge sw_if_index {sw_idx_vxlan} " + f"bd_id {bd_id_start + i} shg 0 enable\n" + ) + sw_idx_vlan = Topology.get_interface_sw_index( + node, f"vlan_subif{i + 1}" + ) commands.append( - 'sw_interface_set_l2_bridge sw_if_index {sw_idx} ' - 'bd_id {bd_id} shg 0 enable\n'.format( - sw_idx=Topology.get_interface_sw_index( - node, 'vlan_subif{nr}'.format(nr=i + 1)), - bd_id=bd_id_start + i)) - VatExecutor().write_and_execute_script(node, tmp_fn, commands) + f"sw_interface_set_l2_bridge sw_if_index {sw_idx_vlan} " + f"bd_id {bd_id_start + i} shg 0 enable\n" + ) + VatExecutor().write_and_execute_script( + node, u"/tmp/configure_routes_and_bridge_domains.config", + commands + ) return - cmd1 = 'ip_neighbor_add_del' + cmd1 = u"ip_neighbor_add_del" neighbor = dict( sw_if_index=Topology.get_interface_sw_index(node, node_vxlan_if), flags=0, mac_address=Topology.get_interface_mac(op_node, op_node_if), - ip_address='') + ip_address=u"" + ) args1 = dict( is_add=1, - neighbor=neighbor) - cmd2 = 'ip_route_add_del' + neighbor=neighbor + ) + cmd2 = u"ip_route_add_del" kwargs = dict( interface=node_vxlan_if, - gateway=str(dst_ip_addr_start)) + gateway=str(dst_ip_start) + ) route = IPUtil.compose_vpp_route_structure( - node, - str(dst_ip_addr_start), - 128 if dst_ip_addr_start.version == 6 else 32, - **kwargs) + node, str(dst_ip_start), + 128 if dst_ip_start.version == 6 else 32, **kwargs + ) args2 = dict( is_add=1, is_multipath=0, - route=route) - cmd3 = 'sw_interface_set_l2_bridge' + route=route + ) + cmd3 = u"sw_interface_set_l2_bridge" args3 = dict( rx_sw_if_index=None, bd_id=None, shg=0, port_type=0, - enable=1) + enable=1 + ) args4 = dict( rx_sw_if_index=None, bd_id=None, shg=0, port_type=0, - enable=1) + enable=1 + ) with PapiSocketExecutor(node) as papi_exec: - for i in xrange(0, vxlan_count): - dst_ip = dst_ip_addr_start + i * ip_step - args1['neighbor']['ip_address'] = str(dst_ip) - args2['route']['prefix']['address']['un'] = \ - IPUtil.union_addr(dst_ip) - 
args2['route']['paths'][0]['nh']['address'] = \ - IPUtil.union_addr(dst_ip) - args3['rx_sw_if_index'] = Topology.get_interface_sw_index( - node, 'vxlan_tunnel{nr}'.format(nr=i+1)) - args3['bd_id'] = int(bd_id_start+i) - args4['rx_sw_if_index'] = Topology.get_interface_sw_index( - node, 'vlan_subif{nr}'.format(nr=i+1)) - args4['bd_id'] = int(bd_id_start+i) - history = False if 1 < i < vxlan_count else True + for i in range(0, vxlan_count): + args1[u"neighbor"][u"ip_address"] = \ + str(dst_ip_start + i * ip_step) + args2[u"route"][u"prefix"][u"address"][u"un"] = \ + IPUtil.union_addr(dst_ip_start + i * ip_step) + args2[u"route"][u"paths"][0][u"nh"][u"address"] = \ + IPUtil.union_addr(dst_ip_start + i * ip_step) + args3[u"rx_sw_if_index"] = Topology.get_interface_sw_index( + node, f"vxlan_tunnel{i+1}" + ) + args3[u"bd_id"] = int(bd_id_start+i) + args4[u"rx_sw_if_index"] = Topology.get_interface_sw_index( + node, f"vlan_subif{i+1}" + ) + args4[u"bd_id"] = int(bd_id_start+i) + history = bool(not 1 < i < vxlan_count - 1) papi_exec.add(cmd1, history=history, **args1). \ add(cmd2, history=history, **args2). \ add(cmd3, history=history, **args3). \ diff --git a/resources/libraries/python/Trace.py b/resources/libraries/python/Trace.py index 27cc30476e..c88150f72c 100644 --- a/resources/libraries/python/Trace.py +++ b/resources/libraries/python/Trace.py @@ -17,7 +17,7 @@ from resources.libraries.python.PapiExecutor import PapiSocketExecutor from resources.libraries.python.topology import NodeType -class Trace(object): +class Trace: """This class provides methods to manipulate the VPP packet trace.""" @staticmethod @@ -29,13 +29,11 @@ class Trace(object): :type nodes: dict :type maximum: int """ - maximum = "max {count}".format(count=maximum) if maximum is not None\ - else "" + maximum = f"max {maximum}" if maximum is not None else f"" for node in nodes.values(): - if node['type'] == NodeType.DUT: - PapiSocketExecutor.run_cli_cmd( - node, "show trace {max}".format(max=maximum)) + if node[u"type"] == NodeType.DUT: + PapiSocketExecutor.run_cli_cmd(node, f"show trace {maximum}") @staticmethod def clear_packet_trace_on_all_duts(nodes): @@ -45,5 +43,5 @@ class Trace(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: - PapiSocketExecutor.run_cli_cmd(node, "clear trace") + if node[u"type"] == NodeType.DUT: + PapiSocketExecutor.run_cli_cmd(node, u"clear trace") diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py index 8c3abfb373..14d2dc8d1c 100644 --- a/resources/libraries/python/TrafficGenerator.py +++ b/resources/libraries/python/TrafficGenerator.py @@ -29,7 +29,7 @@ from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement from .PLRsearch.PLRsearch import PLRsearch -__all__ = ['TGDropRateSearchImpl', 'TrafficGenerator', 'OptimizedSearch'] +__all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"] def check_subtype(node): @@ -44,27 +44,28 @@ def check_subtype(node): :rtype: NodeSubTypeTG :raises RuntimeError: If node is not supported, message explains how. 
""" - if node.get('type') is None: - raise RuntimeError('Node type is not defined') + if node.get(u"type") is None: + msg = u"Node type is not defined" elif node['type'] != NodeType.TG: - raise RuntimeError('Node type is {typ!r}, not a TG'.format( - typ=node['type'])) - elif node.get('subtype') is None: - raise RuntimeError('TG subtype is not defined') - elif node['subtype'] == NodeSubTypeTG.TREX: + msg = f"Node type is {node[u'type']!r}, not a TG" + elif node.get(u"subtype") is None: + msg = u"TG subtype is not defined" + elif node[u"subtype"] != NodeSubTypeTG.TREX: + msg = f"TG subtype {node[u'subtype']!r} is not supported" + else: return NodeSubTypeTG.TREX - raise RuntimeError('TG subtype {sub!r} is not supported'.format( - sub=node['subtype'])) + raise RuntimeError(msg) class TGDropRateSearchImpl(DropRateSearch): """Drop Rate Search implementation.""" - def __init__(self): - super(TGDropRateSearchImpl, self).__init__() + # def __init__(self): + # super(TGDropRateSearchImpl, self).__init__() - def measure_loss(self, rate, frame_size, loss_acceptance, - loss_acceptance_type, traffic_profile, skip_warmup=False): + def measure_loss( + self, rate, frame_size, loss_acceptance, loss_acceptance_type, + traffic_profile, skip_warmup=False): """Runs the traffic and evaluate the measured results. :param rate: Offered traffic load. @@ -88,23 +89,27 @@ class TGDropRateSearchImpl(DropRateSearch): # we need instance of TrafficGenerator instantiated by Robot Framework # to be able to use trex_stl-*() tg_instance = BuiltIn().get_library_instance( - 'resources.libraries.python.TrafficGenerator') + u"resources.libraries.python.TrafficGenerator" + ) subtype = check_subtype(tg_instance.node) if subtype == NodeSubTypeTG.TREX: unit_rate = str(rate) + self.get_rate_type_str() if skip_warmup: tg_instance.trex_stl_start_remote_exec( self.get_duration(), unit_rate, frame_size, traffic_profile, - warmup_time=0.0) + warmup_time=0.0 + ) else: tg_instance.trex_stl_start_remote_exec( - self.get_duration(), unit_rate, frame_size, traffic_profile) + self.get_duration(), unit_rate, frame_size, traffic_profile + ) loss = tg_instance.get_loss() sent = tg_instance.get_sent() if self.loss_acceptance_type_is_percentage(): loss = (float(loss) / float(sent)) * 100 - logger.trace("comparing: {los} < {acc} {typ}".format( - los=loss, acc=loss_acceptance, typ=loss_acceptance_type)) + logger.trace( + f"comparing: {loss} < {loss_acceptance} {loss_acceptance_type}" + ) return float(loss) <= float(loss_acceptance) return False @@ -115,10 +120,12 @@ class TGDropRateSearchImpl(DropRateSearch): :rtype: list """ tg_instance = BuiltIn().get_library_instance( - 'resources.libraries.python.TrafficGenerator') + u"resources.libraries.python.TrafficGenerator" + ) return tg_instance.get_latency_int() +# pylint: disable=too-many-instance-attributes class TrafficGenerator(AbstractMeasurer): """Traffic Generator. @@ -128,7 +135,7 @@ class TrafficGenerator(AbstractMeasurer): # TODO: Remove "trex" from lines which could work with other TGs. # Use one instance of TrafficGenerator for all tests in test suite - ROBOT_LIBRARY_SCOPE = 'TEST SUITE' + ROBOT_LIBRARY_SCOPE = u"TEST SUITE" def __init__(self): # TODO: Number of fields will be reduced with CSIT-1378. 
@@ -194,6 +201,7 @@ class TrafficGenerator(AbstractMeasurer): """ return self._latency + # pylint: disable=too-many-locals def initialize_traffic_generator( self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if, tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None, @@ -234,23 +242,27 @@ class TrafficGenerator(AbstractMeasurer): if1_addr = Topology().get_interface_mac(self._node, tg_if1) if2_addr = Topology().get_interface_mac(self._node, tg_if2) - if osi_layer == 'L2': + if osi_layer == u"L2": if1_adj_addr = if2_addr if2_adj_addr = if1_addr - elif osi_layer == 'L3': - if1_adj_addr = Topology().get_interface_mac(tg_if1_adj_node, - tg_if1_adj_if) - if2_adj_addr = Topology().get_interface_mac(tg_if2_adj_node, - tg_if2_adj_if) - elif osi_layer == 'L7': + elif osi_layer == u"L3": + if1_adj_addr = Topology().get_interface_mac( + tg_if1_adj_node, tg_if1_adj_if + ) + if2_adj_addr = Topology().get_interface_mac( + tg_if2_adj_node, tg_if2_adj_if + ) + elif osi_layer == u"L7": if1_addr = Topology().get_interface_ip4(self._node, tg_if1) if2_addr = Topology().get_interface_ip4(self._node, tg_if2) - if1_adj_addr = Topology().get_interface_ip4(tg_if1_adj_node, - tg_if1_adj_if) - if2_adj_addr = Topology().get_interface_ip4(tg_if2_adj_node, - tg_if2_adj_if) + if1_adj_addr = Topology().get_interface_ip4( + tg_if1_adj_node, tg_if1_adj_if + ) + if2_adj_addr = Topology().get_interface_ip4( + tg_if2_adj_node, tg_if2_adj_if + ) else: - raise ValueError("Unknown Test Type") + raise ValueError(u"Unknown Test Type") # in case of switched environment we can override MAC addresses if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None: @@ -263,42 +275,40 @@ class TrafficGenerator(AbstractMeasurer): if1_adj_addr, if2_adj_addr = if2_adj_addr, if1_adj_addr self._ifaces_reordered = True - if osi_layer == 'L2' or osi_layer == 'L3': + if osi_layer in (u"L2", u"L3"): + dst_mac0 = f"0x{if1_adj_addr.replace(u':', u',0x')}" + src_mac0 = f"0x{if1_addr.replace(u':', u',0x')}" + dst_mac1 = f"0x{if2_adj_addr.replace(u':', u',0x')}" + src_mac1 = f"0x{if2_addr.replace(u':', u',0x')}" exec_cmd_no_error( self._node, - "sh -c 'cat << EOF > /etc/trex_cfg.yaml\n" - "- version: 2\n" - " interfaces: [\"{0}\",\"{1}\"]\n" - " port_info:\n" - " - dest_mac: [{2}]\n" - " src_mac: [{3}]\n" - " - dest_mac: [{4}]\n" - " src_mac: [{5}]\n" - "EOF'"\ - .format(if1_pci, if2_pci, - "0x"+if1_adj_addr.replace(":", ",0x"), - "0x"+if1_addr.replace(":", ",0x"), - "0x"+if2_adj_addr.replace(":", ",0x"), - "0x"+if2_addr.replace(":", ",0x")), - sudo=True, message='TRex config generation error') - elif osi_layer == 'L7': + f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n" + f"- version: 2\n" + f" interfaces: [\"{if1_pci}\",\"{if2_pci}\"]\n" + f" port_info:\n" + f" - dest_mac: [{dst_mac0}]\n" + f" src_mac: [{src_mac0}]\n" + f" - dest_mac: [{dst_mac1}]\n" + f" src_mac: [{src_mac1}]\n" + f"EOF'", + sudo=True, message=u"TRex config generation error" + ) + elif osi_layer == u"L7": exec_cmd_no_error( self._node, - "sh -c 'cat << EOF > /etc/trex_cfg.yaml\n" - "- version: 2\n" - " interfaces: [\"{0}\",\"{1}\"]\n" - " port_info:\n" - " - ip: [{2}]\n" - " default_gw: [{3}]\n" - " - ip: [{4}]\n" - " default_gw: [{5}]\n" - "EOF'"\ - .format(if1_pci, if2_pci, - if1_addr, if1_adj_addr, - if2_addr, if2_adj_addr), - sudo=True, message='TRex config generation error') + f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n" + f"- version: 2\n" + f" interfaces: [\"{if1_pci}\",\"{if2_pci}\"]\n" + f" port_info:\n" + f" - ip: [{if1_addr}]\n" + f" default_gw: 
[{if1_adj_addr}]\n" + f" - ip: [{if2_addr}]\n" + f" default_gw: [{if2_adj_addr}]\n" + f"EOF'", + sudo=True, message=u"TRex config generation error" + ) else: - raise ValueError("Unknown Test Type") + raise ValueError(u"Unknown Test Type") self._startup_trex(osi_layer) @@ -312,49 +322,52 @@ class TrafficGenerator(AbstractMeasurer): # No need to check subtype, we know it is TREX. for _ in range(0, 3): # Kill TRex only if it is already running. - cmd = "sh -c 'pgrep t-rex && pkill t-rex && sleep 3 || true'" + cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\"" exec_cmd_no_error( - self._node, cmd, sudo=True, message='Kill TRex failed!') + self._node, cmd, sudo=True, message=u"Kill TRex failed!" + ) # Configure TRex. ports = '' - for port in self._node['interfaces'].values(): - ports += ' {pci}'.format(pci=port.get('pci_address')) + for port in self._node[u"interfaces"].values(): + ports += f" {port.get(u'pci_address')}" - cmd = ("sh -c 'cd {dir}/scripts/ && " - "./dpdk_nic_bind.py -u {ports} || true'" - .format(dir=Constants.TREX_INSTALL_DIR, ports=ports)) + cmd = f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && " \ + f"./dpdk_nic_bind.py -u {ports} || true\"" exec_cmd_no_error( self._node, cmd, sudo=True, - message='Unbind PCI ports from driver failed!') + message=u"Unbind PCI ports from driver failed!" + ) # Start TRex. - cmd = ("sh -c 'cd {dir}/scripts/ && " - "nohup ./t-rex-64 --hdrh{mode} --prefix $(hostname)" - " -i -c 7 > /tmp/trex.log 2>&1 &' > /dev/null" - .format(dir=Constants.TREX_INSTALL_DIR, - mode=' --astf' if osi_layer == 'L7' else '')) + cmd = f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && " \ + f"nohup ./t-rex-64 " \ + f"--hdrh{u' --astf' if osi_layer == u'L7' else u''} " \ + f"--prefix $(hostname) -i -c 7 > /tmp/trex.log 2>&1 &\" > " \ + f"/dev/null" try: exec_cmd_no_error(self._node, cmd, sudo=True) except RuntimeError: - cmd = "sh -c 'cat /tmp/trex.log'" + cmd = u"sh -c \"cat /tmp/trex.log\"" exec_cmd_no_error( - self._node, cmd, sudo=True, message='Get TRex logs failed!') - raise RuntimeError('Start TRex failed!') + self._node, cmd, sudo=True, message=u"Get TRex logs failed!" + ) + raise RuntimeError(u"Start TRex failed!") # Test if TRex starts successfuly. - cmd = ("sh -c '{dir}/resources/tools/trex/trex_server_info.py'" - .format(dir=Constants.REMOTE_FW_DIR)) + cmd = f"sh -c \"{Constants.REMOTE_FW_DIR}/resources/tools/trex/" \ + f"trex_server_info.py\"" try: exec_cmd_no_error( - self._node, cmd, sudo=True, message='Test TRex failed!', - retries=20) + self._node, cmd, sudo=True, message=u"Test TRex failed!", + retries=20 + ) except RuntimeError: continue return # After max retries TRex is still not responding to API critical error # occurred. - raise RuntimeError('Start TRex failed after multiple retries!') + raise RuntimeError(u"Start TRex failed after multiple retries!") @staticmethod def is_trex_running(node): @@ -368,7 +381,7 @@ class TrafficGenerator(AbstractMeasurer): """ # No need to check subtype, we know it is TREX. 
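The _startup_trex loop above retries the whole kill/unbind/start/probe cycle rather than any single command. Its control flow reduces to this shape (a sketch; the callables and the message are placeholders, not the library's API):

```python
def start_with_retries(start, probe, attempts=3):
    """Restart a daemon until a probe confirms it answers (sketch).

    Each attempt restarts the daemon and then probes it; a failed
    probe falls through to the next attempt instead of aborting,
    mirroring the try/except/continue in _startup_trex.
    """
    for _ in range(attempts):
        start()        # kill any old instance, start a fresh one
        try:
            probe()    # e.g. the trex_server_info.py query, with retries
        except RuntimeError:
            continue   # daemon not answering yet, restart and try again
        return         # daemon is up and responding
    raise RuntimeError(u"Start failed after multiple retries!")
```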
- ret, _, _ = exec_cmd(node, "pidof t-rex", sudo=True) + ret, _, _ = exec_cmd(node, u"pidof t-rex", sudo=True) return bool(int(ret) == 0) @staticmethod @@ -384,8 +397,9 @@ class TrafficGenerator(AbstractMeasurer): subtype = check_subtype(node) if subtype == NodeSubTypeTG.TREX: exec_cmd_no_error( - node, "sh -c 'sudo pkill t-rex && sleep 3'", - sudo=False, message='pkill t-rex failed') + node, u"sh -c \"sudo pkill t-rex && sleep 3\"", + sudo=False, message=u"pkill t-rex failed" + ) def _parse_traffic_results(self, stdout): """Parse stdout of scripts into fields of self. @@ -399,13 +413,13 @@ class TrafficGenerator(AbstractMeasurer): # last line from console output line = stdout.splitlines()[-1] self._result = line - logger.info('TrafficGen result: {0}'.format(self._result)) - self._received = self._result.split(', ')[1].split('=', 1)[1] - self._sent = self._result.split(', ')[2].split('=', 1)[1] - self._loss = self._result.split(', ')[3].split('=', 1)[1] - self._latency = [] - self._latency.append(self._result.split(', ')[4].split('=', 1)[1]) - self._latency.append(self._result.split(', ')[5].split('=', 1)[1]) + logger.info(f"TrafficGen result: {self._result}") + self._received = self._result.split(u", ")[1].split(u"=", 1)[1] + self._sent = self._result.split(u", ")[2].split(u"=", 1)[1] + self._loss = self._result.split(u", ")[3].split(u"=", 1)[1] + self._latency = list() + self._latency.append(self._result.split(u", ")[4].split(u"=", 1)[1]) + self._latency.append(self._result.split(u", ")[5].split(u"=", 1)[1]) def trex_stl_stop_remote_exec(self, node): """Execute script on remote node over ssh to stop running traffic. @@ -417,17 +431,17 @@ class TrafficGenerator(AbstractMeasurer): :raises RuntimeError: If stop traffic script fails. """ # No need to check subtype, we know it is TREX. - x_args = "" + x_args = u"" for index, value in enumerate(self._xstats): if value is not None: # Nested quoting is fun. - value = value.replace("'", "\"") - x_args += " --xstat{i}='\"'\"'{v}'\"'\"'".format( - i=index, v=value) + value = value.replace(u"'", u"\"") + x_args += f" --xstat{index}='\"'\"'{value}'\"'\"'" stdout, _ = exec_cmd_no_error( - node, "sh -c '{d}/resources/tools/trex/trex_stateless_stop.py{a}'"\ - .format(d=Constants.REMOTE_FW_DIR, a=x_args), - message='TRex stateless runtime error') + node, f"sh -c '{Constants.REMOTE_FW_DIR}/resources/tools/trex/" + f"trex_stateless_stop.py{x_args}'", + message=u"TRex stateless runtime error" + ) self._parse_traffic_results(stdout) def trex_stl_start_remote_exec( @@ -468,39 +482,36 @@ class TrafficGenerator(AbstractMeasurer): # No need to check subtype, we know it is TREX. reorder = self._ifaces_reordered # Just to make the next line fit. p_0, p_1 = (rx_port, tx_port) if reorder else (tx_port, rx_port) - # Values from Robot can introduce type unicode, - # we need to encode them, so that repr() does not lead with 'u'. 
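The two comment lines deleted above explain the guard deleted just below: under Python 2, values arriving from Robot could be of type unicode, and repr() of such a value leads with a u prefix, corrupting the generated command line. Under Python 3 there is only str, so the re-encode step disappears. A quick illustration (the rate value is made up):

```python
# Python 2 needed: if isinstance(rate, unicode): rate = rate.encode("utf-8")
# so that repr() would not render u'...' into the generated command.
rate = u"2.5mpps"           # as Robot would pass it; u-prefix is a no-op now
assert isinstance(rate, str)
print(f"--rate {rate!r}")   # -> --rate '2.5mpps' (no u prefix in Python 3)
```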
- if isinstance(rate, unicode): - rate = rate.encode("utf-8") + if not isinstance(duration, (float, int)): duration = float(duration) if not isinstance(warmup_time, (float, int)): warmup_time = float(warmup_time) - command = ( - "sh -c '{tool}/resources/tools/trex/trex_stateless_profile.py" - " --profile {prof}/resources/traffic_profiles/trex/{traffic}.py" - " --duration {duration!r} --frame_size {frame_size} --rate {rate!r}" - " --warmup_time {warmup!r} --port_0 {p_0} --port_1 {p_1}" - " --traffic_directions {dirs}").format( - tool=Constants.REMOTE_FW_DIR, prof=Constants.REMOTE_FW_DIR, - traffic=traffic_profile, duration=duration, - frame_size=frame_size, rate=rate, warmup=warmup_time, p_0=p_0, - p_1=p_1, dirs=traffic_directions) + command = f"sh -c \"" \ + f"{Constants.REMOTE_FW_DIR}/resources/tools/trex/" \ + f"trex_stateless_profile.py" \ + f" --profile {Constants.REMOTE_FW_DIR}/resources/" \ + f"traffic_profiles/trex/{traffic_profile}.py" \ + f" --duration {duration!r} --frame_size {frame_size} " \ + f"--rate {rate!r} --warmup_time {warmup_time!r} " \ + f"--port_0 {p_0} --port_1 {p_1}" \ + f" --traffic_directions {traffic_directions}" if async_call: - command += " --async_start" + command += u" --async_start" if latency: - command += " --latency" - command += "'" + command += u" --latency" + command += u"\"" stdout, _ = exec_cmd_no_error( self._node, command, timeout=float(duration) + 60, - message='TRex stateless runtime error') + message=u"TRex stateless runtime error" + ) self.traffic_directions = traffic_directions if async_call: - #no result + # no result self._start_time = time.time() - self._rate = float(rate[:-3]) if "pps" in rate else float(rate) + self._rate = float(rate[:-3]) if u"pps" in rate else float(rate) self._received = None self._sent = None self._loss = None @@ -508,7 +519,7 @@ class TrafficGenerator(AbstractMeasurer): xstats = [None, None] index = 0 for line in stdout.splitlines(): - if "Xstats snapshot {i}: ".format(i=index) in line: + if f"Xstats snapshot {index}: " in line: xstats[index] = line[19:] index += 1 if index == 2: @@ -590,7 +601,8 @@ class TrafficGenerator(AbstractMeasurer): if subtype == NodeSubTypeTG.TREX: self.trex_stl_start_remote_exec( duration, rate, frame_size, traffic_profile, async_call, - latency, warmup_time, traffic_directions, tx_port, rx_port) + latency, warmup_time, traffic_directions, tx_port, rx_port + ) return self._result @@ -601,9 +613,9 @@ class TrafficGenerator(AbstractMeasurer): :raises Exception: If loss occured. """ if self._loss is None: - raise RuntimeError('The traffic generation has not been issued') - if self._loss != '0': - raise RuntimeError('Traffic loss occurred: {0}'.format(self._loss)) + raise RuntimeError(u"The traffic generation has not been issued") + if self._loss != u"0": + raise RuntimeError(f"Traffic loss occurred: {self._loss}") def fail_if_no_traffic_forwarded(self): """Fail if no traffic forwarded. @@ -612,12 +624,12 @@ class TrafficGenerator(AbstractMeasurer): :raises Exception: If no traffic forwarded. 
""" if self._received is None: - raise RuntimeError('The traffic generation has not been issued') - if self._received == '0': - raise RuntimeError('No traffic forwarded') + raise RuntimeError(u"The traffic generation has not been issued") + if self._received == u"0": + raise RuntimeError(u"No traffic forwarded") - def partial_traffic_loss_accepted(self, loss_acceptance, - loss_acceptance_type): + def partial_traffic_loss_accepted( + self, loss_acceptance, loss_acceptance_type): """Fail if loss is higher then accepted in traffic run. :param loss_acceptance: Permitted drop ratio or frames count. @@ -628,21 +640,23 @@ class TrafficGenerator(AbstractMeasurer): :raises Exception: If loss is above acceptance criteria. """ if self._loss is None: - raise Exception('The traffic generation has not been issued') + raise Exception(u"The traffic generation has not been issued") - if loss_acceptance_type == 'percentage': + if loss_acceptance_type == u"percentage": loss = (float(self._loss) / float(self._sent)) * 100 - elif loss_acceptance_type == 'frames': + elif loss_acceptance_type == u"frames": loss = float(self._loss) else: - raise Exception('Loss acceptance type not supported') + raise Exception(u"Loss acceptance type not supported") if loss > float(loss_acceptance): - raise Exception("Traffic loss {} above loss acceptance: {}".format( - loss, loss_acceptance)) + raise Exception( + f"Traffic loss {loss} above loss acceptance: {loss_acceptance}" + ) - def set_rate_provider_defaults(self, frame_size, traffic_profile, - warmup_time=0.0, traffic_directions=2): + def set_rate_provider_defaults( + self, frame_size, traffic_profile, warmup_time=0.0, + traffic_directions=2): """Store values accessed by measure(). :param frame_size: Frame size identifier or value [B]. @@ -686,7 +700,8 @@ class TrafficGenerator(AbstractMeasurer): transmit_count = int(self.get_sent()) loss_count = int(self.get_loss()) measurement = ReceiveRateMeasurement( - duration, transmit_rate, transmit_count, loss_count) + duration, transmit_rate, transmit_count, loss_count + ) measurement.latency = self.get_latency_int() return measurement @@ -709,15 +724,16 @@ class TrafficGenerator(AbstractMeasurer): transmit_rate = float(transmit_rate) # TG needs target Tr per stream, but reports aggregate Tx and Dx. unit_rate_int = transmit_rate / float(self.traffic_directions) - unit_rate_str = str(unit_rate_int) + "pps" + unit_rate_str = str(unit_rate_int) + u"pps" self.send_traffic_on_tg( duration, unit_rate_str, self.frame_size, self.traffic_profile, warmup_time=self.warmup_time, latency=True, - traffic_directions=self.traffic_directions) + traffic_directions=self.traffic_directions + ) return self.get_measurement_result(duration, transmit_rate) -class OptimizedSearch(object): +class OptimizedSearch: """Class to be imported as Robot Library, containing search keywords. 
Aside of setting up measurer and forwarding arguments, @@ -777,7 +793,8 @@ class OptimizedSearch(object): # we need instance of TrafficGenerator instantiated by Robot Framework # to be able to use trex_stl-*() tg_instance = BuiltIn().get_library_instance( - 'resources.libraries.python.TrafficGenerator') + u"resources.libraries.python.TrafficGenerator" + ) tg_instance.set_rate_provider_defaults( frame_size, traffic_profile, traffic_directions=traffic_directions) algorithm = MultipleLossRatioSearch( @@ -785,9 +802,11 @@ class OptimizedSearch(object): final_relative_width=final_relative_width, number_of_intermediate_phases=number_of_intermediate_phases, initial_trial_duration=initial_trial_duration, timeout=timeout, - doublings=doublings) + doublings=doublings + ) result = algorithm.narrow_down_ndr_and_pdr( - minimum_transmit_rate, maximum_transmit_rate, packet_loss_ratio) + minimum_transmit_rate, maximum_transmit_rate, packet_loss_ratio + ) return result @staticmethod @@ -814,6 +833,7 @@ class OptimizedSearch(object): This is needed because initial "search" phase of integrator takes significant time even without any trial results. :param timeout: The search will stop after this overall time [s]. + :param trace_enabled: True if trace enabled else False. :param traffic_directions: Traffic is bi- (2) or uni- (1) directional. Default: 2 :type frame_size: str or int @@ -823,6 +843,7 @@ class OptimizedSearch(object): :type plr_target: float :type initial_count: int :type timeout: float + :type trace_enabled: bool :type traffic_directions: int :returns: Average and stdev of estimated aggregate rate giving PLR. :rtype: 2-tuple of float @@ -830,13 +851,15 @@ class OptimizedSearch(object): minimum_transmit_rate *= traffic_directions maximum_transmit_rate *= traffic_directions tg_instance = BuiltIn().get_library_instance( - 'resources.libraries.python.TrafficGenerator') + u"resources.libraries.python.TrafficGenerator" + ) tg_instance.set_rate_provider_defaults( frame_size, traffic_profile, traffic_directions=traffic_directions) algorithm = PLRsearch( measurer=tg_instance, trial_duration_per_trial=tdpt, packet_loss_ratio_target=plr_target, trial_number_offset=initial_count, timeout=timeout, - trace_enabled=trace_enabled) + trace_enabled=trace_enabled + ) result = algorithm.search(minimum_transmit_rate, maximum_transmit_rate) return result diff --git a/resources/libraries/python/TrafficScriptArg.py b/resources/libraries/python/TrafficScriptArg.py index 64d0747b27..247eccbc2f 100644 --- a/resources/libraries/python/TrafficScriptArg.py +++ b/resources/libraries/python/TrafficScriptArg.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -16,7 +16,7 @@ import argparse -class TrafficScriptArg(object): +class TrafficScriptArg: """Traffic scripts argument parser. Parse arguments for traffic script. 
Default has two arguments '--tx_if' @@ -37,18 +37,18 @@ class TrafficScriptArg(object): def __init__(self, more_args=None, opt_args=None): parser = argparse.ArgumentParser() - parser.add_argument("--tx_if", help="interface that sends traffic") - parser.add_argument("--rx_if", help="interface that receives traffic") + parser.add_argument(u"--tx_if", help=u"interface that sends traffic") + parser.add_argument(u"--rx_if", help=u"interface that receives traffic") if more_args is not None: for arg in more_args: - arg_name = '--{0}'.format(arg) + arg_name = f"--{arg}" parser.add_argument(arg_name) if opt_args is not None: for arg in opt_args: - arg_name = '--{0}'.format(arg) - parser.add_argument(arg_name, nargs='?', default='') + arg_name = f"--{arg}" + parser.add_argument(arg_name, nargs=u"?", default=u"") self._parser = parser self._args = vars(parser.parse_args()) @@ -63,6 +63,6 @@ class TrafficScriptArg(object): """ arg_val = self._args.get(arg_name) if arg_val is None: - raise Exception("Argument '{0}' not found".format(arg_name)) + raise Exception(f"Argument '{arg_name}' not found") return arg_val diff --git a/resources/libraries/python/TrafficScriptExecutor.py b/resources/libraries/python/TrafficScriptExecutor.py index abd97e6fc6..a9a733bbdf 100644 --- a/resources/libraries/python/TrafficScriptExecutor.py +++ b/resources/libraries/python/TrafficScriptExecutor.py @@ -16,10 +16,10 @@ from resources.libraries.python.Constants import Constants from resources.libraries.python.ssh import SSH -__all__ = ['TrafficScriptExecutor'] +__all__ = [u"TrafficScriptExecutor"] -class TrafficScriptExecutor(object): +class TrafficScriptExecutor: """Traffic script executor utilities.""" @staticmethod @@ -31,11 +31,11 @@ class TrafficScriptExecutor(object): :returns: Escaped string. :rtype: str """ - return string.replace('"', '\\"').replace("$", "\\$") + return string.replace(u'"', u'\\"').replace(u"$", u"\\$") @staticmethod - def run_traffic_script_on_node(script_file_name, node, script_args, - timeout=60): + def run_traffic_script_on_node( + script_file_name, node, script_args, timeout=60): """Run traffic script on the TG node. :param script_file_name: Traffic script name. @@ -55,33 +55,32 @@ class TrafficScriptExecutor(object): """ ssh = SSH() ssh.connect(node) - cmd = ("cd {}; " + - "virtualenv --system-site-packages --never-download env && " + - "export PYTHONPATH=${{PWD}}; " + - ". ${{PWD}}/env/bin/activate; " + - "resources/traffic_scripts/{} {}") \ - .format(Constants.REMOTE_FW_DIR, script_file_name, - script_args) + cmd = f"cd {Constants.REMOTE_FW_DIR}; virtualenv -p $(which python3) " \ + f"--system-site-packages --never-download env && " \ + f"export PYTHONPATH=${{PWD}}; . 
${{PWD}}/env/bin/activate; " \ + f"resources/traffic_scripts/{script_file_name} {script_args}" + ret_code, stdout, stderr = ssh.exec_command_sudo( - 'sh -c "{cmd}"'.format(cmd=TrafficScriptExecutor._escape(cmd)), - timeout=timeout) + f'sh -c "{TrafficScriptExecutor._escape(cmd)}"', timeout=timeout + ) if ret_code != 0: - if "RuntimeError: ICMP echo Rx timeout" in stderr: - raise RuntimeError("ICMP echo Rx timeout") - elif "RuntimeError: IP packet Rx timeout" in stderr: - raise RuntimeError("IP packet Rx timeout") - elif "RuntimeError: DHCP REQUEST Rx timeout" in stderr: - raise RuntimeError("DHCP REQUEST Rx timeout") - elif "RuntimeError: DHCP DISCOVER Rx timeout" in stderr: - raise RuntimeError("DHCP DISCOVER Rx timeout") - elif "RuntimeError: TCP/UDP Rx timeout" in stderr: - raise RuntimeError("TCP/UDP Rx timeout") - elif "Error occurred: ARP reply timeout" in stdout: - raise RuntimeError("ARP reply timeout") - elif "RuntimeError: ESP packet Rx timeout" in stderr: - raise RuntimeError("ESP packet Rx timeout") + if u"RuntimeError: ICMP echo Rx timeout" in stderr: + msg = "ICMP echo Rx timeout" + elif u"RuntimeError: IP packet Rx timeout" in stderr: + msg = u"IP packet Rx timeout" + elif u"RuntimeError: DHCP REQUEST Rx timeout" in stderr: + msg = u"DHCP REQUEST Rx timeout" + elif u"RuntimeError: DHCP DISCOVER Rx timeout" in stderr: + msg = u"DHCP DISCOVER Rx timeout" + elif u"RuntimeError: TCP/UDP Rx timeout" in stderr: + msg = u"TCP/UDP Rx timeout" + elif u"Error occurred: ARP reply timeout" in stdout: + msg = u"ARP reply timeout" + elif u"RuntimeError: ESP packet Rx timeout" in stderr: + msg = u"ESP packet Rx timeout" else: - raise RuntimeError("Traffic script execution failed") + msg = u"Traffic script execution failed" + raise RuntimeError(msg) @staticmethod def traffic_script_gen_arg(rx_if, tx_if, src_mac, dst_mac, src_ip, dst_ip): @@ -102,7 +101,6 @@ class TrafficScriptExecutor(object): :returns: Traffic script arguments string. 
:rtype: str """ - args = ('--rx_if {0} --tx_if {1} --src_mac {2} --dst_mac {3} --src_ip' - ' {4} --dst_ip {5}').format(rx_if, tx_if, src_mac, dst_mac, - src_ip, dst_ip) + args = f"--rx_if {rx_if} --tx_if {tx_if} --src_mac {src_mac} " \ + f"--dst_mac {dst_mac} --src_ip {src_ip} --dst_ip {dst_ip}" return args diff --git a/resources/libraries/python/VPPUtil.py b/resources/libraries/python/VPPUtil.py index 72b6142306..72325d8169 100644 --- a/resources/libraries/python/VPPUtil.py +++ b/resources/libraries/python/VPPUtil.py @@ -22,7 +22,7 @@ from resources.libraries.python.ssh import exec_cmd_no_error from resources.libraries.python.topology import NodeType -class VPPUtil(object): +class VPPUtil: """General class for any VPP related methods/functions.""" @staticmethod @@ -38,20 +38,20 @@ class VPPUtil(object): :type additional_cmds: tuple """ def_setting_tb_displayed = { - 'IPv6 FIB': 'ip6 fib', - 'IPv4 FIB': 'ip fib', - 'Interface IP': 'int addr', - 'Interfaces': 'int', - 'ARP': 'ip arp', - 'Errors': 'err' + u"IPv6 FIB": u"ip6 fib", + u"IPv4 FIB": u"ip fib", + u"Interface IP": u"int addr", + u"Interfaces": u"int", + u"ARP": u"ip arp", + u"Errors": u"err" } if additional_cmds: for cmd in additional_cmds: - def_setting_tb_displayed['Custom Setting: {}'.format(cmd)] = cmd + def_setting_tb_displayed[f"Custom Setting: {cmd}"] = cmd for _, cmd in def_setting_tb_displayed.items(): - command = 'vppctl sh {cmd}'.format(cmd=cmd) + command = f"vppctl sh {cmd}" exec_cmd_no_error(node, command, timeout=30, sudo=True) @staticmethod @@ -71,7 +71,7 @@ class VPPUtil(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VPPUtil.restart_vpp_service(node) @staticmethod @@ -91,7 +91,7 @@ class VPPUtil(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VPPUtil.stop_vpp_service(node) @staticmethod @@ -101,9 +101,8 @@ class VPPUtil(object): :param node: Topology node. :type node: dict """ - cmd = 'command -v vpp' - exec_cmd_no_error( - node, cmd, message='VPP is not installed!') + cmd = u"command -v vpp" + exec_cmd_no_error(node, cmd, message=u"VPP is not installed!") @staticmethod def verify_vpp_started(node): @@ -112,15 +111,16 @@ class VPPUtil(object): :param node: Topology node. :type node: dict """ - cmd = 'echo "show pci" | sudo socat - UNIX-CONNECT:/run/vpp/cli.sock' + cmd = u"echo \"show pci\" | sudo socat - UNIX-CONNECT:/run/vpp/cli.sock" exec_cmd_no_error( - node, cmd, sudo=False, message='VPP failed to start!', retries=120) + node, cmd, sudo=False, message=u"VPP failed to start!", retries=120 + ) - cmd = ('vppctl show pci 2>&1 | ' - 'fgrep -v "Connection refused" | ' - 'fgrep -v "No such file or directory"') + cmd = u"vppctl show pci 2>&1 | fgrep -v \"Connection refused\" | " \ + u"fgrep -v \"No such file or directory\"" exec_cmd_no_error( - node, cmd, sudo=True, message='VPP failed to start!', retries=120) + node, cmd, sudo=True, message=u"VPP failed to start!", retries=120 + ) @staticmethod def verify_vpp(node): @@ -133,9 +133,9 @@ class VPPUtil(object): """ VPPUtil.verify_vpp_installed(node) try: - # Verify responsivness of vppctl. + # Verify responsiveness of vppctl. VPPUtil.verify_vpp_started(node) - # Verify responsivness of PAPI. + # Verify responsiveness of PAPI. 
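verify_vpp above chains two transport probes — the CLI socket via socat/vppctl, then the PAPI calls that follow this comment — inside a try/finally so cleanup runs whichever probe raises. A generic sketch of that shape (the callables are placeholders; the actual cleanup body is outside this excerpt):

```python
def verify(probes, cleanup):
    """Run health probes in order; guarantee cleanup afterwards (sketch).

    Mirrors the try/finally in verify_vpp: any probe may raise
    RuntimeError, but cleanup still runs on every path.
    """
    try:
        for probe in probes:
            probe()      # e.g. a vppctl liveness check, then PAPI calls
    finally:
        cleanup()        # runs on success and failure alike
```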
VPPUtil.show_log(node) VPPUtil.vpp_show_version(node) finally: @@ -149,33 +149,23 @@ class VPPUtil(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VPPUtil.verify_vpp(node) @staticmethod - def vpp_show_version(node, verbose=True): + def vpp_show_version(node): """Run "show_version" PAPI command. :param node: Node to run command on. - :param verbose: Show version, compile date and compile location if True - otherwise show only version. :type node: dict - :type verbose: bool :returns: VPP version. :rtype: str """ - cmd = 'show_version' + cmd = u"show_version" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply() - return_version = reply['version'].rstrip('\0x00') - version = 'VPP version: {ver}\n'.format(ver=return_version) - if verbose: - version += ('Compile date: {date}\n' - 'Compile location: {cl}\n'. - format(date=reply['build_date'].rstrip('\0x00'), - cl=reply['build_directory'].rstrip('\0x00'))) - logger.info(version) - return return_version + logger.info(f"VPP version: {reply[u'version']}\n") + return f"{reply[u'version']}" @staticmethod def show_vpp_version_on_all_duts(nodes): @@ -185,7 +175,7 @@ class VPPUtil(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VPPUtil.vpp_show_version(node) @staticmethod @@ -196,29 +186,27 @@ class VPPUtil(object): :type node: dict """ - cmd = 'sw_interface_dump' + cmd = u"sw_interface_dump" args = dict( name_filter_valid=False, - name_filter='' + name_filter=u"" ) - err_msg = 'Failed to get interface dump on host {host}'.format( - host=node['host']) + err_msg = f"Failed to get interface dump on host {node[u'host']}" with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd, **args).get_details(err_msg) for if_dump in details: - if_dump['l2_address'] = str(if_dump['l2_address']) - if_dump['b_dmac'] = str(if_dump['b_dmac']) - if_dump['b_smac'] = str(if_dump['b_smac']) - if_dump['flags'] = if_dump['flags'].value - if_dump['type'] = if_dump['type'].value - if_dump['link_duplex'] = if_dump['link_duplex'].value - if_dump['sub_if_flags'] = if_dump['sub_if_flags'].value \ - if hasattr(if_dump['sub_if_flags'], 'value') \ - else int(if_dump['sub_if_flags']) + if_dump[u"l2_address"] = str(if_dump[u"l2_address"]) + if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"]) + if_dump[u"b_smac"] = str(if_dump[u"b_smac"]) + if_dump[u"flags"] = if_dump[u"flags"].value + if_dump[u"type"] = if_dump[u"type"].value + if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value + if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \ + if hasattr(if_dump[u"sub_if_flags"], u"value") \ + else int(if_dump[u"sub_if_flags"]) # TODO: return only base data - logger.trace('Interface data of host {host}:\n{details}'.format( - host=node['host'], details=details)) + logger.trace(f"Interface data of host {node[u'host']}:\n{details}") @staticmethod def vpp_enable_traces_on_dut(node, fail_on_error=False): @@ -231,10 +219,10 @@ class VPPUtil(object): :type fail_on_error: bool """ cmds = [ - "trace add dpdk-input 50", - "trace add vhost-user-input 50", - "trace add memif-input 50", - "trace add avf-input 50" + u"trace add dpdk-input 50", + u"trace add vhost-user-input 50", + u"trace add memif-input 50", + u"trace add avf-input 50" ] for cmd in cmds: @@ -255,7 +243,7 @@ class VPPUtil(object): :type fail_on_error: bool """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if 
node[u"type"] == NodeType.DUT: VPPUtil.vpp_enable_traces_on_dut(node, fail_on_error) @staticmethod @@ -275,7 +263,7 @@ class VPPUtil(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VPPUtil.vpp_enable_elog_traces(node) @staticmethod @@ -285,7 +273,7 @@ class VPPUtil(object): :param node: Topology node. :type node: dict """ - PapiSocketExecutor.run_cli_cmd(node, "show event-logger") + PapiSocketExecutor.run_cli_cmd(node, u"show event-logger") @staticmethod def show_event_logger_on_all_duts(nodes): @@ -295,7 +283,7 @@ class VPPUtil(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VPPUtil.show_event_logger(node) @staticmethod @@ -305,7 +293,7 @@ class VPPUtil(object): :param node: Topology node. :type node: dict """ - PapiSocketExecutor.run_cli_cmd(node, "show logging") + PapiSocketExecutor.run_cli_cmd(node, u"show logging") @staticmethod def show_log_on_all_duts(nodes): @@ -315,7 +303,7 @@ class VPPUtil(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VPPUtil.show_log(node) @staticmethod @@ -327,19 +315,19 @@ class VPPUtil(object): :returns: VPP thread data. :rtype: list """ - cmd = 'show_threads' + cmd = u"show_threads" with PapiSocketExecutor(node) as papi_exec: reply = papi_exec.add(cmd).get_reply() threads_data = list() - for thread in reply["thread_data"]: + for thread in reply[u"thread_data"]: thread_data = list() for item in thread: - if isinstance(item, unicode): + if isinstance(item, str): item = item.rstrip('\x00') thread_data.append(item) threads_data.append(thread_data) - logger.trace("show threads:\n{threads}".format(threads=threads_data)) + logger.trace(f"show threads:\n{threads_data}") return threads_data diff --git a/resources/libraries/python/VatExecutor.py b/resources/libraries/python/VatExecutor.py index 4fe549eb23..2d6a03327b 100644 --- a/resources/libraries/python/VatExecutor.py +++ b/resources/libraries/python/VatExecutor.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at: @@ -14,16 +14,17 @@ """VAT executor library.""" import json + from os import remove from paramiko.ssh_exception import SSHException from robot.api import logger -from resources.libraries.python.ssh import SSH, SSHTimeout from resources.libraries.python.Constants import Constants from resources.libraries.python.PapiHistory import PapiHistory +from resources.libraries.python.ssh import SSH, SSHTimeout -__all__ = ['VatExecutor'] +__all__ = [u"VatExecutor"] def cleanup_vat_json_output(json_output, vat_name=None): @@ -40,14 +41,13 @@ def cleanup_vat_json_output(json_output, vat_name=None): """ retval = json_output - clutter = ['vat#', 'dump_interface_table error: Misc'] + clutter = [u"vat#", u"dump_interface_table error: Misc"] if vat_name: - remote_file_path = '{0}/{1}/{2}'.format(Constants.REMOTE_FW_DIR, - Constants.RESOURCES_TPL_VAT, - vat_name) - clutter.append("{0}(2):".format(remote_file_path)) + remote_file_path = f"{Constants.REMOTE_FW_DIR}/" \ + f"{Constants.RESOURCES_TPL_VAT}/{vat_name}" + clutter.append(f"{remote_file_path}(2):") for garbage in clutter: - retval = retval.replace(garbage, '') + retval = retval.replace(garbage, u"") return retval @@ -60,12 +60,13 @@ def get_vpp_pid(node): running on the DUT node. :rtype: int or list """ + # pylint: disable=import-outside-toplevel import resources.libraries.python.DUTSetup as PidLib pid = PidLib.DUTSetup.get_vpp_pid(node) return pid -class VatExecutor(object): +class VatExecutor: """Contains methods for executing VAT commands on DUTs.""" def __init__(self): self._stdout = None @@ -73,8 +74,9 @@ class VatExecutor(object): self._ret_code = None self._script_name = None - def execute_script(self, vat_name, node, timeout=120, json_out=True, - copy_on_execute=False, history=True): + def execute_script( + self, vat_name, node, timeout=120, json_out=True, + copy_on_execute=False, history=True): """Execute VAT script on remote node, and store the result. There is an option to copy script from local host to remote host before execution. Path is defined automatically. 
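A minimal usage sketch for this entry point (the script name "show_int.vat" and the node dict are illustrative assumptions, not part of the patch):

    vat = VatExecutor()
    vat.execute_script(u"show_int.vat", node, json_out=False)
    vat.script_should_have_passed()
    output = vat.get_script_stdout()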
@@ -102,45 +104,44 @@ class VatExecutor(object): try: ssh.connect(node) except: - raise SSHException("Cannot open SSH connection to execute VAT " - "command(s) from vat script {name}" - .format(name=vat_name)) + raise SSHException( + f"Cannot open SSH connection to execute VAT command(s) " + f"from vat script {vat_name}" + ) if copy_on_execute: ssh.scp(vat_name, vat_name) remote_file_path = vat_name if history: - with open(vat_name, 'r') as vat_file: + with open(vat_name, "r") as vat_file: for line in vat_file: - PapiHistory.add_to_papi_history(node, - line.replace('\n', ''), - papi=False) + PapiHistory.add_to_papi_history( + node, line.replace(u"\n", u""), papi=False + ) else: - remote_file_path = '{0}/{1}/{2}'.format(Constants.REMOTE_FW_DIR, - Constants.RESOURCES_TPL_VAT, - vat_name) - - cmd = "{vat_bin} {json} in {vat_path} script".format( - vat_bin=Constants.VAT_BIN_NAME, - json="json" if json_out is True else "", - vat_path=remote_file_path) + remote_file_path = f"{Constants.REMOTE_FW_DIR}/" \ + f"{Constants.RESOURCES_TPL_VAT}/{vat_name}" + cmd = f"{Constants.VAT_BIN_NAME}" \ + f"{u' json' if json_out is True else u''} " \ + f"in {remote_file_path} script" try: - ret_code, stdout, stderr = ssh.exec_command_sudo(cmd=cmd, - timeout=timeout) + ret_code, stdout, stderr = ssh.exec_command_sudo( + cmd=cmd, timeout=timeout + ) except SSHTimeout: - logger.error("VAT script execution timeout: {0}".format(cmd)) + logger.error(f"VAT script execution timeout: {cmd}") raise - except: - raise RuntimeError("VAT script execution failed: {0}".format(cmd)) + except Exception: + raise RuntimeError(f"VAT script execution failed: {cmd}") self._ret_code = ret_code self._stdout = stdout self._stderr = stderr self._script_name = vat_name - def write_and_execute_script(self, node, tmp_fn, commands, timeout=300, - json_out=False): + def write_and_execute_script( + self, node, tmp_fn, commands, timeout=300, json_out=False): """Write VAT commands to the script, copy it to node and execute it. :param node: VPP node. 
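A sketch of the typical call, assuming a hypothetical temporary file name and command list (each command must end with a newline, since the commands are written via writelines):

    VatExecutor().write_and_execute_script(
        node, u"/tmp/tmp_cmds.vat", [u"show interface\n"]
    )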
@@ -154,11 +155,13 @@ class VatExecutor(object): :type timeout: int :type json_out: bool """ - with open(tmp_fn, 'w') as tmp_f: + with open(tmp_fn, "w") as tmp_f: tmp_f.writelines(commands) - self.execute_script(tmp_fn, node, timeout=timeout, json_out=json_out, - copy_on_execute=True) + self.execute_script( + tmp_fn, node, timeout=timeout, json_out=json_out, + copy_on_execute=True + ) remove(tmp_fn) def execute_script_json_out(self, vat_name, node, timeout=120): @@ -181,21 +184,23 @@ """Read return code from last executed script and raise exception if the script didn't fail.""" if self._ret_code is None: - raise Exception("First execute the script!") + raise Exception(u"First execute the script!") if self._ret_code == 0: raise AssertionError( - "VAT Script execution passed, but failure was expected: {cmd}" - .format(cmd=self._script_name)) + f"VAT Script execution passed, but failure was expected: " + f"{self._script_name}" + ) def script_should_have_passed(self): """Read return code from last executed script and raise exception if the script failed.""" if self._ret_code is None: - raise Exception("First execute the script!") + raise Exception(u"First execute the script!") if self._ret_code != 0: raise AssertionError( - "VAT Script execution failed, but success was expected: {cmd}" - .format(cmd=self._script_name)) + f"VAT Script execution failed, but success was expected: " + f"{self._script_name}" + ) def get_script_stdout(self): """Returns value of stdout from last executed script.""" @@ -212,15 +217,17 @@ :param node: Node in topology on which the script is executed. :param vat_template_file: Template file of VAT script. + :param json_param: Require JSON mode. :param vat_args: Arguments to the template file. :returns: List of JSON objects returned by VAT. """ with VatTerminal(node, json_param=json_param) as vat: - return vat.vat_terminal_exec_cmd_from_template(vat_template_file, - **vat_args) + return vat.vat_terminal_exec_cmd_from_template( + vat_template_file, **vat_args + ) -class VatTerminal(object): +class VatTerminal: """VAT interactive terminal. :param node: Node to open VAT terminal on. @@ -231,11 +238,11 @@ """ - __VAT_PROMPT = ("vat# ", ) - __LINUX_PROMPT = (":~# ", ":~$ ", "~]$ ", "~]# ") + __VAT_PROMPT = (u"vat# ", ) + __LINUX_PROMPT = (u":~# ", u":~$ ", u"~]$ ", u"~]# ") def __init__(self, node, json_param=True): - json_text = ' json' if json_param else '' + json_text = u" json" if json_param else u"" self.json = json_param self._node = node self._ssh = SSH() @@ -243,15 +250,16 @@ try: self._tty = self._ssh.interactive_terminal_open() except Exception: - raise RuntimeError("Cannot open interactive terminal on node {0}". - format(self._node)) + raise RuntimeError( + f"Cannot open interactive terminal on node {self._node}" + ) for _ in range(3): try: self._ssh.interactive_terminal_exec_command( - self._tty, - 'sudo -S {0}{1}'.format(Constants.VAT_BIN_NAME, json_text), - self.__VAT_PROMPT) + self._tty, f"sudo -S {Constants.VAT_BIN_NAME}{json_text}", + self.__VAT_PROMPT + ) except Exception: continue else: @@ -260,16 +268,17 @@ vpp_pid = get_vpp_pid(self._node) if vpp_pid: if isinstance(vpp_pid, int): - logger.trace("VPP running on node {0}". - format(self._node['host'])) + logger.trace(f"VPP running on node {self._node[u'host']}") else: - logger.error("More instances of VPP running on node {0}.". 
- format(self._node['host'])) + logger.error( + f"More instances of VPP running " + f"on node {self._node[u'host']}." + ) else: - logger.error("VPP not running on node {0}.". - format(self._node['host'])) - raise RuntimeError("Failed to open VAT console on node {0}". - format(self._node['host'])) + logger.error(f"VPP not running on node {self._node[u'host']}.") + raise RuntimeError( + f"Failed to open VAT console on node {self._node[u'host']}" + ) self._exec_failure = False self.vat_stdout = None @@ -289,36 +298,37 @@ class VatTerminal(object): None if not in JSON mode. """ PapiHistory.add_to_papi_history(self._node, cmd, papi=False) - logger.debug("Executing command in VAT terminal: {0}".format(cmd)) + logger.debug(f"Executing command in VAT terminal: {cmd}") try: - out = self._ssh.interactive_terminal_exec_command(self._tty, cmd, - self.__VAT_PROMPT) + out = self._ssh.interactive_terminal_exec_command( + self._tty, cmd, self.__VAT_PROMPT + ) self.vat_stdout = out except Exception: self._exec_failure = True vpp_pid = get_vpp_pid(self._node) if vpp_pid: if isinstance(vpp_pid, int): - raise RuntimeError("VPP running on node {0} but VAT command" - " {1} execution failed.". - format(self._node['host'], cmd)) + msg = f"VPP running on node {self._node[u'host']} " \ + f"but VAT command {cmd} execution failed." else: - raise RuntimeError("More instances of VPP running on node " - "{0}. VAT command {1} execution failed.". - format(self._node['host'], cmd)) - raise RuntimeError("VPP not running on node {0}. VAT command " - "{1} execution failed.". - format(self._node['host'], cmd)) - - logger.debug("VAT output: {0}".format(out)) + msg = f"More instances of VPP running on node " \ + f"{self._node[u'host']}. VAT command {cmd} " \ + f"execution failed." + else: + msg = f"VPP not running on node {self._node[u'host']}. " \ + f"VAT command {cmd} execution failed." + raise RuntimeError(msg) + + logger.debug(f"VAT output: {out}") if self.json: - obj_start = out.find('{') - obj_end = out.rfind('}') - array_start = out.find('[') - array_end = out.rfind(']') + obj_start = out.find(u"{") + obj_end = out.rfind(u"}") + array_start = out.find(u"[") + array_end = out.rfind(u"]") if obj_start == -1 and array_start == -1: - raise RuntimeError("VAT command {0}: no JSON data.".format(cmd)) + raise RuntimeError(f"VAT command {cmd}: no JSON data.") if obj_start < array_start or array_start == -1: start = obj_start @@ -329,36 +339,44 @@ class VatTerminal(object): out = out[start:end] json_out = json.loads(out) return json_out - else: - return None + + return None def vat_terminal_close(self): """Close VAT terminal.""" # interactive terminal is dead, we only need to close session if not self._exec_failure: try: - self._ssh.interactive_terminal_exec_command(self._tty, - 'quit', - self.__LINUX_PROMPT) + self._ssh.interactive_terminal_exec_command( + self._tty, u"quit", self.__LINUX_PROMPT + ) except Exception: vpp_pid = get_vpp_pid(self._node) if vpp_pid: if isinstance(vpp_pid, int): - logger.trace("VPP running on node {0}.". - format(self._node['host'])) + logger.trace( + f"VPP running on node {self._node[u'host']}." + ) else: - logger.error("More instances of VPP running on node " - "{0}.".format(self._node['host'])) + logger.error( + f"More instances of VPP running " + f"on node {self._node[u'host']}." + ) else: - logger.error("VPP not running on node {0}.". - format(self._node['host'])) - raise RuntimeError("Failed to close VAT console on node {0}". 
- format(self._node['host'])) + logger.error( + f"VPP not running on node {self._node[u'host']}." + ) + raise RuntimeError( + f"Failed to close VAT console " + f"on node {self._node[u'host']}" + ) try: self._ssh.interactive_terminal_close(self._tty) - except: - raise RuntimeError("Cannot close interactive terminal on node {0}". - format(self._node['host'])) + except Exception: + raise RuntimeError( + f"Cannot close interactive terminal " + f"on node {self._node[u'host']}" + ) def vat_terminal_exec_cmd_from_template(self, vat_template_file, **args): """Execute VAT script from a file. @@ -367,12 +385,12 @@ :param args: Dictionary of parameters for VAT script. :returns: List of JSON objects returned by VAT. """ - file_path = '{}/{}'.format(Constants.RESOURCES_TPL_VAT, - vat_template_file) - with open(file_path, 'r') as template_file: + file_path = f"{Constants.RESOURCES_TPL_VAT}/{vat_template_file}" + + with open(file_path, "r") as template_file: cmd_template = template_file.readlines() - ret = [] + ret = list() for line_tmpl in cmd_template: vat_cmd = line_tmpl.format(**args) - ret.append(self.vat_terminal_exec_cmd(vat_cmd.replace('\n', ''))) + ret.append(self.vat_terminal_exec_cmd(vat_cmd.replace(u"\n", u""))) return ret diff --git a/resources/libraries/python/VatJsonUtil.py b/resources/libraries/python/VatJsonUtil.py index ef92a24f07..594d3e2f39 100644 --- a/resources/libraries/python/VatJsonUtil.py +++ b/resources/libraries/python/VatJsonUtil.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -18,7 +18,7 @@ from robot.api import logger from resources.libraries.python.parsers.JsonParser import JsonParser -class VatJsonUtil(object): +class VatJsonUtil: """Utilities to work with JSON data format from VAT.""" @staticmethod @@ -33,8 +33,8 @@ :returns: List representation of MAC address. :rtype: list """ - list_mac = [] - for num in mac_address.split(":"): + list_mac = list() + for num in mac_address.split(u":"): list_mac.append(int(num, 16)) return list_mac @@ -52,26 +52,29 @@ :returns: Interface from JSON. :rtype: dict """ - interface_dict = {} + interface_dict = dict() list_mac_address = VatJsonUtil._convert_mac_to_number_list(mac_address) - logger.trace("MAC address {0} converted to list {1}." - .format(mac_address, list_mac_address)) + logger.trace( + f"MAC address {mac_address} converted to list {list_mac_address}." + ) for interface in interfaces_list: # TODO: create vat json integrity checking and move there - if "l2_address" not in interface: + if u"l2_address" not in interface: raise KeyError( - "key l2_address not found in interface dict." - "Probably input list is not parsed from correct VAT " - "json output.") + u"key l2_address not found in interface dict. " + u"Probably input list is not parsed from correct VAT " + u"json output." + ) - if "l2_address_length" not in interface: + if u"l2_address_length" not in interface: raise KeyError( - "key l2_address_length not found in interface " - "dict. Probably input list is not parsed from correct " - "VAT json output.") + u"key l2_address_length not found in interface " + u"dict. Probably input list is not parsed from correct " + u"VAT json output." 
+ ) + mac_from_json = interface[u"l2_address"][:6] if mac_from_json == list_mac_address: - if interface["l2_address_length"] != 6: - raise ValueError("l2_address_length value is not 6.") + if interface[u"l2_address_length"] != 6: + raise ValueError(u"l2_address_length value is not 6.") interface_dict = interface break return interface_dict @@ -90,18 +93,18 @@ class VatJsonUtil(object): :type interface_dump_json: str """ interface_list = JsonParser().parse_data(interface_dump_json) - for ifc in node['interfaces'].values(): - if_mac = ifc['mac_address'] + for ifc in node[u"interfaces"].values(): + if_mac = ifc[u"mac_address"] interface_dict = VatJsonUtil.get_vpp_interface_by_mac( - interface_list, if_mac) + interface_list, if_mac + ) if not interface_dict: - logger.trace('Interface {0} not found by MAC {1}' - .format(ifc, if_mac)) - ifc['vpp_sw_index'] = None + logger.trace(f"Interface {ifc} not found by MAC {if_mac}") + ifc[u"vpp_sw_index"] = None continue - ifc['name'] = interface_dict["interface_name"] - ifc['vpp_sw_index'] = interface_dict["sw_if_index"] - ifc['mtu'] = interface_dict["mtu"] + ifc[u"name"] = interface_dict[u"interface_name"] + ifc[u"vpp_sw_index"] = interface_dict[u"sw_if_index"] + ifc[u"mtu"] = interface_dict[u"mtu"] @staticmethod def get_interface_sw_index_from_json(interface_dump_json, interface_name): @@ -120,15 +123,16 @@ class VatJsonUtil(object): interface_list = JsonParser().parse_data(interface_dump_json) for interface in interface_list: try: - if interface['interface_name'] == interface_name: - index = interface['sw_if_index'] - logger.debug('Interface with name {} has sw_if_index {}.' - .format(interface_name, index)) + if interface[u"interface_name"] == interface_name: + index = interface[u"sw_if_index"] + logger.debug( + f"Interface with name {interface_name} " + f"has sw_if_index {index}." + ) return index except KeyError: pass - raise ValueError('Interface with name {} not found.' - .format(interface_name)) + raise ValueError(f"Interface with name {interface_name} not found.") @staticmethod def get_interface_name_from_json(interface_dump_json, sw_if_index): @@ -147,16 +151,16 @@ class VatJsonUtil(object): interface_list = JsonParser().parse_data(interface_dump_json) for interface in interface_list: try: - if interface['sw_if_index'] == sw_if_index: - interface_name = interface['interface_name'] - logger.debug('Interface with sw_if_index {idx} has name' - ' {name}.'.format(idx=sw_if_index, - name=interface_name)) + if interface[u"sw_if_index"] == sw_if_index: + interface_name = interface[u"interface_name"] + logger.debug( + f"Interface with sw_if_index {sw_if_index} " + f"has name {interface_name}." + ) return interface_name except KeyError: pass - raise ValueError('Interface with sw_if_index {} not found.' 
- .format(sw_if_index)) + raise ValueError(f"Interface with sw_if_index {sw_if_index} not found.") @staticmethod def get_interface_mac_from_json(interface_dump_json, sw_if_index): @@ -175,22 +179,23 @@ class VatJsonUtil(object): interface_list = JsonParser().parse_data(interface_dump_json) for interface in interface_list: try: - if interface['sw_if_index'] == sw_if_index: - mac_from_json = interface['l2_address'][:6] \ - if 'l2_address' in interface.keys() else '' - mac_address = ':'.join('{:02x}'.format(item) - for item in mac_from_json) - logger.debug('Interface with sw_if_index {idx} has MAC' - ' address {addr}.'.format(idx=sw_if_index, - addr=mac_address)) + if interface[u"sw_if_index"] == sw_if_index: + mac_from_json = interface[u"l2_address"][:6] \ + if u"l2_address" in list(interface.keys()) else u"" + mac_address = u":".join( + f"{item:02x}" for item in mac_from_json + ) + logger.debug( + f"Interface with sw_if_index {sw_if_index} " + f"has MAC address {mac_address}." + ) return mac_address except KeyError: pass - raise ValueError('Interface with sw_if_index {idx} not found.' - .format(idx=sw_if_index)) + raise ValueError(f"Interface with sw_if_index {sw_if_index} not found.") @staticmethod - def verify_vat_retval(vat_out, exp_retval=0, err_msg='VAT cmd failed'): + def verify_vat_retval(vat_out, exp_retval=0, err_msg=u"VAT cmd failed"): """Verify return value of VAT command. VAT command JSON output should be object (dict in python) or array. We @@ -207,7 +212,7 @@ class VatJsonUtil(object): :raises RuntimeError: If VAT command return value is incorrect. """ if isinstance(vat_out, dict): - retval = vat_out.get('retval') + retval = vat_out.get(u"retval") if retval is not None: if retval != exp_retval: raise RuntimeError(err_msg) diff --git a/resources/libraries/python/VhostUser.py b/resources/libraries/python/VhostUser.py index a24bc97633..48cce002bd 100644 --- a/resources/libraries/python/VhostUser.py +++ b/resources/libraries/python/VhostUser.py @@ -20,7 +20,7 @@ from resources.libraries.python.topology import NodeType, Topology from resources.libraries.python.InterfaceUtil import InterfaceUtil -class VhostUser(object): +class VhostUser: """Vhost-user interfaces L1 library.""" @staticmethod @@ -33,15 +33,16 @@ class VhostUser(object): response. :rtype: list """ - cmd = "sw_interface_vhost_user_dump" + cmd = u"sw_interface_vhost_user_dump" + with PapiSocketExecutor(node) as papi_exec: details = papi_exec.add(cmd).get_details() for vhost in details: - vhost["interface_name"] = vhost["interface_name"].rstrip('\x00') - vhost["sock_filename"] = vhost["sock_filename"].rstrip('\x00') + vhost[u"interface_name"] = vhost[u"interface_name"] + vhost[u"sock_filename"] = vhost[u"sock_filename"] - logger.debug("VhostUser details:\n{details}".format(details=details)) + logger.debug(f"VhostUser details:\n{details}") return details @@ -56,17 +57,18 @@ class VhostUser(object): :returns: SW interface index. 
:rtype: int """ - cmd = 'create_vhost_user_if' - err_msg = 'Failed to create Vhost-user interface on host {host}'.format( - host=node['host']) + cmd = u"create_vhost_user_if" + err_msg = f"Failed to create Vhost-user interface " \ + f"on host {node[u'host']}" args = dict( - sock_filename=str(socket) + sock_filename=str(socket).encode(encoding=u"utf-8") ) + with PapiSocketExecutor(node) as papi_exec: sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg) # Update the Topology: - if_key = Topology.add_new_port(node, 'vhost') + if_key = Topology.add_new_port(node, u"vhost") Topology.update_interface_sw_if_index(node, if_key, sw_if_index) ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index) @@ -90,9 +92,9 @@ :returns: Interface name or None if not found. :rtype: str """ - for interface in node['interfaces'].values(): - if interface.get('socket') == socket: - return interface.get('name') + for interface in node[u"interfaces"].values(): + if interface.get(u"socket") == socket: + return interface.get(u"name") return None @staticmethod @@ -107,7 +109,6 @@ :returns: l2_address of the given interface. :rtype: str """ - return InterfaceUtil.vpp_get_interface_mac(node, sw_if_index) @staticmethod @@ -127,5 +128,23 @@ :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VhostUser.vpp_show_vhost(node) + + @staticmethod + def vhost_user_dump(node): + """Get vhost-user data for the given node. + + :param node: VPP node to get interface data from. + :type node: dict + :returns: List of dictionaries with all vhost-user interfaces. + :rtype: list + """ + cmd = u"sw_interface_vhost_user_dump" + err_msg = f"Failed to get vhost-user dump on host {node[u'host']}" + + with PapiSocketExecutor(node) as papi_exec: + details = papi_exec.add(cmd).get_details(err_msg) + + logger.debug(f"Vhost-user details:\n{details}") + return details diff --git a/resources/libraries/python/VppApiCrc.py b/resources/libraries/python/VppApiCrc.py index 8f2979eaec..d55058e431 100644 --- a/resources/libraries/python/VppApiCrc.py +++ b/resources/libraries/python/VppApiCrc.py @@ -21,23 +21,24 @@ from robot.api import logger from resources.libraries.python.Constants import Constants + def _str(text): - """Convert from possible unicode without interpreting as number. + """Convert from possible bytes without interpreting as number. :param text: Input to convert. :type text: str or bytes :returns: Converted text. :rtype: str """ - return text.encode("utf-8") if isinstance(text, unicode) else text + return text.decode(u"utf-8") if isinstance(text, bytes) else text -class VppApiCrcChecker(object): +class VppApiCrcChecker: """Holder of data related to tracking VPP API CRCs. Both message names and crc hexa strings are tracked as - ordinary Python2 (bytes) string, so _str() is used when input is - possibly unicode or otherwise not safe. + ordinary Python3 (unicode) string, so _str() is used when input is + possibly bytes or otherwise not safe. Each instance of this class starts with same default state, so make sure the calling libraries have appropriate robot library scope. @@ -62,9 +63,9 @@ self._expected = dict() """Mapping from collection name to mapping from API name to CRC string. - Colection name should be something useful for logging. + Collection name should be something useful for logging. 
- Order of addition reflects the order colections should be queried. + Order of addition reflects the order collections should be queried. If an incompatible CRC is found, affected collections are removed. A CRC that would remove all does not, added to _reported instead, while causing a failure in single test (if fail_on_mismatch).""" @@ -100,7 +101,7 @@ class VppApiCrcChecker(object): """Log to console, on fail_on_mismatch also raise runtime exception. :param exc_msg: The message to include in log or exception. - :type exception: str + :type exc_msg: str :raises RuntimeError: With the message, if fail_on_mismatch. """ logger.console("RuntimeError:\n{m}".format(m=exc_msg)) @@ -118,8 +119,9 @@ class VppApiCrcChecker(object): """ collection_name = _str(collection_name) if collection_name in self._expected: - raise RuntimeError("Collection {cn!r} already registered.".format( - cn=collection_name)) + raise RuntimeError( + f"Collection {collection_name!r} already registered." + ) mapping = {_str(k): _str(v) for k, v in name_to_crc_mapping.items()} self._expected[collection_name] = mapping self._missing[collection_name] = mapping.copy() @@ -128,10 +130,10 @@ class VppApiCrcChecker(object): """Add all collections this CSIT codebase is tested against.""" file_path = os.path.normpath(os.path.join( - os.path.dirname(os.path.abspath(__file__)), "..", "..", - "api", "vpp", "supported_crcs.yaml")) + os.path.dirname(os.path.abspath(__file__)), u"..", u"..", + u"api", u"vpp", u"supported_crcs.yaml")) with open(file_path, "r") as file_in: - collections_dict = yaml.load(file_in.read()) + collections_dict = yaml.safe_load(file_in.read()) for collection_name, name_to_crc_mapping in collections_dict.items(): self._register_collection(collection_name, name_to_crc_mapping) @@ -149,8 +151,7 @@ class VppApiCrcChecker(object): if isinstance(item, (dict, list)): continue return _str(item) - raise RuntimeError("No name found for message: {obj!r}".format( - obj=msg_obj)) + raise RuntimeError(f"No name found for message: {msg_obj!r}") @staticmethod def _get_crc(msg_obj): @@ -165,11 +166,10 @@ class VppApiCrcChecker(object): for item in reversed(msg_obj): if not isinstance(item, dict): continue - crc = item.get("crc", None) + crc = item.get(u"crc", None) if crc: return _str(crc) - raise RuntimeError("No CRC found for message: {obj!r}".format( - obj=msg_obj)) + raise RuntimeError(f"No CRC found for message: {msg_obj!r}") def _process_crc(self, api_name, crc): """Compare API to verified collections, update class state. @@ -220,7 +220,7 @@ class VppApiCrcChecker(object): self._expected = new_expected self._missing = {name: self._missing[name] for name in new_expected} return - # No new_expected means some colections knew the api_name, + # No new_expected means some collections knew the api_name, # but CRC does not match any. This has to be reported. 
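# (Hypothetical illustration: collections "19.08" and "19.08.1" both
# list the message, each with a CRC different from the parsed one, so
# no collection survives and the parsed CRC is recorded for reporting.)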
self._reported[api_name] = crc @@ -240,17 +240,16 @@ class VppApiCrcChecker(object): """ for root, _, files in os.walk(directory): for filename in files: - if not filename.endswith(".api.json"): + if not filename.endswith(u".api.json"): continue - with open(root + '/' + filename, "r") as file_in: + with open(f"{root}/{filename}", "r") as file_in: json_obj = json.load(file_in) - msgs = json_obj["messages"] + msgs = json_obj[u"messages"] for msg_obj in msgs: msg_name = self._get_name(msg_obj) msg_crc = self._get_crc(msg_obj) self._process_crc(msg_name, msg_crc) - logger.debug("Surviving collections: {col!r}".format( - col=self._expected.keys())) + logger.debug(f"Surviving collections: {self._expected.keys()!r}") def report_initial_conflicts(self, report_missing=False): """Report issues discovered by _check_dir, if not done that already. @@ -275,19 +274,23 @@ class VppApiCrcChecker(object): self._initial_conflicts_reported = True if self._reported: reported_indented = json.dumps( - self._reported, indent=1, sort_keys=True, separators=[",", ":"]) + self._reported, indent=1, sort_keys=True, + separators=[u",", u":"] + ) self._reported = dict() self.log_and_raise( - "Incompatible API CRCs found in .api.json files:\n{ri}".format( - ri=reported_indented)) + f"Incompatible API CRCs found in .api.json files:\n" + f"{reported_indented}" + ) if not report_missing: return missing = {name: mapp for name, mapp in self._missing.items() if mapp} if missing: missing_indented = json.dumps( - missing, indent=1, sort_keys=True, separators=[",", ":"]) - self.log_and_raise("API CRCs missing from .api.json:\n{mi}".format( - mi=missing_indented)) + missing, indent=1, sort_keys=True, separators=[u",", u":"]) + self.log_and_raise( + f"API CRCs missing from .api.json:\n{missing_indented}" + ) def check_api_name(self, api_name): """Fail if the api_name has no, or different from known CRC associated. @@ -295,9 +298,9 @@ class VppApiCrcChecker(object): Do not fail if this particular failure has been already reported. Intended use: Call during test (not in initialization), - everytime an API call is queued or response received. + every time an API call is queued or response received. - :param api_name: VPP API messagee name to check. + :param api_name: VPP API message name to check. :type api_name: str or unicode :raises RuntimeError: If no verified CRC for the api_name is found. """ @@ -328,5 +331,6 @@ class VppApiCrcChecker(object): if matching: return self._reported[api_name] = crc - self.log_and_raise("No active collection contains API {api!r}" - " with CRC {crc!r}".format(api=api_name, crc=crc)) + self.log_and_raise( + f"No active collection contains API {api_name!r} with CRC {crc!r}" + ) diff --git a/resources/libraries/python/VppConfigGenerator.py b/resources/libraries/python/VppConfigGenerator.py index 15e8ffb3b5..88fbb317c4 100644 --- a/resources/libraries/python/VppConfigGenerator.py +++ b/resources/libraries/python/VppConfigGenerator.py @@ -26,7 +26,7 @@ from resources.libraries.python.topology import NodeType from resources.libraries.python.topology import Topology from resources.libraries.python.VPPUtil import VPPUtil -__all__ = ['VppConfigGenerator'] +__all__ = [u"VppConfigGenerator"] def pci_dev_check(pci_dev): @@ -38,33 +38,35 @@ def pci_dev_check(pci_dev): :rtype: bool :raises ValueError: If PCI address is in incorrect format. 
""" - pattern = re.compile("^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:" - "[0-9A-Fa-f]{2}\\.[0-9A-Fa-f]$") - if not pattern.match(pci_dev): - raise ValueError('PCI address {addr} is not in valid format ' - 'xxxx:xx:xx.x'.format(addr=pci_dev)) + pattern = re.compile( + r"^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:[0-9A-Fa-f]{2}\.[0-9A-Fa-f]$" + ) + if not re.match(pattern, pci_dev): + raise ValueError( + f"PCI address {pci_dev} is not in valid format xxxx:xx:xx.x" + ) return True -class VppConfigGenerator(object): +class VppConfigGenerator: """VPP Configuration File Generator.""" def __init__(self): """Initialize library.""" # VPP Node to apply configuration on - self._node = '' + self._node = u"" # VPP Hostname - self._hostname = '' + self._hostname = u"" # VPP Configuration - self._nodeconfig = {} + self._nodeconfig = dict() # Serialized VPP Configuration - self._vpp_config = '' + self._vpp_config = u"" # VPP Service name - self._vpp_service_name = 'vpp' + self._vpp_service_name = u"vpp" # VPP Logfile location - self._vpp_logfile = '/tmp/vpe.log' + self._vpp_logfile = u"/tmp/vpe.log" # VPP Startup config location - self._vpp_startup_conf = '/etc/vpp/startup.conf' + self._vpp_startup_conf = u"/etc/vpp/startup.conf" # VPP Startup config backup location self._vpp_startup_conf_backup = None @@ -75,9 +77,10 @@ class VppConfigGenerator(object): :type node: dict :raises RuntimeError: If Node type is not DUT. """ - if node['type'] != NodeType.DUT: - raise RuntimeError('Startup config can only be applied to DUT' - 'node.') + if node[u"type"] != NodeType.DUT: + raise RuntimeError( + u"Startup config can only be applied to DUTnode." + ) self._node = node self._hostname = Topology.get_node_hostname(node) @@ -89,7 +92,7 @@ class VppConfigGenerator(object): """ self._vpp_logfile = logfile - def set_vpp_startup_conf_backup(self, backup='/etc/vpp/startup.backup'): + def set_vpp_startup_conf_backup(self, backup=u"/etc/vpp/startup.backup"): """Set VPP startup configuration backup. :param backup: VPP logfile location. @@ -120,10 +123,10 @@ class VppConfigGenerator(object): config[path[0]] = value return if path[0] not in config: - config[path[0]] = {} + config[path[0]] = dict() elif isinstance(config[path[0]], str): - config[path[0]] = {} if config[path[0]] == '' \ - else {config[path[0]]: ''} + config[path[0]] = dict() if config[path[0]] == u"" \ + else {config[path[0]]: u""} self.add_config_item(config[path[0]], value, path[1:]) def dump_config(self, obj, level=-1): @@ -135,23 +138,21 @@ class VppConfigGenerator(object): :type level: int :returns: nothing """ - indent = ' ' + indent = u" " if level >= 0: - self._vpp_config += '{}{{\n'.format((level) * indent) + self._vpp_config += f"{level * indent}{{\n" if isinstance(obj, dict): for key, val in obj.items(): - if hasattr(val, '__iter__'): - self._vpp_config += '{}{}\n'.format((level + 1) * indent, - key) + if hasattr(val, u"__iter__") and not isinstance(val, str): + self._vpp_config += f"{(level + 1) * indent}{key}\n" self.dump_config(val, level + 1) else: - self._vpp_config += '{}{} {}\n'.format((level + 1) * indent, - key, val) + self._vpp_config += f"{(level + 1) * indent}{key} {val}\n" else: for val in obj: - self._vpp_config += '{}{}\n'.format((level + 1) * indent, val) + self._vpp_config += f"{(level + 1) * indent}{val}\n" if level >= 0: - self._vpp_config += '{}}}\n'.format(level * indent) + self._vpp_config += f"{level * indent}}}\n" def add_unix_log(self, value=None): """Add UNIX log configuration. 
@@ -159,56 +160,56 @@ class VppConfigGenerator(object): :param value: Log file. :type value: str """ - path = ['unix', 'log'] + path = [u"unix", u"log"] if value is None: value = self._vpp_logfile self.add_config_item(self._nodeconfig, value, path) - def add_unix_cli_listen(self, value='/run/vpp/cli.sock'): + def add_unix_cli_listen(self, value=u"/run/vpp/cli.sock"): """Add UNIX cli-listen configuration. :param value: CLI listen address and port or path to CLI socket. :type value: str """ - path = ['unix', 'cli-listen'] + path = [u"unix", u"cli-listen"] self.add_config_item(self._nodeconfig, value, path) - def add_unix_gid(self, value='vpp'): + def add_unix_gid(self, value=u"vpp"): """Add UNIX gid configuration. :param value: Gid. :type value: str """ - path = ['unix', 'gid'] + path = [u"unix", u"gid"] self.add_config_item(self._nodeconfig, value, path) def add_unix_nodaemon(self): """Add UNIX nodaemon configuration.""" - path = ['unix', 'nodaemon'] - self.add_config_item(self._nodeconfig, '', path) + path = [u"unix", u"nodaemon"] + self.add_config_item(self._nodeconfig, u"", path) def add_unix_coredump(self): """Add UNIX full-coredump configuration.""" - path = ['unix', 'full-coredump'] - self.add_config_item(self._nodeconfig, '', path) + path = [u"unix", u"full-coredump"] + self.add_config_item(self._nodeconfig, u"", path) def add_unix_exec(self, value): """Add UNIX exec configuration.""" - path = ['unix', 'exec'] + path = [u"unix", u"exec"] self.add_config_item(self._nodeconfig, value, path) def add_socksvr(self, socket=Constants.SOCKSVR_PATH): """Add socksvr configuration.""" - path = ['socksvr', 'socket-name'] + path = [u"socksvr", u"socket-name"] self.add_config_item(self._nodeconfig, socket, path) - def add_api_segment_gid(self, value='vpp'): + def add_api_segment_gid(self, value=u"vpp"): """Add API-SEGMENT gid configuration. :param value: Gid. :type value: str """ - path = ['api-segment', 'gid'] + path = [u"api-segment", u"gid"] self.add_config_item(self._nodeconfig, value, path) def add_api_segment_global_size(self, value): @@ -217,7 +218,7 @@ :param value: Global size. :type value: str """ - path = ['api-segment', 'global-size'] + path = [u"api-segment", u"global-size"] self.add_config_item(self._nodeconfig, value, path) def add_api_segment_api_size(self, value): @@ -226,7 +227,7 @@ :param value: API size. :type value: str """ - path = ['api-segment', 'api-size'] + path = [u"api-segment", u"api-size"] self.add_config_item(self._nodeconfig, value, path) def add_buffers_per_numa(self, value): @@ -235,7 +236,7 @@ :param value: Number of buffers allocated. :type value: int """ - path = ['buffers', 'buffers-per-numa'] + path = [u"buffers", u"buffers-per-numa"] self.add_config_item(self._nodeconfig, value, path) def add_dpdk_dev(self, *devices): @@ -246,8 +247,8 @@ """ for device in devices: if pci_dev_check(device): - path = ['dpdk', 'dev {0}'.format(device)] - self.add_config_item(self._nodeconfig, '', path) + path = [u"dpdk", f"dev {device}"] + self.add_config_item(self._nodeconfig, u"", path) def add_dpdk_dev_parameter(self, device, parameter, value): """Add parameter for DPDK device. 
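For orientation, the nested dict that add_config_item builds is rendered by dump_config as brace-delimited sections; e.g. a hypothetical {u"dpdk": {u"dev 0000:00:04.0": u""}} is serialized as:

    dpdk
    {
      dev 0000:00:04.0
    }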
@@ -260,7 +261,7 @@ class VppConfigGenerator(object): :type value: str """ if pci_dev_check(device): - path = ['dpdk', 'dev {0}'.format(device), parameter] + path = [u"dpdk", f"dev {device}", parameter] self.add_config_item(self._nodeconfig, value, path) def add_dpdk_cryptodev(self, count): """Add DPDK Crypto device configuration. @@ -271,11 +272,10 @@ :param count: Number of crypto devices to add. :type count: int """ cryptodev = Topology.get_cryptodev(self._node) for i in range(count): - cryptodev_config = 'dev {0}'.format( - re.sub(r'\d.\d$', '1.'+str(i), cryptodev)) - path = ['dpdk', cryptodev_config] - self.add_config_item(self._nodeconfig, '', path) - self.add_dpdk_uio_driver('vfio-pci') + cryptodev_config = re.sub(r"\d.\d$", f"1.{str(i)}", cryptodev) + path = [u"dpdk", f"dev {cryptodev_config}"] + self.add_config_item(self._nodeconfig, u"", path) + self.add_dpdk_uio_driver(u"vfio-pci") def add_dpdk_sw_cryptodev(self, sw_pmd_type, socket_id, count): """Add DPDK SW Crypto device configuration. @@ -288,10 +288,10 @@ :param sw_pmd_type: Type of SW crypto device PMD to add. :param socket_id: Socket ID. :param count: Number of SW crypto devices to add. :type sw_pmd_type: str :type socket_id: int :type count: int """ for _ in range(count): - cryptodev_config = 'vdev cryptodev_{0}_pmd,socket_id={1}'.\ - format(sw_pmd_type, str(socket_id)) - path = ['dpdk', cryptodev_config] - self.add_config_item(self._nodeconfig, '', path) + cryptodev_config = f"vdev cryptodev_{sw_pmd_type}_pmd," \ + f"socket_id={str(socket_id)}" + path = [u"dpdk", cryptodev_config] + self.add_config_item(self._nodeconfig, u"", path) def add_dpdk_eth_bond_dev(self, ethbond_id, mode, xmit_policy, *slaves): """Add DPDK Eth_bond device configuration. @@ -305,16 +305,13 @@ :param ethbond_id: Eth_bond device ID. :param mode: Eth_bond mode. :param xmit_policy: Transmit policy. :param slaves: PCI device(s). :type ethbond_id: str or int :type mode: str or int :type xmit_policy: str :type slaves: list """ - slaves_config = ',slave=' + \ - ',slave='.join(slave if pci_dev_check(slave) else '' - for slave in slaves) - ethbond_config = 'vdev eth_bond{id},mode={mode}{slaves},' \ - 'xmit_policy={xmit_pol}'.format(id=ethbond_id, - mode=mode, - slaves=slaves_config, - xmit_pol=xmit_policy) - path = ['dpdk', ethbond_config] - self.add_config_item(self._nodeconfig, '', path) + slaves_config = u"slave=" + u",slave=".join( + slave if pci_dev_check(slave) else u"" for slave in slaves + ) + ethbond_config = f"vdev eth_bond{ethbond_id}," \ + f"mode={mode},{slaves_config},xmit_policy={xmit_policy}" + path = [u"dpdk", ethbond_config] + self.add_config_item(self._nodeconfig, u"", path) def add_dpdk_dev_default_rxq(self, value): """Add DPDK dev default rxq configuration. @@ -322,7 +319,7 @@ :param value: Default number of rxqs. :type value: str """ - path = ['dpdk', 'dev default', 'num-rx-queues'] + path = [u"dpdk", u"dev default", u"num-rx-queues"] self.add_config_item(self._nodeconfig, value, path) def add_dpdk_dev_default_txq(self, value): """Add DPDK dev default txq configuration. @@ -331,7 +328,7 @@ :param value: Default number of txqs. :type value: str """ - path = ['dpdk', 'dev default', 'num-tx-queues'] + path = [u"dpdk", u"dev default", u"num-tx-queues"] self.add_config_item(self._nodeconfig, value, path) def add_dpdk_dev_default_rxd(self, value): """Add DPDK dev default rxd configuration. @@ -340,7 +337,7 @@ :param value: Default number of rxds. :type value: str """ - path = ['dpdk', 'dev default', 'num-rx-desc'] + path = [u"dpdk", u"dev default", u"num-rx-desc"] self.add_config_item(self._nodeconfig, value, path) def add_dpdk_dev_default_txd(self, value): """Add DPDK dev default txd configuration. @@ -349,7 +346,7 @@ :param value: Default number of txds. 
:type value: str """ - path = ['dpdk', 'dev default', 'num-tx-desc'] + path = [u"dpdk", u"dev default", u"num-tx-desc"] self.add_config_item(self._nodeconfig, value, path) def add_dpdk_log_level(self, value): @@ -358,25 +355,25 @@ class VppConfigGenerator(object): :param value: Log level. :type value: str """ - path = ['dpdk', 'log-level'] + path = [u"dpdk", u"log-level"] self.add_config_item(self._nodeconfig, value, path) def add_dpdk_no_pci(self): """Add DPDK no-pci.""" - path = ['dpdk', 'no-pci'] - self.add_config_item(self._nodeconfig, '', path) + path = [u"dpdk", u"no-pci"] + self.add_config_item(self._nodeconfig, u"", path) def add_dpdk_uio_driver(self, value=None): """Add DPDK uio-driver configuration. :param value: DPDK uio-driver configuration. By default, driver will be - loaded automatically from Topology file, still leaving - option to manually override by parameter. + loaded automatically from Topology file, still leaving option + to manually override by parameter. :type value: str """ if value is None: value = Topology.get_uio_driver(self._node) - path = ['dpdk', 'uio-driver'] + path = [u"dpdk", u"uio-driver"] self.add_config_item(self._nodeconfig, value, path) def add_cpu_main_core(self, value): @@ -385,7 +382,7 @@ class VppConfigGenerator(object): :param value: Main core option. :type value: str """ - path = ['cpu', 'main-core'] + path = [u"cpu", u"main-core"] self.add_config_item(self._nodeconfig, value, path) def add_cpu_corelist_workers(self, value): @@ -394,7 +391,7 @@ class VppConfigGenerator(object): :param value: Corelist-workers option. :type value: str """ - path = ['cpu', 'corelist-workers'] + path = [u"cpu", u"corelist-workers"] self.add_config_item(self._nodeconfig, value, path) def add_heapsize(self, value): @@ -403,13 +400,13 @@ class VppConfigGenerator(object): :param value: Amount of heapsize. :type value: str """ - path = ['heapsize'] + path = [u"heapsize"] self.add_config_item(self._nodeconfig, value, path) def add_api_trace(self): """Add API trace configuration.""" - path = ['api-trace', 'on'] - self.add_config_item(self._nodeconfig, '', path) + path = [u"api-trace", u"on"] + self.add_config_item(self._nodeconfig, u"", path) def add_ip6_hash_buckets(self, value): """Add IP6 hash buckets configuration. @@ -417,7 +414,7 @@ class VppConfigGenerator(object): :param value: Number of IP6 hash buckets. :type value: str """ - path = ['ip6', 'hash-buckets'] + path = [u"ip6", u"hash-buckets"] self.add_config_item(self._nodeconfig, value, path) def add_ip6_heap_size(self, value): @@ -426,7 +423,7 @@ class VppConfigGenerator(object): :param value: IP6 Heapsize amount. :type value: str """ - path = ['ip6', 'heap-size'] + path = [u"ip6", u"heap-size"] self.add_config_item(self._nodeconfig, value, path) def add_ip_heap_size(self, value): @@ -435,7 +432,7 @@ class VppConfigGenerator(object): :param value: IP Heapsize amount. :type value: str """ - path = ['ip', 'heap-size'] + path = [u"ip", u"heap-size"] self.add_config_item(self._nodeconfig, value, path) def add_statseg_size(self, value): @@ -444,7 +441,7 @@ class VppConfigGenerator(object): :param value: Stats heapsize amount. :type value: str """ - path = ['statseg', 'size'] + path = [u"statseg", u"size"] self.add_config_item(self._nodeconfig, value, path) def add_statseg_per_node_counters(self, value): @@ -453,7 +450,7 @@ class VppConfigGenerator(object): :param value: "on" to switch the counters on. 
:type value: str """ - path = ['statseg', 'per-node-counters'] + path = [u"statseg", u"per-node-counters"] self.add_config_item(self._nodeconfig, value, path) def add_plugin(self, state, *plugins): @@ -465,26 +462,26 @@ class VppConfigGenerator(object): :type plugins: list """ for plugin in plugins: - path = ['plugins', 'plugin {0}'.format(plugin), state] - self.add_config_item(self._nodeconfig, ' ', path) + path = [u"plugins", f"plugin {plugin}", state] + self.add_config_item(self._nodeconfig, u" ", path) def add_dpdk_no_multi_seg(self): """Add DPDK no-multi-seg configuration.""" - path = ['dpdk', 'no-multi-seg'] - self.add_config_item(self._nodeconfig, '', path) + path = [u"dpdk", u"no-multi-seg"] + self.add_config_item(self._nodeconfig, u"", path) def add_dpdk_no_tx_checksum_offload(self): """Add DPDK no-tx-checksum-offload configuration.""" - path = ['dpdk', 'no-tx-checksum-offload'] - self.add_config_item(self._nodeconfig, '', path) + path = [u"dpdk", u"no-tx-checksum-offload"] + self.add_config_item(self._nodeconfig, u"", path) - def add_nat(self, value='deterministic'): + def add_nat(self, value=u"deterministic"): """Add NAT configuration. :param value: NAT mode. :type value: str """ - path = ['nat'] + path = [u"nat"] self.add_config_item(self._nodeconfig, value, path) def add_tcp_preallocated_connections(self, value): @@ -493,7 +490,7 @@ class VppConfigGenerator(object): :param value: The number of pre-allocated connections. :type value: int """ - path = ['tcp', 'preallocated-connections'] + path = [u"tcp", u"preallocated-connections"] self.add_config_item(self._nodeconfig, value, path) def add_tcp_preallocated_half_open_connections(self, value): @@ -502,7 +499,7 @@ class VppConfigGenerator(object): :param value: The number of pre-allocated half open connections. :type value: int """ - path = ['tcp', 'preallocated-half-open-connections'] + path = [u"tcp", u"preallocated-half-open-connections"] self.add_config_item(self._nodeconfig, value, path) def add_session_event_queue_length(self, value): @@ -511,7 +508,7 @@ class VppConfigGenerator(object): :param value: Session event queue length. :type value: int """ - path = ['session', 'event-queue-length'] + path = [u"session", u"event-queue-length"] self.add_config_item(self._nodeconfig, value, path) def add_session_preallocated_sessions(self, value): @@ -520,7 +517,7 @@ class VppConfigGenerator(object): :param value: Number of pre-allocated sessions. :type value: int """ - path = ['session', 'preallocated-sessions'] + path = [u"session", u"preallocated-sessions"] self.add_config_item(self._nodeconfig, value, path) def add_session_v4_session_table_buckets(self, value): @@ -529,7 +526,7 @@ class VppConfigGenerator(object): :param value: Number of v4 session table buckets. :type value: int """ - path = ['session', 'v4-session-table-buckets'] + path = [u"session", u"v4-session-table-buckets"] self.add_config_item(self._nodeconfig, value, path) def add_session_v4_session_table_memory(self, value): @@ -538,7 +535,7 @@ class VppConfigGenerator(object): :param value: Size of v4 session table memory. :type value: str """ - path = ['session', 'v4-session-table-memory'] + path = [u"session", u"v4-session-table-memory"] self.add_config_item(self._nodeconfig, value, path) def add_session_v4_halfopen_table_buckets(self, value): @@ -547,7 +544,7 @@ class VppConfigGenerator(object): :param value: Number of v4 halfopen table buckets. 
:type value: int """ - path = ['session', 'v4-halfopen-table-buckets'] + path = [u"session", u"v4-halfopen-table-buckets"] self.add_config_item(self._nodeconfig, value, path) def add_session_v4_halfopen_table_memory(self, value): @@ -556,7 +553,7 @@ class VppConfigGenerator(object): :param value: Size of v4 halfopen table memory. :type value: str """ - path = ['session', 'v4-halfopen-table-memory'] + path = [u"session", u"v4-halfopen-table-memory"] self.add_config_item(self._nodeconfig, value, path) def add_session_local_endpoints_table_buckets(self, value): @@ -565,7 +562,7 @@ class VppConfigGenerator(object): :param value: Number of local endpoints table buckets. :type value: int """ - path = ['session', 'local-endpoints-table-buckets'] + path = [u"session", u"local-endpoints-table-buckets"] self.add_config_item(self._nodeconfig, value, path) def add_session_local_endpoints_table_memory(self, value): @@ -574,7 +571,7 @@ class VppConfigGenerator(object): :param value: Size of local endpoints table memory. :type value: str """ - path = ['session', 'local-endpoints-table-memory'] + path = [u"session", u"local-endpoints-table-memory"] self.add_config_item(self._nodeconfig, value, path) def write_config(self, filename=None): @@ -592,15 +589,15 @@ class VppConfigGenerator(object): filename = self._vpp_startup_conf if self._vpp_startup_conf_backup is not None: - cmd = ('cp {src} {dest}'.format( - src=self._vpp_startup_conf, dest=self._vpp_startup_conf_backup)) + cmd = f"cp {self._vpp_startup_conf} {self._vpp_startup_conf_backup}" exec_cmd_no_error( - self._node, cmd, sudo=True, message='Copy config file failed!') + self._node, cmd, sudo=True, message=u"Copy config file failed!" + ) - cmd = ('echo "{config}" | sudo tee {filename}'.format( - config=self._vpp_config, filename=filename)) + cmd = f"echo \"{self._vpp_config}\" | sudo tee {filename}" exec_cmd_no_error( - self._node, cmd, message='Writing config file failed!') + self._node, cmd, message=u"Writing config file failed!" + ) def apply_config(self, filename=None, verify_vpp=True): """Generate and write VPP startup configuration to file and restart VPP. @@ -621,7 +618,7 @@ class VppConfigGenerator(object): def restore_config(self): """Restore VPP startup.conf from backup.""" - cmd = ('cp {src} {dest}'.format( - src=self._vpp_startup_conf_backup, dest=self._vpp_startup_conf)) + cmd = f"cp {self._vpp_startup_conf_backup} {self._vpp_startup_conf}" exec_cmd_no_error( - self._node, cmd, sudo=True, message='Copy config file failed!') + self._node, cmd, sudo=True, message=u"Copy config file failed!" + ) diff --git a/resources/libraries/python/VppCounters.py b/resources/libraries/python/VppCounters.py index c115efcdb5..bb8a8d2c28 100644 --- a/resources/libraries/python/VppCounters.py +++ b/resources/libraries/python/VppCounters.py @@ -16,12 +16,13 @@ from pprint import pformat from robot.api import logger -from resources.libraries.python.PapiExecutor import PapiExecutor -from resources.libraries.python.PapiExecutor import PapiSocketExecutor + +from resources.libraries.python.PapiExecutor import PapiExecutor, \ + PapiSocketExecutor from resources.libraries.python.topology import Topology, SocketType, NodeType -class VppCounters(object): +class VppCounters: """VPP counters utilities.""" def __init__(self): @@ -34,8 +35,7 @@ class VppCounters(object): :param node: Node to run command on. 
:type node: dict """ - PapiSocketExecutor.run_cli_cmd_on_all_sockets( - node, 'show errors') + PapiSocketExecutor.run_cli_cmd_on_all_sockets(node, u"show errors") @staticmethod def vpp_show_errors_on_all_duts(nodes): @@ -45,7 +45,7 @@ class VppCounters(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VppCounters.vpp_show_errors(node) @staticmethod @@ -57,46 +57,46 @@ class VppCounters(object): :type node: dict :type log_zeros: bool """ - args = dict(path='^/sys/node') + args = dict(path=u"^/sys/node") sockets = Topology.get_node_sockets(node, socket_type=SocketType.STATS) if sockets: for socket in sockets.values(): with PapiExecutor(node) as papi_exec: - stats = papi_exec.add("vpp-stats", **args).\ + stats = papi_exec.add(u"vpp-stats", **args).\ get_stats(socket=socket)[0] - names = stats['/sys/node/names'] + names = stats[u"/sys/node/names"] if not names: return - runtime = [] - runtime_nz = [] + runtime = list() + runtime_nz = list() for name in names: - runtime.append({'name': name}) + runtime.append({u"name": name}) for idx, runtime_item in enumerate(runtime): calls_th = [] - for thread in stats['/sys/node/calls']: + for thread in stats[u"/sys/node/calls"]: calls_th.append(thread[idx]) - runtime_item["calls"] = calls_th + runtime_item[u"calls"] = calls_th vectors_th = [] - for thread in stats['/sys/node/vectors']: + for thread in stats[u"/sys/node/vectors"]: vectors_th.append(thread[idx]) - runtime_item["vectors"] = vectors_th + runtime_item[u"vectors"] = vectors_th suspends_th = [] - for thread in stats['/sys/node/suspends']: + for thread in stats[u"/sys/node/suspends"]: suspends_th.append(thread[idx]) - runtime_item["suspends"] = suspends_th + runtime_item[u"suspends"] = suspends_th clocks_th = [] - for thread in stats['/sys/node/clocks']: + for thread in stats[u"/sys/node/clocks"]: clocks_th.append(thread[idx]) - runtime_item["clocks"] = clocks_th + runtime_item[u"clocks"] = clocks_th if (sum(calls_th) or sum(vectors_th) or sum(suspends_th) or sum(clocks_th)): @@ -104,16 +104,14 @@ class VppCounters(object): if log_zeros: logger.info( - "stats runtime ({host} - {socket}):\n{runtime}". - format( - host=node['host'], runtime=pformat(runtime), - socket=socket)) + f"stats runtime ({node[u'host']} - {socket}):\n" + f"{pformat(runtime)}" + ) else: logger.info( - "stats runtime ({host} - {socket}):\n{runtime}". 
- format( - host=node['host'], runtime=pformat(runtime_nz), - socket=socket)) + f"stats runtime ({node[u'host']} - {socket}):\n" + f"{pformat(runtime_nz)}" + ) @staticmethod def vpp_show_runtime_counters_on_all_duts(nodes): @@ -123,7 +121,7 @@ class VppCounters(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VppCounters.vpp_show_runtime(node) @staticmethod @@ -134,7 +132,8 @@ class VppCounters(object): :type node: dict """ PapiSocketExecutor.run_cli_cmd_on_all_sockets( - node, 'show hardware verbose') + node, u"show hardware verbose" + ) @staticmethod def vpp_show_memory(node): @@ -146,7 +145,8 @@ class VppCounters(object): :type node: dict """ PapiSocketExecutor.run_cli_cmd( - node, 'show memory verbose api-segment stats-segment main-heap') + node, u"show memory verbose api-segment stats-segment main-heap" + ) @staticmethod def vpp_clear_runtime(node): @@ -156,7 +156,8 @@ class VppCounters(object): :type node: dict """ PapiSocketExecutor.run_cli_cmd_on_all_sockets( - node, 'clear runtime', log=False) + node, u"clear runtime", log=False + ) @staticmethod def vpp_clear_runtime_counters_on_all_duts(nodes): @@ -166,7 +167,7 @@ class VppCounters(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VppCounters.vpp_clear_runtime(node) @staticmethod @@ -179,7 +180,8 @@ class VppCounters(object): :rtype: dict """ PapiSocketExecutor.run_cli_cmd_on_all_sockets( - node, 'clear hardware', log=False) + node, u"clear hardware", log=False + ) @staticmethod def vpp_clear_hardware_counters_on_all_duts(nodes): @@ -189,7 +191,7 @@ class VppCounters(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VppCounters.vpp_clear_hardware_counters(node) @staticmethod @@ -200,7 +202,8 @@ class VppCounters(object): :type node: dict """ PapiSocketExecutor.run_cli_cmd_on_all_sockets( - node, 'clear errors', log=False) + node, u"clear errors", log=False + ) @staticmethod def vpp_clear_error_counters_on_all_duts(nodes): @@ -210,7 +213,7 @@ class VppCounters(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VppCounters.vpp_clear_errors_counters(node) @staticmethod @@ -233,7 +236,7 @@ class VppCounters(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VppCounters.show_vpp_statistics(node) @staticmethod @@ -255,5 +258,5 @@ class VppCounters(object): :type nodes: dict """ for node in nodes.values(): - if node['type'] == NodeType.DUT: + if node[u"type"] == NodeType.DUT: VppCounters.clear_vpp_statistics(node) diff --git a/resources/libraries/python/autogen/Regenerator.py b/resources/libraries/python/autogen/Regenerator.py index eb5f9bda68..7271fbcf37 100644 --- a/resources/libraries/python/autogen/Regenerator.py +++ b/resources/libraries/python/autogen/Regenerator.py @@ -13,44 +13,39 @@ """Module defining utilities for test directory regeneration.""" -from __future__ import print_function +import sys from glob import glob +from io import open from os import getcwd -import sys + from resources.libraries.python.Constants import Constants from resources.libraries.python.autogen.Testcase import Testcase PROTOCOL_TO_MIN_FRAME_SIZE = { - "ip4": 64, - "ip6": 78, - "ethip4vxlan": 114, # What is the real minimum for latency stream? 
- "dot1qip4vxlan": 118 + u"ip4": 64, + u"ip6": 78, + u"ethip4vxlan": 114, # What is the real minimum for latency stream? + u"dot1qip4vxlan": 118 } -MIN_FRAME_SIZE_VALUES = PROTOCOL_TO_MIN_FRAME_SIZE.values() - - -# Copied from https://stackoverflow.com/a/14981125 -def eprint(*args, **kwargs): - """Print to stderr.""" - print(*args, file=sys.stderr, **kwargs) +MIN_FRAME_SIZE_VALUES = list(PROTOCOL_TO_MIN_FRAME_SIZE.values()) def replace_defensively( whole, to_replace, replace_with, how_many, msg, in_filename): - """Replace substrings while checking the number of occurences. + """Replace substrings while checking the number of occurrences. Return edited copy of the text. Assuming "whole" is really a string, or something else with .replace not affecting it. :param whole: The text to perform replacements on. - :param to_replace: Substring occurences of which to replace. - :param replace_with: Substring to replace occurences with. - :param how_many: Number of occurences to expect. + :param to_replace: Substring occurrences of which to replace. + :param replace_with: Substring to replace occurrences with. + :param how_many: Number of occurrences to expect. :param msg: Error message to raise. - :param in_filename: File name in which the error occured. + :param in_filename: File name in which the error occurred. :type whole: str :type to_replace: str :type replace_with: str @@ -59,11 +54,11 @@ def replace_defensively( :type in_filename: str :return: The whole text after replacements are done. :rtype: str - :raise ValueError: If number of occurences does not match. + :raise ValueError: If number of occurrences does not match. """ found = whole.count(to_replace) if found != how_many: - raise ValueError(in_filename + ": " + msg) + raise ValueError(f"{in_filename}: {msg}") return whole.replace(to_replace, replace_with) @@ -73,18 +68,18 @@ def get_iface_and_suite_id(filename): Interface ID is the part of suite name which should be replaced for other NIC. Suite ID is the part os suite name - which si appended to testcase names. + which si appended to test case names. :param filename: Suite file. :type filename: str :returns: Interface ID, Suite ID. :rtype: (str, str) """ - dash_split = filename.split("-", 1) + dash_split = filename.split(u"-", 1) if len(dash_split[0]) <= 4: # It was something like "2n1l", we need one more split. - dash_split = dash_split[1].split("-", 1) - return dash_split[0], dash_split[1].split(".", 1)[0] + dash_split = dash_split[1].split(u"-", 1) + return dash_split[0], dash_split[1].split(u".", 1)[0] def add_default_testcases(testcase, iface, suite_id, file_out, tc_kwargs_list): @@ -106,32 +101,32 @@ def add_default_testcases(testcase, iface, suite_id, file_out, tc_kwargs_list): for num, kwargs in enumerate(tc_kwargs_list, start=1): # TODO: Is there a better way to disable some combinations? emit = True - if kwargs["frame_size"] == 9000: - if "vic1227" in iface: + if kwargs[u"frame_size"] == 9000: + if u"vic1227" in iface: # Not supported in HW. emit = False - if "vic1385" in iface: + if u"vic1385" in iface: # Not supported in HW. emit = False - if "ipsec" in suite_id: + if u"ipsec" in suite_id: # IPsec code does not support chained buffers. # Tracked by Jira ticket VPP-1207. emit = False - if "-16vm2t-" in suite_id or "-16dcr2t-" in suite_id: - if kwargs["phy_cores"] > 3: + if u"-16vm2t-" in suite_id or u"-16dcr2t-" in suite_id: + if kwargs[u"phy_cores"] > 3: # CSIT lab only has 28 (physical) core processors, # so these test would fail when attempting to assign cores. 
emit = False - if "-24vm1t-" in suite_id or "-24dcr1t-" in suite_id: - if kwargs["phy_cores"] > 3: + if u"-24vm1t-" in suite_id or u"-24dcr1t-" in suite_id: + if kwargs[u"phy_cores"] > 3: # CSIT lab only has 28 (physical) core processors, # so these test would fail when attempting to assign cores. emit = False - if "soak" in suite_id: + if u"soak" in suite_id: # Soak test take too long, do not risk other than tc01. - if kwargs["phy_cores"] != 1: + if kwargs[u"phy_cores"] != 1: emit = False - if kwargs["frame_size"] not in MIN_FRAME_SIZE_VALUES: + if kwargs[u"frame_size"] not in MIN_FRAME_SIZE_VALUES: emit = False if emit: file_out.write(testcase.generate(num=num, **kwargs)) @@ -163,52 +158,61 @@ def write_default_files(in_filename, in_prolog, kwargs_list): """ for suite_type in Constants.PERF_TYPE_TO_KEYWORD: tmp_filename = replace_defensively( - in_filename, "ndrpdr", suite_type, 1, - "File name should contain suite type once.", in_filename) + in_filename, u"ndrpdr", suite_type, 1, + u"File name should contain suite type once.", in_filename + ) tmp_prolog = replace_defensively( - in_prolog, "ndrpdr".upper(), suite_type.upper(), 1, - "Suite type should appear once in uppercase (as tag).", - in_filename) + in_prolog, u"ndrpdr".upper(), suite_type.upper(), 1, + u"Suite type should appear once in uppercase (as tag).", + in_filename + ) tmp_prolog = replace_defensively( tmp_prolog, - "Find NDR and PDR intervals using optimized search", + u"Find NDR and PDR intervals using optimized search", Constants.PERF_TYPE_TO_KEYWORD[suite_type], 1, - "Main search keyword should appear once in suite.", - in_filename) + u"Main search keyword should appear once in suite.", + in_filename + ) tmp_prolog = replace_defensively( tmp_prolog, - Constants.PERF_TYPE_TO_SUITE_DOC_VER["ndrpdr"], + Constants.PERF_TYPE_TO_SUITE_DOC_VER[u"ndrpdr"], Constants.PERF_TYPE_TO_SUITE_DOC_VER[suite_type], - 1, "Exact suite type doc not found.", in_filename) + 1, u"Exact suite type doc not found.", in_filename + ) tmp_prolog = replace_defensively( tmp_prolog, - Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER["ndrpdr"], + Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[u"ndrpdr"], Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[suite_type], - 1, "Exact template type doc not found.", in_filename) + 1, u"Exact template type doc not found.", in_filename + ) _, suite_id = get_iface_and_suite_id(tmp_filename) testcase = Testcase.default(suite_id) for nic_name in Constants.NIC_NAME_TO_CODE: out_filename = replace_defensively( - tmp_filename, "10ge2p1x710", + tmp_filename, u"10ge2p1x710", Constants.NIC_NAME_TO_CODE[nic_name], 1, - "File name should contain NIC code once.", in_filename) + u"File name should contain NIC code once.", in_filename + ) out_prolog = replace_defensively( - tmp_prolog, "Intel-X710", nic_name, 2, - "NIC name should appear twice (tag and variable).", - in_filename) - if out_prolog.count("HW_") == 2: + tmp_prolog, u"Intel-X710", nic_name, 2, + u"NIC name should appear twice (tag and variable).", + in_filename + ) + if out_prolog.count(u"HW_") == 2: # TODO CSIT-1481: Crypto HW should be read # from topology file instead. 
if nic_name in Constants.NIC_NAME_TO_CRYPTO_HW: out_prolog = replace_defensively( - out_prolog, "HW_DH895xcc", + out_prolog, u"HW_DH895xcc", Constants.NIC_NAME_TO_CRYPTO_HW[nic_name], 1, - "HW crypto name should appear.", in_filename) + u"HW crypto name should appear.", in_filename + ) iface, suite_id = get_iface_and_suite_id(out_filename) - with open(out_filename, "w") as file_out: + with open(out_filename, "wt") as file_out: file_out.write(out_prolog) add_default_testcases( - testcase, iface, suite_id, file_out, kwargs_list) + testcase, iface, suite_id, file_out, kwargs_list + ) def write_reconf_files(in_filename, in_prolog, kwargs_list): @@ -229,26 +233,30 @@ def write_reconf_files(in_filename, in_prolog, kwargs_list): testcase = Testcase.default(suite_id) for nic_name in Constants.NIC_NAME_TO_CODE: out_filename = replace_defensively( - in_filename, "10ge2p1x710", + in_filename, u"10ge2p1x710", Constants.NIC_NAME_TO_CODE[nic_name], 1, - "File name should contain NIC code once.", in_filename) + u"File name should contain NIC code once.", in_filename + ) out_prolog = replace_defensively( - in_prolog, "Intel-X710", nic_name, 2, - "NIC name should appear twice (tag and variable).", - in_filename) - if out_prolog.count("HW_") == 2: + in_prolog, u"Intel-X710", nic_name, 2, + u"NIC name should appear twice (tag and variable).", + in_filename + ) + if out_prolog.count(u"HW_") == 2: # TODO CSIT-1481: Crypto HW should be read # from topology file instead. - if nic_name in Constants.NIC_NAME_TO_CRYPTO_HW.keys(): + if nic_name in list(Constants.NIC_NAME_TO_CRYPTO_HW.keys()): out_prolog = replace_defensively( - out_prolog, "HW_DH895xcc", + out_prolog, u"HW_DH895xcc", Constants.NIC_NAME_TO_CRYPTO_HW[nic_name], 1, - "HW crypto name should appear.", in_filename) + u"HW crypto name should appear.", in_filename + ) iface, suite_id = get_iface_and_suite_id(out_filename) - with open(out_filename, "w") as file_out: + with open(out_filename, "wt") as file_out: file_out.write(out_prolog) add_default_testcases( - testcase, iface, suite_id, file_out, kwargs_list) + testcase, iface, suite_id, file_out, kwargs_list + ) def write_tcp_files(in_filename, in_prolog, kwargs_list): @@ -266,19 +274,21 @@ def write_tcp_files(in_filename, in_prolog, kwargs_list): testcase = Testcase.tcp(suite_id) for nic_name in Constants.NIC_NAME_TO_CODE: out_filename = replace_defensively( - in_filename, "10ge2p1x710", + in_filename, u"10ge2p1x710", Constants.NIC_NAME_TO_CODE[nic_name], 1, - "File name should contain NIC code once.", in_filename) + u"File name should contain NIC code once.", in_filename + ) out_prolog = replace_defensively( - in_prolog, "Intel-X710", nic_name, 2, - "NIC name should appear twice (tag and variable).", - in_filename) - with open(out_filename, "w") as file_out: + in_prolog, u"Intel-X710", nic_name, 2, + u"NIC name should appear twice (tag and variable).", + in_filename + ) + with open(out_filename, "wt") as file_out: file_out.write(out_prolog) add_tcp_testcases(testcase, file_out, kwargs_list) -class Regenerator(object): +class Regenerator: """Class containing file generating methods.""" def __init__(self, quiet=True): @@ -289,15 +299,15 @@ class Regenerator(object): """ self.quiet = quiet - def regenerate_glob(self, pattern, protocol="ip4"): + def regenerate_glob(self, pattern, protocol=u"ip4"): """Regenerate files matching glob pattern based on arguments. In the current working directory, find all files matching the glob pattern. 
Use testcase template according to suffix - to regenerate test cases, autonumbering them, + to regenerate test cases, auto-numbering them, taking arguments from list. - Log-like prints are emited to sys.stderr. + Log-like prints are emitted to sys.stderr. :param pattern: Glob pattern to select files. Example: *-ndrpdr.robot :param protocol: String determining minimal frame size. Default: "ip4" @@ -306,45 +316,50 @@ class Regenerator(object): :raises RuntimeError: If invalid source suite is encountered. """ if not self.quiet: - eprint("Regenerator starts at {cwd}".format(cwd=getcwd())) + print(f"Regenerator starts at {getcwd()}", file=sys.stderr) min_frame_size = PROTOCOL_TO_MIN_FRAME_SIZE[protocol] default_kwargs_list = [ - {"frame_size": min_frame_size, "phy_cores": 1}, - {"frame_size": min_frame_size, "phy_cores": 2}, - {"frame_size": min_frame_size, "phy_cores": 4}, - {"frame_size": 1518, "phy_cores": 1}, - {"frame_size": 1518, "phy_cores": 2}, - {"frame_size": 1518, "phy_cores": 4}, - {"frame_size": 9000, "phy_cores": 1}, - {"frame_size": 9000, "phy_cores": 2}, - {"frame_size": 9000, "phy_cores": 4}, - {"frame_size": "IMIX_v4_1", "phy_cores": 1}, - {"frame_size": "IMIX_v4_1", "phy_cores": 2}, - {"frame_size": "IMIX_v4_1", "phy_cores": 4} + {u"frame_size": min_frame_size, u"phy_cores": 1}, + {u"frame_size": min_frame_size, u"phy_cores": 2}, + {u"frame_size": min_frame_size, u"phy_cores": 4}, + {u"frame_size": 1518, u"phy_cores": 1}, + {u"frame_size": 1518, u"phy_cores": 2}, + {u"frame_size": 1518, u"phy_cores": 4}, + {u"frame_size": 9000, u"phy_cores": 1}, + {u"frame_size": 9000, u"phy_cores": 2}, + {u"frame_size": 9000, u"phy_cores": 4}, + {u"frame_size": u"IMIX_v4_1", u"phy_cores": 1}, + {u"frame_size": u"IMIX_v4_1", u"phy_cores": 2}, + {u"frame_size": u"IMIX_v4_1", u"phy_cores": 4} + ] + tcp_kwargs_list = [ + {u"phy_cores": i, u"frame_size": 0} for i in (1, 2, 4) ] - tcp_kwargs_list = [{"phy_cores": i, "frame_size": 0} for i in (1, 2, 4)] for in_filename in glob(pattern): if not self.quiet: - eprint("Regenerating in_filename:", in_filename) + print( + u"Regenerating in_filename:", in_filename, file=sys.stderr + ) iface, _ = get_iface_and_suite_id(in_filename) - if not iface.endswith("10ge2p1x710"): + if not iface.endswith(u"10ge2p1x710"): raise RuntimeError( - "Error in {fil}: non-primary NIC found.".format( - fil=in_filename)) - with open(in_filename, "r") as file_in: - in_prolog = "".join( - file_in.read().partition("*** Test Cases ***")[:-1]) - if in_filename.endswith("-ndrpdr.robot"): + f"Error in {in_filename}: non-primary NIC found." + ) + with open(in_filename, "rt") as file_in: + in_prolog = u"".join( + file_in.read().partition("*** Test Cases ***")[:-1] + ) + if in_filename.endswith(u"-ndrpdr.robot"): write_default_files(in_filename, in_prolog, default_kwargs_list) - elif in_filename.endswith("-reconf.robot"): + elif in_filename.endswith(u"-reconf.robot"): write_reconf_files(in_filename, in_prolog, default_kwargs_list) - elif in_filename[-10:] in ("-cps.robot", "-rps.robot"): + elif in_filename[-10:] in (u"-cps.robot", u"-rps.robot"): write_tcp_files(in_filename, in_prolog, tcp_kwargs_list) else: raise RuntimeError( - "Error in {fil}: non-primary suite type found.".format( - fil=in_filename)) + f"Error in {in_filename}: non-primary suite type found." + ) if not self.quiet: - eprint("Regenerator ends.") - eprint() # To make autogen check output more readable. + print(u"Regenerator ends.", file=sys.stderr) + print(file=sys.stderr) # To make autogen check output more readable. 
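As a quick illustration of the suite-name parsing the Regenerator relies on throughout, here is a minimal, self-contained sketch of what get_iface_and_suite_id() computes; the suite file name below is hypothetical, but any "<topology>-<nic>-<suite-id>-<type>.robot" name behaves the same way.

# Minimal sketch of get_iface_and_suite_id(); the file name is hypothetical.
filename = u"2n1l-10ge2p1x710-ethip4-ip4base-ndrpdr.robot"
dash_split = filename.split(u"-", 1)
if len(dash_split[0]) <= 4:
    # A short first token such as "2n1l" is a topology prefix; split again.
    dash_split = dash_split[1].split(u"-", 1)
iface = dash_split[0]                       # "10ge2p1x710"
suite_id = dash_split[1].split(u".", 1)[0]  # "ethip4-ip4base-ndrpdr"
print(iface, suite_id)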
diff --git a/resources/libraries/python/autogen/Testcase.py b/resources/libraries/python/autogen/Testcase.py index 70c212211c..224295e1e2 100644 --- a/resources/libraries/python/autogen/Testcase.py +++ b/resources/libraries/python/autogen/Testcase.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -16,7 +16,7 @@ from string import Template -class Testcase(object): +class Testcase: """Class containing a template string and a substitution method.""" def __init__(self, template_string): @@ -52,22 +52,23 @@ class Testcase(object): try: fsize = int(frame_size) subst_dict = { - "frame_num": "${%d}" % fsize, - "frame_str": "%dB" % fsize + u"frame_num": f"${{{fsize:d}}}", + u"frame_str": f"{fsize:d}B" } except ValueError: # Assuming an IMIX string. subst_dict = { - "frame_num": str(frame_size), - "frame_str": "IMIX" + u"frame_num": str(frame_size), + u"frame_str": u"IMIX" } cores_str = str(phy_cores) cores_num = int(cores_str) subst_dict.update( { - "cores_num": "${%d}" % cores_num, - "cores_str": phy_cores, - "tc_num": "tc{num:02d}".format(num=num) - }) + u"cores_num": f"${{{cores_num:d}}}", + u"cores_str": phy_cores, + u"tc_num": f"tc{num:02d}" + } + ) return self.template.substitute(subst_dict) @classmethod @@ -82,10 +83,10 @@ class Testcase(object): :returns: Instance for generating testcase text of this type. :rtype: Testcase """ - template_string = r''' -| ${tc_num}-${frame_str}-${cores_str}c-''' + suite_id + r''' -| | [Tags] | ${frame_str} | ${cores_str}C -| | frame_size=${frame_num} | phy_cores=${cores_num} + template_string = f''' +| ${{tc_num}}-${{frame_str}}-${{cores_str}}c-{suite_id} +| | [Tags] | ${{frame_str}} | ${{cores_str}}C +| | frame_size=${{frame_num}} | phy_cores=${{cores_num}} ''' return cls(template_string) @@ -102,9 +103,9 @@ class Testcase(object): """ # TODO: Choose a better frame size identifier for streamed protocols # (TCP, QUIC, SCTP, ...) where DUT (not TG) decides frame size. - template_string = r''' -| ${tc_num}-IMIX-${cores_str}c-''' + suite_id + r''' -| | [Tags] | ${cores_str}C -| | phy_cores=${cores_num} + template_string = f''' +| ${{tc_num}}-IMIX-${{cores_str}}c-{suite_id} +| | [Tags] | ${{cores_str}}C +| | phy_cores=${{cores_num}} ''' return cls(template_string) diff --git a/resources/libraries/python/parsers/JsonParser.py b/resources/libraries/python/parsers/JsonParser.py index 2c8e62ffe3..c597d5a935 100644 --- a/resources/libraries/python/parsers/JsonParser.py +++ b/resources/libraries/python/parsers/JsonParser.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -14,10 +14,11 @@ """Used to parse JSON files or JSON data strings to dictionaries""" import json -from os import uname +from io import open -class JsonParser(object): + +class JsonParser: """Parses JSON data string or files containing JSON data strings""" def __init__(self): pass @@ -33,23 +34,6 @@ class JsonParser(object): :returns: JSON data parsed as python list. 
:rtype: list
"""
- if "4.2.0-42-generic" in uname():
- # TODO: remove ugly workaround
- # On Ubuntu14.04 the VAT console returns "error:misc" even after
- # some commands execute correctly. This causes problems
- # with parsing JSON data.
- known_errors = ["sw_interface_dump error: Misc",
- "lisp_eid_table_dump error: Misc",
- "show_lisp_status error: Misc",
- "lisp_map_resolver_dump error: Misc",
- "show_lisp_pitr error: Misc",
- "snat_static_mapping_dump error: Misc",
- ]
- for item in known_errors:
- if item in json_data:
- json_data = json_data.replace(item, "")
- print("Removing API error: *{0}* "
- "from JSON output.".format(item))
parsed_data = json.loads(json_data)
return parsed_data
@@ -65,6 +49,6 @@ class JsonParser(object):
:returns: JSON data parsed as python list.
:rtype: list
"""
- input_data = open(json_file).read()
+ input_data = open(json_file, "rt").read()
parsed_data = JsonParser.parse_data(input_data)
return parsed_data
diff --git a/resources/libraries/python/ssh.py b/resources/libraries/python/ssh.py
index 1908da4153..5359a6e5fb 100644
--- a/resources/libraries/python/ssh.py
+++ b/resources/libraries/python/ssh.py
@@ -15,7 +15,8 @@
import socket
-import StringIO
+
+from io import StringIO
from time import time, sleep
from paramiko import RSAKey, SSHClient, AutoAddPolicy
@@ -24,23 +25,23 @@
from robot.api import logger
from scp import SCPClient, SCPException
from resources.libraries.python.OptionString import OptionString
-from resources.libraries.python.PythonThree import raise_from
-__all__ = ["exec_cmd", "exec_cmd_no_error"]
+__all__ = [
+ u"exec_cmd", u"exec_cmd_no_error", u"SSH", u"SSHTimeout", u"scp_node"
+]
# TODO: load priv key
class SSHTimeout(Exception):
"""This exception is raised when a timeout occurs."""
- pass
-class SSH(object):
+class SSH:
"""Contains methods for managing and using SSH connections."""
- __MAX_RECV_BUF = 10*1024*1024
- __existing_connections = {}
+ __MAX_RECV_BUF = 10 * 1024 * 1024
+ __existing_connections = dict()
def __init__(self):
self._ssh = None
@@ -55,8 +56,7 @@ class SSH(object):
:returns: Hash from the node's host and port.
:rtype: int
"""
-
- return hash(frozenset([node['host'], node['port']]))
+ return hash(frozenset([node[u"host"], node[u"port"]]))
def connect(self, node, attempts=5):
"""Connect to node prior to running exec_command or scp.
@@ -74,43 +74,42 @@ class SSH(object):
if node_hash in SSH.__existing_connections:
self._ssh = SSH.__existing_connections[node_hash]
if self._ssh.get_transport().is_active():
- logger.debug('Reusing SSH: {ssh}'.format(ssh=self._ssh))
+ logger.debug(f"Reusing SSH: {self._ssh}")
else:
if attempts > 0:
self._reconnect(attempts-1)
else:
- raise IOError('Cannot connect to {host}'.
- format(host=node['host']))
+ raise IOError(f"Cannot connect to {node['host']}")
else:
try:
start = time()
pkey = None
- if 'priv_key' in node:
- pkey = RSAKey.from_private_key(
- StringIO.StringIO(node['priv_key']))
+ if u"priv_key" in node:
+ pkey = RSAKey.from_private_key(StringIO(node[u"priv_key"]))
self._ssh = SSHClient()
self._ssh.set_missing_host_key_policy(AutoAddPolicy())
- self._ssh.connect(node['host'], username=node['username'],
- password=node.get('password'), pkey=pkey,
- port=node['port'])
+ self._ssh.connect(
+ node[u"host"], username=node[u"username"],
+ password=node.get(u"password"), pkey=pkey,
+ port=node[u"port"]
+ )
self._ssh.get_transport().set_keepalive(10)
SSH.__existing_connections[node_hash] = self._ssh
- logger.debug('New SSH to {peer} took {total} seconds: {ssh}'.
- format( - peer=self._ssh.get_transport().getpeername(), - total=(time() - start), - ssh=self._ssh)) + logger.debug( + f"New SSH to {self._ssh.get_transport().getpeername()} " + f"took {time() - start} seconds: {self._ssh}" + ) except SSHException as exc: - raise_from(IOError('Cannot connect to {host}'.format( - host=node['host'])), exc) + raise IOError(f"Cannot connect to {node[u'host']}") from exc except NoValidConnectionsError as err: - raise_from(IOError( - 'Unable to connect to port {port} on {host}'.format( - port=node['port'], host=node['host'])), err) + raise IOError( + f"Unable to connect to port {node[u'port']} on " + f"{node[u'host']}" + ) from err def disconnect(self, node=None): """Close SSH connection to the node. @@ -124,8 +123,9 @@ class SSH(object): return node_hash = self._node_hash(node) if node_hash in SSH.__existing_connections: - logger.debug('Disconnecting peer: {host}, {port}'. - format(host=node['host'], port=node['port'])) + logger.debug( + f"Disconnecting peer: {node[u'host']}, {node[u'port']}" + ) ssh = SSH.__existing_connections.pop(node_hash) ssh.close() @@ -138,8 +138,9 @@ class SSH(object): node = self._node self.disconnect(node) self.connect(node, attempts) - logger.debug('Reconnecting peer done: {host}, {port}'. - format(host=node['host'], port=node['port'])) + logger.debug( + f"Reconnecting peer done: {node[u'host']}, {node[u'port']}" + ) def exec_command(self, cmd, timeout=10, log_stdout_err=True): """Execute SSH command on a new channel on the connected Node. @@ -160,8 +161,8 @@ class SSH(object): if isinstance(cmd, (list, tuple)): cmd = OptionString(cmd) cmd = str(cmd) - stdout = StringIO.StringIO() - stderr = StringIO.StringIO() + stdout = u"" + stderr = u"" try: chan = self._ssh.get_transport().open_session(timeout=5) peer = self._ssh.get_transport().getpeername() @@ -171,50 +172,58 @@ class SSH(object): peer = self._ssh.get_transport().getpeername() chan.settimeout(timeout) - logger.trace('exec_command on {peer} with timeout {timeout}: {cmd}' - .format(peer=peer, timeout=timeout, cmd=cmd)) + logger.trace(f"exec_command on {peer} with timeout {timeout}: {cmd}") start = time() chan.exec_command(cmd) while not chan.exit_status_ready() and timeout is not None: if chan.recv_ready(): - stdout.write(chan.recv(self.__MAX_RECV_BUF)) + s_out = chan.recv(self.__MAX_RECV_BUF) + stdout += s_out.decode(encoding=u'utf-8', errors=u'ignore') \ + if isinstance(s_out, bytes) else s_out if chan.recv_stderr_ready(): - stderr.write(chan.recv_stderr(self.__MAX_RECV_BUF)) + s_err = chan.recv_stderr(self.__MAX_RECV_BUF) + stderr += s_err.decode(encoding=u'utf-8', errors=u'ignore') \ + if isinstance(s_err, bytes) else s_err if time() - start > timeout: raise SSHTimeout( - 'Timeout exception during execution of command: {cmd}\n' - 'Current contents of stdout buffer: {stdout}\n' - 'Current contents of stderr buffer: {stderr}\n' - .format(cmd=cmd, stdout=stdout.getvalue(), - stderr=stderr.getvalue()) + f"Timeout exception during execution of command: {cmd}\n" + f"Current contents of stdout buffer: " + f"{stdout}\n" + f"Current contents of stderr buffer: " + f"{stderr}\n" ) sleep(0.1) return_code = chan.recv_exit_status() while chan.recv_ready(): - stdout.write(chan.recv(self.__MAX_RECV_BUF)) + s_out = chan.recv(self.__MAX_RECV_BUF) + stdout += s_out.decode(encoding=u'utf-8', errors=u'ignore') \ + if isinstance(s_out, bytes) else s_out while chan.recv_stderr_ready(): - stderr.write(chan.recv_stderr(self.__MAX_RECV_BUF)) + s_err = chan.recv_stderr(self.__MAX_RECV_BUF) + stderr 
+= s_err.decode(encoding=u'utf-8', errors=u'ignore') \ + if isinstance(s_err, bytes) else s_err end = time() - logger.trace('exec_command on {peer} took {total} seconds'. - format(peer=peer, total=end-start)) + logger.trace(f"exec_command on {peer} took {end-start} seconds") - logger.trace('return RC {rc}'.format(rc=return_code)) + logger.trace(f"return RC {return_code}") if log_stdout_err or int(return_code): - logger.trace('return STDOUT {stdout}'. - format(stdout=stdout.getvalue())) - logger.trace('return STDERR {stderr}'. - format(stderr=stderr.getvalue())) - return return_code, stdout.getvalue(), stderr.getvalue() - - def exec_command_sudo(self, cmd, cmd_input=None, timeout=30, - log_stdout_err=True): + logger.trace( + f"return STDOUT {stdout}" + ) + logger.trace( + f"return STDERR {stderr}" + ) + return return_code, stdout, stderr + + def exec_command_sudo( + self, cmd, cmd_input=None, timeout=30, log_stdout_err=True): """Execute SSH command with sudo on a new channel on the connected Node. :param cmd: Command to be executed. @@ -234,21 +243,22 @@ class SSH(object): >>> ssh = SSH() >>> ssh.connect(node) >>> # Execute command without input (sudo -S cmd) - >>> ssh.exec_command_sudo("ifconfig eth0 down") - >>> # Execute command with input (sudo -S cmd <<< "input") - >>> ssh.exec_command_sudo("vpp_api_test", "dump_interface_table") + >>> ssh.exec_command_sudo(u"ifconfig eth0 down") + >>> # Execute command with input (sudo -S cmd <<< 'input') + >>> ssh.exec_command_sudo(u"vpp_api_test", u"dump_interface_table") """ if isinstance(cmd, (list, tuple)): cmd = OptionString(cmd) if cmd_input is None: - command = 'sudo -E -S {c}'.format(c=cmd) + command = f"sudo -E -S {cmd}" else: - command = 'sudo -E -S {c} <<< "{i}"'.format(c=cmd, i=cmd_input) - return self.exec_command(command, timeout, - log_stdout_err=log_stdout_err) + command = f"sudo -E -S {cmd} <<< \"{cmd_input}\"" + return self.exec_command( + command, timeout, log_stdout_err=log_stdout_err + ) - def exec_command_lxc(self, lxc_cmd, lxc_name, lxc_params='', sudo=True, - timeout=30): + def exec_command_lxc( + self, lxc_cmd, lxc_name, lxc_params=u"", sudo=True, timeout=30): """Execute command in LXC on a new SSH channel on the connected Node. :param lxc_cmd: Command to be executed. @@ -263,11 +273,11 @@ class SSH(object): :type timeout: int :returns: return_code, stdout, stderr """ - command = "lxc-attach {p} --name {n} -- /bin/sh -c '{c}'"\ - .format(p=lxc_params, n=lxc_name, c=lxc_cmd) + command = f"lxc-attach {lxc_params} --name {lxc_name} -- /bin/sh " \ + f"-c \"{lxc_cmd}\"" if sudo: - command = 'sudo -E -S {c}'.format(c=command) + command = f"sudo -E -S {command}" return self.exec_command(command, timeout) def interactive_terminal_open(self, time_out=45): @@ -289,18 +299,18 @@ class SSH(object): chan.settimeout(int(time_out)) chan.set_combine_stderr(True) - buf = '' - while not buf.endswith((":~# ", ":~$ ", "~]$ ", "~]# ")): + buf = u"" + while not buf.endswith((u":~# ", u":~$ ", u"~]$ ", u"~]# ")): try: chunk = chan.recv(self.__MAX_RECV_BUF) if not chunk: break buf += chunk if chan.exit_status_ready(): - logger.error('Channel exit status ready') + logger.error(u"Channel exit status ready") break except socket.timeout as exc: - raise_from(Exception('Socket timeout: {0}'.format(buf)), exc) + raise Exception(f"Socket timeout: {buf}") from exc return chan def interactive_terminal_exec_command(self, chan, cmd, prompt): @@ -321,8 +331,8 @@ class SSH(object): from other threads. 
You must not use this in a program that uses SIGALRM itself (this includes certain profilers)
"""
- chan.sendall('{c}\n'.format(c=cmd))
- buf = ''
+ chan.sendall(f"{cmd}\n")
+ buf = u""
while not buf.endswith(prompt):
try:
chunk = chan.recv(self.__MAX_RECV_BUF)
if not chunk:
break
buf += chunk
if chan.exit_status_ready():
- logger.error('Channel exit status ready')
+ logger.error(u"Channel exit status ready")
break
except socket.timeout as exc:
- raise_from(Exception(
- 'Socket timeout during execution of command: '
- '{0}\nBuffer content:\n{1}'.format(cmd, buf)), exc)
- tmp = buf.replace(cmd.replace('\n', ''), '')
+ raise Exception(
+ f"Socket timeout during execution of command: {cmd}\n"
+ f"Buffer content:\n{buf}"
+ ) from exc
+ tmp = buf.replace(cmd.replace(u"\n", u""), u"")
for item in prompt:
- tmp.replace(item, '')
+ tmp = tmp.replace(item, u"")
return tmp
@staticmethod
@@ -349,7 +360,8 @@
"""
chan.close()
- def scp(self, local_path, remote_path, get=False, timeout=30,
+ def scp(
+ self, local_path, remote_path, get=False, timeout=30,
wildcard=False):
"""Copy files from local_path to remote_path or vice versa.
:param local_path: Path to local file that should be uploaded; or path where to save remote file.
:param remote_path: Remote path where to place uploaded file; or path to remote file which should be downloaded.
:param get: scp operation to perform. Default is put.
:param timeout: Timeout value in seconds.
:param wildcard: If path has wildcard characters. Default is false.
:type local_path: str
:type remote_path: str
:type get: bool
:type timeout: int
:type wildcard: bool
"""
if not get:
- logger.trace('SCP {0} to {1}:{2}'.format(
- local_path, self._ssh.get_transport().getpeername(),
- remote_path))
+ logger.trace(
+ f"SCP {local_path} to "
+ f"{self._ssh.get_transport().getpeername()}:{remote_path}"
+ )
else:
- logger.trace('SCP {0}:{1} to {2}'.format(
- self._ssh.get_transport().getpeername(), remote_path,
- local_path))
+ logger.trace(
+ f"SCP {self._ssh.get_transport().getpeername()}:{remote_path} "
+ f"to {local_path}"
+ )
# SCPClient takes a paramiko transport as its only argument
if not wildcard:
scp = SCPClient(self._ssh.get_transport(), socket_timeout=timeout)
else:
- scp = SCPClient(self._ssh.get_transport(), sanitize=lambda x: x,
- socket_timeout=timeout)
+ scp = SCPClient(
+ self._ssh.get_transport(), sanitize=lambda x: x,
+ socket_timeout=timeout
+ )
start = time()
if not get:
scp.put(local_path, remote_path)
@@ -389,7 +405,7 @@
scp.get(remote_path, local_path)
scp.close()
end = time()
- logger.trace('SCP took {0} seconds'.format(end-start))
+ logger.trace(f"SCP took {end-start} seconds")
def exec_cmd(node, cmd, timeout=600, sudo=False, disconnect=False):
@@ -411,50 +427,27 @@
:rtype: tuple(int, str, str)
"""
if node is None:
- raise TypeError('Node parameter is None')
+ raise TypeError(u"Node parameter is None")
if cmd is None:
- raise TypeError('Command parameter is None')
+ raise TypeError(u"Command parameter is None")
if not cmd:
- raise ValueError('Empty command parameter')
+ raise ValueError(u"Empty command parameter")
ssh = SSH()
- if node.get('host_port') is not None:
- ssh_node = dict()
- ssh_node['host'] = '127.0.0.1'
- ssh_node['port'] = node['port']
- ssh_node['username'] = node['username']
- ssh_node['password'] = node['password']
- import pexpect
- options = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
- tnl = '-L {port}:127.0.0.1:{port}'.format(port=node['port'])
- ssh_cmd = 'ssh {tnl} {op} {user}@{host} -p {host_port}'.\
- format(tnl=tnl, op=options, user=node['host_username'],
- host=node['host'], host_port=node['host_port'])
- logger.trace('Initializing local port forwarding:\n{ssh_cmd}'.
- format(ssh_cmd=ssh_cmd))
- child = pexpect.spawn(ssh_cmd)
- child.expect('.* password: ')
- logger.trace(child.after)
- child.sendline(node['host_password'])
- child.expect('Welcome .*')
- logger.trace(child.after)
- logger.trace('Local port forwarding finished.')
- else:
- ssh_node = node
- try:
- ssh.connect(ssh_node)
+ ssh.connect(node)
except SSHException as err:
- logger.error("Failed to connect to node" + repr(err))
+ logger.error(f"Failed to connect to node {node[u'host']}\n{err!r}")
return None, None, None
try:
if not sudo:
- (ret_code, stdout, stderr) = ssh.exec_command(cmd, timeout=timeout)
+ ret_code, stdout, stderr = ssh.exec_command(cmd, timeout=timeout)
else:
- (ret_code, stdout, stderr) = ssh.exec_command_sudo(
- cmd, timeout=timeout)
+ ret_code, stdout, stderr = ssh.exec_command_sudo(
+ cmd, timeout=timeout
+ )
except SSHException as err:
logger.error(repr(err))
return None, None, None
@@ -497,19 +490,16 @@
"""
for _ in range(retries + 1):
ret_code, stdout, stderr = exec_cmd(
- node, cmd, timeout=timeout, sudo=sudo, disconnect=disconnect)
+ node, cmd, timeout=timeout, sudo=sudo, disconnect=disconnect
+ )
if ret_code == 0:
break
sleep(1)
else:
- msg = 'Command execution failed: "{cmd}"\nRC: {rc}\n{stderr}'.format(
- cmd=cmd, rc=ret_code, stderr=stderr)
+ msg = f"Command execution failed: '{cmd}'\nRC: {ret_code}\n{stderr}"
logger.info(msg)
if message:
- if include_reason:
- msg = message + '\n' + msg
- else:
- msg = message
+ msg = f"{message}\n{msg}" if include_reason else message
raise RuntimeError(msg)
return stdout, stderr
@@ -540,13 +530,11 @@
try:
ssh.connect(node)
except SSHException as exc:
- raise_from(RuntimeError(
- 'Failed to connect to {host}!'.format(host=node['host'])), exc)
+ raise RuntimeError(f"Failed to connect to {node[u'host']}!") from exc
try:
ssh.scp(local_path, remote_path, get, timeout)
except SCPException as exc:
- raise_from(RuntimeError(
- 'SCP execution failed on {host}!'.format(host=node['host'])), exc)
+ raise RuntimeError(f"SCP execution failed on {node[u'host']}!") from exc
finally:
if disconnect:
ssh.disconnect()
diff --git a/resources/libraries/python/tcp.py b/resources/libraries/python/tcp.py
index f3a24cbd06..62cff2f4ff 100644
--- a/resources/libraries/python/tcp.py
+++ b/resources/libraries/python/tcp.py
@@ -11,27 +11,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""TCP util library.
-"""
+"""TCP util library."""
-from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.Constants import Constants
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
-class TCPUtils(object):
- """Implementation of the TCP utilities.
- """
- www_root_dir = '{rmt_fw_dir}/{wrk_www}'\
- .format(rmt_fw_dir=Constants.REMOTE_FW_DIR,
- wrk_www=Constants.RESOURCES_TP_WRK_WWW)
+class TCPUtils:
+ """Implementation of the TCP utilities."""
+
+ www_root_dir = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_TP_WRK_WWW}"
def __init__(self):
pass
@classmethod
- def start_vpp_http_server_params(cls, node, http_static_plugin,
- prealloc_fifos, fifo_size,
- private_segment_size):
+ def start_vpp_http_server_params(
+ cls, node, http_static_plugin, prealloc_fifos, fifo_size,
+ private_segment_size):
"""Start the test HTTP server internal application or the HTTP static server plugin internal application on the given node.
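For orientation before the next hunk, which collapses the old if/else command building into a single conditional f-string: here is a minimal sketch of the CLI command that construct produces. The parameter values are hypothetical; in the library, www_root_dir comes from the Constants-based class attribute shown above.

# Hypothetical values; mirrors the conditional f-string in the hunk below.
www_root_dir = u"/tmp/openvpp-testing/resources/traffic_profiles/wrk/www"
http_static_plugin = True
prealloc_fifos, fifo_size, private_segment_size = u"1", u"4096", u"2g"

cmd = f"http static server www-root {www_root_dir} " \
    f"prealloc-fifos {prealloc_fifos} fifo-size {fifo_size} " \
    f"private-segment-size {private_segment_size}" \
    if http_static_plugin \
    else f"test http server static prealloc-fifos {prealloc_fifos} " \
    f"fifo-size {fifo_size} private-segment-size {private_segment_size}"

print(cmd)  # http static server www-root ... private-segment-size 2g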
@@ -73,16 +70,11 @@ class TCPUtils(object): :type fifo_size: str :type private_segment_size: str """ - if http_static_plugin: - cmd = 'http static server www-root {www_root} '\ - 'prealloc-fifos {prealloc_fifos} fifo-size {fifo_size}'\ - ' private-segment-size {pvt_seg_size}'\ - .format(www_root=cls.www_root_dir, - prealloc_fifos=prealloc_fifos, fifo_size=fifo_size, - pvt_seg_size=private_segment_size) - else: - cmd = 'test http server static prealloc-fifos {prealloc_fifos} '\ - 'fifo-size {fifo_size} private-segment-size {pvt_seg_size}'\ - .format(prealloc_fifos=prealloc_fifos, fifo_size=fifo_size, - pvt_seg_size=private_segment_size) + cmd = f"http static server www-root {cls.www_root_dir} " \ + f"prealloc-fifos {prealloc_fifos} fifo-size {fifo_size} " \ + f"private-segment-size {private_segment_size}" \ + if http_static_plugin \ + else f"test http server static prealloc-fifos {prealloc_fifos} " \ + f"fifo-size {fifo_size} private-segment-size {private_segment_size}" + PapiSocketExecutor.run_cli_cmd(node, cmd) diff --git a/resources/libraries/python/telemetry/SPAN.py b/resources/libraries/python/telemetry/SPAN.py deleted file mode 100644 index 268a268a0d..0000000000 --- a/resources/libraries/python/telemetry/SPAN.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2019 Cisco and/or its affiliates. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""SPAN setup library""" - -from resources.libraries.python.topology import Topology -from resources.libraries.python.PapiExecutor import PapiSocketExecutor - - -class SPAN(object): - """Class contains methods for setting up SPAN mirroring on DUTs.""" - - def __init__(self): - """Initializer.""" - pass - - @staticmethod - def vpp_get_span_configuration(node, is_l2=False): - """Get full SPAN configuration from VPP node. - - Used by Honeycomb. - - :param node: DUT node. - :type node: dict - - :returns: Full SPAN configuration as list. One list entry for every - source/destination interface pair. - :rtype: list of dict - """ - cmd = "sw_interface_span_dump" - args = dict( - is_l2=1 if is_l2 else 0 - ) - with PapiSocketExecutor(node) as papi_exec: - details = papi_exec.add(cmd, **args).get_details() - - return details - - @staticmethod - def vpp_get_span_configuration_by_interface(node, dst_interface, - ret_format="sw_if_index"): - """Get a list of all interfaces currently being mirrored - to the specified interface. - - Used by Honeycomb. - - :param node: DUT node. - :param dst_interface: Name, sw_if_index or key of interface. - :param ret_format: Optional. Desired format of returned interfaces. - :type node: dict - :type dst_interface: str or int - :type ret_format: string - :returns: List of SPAN source interfaces for the provided destination - interface. 
- :rtype: list - """ - - data = SPAN.vpp_get_span_configuration(node) - - dst_int = Topology.convert_interface_reference( - node, dst_interface, "sw_if_index") - src_interfaces = [] - for item in data: - if item["sw_if_index_to"] == dst_int: - src_interfaces.append(item["sw_if_index_from"]) - - if ret_format != "sw_if_index": - src_interfaces = [ - Topology.convert_interface_reference( - node, interface, ret_format - ) for interface in src_interfaces] - - return src_interfaces diff --git a/resources/libraries/python/telemetry/__init__.py b/resources/libraries/python/telemetry/__init__.py deleted file mode 100644 index cc11de329f..0000000000 --- a/resources/libraries/python/telemetry/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) 2016 Cisco and/or its affiliates. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -__init__ file for resources/libraries/python/telemetry -""" diff --git a/resources/libraries/python/topology.py b/resources/libraries/python/topology.py index 91578a5ccf..46a6628a0a 100644 --- a/resources/libraries/python/topology.py +++ b/resources/libraries/python/topology.py @@ -17,13 +17,14 @@ import re from collections import Counter -from yaml import load +from yaml import safe_load from robot.api import logger from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError -from robot.api.deco import keyword -__all__ = ["DICT__nodes", 'Topology', 'NodeType', 'SocketType'] +__all__ = [ + u"DICT__nodes", u"Topology", u"NodeType", u"SocketType", u"NodeSubTypeTG" +] def load_topo_from_yaml(): @@ -32,46 +33,48 @@ def load_topo_from_yaml(): :returns: Nodes from loaded topology. """ try: - topo_path = BuiltIn().get_variable_value("${TOPOLOGY_PATH}") + topo_path = BuiltIn().get_variable_value(u"${TOPOLOGY_PATH}") except RobotNotRunningError: return '' with open(topo_path) as work_file: - return load(work_file.read())['nodes'] + return safe_load(work_file.read())[u"nodes"] # pylint: disable=invalid-name -class NodeType(object): +class NodeType: """Defines node types used in topology dictionaries.""" # Device Under Test (this node has VPP running on it) - DUT = 'DUT' + DUT = u"DUT" # Traffic Generator (this node has traffic generator on it) - TG = 'TG' + TG = u"TG" # Virtual Machine (this node running on DUT node) - VM = 'VM' + VM = u"VM" -class NodeSubTypeTG(object): +class NodeSubTypeTG: """Defines node sub-type TG - traffic generator.""" # T-Rex traffic generator - TREX = 'TREX' + TREX = u"TREX" # Moongen - MOONGEN = 'MOONGEN' + MOONGEN = u"MOONGEN" # IxNetwork - IXNET = 'IXNET' + IXNET = u"IXNET" -class SocketType(object): + +class SocketType: """Defines socket types used in topology dictionaries.""" # VPP Socket PAPI - PAPI = 'PAPI' + PAPI = u"PAPI" # VPP PAPI Stats (legacy option until stats are migrated to Socket PAPI) - STATS = 'STATS' + STATS = u"STATS" + DICT__nodes = load_topo_from_yaml() -class Topology(object): +class Topology: """Topology data manipulation and extraction methods. 
Defines methods used for manipulation and extraction of data from @@ -103,10 +106,10 @@ class Topology(object): node[path[0]] = value return if path[0] not in node: - node[path[0]] = {} + node[path[0]] = dict() elif isinstance(node[path[0]], str): - node[path[0]] = {} if node[path[0]] == '' \ - else {node[path[0]]: ''} + node[path[0]] = dict() if node[path[0]] == u"" \ + else {node[path[0]]: u""} self.add_node_item(node[path[0]], value, path[1:]) @staticmethod @@ -123,9 +126,9 @@ class Topology(object): max_ports = 1000000 iface = None for i in range(1, max_ports): - if node['interfaces'].get(str(ptype) + str(i)) is None: + if node[u"interfaces"].get(str(ptype) + str(i)) is None: iface = str(ptype) + str(i) - node['interfaces'][iface] = dict() + node[u"interfaces"][iface] = dict() break return iface @@ -140,7 +143,7 @@ class Topology(object): :returns: Nothing """ try: - node['interfaces'].pop(iface_key) + node[u"interfaces"].pop(iface_key) except KeyError: pass @@ -154,9 +157,9 @@ class Topology(object): :type ptype: str :returns: Nothing """ - for if_key in list(node['interfaces']): + for if_key in list(node[u"interfaces"]): if if_key.startswith(str(ptype)): - node['interfaces'].pop(if_key) + node[u"interfaces"].pop(if_key) @staticmethod def remove_all_added_ports_on_all_duts_from_topology(nodes): @@ -166,12 +169,13 @@ class Topology(object): :type nodes: dict :returns: Nothing """ - port_types = ('subinterface', 'vlan_subif', 'memif', 'tap', 'vhost', - 'loopback', 'gre_tunnel', 'vxlan_tunnel', 'eth_bond', - 'eth_avf') + port_types = ( + u"subinterface", u"vlan_subif", u"memif", u"tap", u"vhost", + u"loopback", u"gre_tunnel", u"vxlan_tunnel", u"eth_bond", u"eth_avf" + ) for node_data in nodes.values(): - if node_data['type'] == NodeType.DUT: + if node_data[u"type"] == NodeType.DUT: for ptype in port_types: Topology.remove_all_ports(node_data, ptype) @@ -183,10 +187,10 @@ class Topology(object): :type node: dict :returns: Nothing """ - reg_ex = re.compile(r'port\d+_vif\d+') - for if_key in list(node['interfaces']): + reg_ex = re.compile(r"port\d+_vif\d+") + for if_key in list(node[u"interfaces"]): if re.match(reg_ex, if_key): - node['interfaces'].pop(if_key) + node[u"interfaces"].pop(if_key) @staticmethod def remove_all_added_vif_ports_on_all_duts_from_topology(nodes): @@ -198,7 +202,7 @@ class Topology(object): :returns: Nothing """ for node_data in nodes.values(): - if node_data['type'] == NodeType.DUT: + if node_data[u"type"] == NodeType.DUT: Topology.remove_all_vif_ports(node_data) @staticmethod @@ -212,7 +216,7 @@ class Topology(object): :type iface_key: str :type sw_if_index: int """ - node['interfaces'][iface_key]['vpp_sw_index'] = int(sw_if_index) + node[u"interfaces"][iface_key][u"vpp_sw_index"] = int(sw_if_index) @staticmethod def update_interface_name(node, iface_key, name): @@ -225,7 +229,7 @@ class Topology(object): :type iface_key: str :type name: str """ - node['interfaces'][iface_key]['name'] = str(name) + node[u"interfaces"][iface_key][u"name"] = str(name) @staticmethod def update_interface_mac_address(node, iface_key, mac_address): @@ -238,7 +242,7 @@ class Topology(object): :type iface_key: str :type mac_address: str """ - node['interfaces'][iface_key]['mac_address'] = str(mac_address) + node[u"interfaces"][iface_key][u"mac_address"] = str(mac_address) @staticmethod def update_interface_pci_address(node, iface_key, pci_address): @@ -251,7 +255,7 @@ class Topology(object): :type iface_key: str :type pci_address: str """ - node['interfaces'][iface_key]['pci_address'] = 
str(pci_address) + node[u"interfaces"][iface_key][u"pci_address"] = str(pci_address) @staticmethod def update_interface_vlan(node, iface_key, vlan): @@ -264,7 +268,7 @@ class Topology(object): :type iface_key: str :type vlan: str """ - node['interfaces'][iface_key]['vlan'] = int(vlan) + node[u"interfaces"][iface_key][u"vlan"] = int(vlan) @staticmethod def update_interface_vhost_socket(node, iface_key, vhost_socket): @@ -277,7 +281,7 @@ class Topology(object): :type iface_key: str :type vhost_socket: str """ - node['interfaces'][iface_key]['vhost_socket'] = str(vhost_socket) + node[u"interfaces"][iface_key][u"vhost_socket"] = str(vhost_socket) @staticmethod def update_interface_memif_socket(node, iface_key, memif_socket): @@ -290,7 +294,7 @@ class Topology(object): :type iface_key: str :type memif_socket: str """ - node['interfaces'][iface_key]['memif_socket'] = str(memif_socket) + node[u"interfaces"][iface_key][u"memif_socket"] = str(memif_socket) @staticmethod def update_interface_memif_id(node, iface_key, memif_id): @@ -303,7 +307,7 @@ class Topology(object): :type iface_key: str :type memif_id: str """ - node['interfaces'][iface_key]['memif_id'] = str(memif_id) + node[u"interfaces"][iface_key][u"memif_id"] = str(memif_id) @staticmethod def update_interface_memif_role(node, iface_key, memif_role): @@ -316,7 +320,7 @@ class Topology(object): :type iface_key: str :type memif_role: str """ - node['interfaces'][iface_key]['memif_role'] = str(memif_role) + node[u"interfaces"][iface_key][u"memif_role"] = str(memif_role) @staticmethod def update_interface_tap_dev_name(node, iface_key, dev_name): @@ -330,7 +334,7 @@ class Topology(object): :type dev_name: str :returns: Nothing """ - node['interfaces'][iface_key]['dev_name'] = str(dev_name) + node[u"interfaces"][iface_key][u"dev_name"] = str(dev_name) @staticmethod def get_node_by_hostname(nodes, hostname): @@ -343,7 +347,7 @@ class Topology(object): :returns: Node dictionary or None if not found. """ for node in nodes.values(): - if node['host'] == hostname: + if node[u"host"] == hostname: return node return None @@ -357,11 +361,11 @@ class Topology(object): :returns: Links in the topology. :rtype: list """ - links = [] + links = list() for node in nodes.values(): - for interface in node['interfaces'].values(): - link = interface.get('link') + for interface in node[u"interfaces"].values(): + link = interface.get(u"link") if link is not None: if link not in links: links.append(link) @@ -382,9 +386,9 @@ class Topology(object): :returns: Interface key from topology file :rtype: string """ - interfaces = node['interfaces'] + interfaces = node[u"interfaces"] retval = None - for if_key, if_val in interfaces.iteritems(): + for if_key, if_val in interfaces.items(): k_val = if_val.get(key) if k_val is not None: if k_val == value: @@ -406,7 +410,7 @@ class Topology(object): :returns: Interface key. :rtype: str """ - return Topology._get_interface_by_key_value(node, "name", iface_name) + return Topology._get_interface_by_key_value(node, u"name", iface_name) @staticmethod def get_interface_by_link_name(node, link_name): @@ -422,7 +426,7 @@ class Topology(object): :returns: Interface key of the interface connected to the given link. :rtype: str """ - return Topology._get_interface_by_key_value(node, "link", link_name) + return Topology._get_interface_by_key_value(node, u"link", link_name) def get_interfaces_by_link_names(self, node, link_names): """Return dictionary of dictionaries {"interfaceN", interface name}. 
@@ -439,14 +443,12 @@ class Topology(object): links. :rtype: dict """ - retval = {} - interface_key_tpl = "interface{}" + retval = dict() interface_number = 1 for link_name in link_names: interface = self.get_interface_by_link_name(node, link_name) - interface_name = self.get_interface_name(node, interface) - interface_key = interface_key_tpl.format(str(interface_number)) - retval[interface_key] = interface_name + retval[f"interface{str(interface_number)}"] = \ + self.get_interface_name(node, interface) interface_number += 1 return retval @@ -465,8 +467,9 @@ class Topology(object): :returns: Interface name of the interface connected to the given link. :rtype: str """ - return Topology._get_interface_by_key_value(node, "vpp_sw_index", - sw_if_index) + return Topology._get_interface_by_key_value( + node, u"vpp_sw_index", sw_if_index + ) @staticmethod def get_interface_sw_index(node, iface_key): @@ -480,8 +483,8 @@ class Topology(object): :rtype: int or None """ try: - if isinstance(iface_key, basestring): - return node['interfaces'][iface_key].get('vpp_sw_index') + if isinstance(iface_key, str): + return node[u"interfaces"][iface_key].get(u"vpp_sw_index") # TODO: use only iface_key, do not use integer return int(iface_key) except (KeyError, ValueError): @@ -499,10 +502,10 @@ class Topology(object): :raises TypeError: If provided interface name is not a string. """ try: - if not isinstance(iface_name, basestring): - raise TypeError("Interface name must be a string.") + if not isinstance(iface_name, str): + raise TypeError(u"Interface name must be a string.") iface_key = Topology.get_interface_by_name(node, iface_name) - return node['interfaces'][iface_key].get('vpp_sw_index') + return node[u"interfaces"][iface_key].get(u"vpp_sw_index") except (KeyError, ValueError): return None @@ -519,7 +522,7 @@ class Topology(object): :rtype: int """ try: - return node['interfaces'][iface_key].get('mtu') + return node[u"interfaces"][iface_key].get(u"mtu") except KeyError: return None @@ -536,7 +539,7 @@ class Topology(object): :rtype: str """ try: - return node['interfaces'][iface_key].get('name') + return node[u"interfaces"][iface_key].get(u"name") except KeyError: return None @@ -562,21 +565,25 @@ class Topology(object): if isinstance(interface, int): key = Topology.get_interface_by_sw_index(node, interface) if key is None: - raise RuntimeError("Interface with sw_if_index={0} does not " - "exist in topology.".format(interface)) + raise RuntimeError( + f"Interface with sw_if_index={interface} does not exist " + f"in topology." + ) elif interface in Topology.get_node_interfaces(node): key = interface - elif interface in Topology.get_links({"dut": node}): + elif interface in Topology.get_links({u"dut": node}): key = Topology.get_interface_by_link_name(node, interface) - elif isinstance(interface, basestring): + elif isinstance(interface, str): key = Topology.get_interface_by_name(node, interface) if key is None: - raise RuntimeError("Interface with key, name or link name " - "\"{0}\" does not exist in topology." - .format(interface)) + raise RuntimeError( + f"Interface with key, name or link name \"{interface}\" " + f"does not exist in topology." + ) else: - raise TypeError("Type of interface argument must be integer" - " or string.") + raise TypeError( + u"Type of interface argument must be integer or string." 
+ )
return key
@staticmethod
@@ -602,17 +609,18 @@
key = Topology.convert_interface_reference_to_key(node, interface)
conversions = {
- "key": lambda x, y: y,
- "name": Topology.get_interface_name,
- "sw_if_index": Topology.get_interface_sw_index
+ u"key": lambda x, y: y,
+ u"name": Topology.get_interface_name,
+ u"sw_if_index": Topology.get_interface_sw_index
}
try:
return conversions[wanted_format](node, key)
except KeyError:
- raise ValueError("Unrecognized return value wanted: {0}."
- "Valid options are key, name, sw_if_index"
- .format(wanted_format))
+ raise ValueError(
+ f"Unrecognized return value wanted: {wanted_format}. "
+ f"Valid options are key, name, sw_if_index"
+ )
@staticmethod
def get_interface_numa_node(node, iface_key):
@@ -628,7 +636,7 @@
:rtype: int
"""
try:
- return node['interfaces'][iface_key].get('numa_node')
+ return node[u"interfaces"][iface_key].get(u"numa_node")
except KeyError:
return None
@@ -653,7 +661,7 @@
numa_list = []
for if_key in iface_keys:
try:
- numa_list.append(node['interfaces'][if_key].get('numa_node'))
+ numa_list.append(node[u"interfaces"][if_key].get(u"numa_node"))
except KeyError:
pass
@@ -676,7 +684,7 @@
:returns: Return MAC or None if not found.
"""
try:
- return node['interfaces'][iface_key].get('mac_address')
+ return node[u"interfaces"][iface_key].get(u"mac_address")
except KeyError:
return None
@@ -691,7 +699,7 @@
:returns: Return IP4 or None if not found.
"""
try:
- return node['interfaces'][iface_key].get('ip4_address', None)
+ return node[u"interfaces"][iface_key].get(u"ip4_address")
except KeyError:
return None
@@ -712,11 +720,11 @@
"""
link_name = None
# get link name where the interface belongs to
- for if_key, if_val in node['interfaces'].iteritems():
- if if_key == 'mgmt':
+ for if_key, if_val in node[u"interfaces"].items():
+ if if_key == u"mgmt":
continue
if if_key == iface_key:
- link_name = if_val['link']
+ link_name = if_val[u"link"]
break
if link_name is None:
@@ -725,13 +733,13 @@
# find link
for node_data in nodes_info.values():
# skip self
- if node_data['host'] == node['host']:
+ if node_data[u"host"] == node[u"host"]:
continue
for if_key, if_val \
- in node_data['interfaces'].iteritems():
- if 'link' not in if_val:
+ in node_data[u"interfaces"].items():
+ if u"link" not in if_val:
continue
- if if_val['link'] == link_name:
+ if if_val[u"link"] == link_name:
return node_data, if_key
return None
@@ -746,7 +754,7 @@
:returns: Return PCI address or None if not found.
"""
try:
- return node['interfaces'][iface_key].get('pci_address')
+ return node[u"interfaces"][iface_key].get(u"pci_address")
except KeyError:
return None
@@ -761,7 +769,7 @@
:returns: Return interface driver or None if not found.
"""
try:
- return node['interfaces'][iface_key].get('driver')
+ return node[u"interfaces"][iface_key].get(u"driver")
except KeyError:
return None
@@ -776,7 +784,7 @@
:returns: Return interface vlan or None if not found.
"""
try:
- return node['interfaces'][iface_key].get('vlan')
+ return node[u"interfaces"][iface_key].get(u"vlan")
except KeyError:
return None
@@ -789,7 +797,7 @@
:returns: Return list of keys of all interfaces.
:rtype: list """ - return node['interfaces'].keys() + return node[u"interfaces"].keys() @staticmethod def get_node_link_mac(node, link_name): @@ -802,9 +810,9 @@ class Topology(object): :returns: MAC address string. :rtype: str """ - for port in node['interfaces'].values(): - if port.get('link') == link_name: - return port.get('mac_address') + for port in node[u"interfaces"].values(): + if port.get(u"link") == link_name: + return port.get(u"mac_address") return None @staticmethod @@ -818,27 +826,26 @@ class Topology(object): :returns: List of link names occupied by the node. :rtype: None or list of string """ - interfaces = node['interfaces'] + interfaces = node[u"interfaces"] link_names = [] for interface in interfaces.values(): - if 'link' in interface: - if (filter_list is not None) and ('model' in interface): + if u"link" in interface: + if (filter_list is not None) and (u"model" in interface): for filt in filter_list: - if filt == interface['model']: - link_names.append(interface['link']) - elif (filter_list is not None) and ('model' not in interface): - logger.trace('Cannot apply filter on interface: {}' - .format(str(interface))) + if filt == interface[u"model"]: + link_names.append(interface[u"link"]) + elif (filter_list is not None) and (u"model" not in interface): + logger.trace( + f"Cannot apply filter on interface: {str(interface)}" + ) else: - link_names.append(interface['link']) + link_names.append(interface[u"link"]) if not link_names: link_names = None return link_names - @keyword('Get active links connecting "${node1}" and "${node2}"') - def get_active_connecting_links(self, node1, node2, - filter_list_node1=None, - filter_list_node2=None): + def get_active_connecting_links( + self, node1, node2, filter_list_node1=None, filter_list_node2=None): """Return list of link names that connect together node1 and node2. :param node1: Node topology dictionary. @@ -853,29 +860,27 @@ class Topology(object): :rtype: list """ - logger.trace("node1: {}".format(str(node1))) - logger.trace("node2: {}".format(str(node2))) + logger.trace(f"node1: {str(node1)}") + logger.trace(f"node2: {str(node2)}") node1_links = self._get_node_active_link_names( - node1, - filter_list=filter_list_node1) + node1, filter_list=filter_list_node1 + ) node2_links = self._get_node_active_link_names( - node2, - filter_list=filter_list_node2) + node2, filter_list=filter_list_node2 + ) connecting_links = None if node1_links is None: - logger.error("Unable to find active links for node1") + logger.error(u"Unable to find active links for node1") elif node2_links is None: - logger.error("Unable to find active links for node2") + logger.error(u"Unable to find active links for node2") else: connecting_links = list(set(node1_links).intersection(node2_links)) return connecting_links - @keyword('Get first active connecting link between node "${node1}" and ' - '"${node2}"') def get_first_active_connecting_link(self, node1, node2): - """ + """Get first link connecting the two nodes together. :param node1: Connected node. :param node2: Connected node. 
@@ -887,11 +892,9 @@ class Topology(object):
         """
         connecting_links = self.get_active_connecting_links(node1, node2)
         if not connecting_links:
-            raise RuntimeError("No links connecting the nodes were found")
+            raise RuntimeError(u"No links connecting the nodes were found")
         return connecting_links[0]
 
-    @keyword('Get egress interfaces name on "${node1}" for link with '
-             '"${node2}"')
     def get_egress_interfaces_name_for_nodes(self, node1, node2):
         """Get egress interfaces on node1 for link with node2.
 
@@ -902,24 +905,22 @@ class Topology(object):
         :returns: Egress interfaces.
         :rtype: list
         """
-        interfaces = []
+        interfaces = list()
         links = self.get_active_connecting_links(node1, node2)
         if not links:
-            raise RuntimeError('No link between nodes')
-        for interface in node1['interfaces'].values():
-            link = interface.get('link')
+            raise RuntimeError(u"No link between nodes")
+        for interface in node1[u"interfaces"].values():
+            link = interface.get(u"link")
             if link is None:
                 continue
             if link not in links:
                 continue
-            name = interface.get('name')
+            name = interface.get(u"name")
             if name is None:
                 continue
             interfaces.append(name)
 
         return interfaces
 
-    @keyword('Get first egress interface name on "${node1}" for link with '
-             '"${node2}"')
     def get_first_egress_interface_for_nodes(self, node1, node2):
         """Get first egress interface on node1 for link with node2.
 
@@ -932,11 +933,9 @@ class Topology(object):
         """
         interfaces = self.get_egress_interfaces_name_for_nodes(node1, node2)
         if not interfaces:
-            raise RuntimeError('No egress interface for nodes')
+            raise RuntimeError(u"No egress interface for nodes")
         return interfaces[0]
 
-    @keyword('Get link data useful in circular topology test from tg "${tgen}"'
-             ' dut1 "${dut1}" dut2 "${dut2}"')
     def get_links_dict_from_nodes(self, tgen, dut1, dut2):
         """Return link combinations used in tests in circular topology.
 
@@ -970,12 +969,14 @@ class Topology(object):
         tg_traffic_links = [dut1_tg_link, dut2_tg_link]
         dut1_bd_links = [dut1_dut2_link, dut1_tg_link]
         dut2_bd_links = [dut1_dut2_link, dut2_tg_link]
-        topology_links = {'DUT1_DUT2_LINK': dut1_dut2_link,
-                          'DUT1_TG_LINK': dut1_tg_link,
-                          'DUT2_TG_LINK': dut2_tg_link,
-                          'TG_TRAFFIC_LINKS': tg_traffic_links,
-                          'DUT1_BD_LINKS': dut1_bd_links,
-                          'DUT2_BD_LINKS': dut2_bd_links}
+        topology_links = {
+            u"DUT1_DUT2_LINK": dut1_dut2_link,
+            u"DUT1_TG_LINK": dut1_tg_link,
+            u"DUT2_TG_LINK": dut2_tg_link,
+            u"TG_TRAFFIC_LINKS": tg_traffic_links,
+            u"DUT1_BD_LINKS": dut1_bd_links,
+            u"DUT2_BD_LINKS": dut2_bd_links
+        }
         return topology_links
 
     @staticmethod
@@ -987,7 +988,7 @@ class Topology(object):
         :returns: True if node is type of TG, otherwise False.
         :rtype: bool
         """
-        return node['type'] == NodeType.TG
+        return node[u"type"] == NodeType.TG
 
     @staticmethod
     def get_node_hostname(node):
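The egress-interface loop above keeps only interfaces that sit on one of the links connecting node1 and node2 and that carry a u"name". A tiny worked example with an invented topology fragment (u"mgmt" has no u"link" and u"port2" is on an unrelated link, so both are skipped):

links = [u"link1"]  # the connecting links for node1 and node2
node1 = {u"interfaces": {
    u"mgmt": {u"name": u"eth0"},
    u"port1": {u"link": u"link1", u"name": u"eth1"},
    u"port2": {u"link": u"link9", u"name": u"eth2"},
}}
interfaces = list()
for interface in node1[u"interfaces"].values():
    link = interface.get(u"link")
    if link is None or link not in links:
        continue
    name = interface.get(u"name")
    if name is not None:
        interfaces.append(name)
assert interfaces == [u"eth1"]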
@@ -998,7 +999,7 @@ class Topology(object):
         :returns: Hostname or IP address.
         :rtype: str
         """
-        return node['host']
+        return node[u"host"]
 
     @staticmethod
     def get_node_arch(node):
@@ -1011,10 +1012,10 @@ class Topology(object):
         :rtype: str
         """
         try:
-            return node['arch']
+            return node[u"arch"]
         except KeyError:
-            node['arch'] = 'x86_64'
-            return 'x86_64'
+            node[u"arch"] = u"x86_64"
+            return u"x86_64"
 
     @staticmethod
     def get_cryptodev(node):
@@ -1026,7 +1027,7 @@ class Topology(object):
         :rtype: str
         """
         try:
-            return node['cryptodev']
+            return node[u"cryptodev"]
         except KeyError:
             return None
 
@@ -1040,7 +1041,7 @@ class Topology(object):
         :rtype: str
         """
         try:
-            return node['uio_driver']
+            return node[u"uio_driver"]
         except KeyError:
             return None
 
@@ -1055,7 +1056,7 @@ class Topology(object):
         :returns: Return iface_key or None if not found.
         """
         try:
-            node['interfaces'][iface_key]['numa_node'] = numa_node_id
+            node[u"interfaces"][iface_key][u"numa_node"] = numa_node_id
             return iface_key
         except KeyError:
             return None
 
@@ -1070,9 +1071,9 @@ class Topology(object):
         :type node: dict
         :type socket_type: SocketType
         :type socket_id: str
-        :type socket path: str
+        :type socket_path: str
         """
-        path = ['sockets', socket_type, socket_id]
+        path = [u"sockets", socket_type, socket_id]
         self.add_node_item(node, socket_path, path)
 
     @staticmethod
@@ -1084,12 +1085,12 @@ class Topology(object):
         :type node: dict
         :type socket_type: SocketType
         :returns: Node sockets or None if not found.
-        :rtype: list
+        :rtype: dict
         """
         try:
             if socket_type:
-                return node['sockets'][socket_type]
-            return node['sockets']
+                return node[u"sockets"][socket_type]
+            return node[u"sockets"]
         except KeyError:
             return None
 
@@ -1101,5 +1102,5 @@ class Topology(object):
         :type nodes: dict
         """
         for node in nodes.values():
-            if 'sockets' in node.keys():
-                node.pop('sockets')
+            if u"sockets" in node:
+                node.pop(u"sockets")
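add_node_socket above stores socket_path under the nested path [u"sockets", socket_type, socket_id], which is why get_node_sockets now documents :rtype: dict: each socket type maps to a dict of socket ids to paths. A simplified stand-in for the add_node_item helper, which is defined elsewhere in this class (the socket types and paths below are invented):

def add_node_item(node, value, path):
    # Walk or create nested dicts along path, store value at the leaf.
    section = node
    for part in path[:-1]:
        section = section.setdefault(part, dict())
    section[path[-1]] = value

node = dict()
add_node_item(node, u"/run/vpp/api.sock", [u"sockets", u"PAPI", u"dut1"])
add_node_item(node, u"/run/vpp/stats.sock", [u"sockets", u"STATS", u"dut1"])
assert node[u"sockets"][u"PAPI"] == {u"dut1": u"/run/vpp/api.sock"}
# get_node_sockets(node, socket_type=u"PAPI") then returns that inner dict.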