author     John DeNisco <jdenisco@cisco.com>   2017-09-27 16:35:23 -0400
committer  Dave Barach <openvpp@barachs.net>   2017-10-11 18:45:41 +0000
commit     68b0ee3a38e3a86f0389d8cc695915df190c3dfb (patch)
tree       2264f2c552e119cd4bea30ddd8c80600c0c4ae66 /extras/vpp_config/vpplib
parent     35830af800aefdcc6a3767bc101b4c300a74651b (diff)
Red Hat and small system support
Initial 17.10 commit
Final 17.07 cleanup, 17.10 next
Added CentOS grub support; this should complete the CentOS support
Added CentOS install/uninstall
Added TCP parameters.
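
For reference, a sketch of the TCP stanza this change renders into startup.conf when non-zero
session counts are configured. The session counts below (500000 active-open, 500000 passive-open)
are illustrative assumptions, not values from this commit; the layout follows _apply_vpp_tcp in
AutoConfig.py:

    # TCP stack-related configuration parameters
    # expecting 500000 client sessions, 500000 server sessions

    heapsize 4g

    api-segment {
      global-size 2000M
      api-size 1G
    }

    session {
      event-queue-length 1000000
      preallocated-sessions 1000000
      v4-session-table-buckets 250000
      v4-session-table-memory 3g
      v4-halfopen-table-buckets 250000
      v4-halfopen-table-memory 3g
    }

    tcp {
      preallocated-connections 1000000
      preallocated-half-open-connections 500000
      local-endpoints-table-buckets 250000
      local-endpoints-table-memory 3g
    }

When both session counts are zero, only the api-segment { gid vpp } block is emitted.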
Change-Id: I064e3a4118969ac36e62924a6a3f8a98f132ba60
Signed-off-by: John DeNisco <jdenisco@cisco.com>
Signed-off-by: Dave Barach <dave@barachs.net>
Diffstat (limited to 'extras/vpp_config/vpplib')
-rw-r--r--  extras/vpp_config/vpplib/AutoConfig.py       | 1427
-rw-r--r--  extras/vpp_config/vpplib/CpuUtils.py         |  287
-rw-r--r--  extras/vpp_config/vpplib/QemuUtils.py        |  680
-rw-r--r--  extras/vpp_config/vpplib/VPPUtil.py          |  662
-rw-r--r--  extras/vpp_config/vpplib/VppGrubUtil.py      |  236
-rw-r--r--  extras/vpp_config/vpplib/VppHugePageUtil.py  |  122
-rw-r--r--  extras/vpp_config/vpplib/VppPCIUtil.py       |  330
-rw-r--r--  extras/vpp_config/vpplib/__init__.py         |   16
-rw-r--r--  extras/vpp_config/vpplib/constants.py        |   48
9 files changed, 3808 insertions(+), 0 deletions(-)
diff --git a/extras/vpp_config/vpplib/AutoConfig.py b/extras/vpp_config/vpplib/AutoConfig.py new file mode 100644 index 00000000000..49c7d54257c --- /dev/null +++ b/extras/vpp_config/vpplib/AutoConfig.py @@ -0,0 +1,1427 @@ +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Library that supports Auto Configuration.""" + +import logging +import os +import re +import yaml + +from vpplib.VPPUtil import VPPUtil +from vpplib.VppPCIUtil import VppPCIUtil +from vpplib.VppHugePageUtil import VppHugePageUtil +from vpplib.CpuUtils import CpuUtils +from vpplib.VppGrubUtil import VppGrubUtil +from vpplib.QemuUtils import QemuUtils + +__all__ = ["AutoConfig"] + +# Constants +MIN_SYSTEM_CPUS = 2 +MIN_TOTAL_HUGE_PAGES = 1024 +MAX_PERCENT_FOR_HUGE_PAGES = 70 + + +class AutoConfig(object): + """Auto Configuration Tools""" + + def __init__(self, rootdir, filename): + """ + The Auto Configure class. + + :param rootdir: The root directory for all the auto configuration files + :param filename: The autoconfiguration file + :type rootdir: str + :type filename: str + """ + self._autoconfig_filename = rootdir + filename + self._rootdir = rootdir + self._metadata = {} + self._nodes = {} + self._vpp_devices_node = {} + self._hugepage_config = "" + self._loadconfig() + + def get_nodes(self): + """ + Returns the nodes dictionary. + + :returns: The nodes + :rtype: dictionary + """ + + return self._nodes + + @staticmethod + def _autoconfig_backup_file(filename): + """ + Create a backup file. + + :param filename: The file to backup + :type filename: str + """ + + # Does a copy of the file exist, if not create one + ofile = filename + '.orig' + (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile)) + if ret != 0: + logging.debug(stderr) + if stdout.strip('\n') != ofile: + cmd = 'sudo cp {} {}'.format(filename, ofile) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + logging.debug(stderr) + + @staticmethod + def _ask_user_range(question, first, last, default): + """ + Asks the user for a number within a range. + default is returned if return is entered. + + :param question: Text of a question. + :param first: First number in the range + :param last: Last number in the range + :param default: The value returned when return is entered + :type question: string + :type first: int + :type last: int + :type default: int + :returns: The answer to the question + :rtype: int + """ + + while True: + answer = raw_input(question) + if answer == '': + answer = default + break + if re.findall(r'[0-9+]', answer): + if int(answer) in range(first, last + 1): + break + else: + print "Please a value between {} and {} or Return.". \ + format(first, last) + else: + print "Please a number between {} and {} or Return.". \ + format(first, last) + + return int(answer) + + @staticmethod + def _ask_user_yn(question, default): + """ + Asks the user for a yes or no question. + + :param question: Text of a question. 
+ :param default: The value returned when return is entered + :type question: string + :type default: string + :returns: The answer to the question + :rtype: string + """ + + input_valid = False + default = default.lower() + answer = '' + while not input_valid: + answer = raw_input(question) + if answer == '': + answer = default + if re.findall(r'[YyNn]', answer): + input_valid = True + answer = answer[0].lower() + else: + print "Please answer Y, N or Return." + + return answer + + def _loadconfig(self): + """ + Load the testbed configuration, given the auto configuration file. + + """ + + # Get the Topology, from the topology layout file + topo = {} + with open(self._autoconfig_filename, 'r') as stream: + try: + topo = yaml.load(stream) + if 'metadata' in topo: + self._metadata = topo['metadata'] + except yaml.YAMLError as exc: + raise RuntimeError("Couldn't read the Auto config file {}.".format(self._autoconfig_filename, exc)) + + systemfile = self._rootdir + self._metadata['system_config_file'] + if os.path.isfile(systemfile): + with open(systemfile, 'r') as sysstream: + try: + systopo = yaml.load(sysstream) + if 'nodes' in systopo: + self._nodes = systopo['nodes'] + except yaml.YAMLError as sysexc: + raise RuntimeError("Couldn't read the System config file {}.".format(systemfile, sysexc)) + else: + # Get the nodes from Auto Config + if 'nodes' in topo: + self._nodes = topo['nodes'] + + # Set the root directory in all the nodes + for i in self._nodes.items(): + node = i[1] + node['rootdir'] = self._rootdir + + def updateconfig(self): + """ + Update the testbed configuration, given the auto configuration file. + We will write the system configuration file with the current node + information + + """ + + # Initialize the yaml data + ydata = {'metadata': self._metadata, 'nodes': self._nodes} + + # Write the system config file + filename = self._rootdir + self._metadata['system_config_file'] + with open(filename, 'w') as yamlfile: + yaml.dump(ydata, yamlfile, default_flow_style=False) + + def _update_auto_config(self): + """ + Write the auto configuration file with the new configuration data, + input from the user. 
+ + """ + + # Initialize the yaml data + nodes = {} + with open(self._autoconfig_filename, 'r') as stream: + try: + ydata = yaml.load(stream) + if 'nodes' in ydata: + nodes = ydata['nodes'] + except yaml.YAMLError as exc: + print exc + return + + for i in nodes.items(): + key = i[0] + node = i[1] + + # Interfaces + node['interfaces'] = {} + for item in self._nodes[key]['interfaces'].items(): + port = item[0] + interface = item[1] + + node['interfaces'][port] = {} + node['interfaces'][port]['pci_address'] = \ + interface['pci_address'] + if 'mac_address' in interface: + node['interfaces'][port]['mac_address'] = \ + interface['mac_address'] + + if 'total_other_cpus' in self._nodes[key]['cpu']: + node['cpu']['total_other_cpus'] = \ + self._nodes[key]['cpu']['total_other_cpus'] + if 'total_vpp_cpus' in self._nodes[key]['cpu']: + node['cpu']['total_vpp_cpus'] = \ + self._nodes[key]['cpu']['total_vpp_cpus'] + if 'reserve_vpp_main_core' in self._nodes[key]['cpu']: + node['cpu']['reserve_vpp_main_core'] = \ + self._nodes[key]['cpu']['reserve_vpp_main_core'] + + # TCP + if 'active_open_sessions' in self._nodes[key]['tcp']: + node['tcp']['active_open_sessions'] = \ + self._nodes[key]['tcp']['active_open_sessions'] + if 'passive_open_sessions' in self._nodes[key]['tcp']: + node['tcp']['passive_open_sessions'] = \ + self._nodes[key]['tcp']['passive_open_sessions'] + + # Huge pages + node['hugepages']['total'] = self._nodes[key]['hugepages']['total'] + + # Write the auto config config file + with open(self._autoconfig_filename, 'w') as yamlfile: + yaml.dump(ydata, yamlfile, default_flow_style=False) + + def apply_huge_pages(self): + """ + Apply the huge page config + + """ + + for i in self._nodes.items(): + node = i[1] + + hpg = VppHugePageUtil(node) + hpg.hugepages_dryrun_apply() + + @staticmethod + def _apply_vpp_unix(node): + """ + Apply the VPP Unix config + + :param node: Node dictionary with cpuinfo. + :type node: dict + """ + + unix = ' nodaemon\n' + if 'unix' not in node['vpp']: + return '' + + unixv = node['vpp']['unix'] + if 'interactive' in unixv: + interactive = unixv['interactive'] + if interactive is True: + unix = ' interactive\n' + + return unix.rstrip('\n') + + @staticmethod + def _apply_vpp_cpu(node): + """ + Apply the VPP cpu config + + :param node: Node dictionary with cpuinfo. + :type node: dict + """ + + # Get main core + cpu = '\n' + vpp_main_core = node['cpu']['vpp_main_core'] + if vpp_main_core is not 0: + cpu += ' main-core {}\n'.format(vpp_main_core) + + # Get workers + vpp_workers = node['cpu']['vpp_workers'] + vpp_worker_len = len(vpp_workers) + if vpp_worker_len > 0: + vpp_worker_str = '' + for i, worker in enumerate(vpp_workers): + if i > 0: + vpp_worker_str += ',' + if worker[0] == worker[1]: + vpp_worker_str += "{}".format(worker[0]) + else: + vpp_worker_str += "{}-{}".format(worker[0], worker[1]) + + cpu += ' corelist-workers {}\n'.format(vpp_worker_str) + + return cpu + + @staticmethod + def _apply_vpp_devices(node): + """ + Apply VPP PCI Device configuration to vpp startup. + + :param node: Node dictionary with cpuinfo. 
+ :type node: dict + """ + + devices = '' + ports_per_numa = node['cpu']['ports_per_numa'] + total_mbufs = node['cpu']['total_mbufs'] + + for item in ports_per_numa.items(): + value = item[1] + interfaces = value['interfaces'] + + # if 0 was specified for the number of vpp workers, use 1 queue + num_rx_queues = None + num_tx_queues = None + if 'rx_queues' in value: + num_rx_queues = value['rx_queues'] + if 'tx_queues' in value: + num_tx_queues = value['tx_queues'] + + num_rx_desc = None + num_tx_desc = None + + # Create the devices string + for interface in interfaces: + pci_address = interface['pci_address'] + pci_address = pci_address.lstrip("'").rstrip("'") + devices += '\n' + devices += ' dev {} {{ \n'.format(pci_address) + if num_rx_queues: + devices += ' num-rx-queues {}\n'.format(num_rx_queues) + else: + devices += ' num-rx-queues {}\n'.format(1) + if num_tx_queues: + devices += ' num-tx-queues {}\n'.format(num_tx_queues) + if num_rx_desc: + devices += ' num-rx-desc {}\n'.format(num_rx_desc) + if num_tx_desc: + devices += ' num-tx-desc {}\n'.format(num_tx_desc) + devices += ' }' + + if total_mbufs is not 0: + devices += '\n num-mbufs {}'.format(total_mbufs) + + return devices + + @staticmethod + def _calc_vpp_workers(node, vpp_workers, numa_node, + other_cpus_end, total_vpp_workers, + reserve_vpp_main_core): + """ + Calculate the VPP worker information + + :param node: Node dictionary + :param vpp_workers: List of VPP workers + :param numa_node: Numa node + :param other_cpus_end: The end of the cpus allocated for cores + other than vpp + :param total_vpp_workers: The number of vpp workers needed + :param reserve_vpp_main_core: Is there a core needed for + the vpp main core + :type node: dict + :type numa_node: int + :type other_cpus_end: int + :type total_vpp_workers: int + :type reserve_vpp_main_core: bool + :returns: Is a core still needed for the vpp main core + :rtype: bool + """ + + # Can we fit the workers in one of these slices + cpus = node['cpu']['cpus_per_node'][numa_node] + for cpu in cpus: + start = cpu[0] + end = cpu[1] + if start <= other_cpus_end: + start = other_cpus_end + 1 + + if reserve_vpp_main_core: + start += 1 + + workers_end = start + total_vpp_workers - 1 + if workers_end <= end: + if reserve_vpp_main_core: + node['cpu']['vpp_main_core'] = start - 1 + reserve_vpp_main_core = False + if total_vpp_workers: + vpp_workers.append((start, workers_end)) + break + + # We still need to reserve the main core + if reserve_vpp_main_core: + node['cpu']['vpp_main_core'] = other_cpus_end + 1 + + return reserve_vpp_main_core + + @staticmethod + def _calc_desc_and_queues(total_numa_nodes, + total_ports_per_numa, + total_vpp_cpus, + ports_per_numa_value): + """ + Calculate the number of descriptors and queues + + :param total_numa_nodes: The total number of numa nodes + :param total_ports_per_numa: The total number of ports for this + numa node + :param total_vpp_cpus: The total number of cpus to allocate for vpp + :param ports_per_numa_value: The value from the ports_per_numa + dictionary + :type total_numa_nodes: int + :type total_ports_per_numa: int + :type total_vpp_cpus: int + :type ports_per_numa_value: dict + :returns The total number of message buffers + :returns: The total number of vpp workers + :rtype: int + :rtype: int + """ + + # Get the total vpp workers + total_vpp_workers = total_vpp_cpus + ports_per_numa_value['total_vpp_workers'] = total_vpp_workers + + # Get the number of rx queues + rx_queues = max(1, total_vpp_workers) + tx_queues = total_vpp_workers * 
total_numa_nodes + 1 + + # Get the descriptor entries + desc_entries = 1024 + ports_per_numa_value['rx_queues'] = rx_queues + total_mbufs = (((rx_queues * desc_entries) + + (tx_queues * desc_entries)) * + total_ports_per_numa) + total_mbufs = total_mbufs + + return total_mbufs, total_vpp_workers + + @staticmethod + def _create_ports_per_numa(node, interfaces): + """ + Create a dictionary or ports per numa node + :param node: Node dictionary + :param interfaces: All the interfaces to be used by vpp + :type node: dict + :type interfaces: dict + :returns: The ports per numa dictionary + :rtype: dict + """ + + # Make a list of ports by numa node + ports_per_numa = {} + for item in interfaces.items(): + i = item[1] + if i['numa_node'] not in ports_per_numa: + ports_per_numa[i['numa_node']] = {'interfaces': []} + ports_per_numa[i['numa_node']]['interfaces'].append(i) + else: + ports_per_numa[i['numa_node']]['interfaces'].append(i) + node['cpu']['ports_per_numa'] = ports_per_numa + + return ports_per_numa + + def calculate_cpu_parameters(self): + """ + Calculate the cpu configuration. + + """ + + # Calculate the cpu parameters, needed for the + # vpp_startup and grub configuration + for i in self._nodes.items(): + node = i[1] + + # get total number of nic ports + interfaces = node['interfaces'] + + # Make a list of ports by numa node + ports_per_numa = self._create_ports_per_numa(node, interfaces) + + # Get the number of cpus to skip, we never use the first cpu + other_cpus_start = 1 + other_cpus_end = other_cpus_start + \ + node['cpu']['total_other_cpus'] - 1 + other_workers = None + if other_cpus_end is not 0: + other_workers = (other_cpus_start, other_cpus_end) + node['cpu']['other_workers'] = other_workers + + # Allocate the VPP main core and workers + vpp_workers = [] + reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core'] + total_vpp_cpus = node['cpu']['total_vpp_cpus'] + + # If total_vpp_cpus is 0 or is less than the numa nodes with ports + # then we shouldn't get workers + total_with_main = total_vpp_cpus + if reserve_vpp_main_core: + total_with_main += 1 + total_mbufs = 0 + if total_with_main is not 0: + for item in ports_per_numa.items(): + numa_node = item[0] + value = item[1] + + # Get the number of descriptors and queues + mbufs, total_vpp_workers = self._calc_desc_and_queues( + len(ports_per_numa), + len(value['interfaces']), total_vpp_cpus, value) + total_mbufs += mbufs + + # Get the VPP workers + reserve_vpp_main_core = self._calc_vpp_workers( + node, vpp_workers, numa_node, other_cpus_end, + total_vpp_workers, reserve_vpp_main_core) + + total_mbufs *= 2.5 + total_mbufs = int(total_mbufs) + else: + total_mbufs = 0 + + # Save the info + node['cpu']['vpp_workers'] = vpp_workers + node['cpu']['total_mbufs'] = total_mbufs + + # Write the config + self.updateconfig() + + @staticmethod + def _apply_vpp_tcp(node): + """ + Apply the VPP Unix config + + :param node: Node dictionary with cpuinfo. 
+ :type node: dict + """ + + active_open_sessions = node['tcp']['active_open_sessions'] + aos = int(active_open_sessions) + + passive_open_sessions = node['tcp']['passive_open_sessions'] + pos = int(passive_open_sessions) + + # Generate the api-segment gid vpp sheit in any case + if (aos + pos) == 0: + tcp = "api-segment {\n" + tcp = tcp + " gid vpp\n" + tcp = tcp + "}\n" + return tcp.rstrip('\n') + + tcp = "# TCP stack-related configuration parameters\n" + tcp = tcp + "# expecting {:d} client sessions, {:d} server sessions\n\n".format(aos, pos) + tcp = tcp + "heapsize 4g\n\n" + tcp = tcp + "api-segment {\n" + tcp = tcp + " global-size 2000M\n" + tcp = tcp + " api-size 1G\n" + tcp = tcp + "}\n\n" + + tcp = tcp + "session {\n" + tcp = tcp + " event-queue-length " + "{:d}".format(aos + pos) + "\n" + tcp = tcp + " preallocated-sessions " + "{:d}".format(aos + pos) + "\n" + tcp = tcp + " v4-session-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n" + tcp = tcp + " v4-session-table-memory 3g\n" + if aos > 0: + tcp = tcp + " v4-halfopen-table-buckets " + \ + "{:d}".format((aos + pos) / 4) + "\n" + tcp = tcp + " v4-halfopen-table-memory 3g\n" + tcp = tcp + "}\n\n" + + tcp = tcp + "tcp {\n" + tcp = tcp + " preallocated-connections " + "{:d}".format(aos + pos) + "\n" + if aos > 0: + tcp = tcp + " preallocated-half-open-connections " + "{:d}".format(aos) + "\n" + tcp = tcp + " local-endpoints-table-buckets " + "{:d}".format((aos + pos) / 4) + "\n" + tcp = tcp + " local-endpoints-table-memory 3g\n" + tcp = tcp + "}\n\n" + + return tcp.rstrip('\n') + + def apply_vpp_startup(self): + """ + Apply the vpp startup configration + + """ + + # Apply the VPP startup configruation + for i in self._nodes.items(): + node = i[1] + + # Get the startup file + rootdir = node['rootdir'] + sfile = rootdir + node['vpp']['startup_config_file'] + + # Get the devices + devices = self._apply_vpp_devices(node) + + # Get the CPU config + cpu = self._apply_vpp_cpu(node) + + # Get the unix config + unix = self._apply_vpp_unix(node) + + # Get the TCP configuration, if any + tcp = self._apply_vpp_tcp(node) + + # Make a backup if needed + self._autoconfig_backup_file(sfile) + + # Get the template + tfile = sfile + '.template' + (ret, stdout, stderr) = \ + VPPUtil.exec_command('cat {}'.format(tfile)) + if ret != 0: + raise RuntimeError('Executing cat command failed to node {}'. + format(node['host'])) + startup = stdout.format(unix=unix, + cpu=cpu, + devices=devices, + tcp=tcp) + + (ret, stdout, stderr) = \ + VPPUtil.exec_command('rm {}'.format(sfile)) + if ret != 0: + logging.debug(stderr) + + cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('Writing config failed node {}'. 
+ format(node['host'])) + + def apply_grub_cmdline(self): + """ + Apply the grub cmdline + + """ + + for i in self._nodes.items(): + node = i[1] + + # Get the isolated CPUs + other_workers = node['cpu']['other_workers'] + vpp_workers = node['cpu']['vpp_workers'] + vpp_main_core = node['cpu']['vpp_main_core'] + all_workers = [] + if other_workers is not None: + all_workers = [other_workers] + if vpp_main_core is not 0: + all_workers += [(vpp_main_core, vpp_main_core)] + all_workers += vpp_workers + isolated_cpus = '' + for idx, worker in enumerate(all_workers): + if worker is None: + continue + if idx > 0: + isolated_cpus += ',' + if worker[0] == worker[1]: + isolated_cpus += "{}".format(worker[0]) + else: + isolated_cpus += "{}-{}".format(worker[0], worker[1]) + + vppgrb = VppGrubUtil(node) + current_cmdline = vppgrb.get_current_cmdline() + if 'grub' not in node: + node['grub'] = {} + node['grub']['current_cmdline'] = current_cmdline + node['grub']['default_cmdline'] = \ + vppgrb.apply_cmdline(node, isolated_cpus) + + self.updateconfig() + + def get_hugepages(self): + """ + Get the hugepage configuration + + """ + + for i in self._nodes.items(): + node = i[1] + + hpg = VppHugePageUtil(node) + max_map_count, shmmax = hpg.get_huge_page_config() + node['hugepages']['max_map_count'] = max_map_count + node['hugepages']['shmax'] = shmmax + total, free, size, memtotal, memfree = hpg.get_actual_huge_pages() + node['hugepages']['actual_total'] = total + node['hugepages']['free'] = free + node['hugepages']['size'] = size + node['hugepages']['memtotal'] = memtotal + node['hugepages']['memfree'] = memfree + + self.updateconfig() + + def get_grub(self): + """ + Get the grub configuration + + """ + + for i in self._nodes.items(): + node = i[1] + + vppgrb = VppGrubUtil(node) + current_cmdline = vppgrb.get_current_cmdline() + default_cmdline = vppgrb.get_default_cmdline() + + # Get the total number of isolated CPUs + current_iso_cpus = 0 + iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline) + iso_cpurl = len(iso_cpur) + if iso_cpurl > 0: + iso_cpu_str = iso_cpur[0] + iso_cpu_str = iso_cpu_str.split('=')[1] + iso_cpul = iso_cpu_str.split(',') + for iso_cpu in iso_cpul: + isocpuspl = iso_cpu.split('-') + if len(isocpuspl) is 1: + current_iso_cpus += 1 + else: + first = int(isocpuspl[0]) + second = int(isocpuspl[1]) + if first == second: + current_iso_cpus += 1 + else: + current_iso_cpus += second - first + + if 'grub' not in node: + node['grub'] = {} + node['grub']['current_cmdline'] = current_cmdline + node['grub']['default_cmdline'] = default_cmdline + node['grub']['current_iso_cpus'] = current_iso_cpus + + self.updateconfig() + + @staticmethod + def _get_device(node): + """ + Get the device configuration for a single node + + :param node: Node dictionary with cpuinfo. + :type node: dict + + """ + + vpp = VppPCIUtil(node) + vpp.get_all_devices() + + # Save the device information + node['devices'] = {} + node['devices']['dpdk_devices'] = vpp.get_dpdk_devices() + node['devices']['kernel_devices'] = vpp.get_kernel_devices() + node['devices']['other_devices'] = vpp.get_other_devices() + node['devices']['linkup_devices'] = vpp.get_link_up_devices() + + def get_devices_per_node(self): + """ + Get the device configuration for all the nodes + + """ + + for i in self._nodes.items(): + node = i[1] + # Update the interface data + + self._get_device(node) + + self.updateconfig() + + @staticmethod + def get_cpu_layout(node): + """ + Get the cpu layout + + using lscpu -p get the cpu layout. 
+ Returns a list with each item representing a single cpu. + + :param node: Node dictionary. + :type node: dict + :returns: The cpu layout + :rtype: list + """ + + cmd = 'lscpu -p' + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {}'. + format(cmd, node['host'], stderr)) + + pcpus = [] + lines = stdout.split('\n') + for line in lines: + if line == '' or line[0] == '#': + continue + linesplit = line.split(',') + layout = {'cpu': linesplit[0], 'core': linesplit[1], + 'socket': linesplit[2], 'node': linesplit[3]} + + # cpu, core, socket, node + pcpus.append(layout) + + return pcpus + + def get_cpu(self): + """ + Get the cpu configuration + + """ + + # Get the CPU layout + CpuUtils.get_cpu_layout_from_all_nodes(self._nodes) + + for i in self._nodes.items(): + node = i[1] + + # Get the cpu layout + layout = self.get_cpu_layout(node) + node['cpu']['layout'] = layout + + cpuinfo = node['cpuinfo'] + smt_enabled = CpuUtils.is_smt_enabled(cpuinfo) + node['cpu']['smt_enabled'] = smt_enabled + + # We don't want to write the cpuinfo + node['cpuinfo'] = "" + + # Write the config + self.updateconfig() + + def discover(self): + """ + Get the current system configuration. + + """ + + # Get the Huge Page configuration + self.get_hugepages() + + # Get the device configuration + self.get_devices_per_node() + + # Get the CPU configuration + self.get_cpu() + + # Get the current grub cmdline + self.get_grub() + + def _modify_cpu_questions(self, node, total_cpus, numa_nodes): + """ + Ask the user questions related to the cpu configuration. + + :param node: Node dictionary + :param total_cpus: The total number of cpus in the system + :param numa_nodes: The list of numa nodes in the system + :type node: dict + :type total_cpus: int + :type numa_nodes: list + """ + + print "\nYour system has {} core(s) and {} Numa Nodes.". \ + format(total_cpus, len(numa_nodes)) + print "To begin, we suggest not reserving any cores for VPP", + print "or other processes." + print "Then to improve performance try reserving cores as needed. " + + max_other_cores = total_cpus / 2 + question = '\nHow many core(s) do you want to reserve for processes \ +other than VPP? [0-{}][0]? '.format(str(max_other_cores)) + total_other_cpus = self._ask_user_range(question, 0, max_other_cores, + 0) + node['cpu']['total_other_cpus'] = total_other_cpus + + max_vpp_cpus = 4 + total_vpp_cpus = 0 + if max_vpp_cpus > 0: + question = "How many core(s) shall we reserve for VPP workers[0-{}][0]? ". \ + format(max_vpp_cpus) + total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0) + node['cpu']['total_vpp_cpus'] = total_vpp_cpus + + max_main_cpus = max_vpp_cpus - total_vpp_cpus + reserve_vpp_main_core = False + if max_main_cpus > 0: + question = "Should we reserve 1 core for the VPP Main thread? " + question += "[y/N]? " + answer = self._ask_user_yn(question, 'n') + if answer == 'y': + reserve_vpp_main_core = True + node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core + node['cpu']['vpp_main_core'] = 0 + + def modify_cpu(self): + """ + Modify the cpu configuration, asking for the user for the values. 
+ + """ + + # Get the CPU layout + CpuUtils.get_cpu_layout_from_all_nodes(self._nodes) + + for i in self._nodes.items(): + node = i[1] + total_cpus = 0 + total_cpus_per_slice = 0 + cpus_per_node = {} + numa_nodes = [] + cores = [] + cpu_layout = self.get_cpu_layout(node) + + # Assume the number of cpus per slice is always the same as the + # first slice + first_node = '0' + for cpu in cpu_layout: + if cpu['node'] != first_node: + break + total_cpus_per_slice += 1 + + # Get the total number of cpus, cores, and numa nodes from the + # cpu layout + for cpul in cpu_layout: + numa_node = cpul['node'] + core = cpul['core'] + cpu = cpul['cpu'] + total_cpus += 1 + + if numa_node not in cpus_per_node: + cpus_per_node[numa_node] = [] + cpuperslice = int(cpu) % total_cpus_per_slice + if cpuperslice == 0: + cpus_per_node[numa_node].append((int(cpu), int(cpu) + + total_cpus_per_slice - 1)) + if numa_node not in numa_nodes: + numa_nodes.append(numa_node) + if core not in cores: + cores.append(core) + node['cpu']['cpus_per_node'] = cpus_per_node + + # Ask the user some questions + self._modify_cpu_questions(node, total_cpus, numa_nodes) + + # Populate the interfaces with the numa node + ikeys = node['interfaces'].keys() + VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys)) + + # We don't want to write the cpuinfo + node['cpuinfo'] = "" + + # Write the configs + self._update_auto_config() + self.updateconfig() + + def _modify_other_devices(self, node, + other_devices, kernel_devices, dpdk_devices): + """ + Modify the devices configuration, asking for the user for the values. + + """ + + odevices_len = len(other_devices) + if odevices_len > 0: + print "\nThese device(s) are currently NOT being used", + print "by VPP or the OS.\n" + VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False) + question = "\nWould you like to give any of these devices" + question += " back to the OS [Y/n]? " + answer = self._ask_user_yn(question, 'Y') + if answer == 'y': + vppd = {} + for dit in other_devices.items(): + dvid = dit[0] + device = dit[1] + question = "Would you like to use device {} for". \ + format(dvid) + question += " the OS [y/N]? " + answer = self._ask_user_yn(question, 'n') + if answer == 'y': + driver = device['unused'][0] + VppPCIUtil.bind_vpp_device(node, driver, dvid) + vppd[dvid] = device + for dit in vppd.items(): + dvid = dit[0] + device = dit[1] + kernel_devices[dvid] = device + del other_devices[dvid] + + odevices_len = len(other_devices) + if odevices_len > 0: + print "\nThese device(s) are still NOT being used ", + print "by VPP or the OS.\n" + VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False) + question = "\nWould you like use any of these for VPP [y/N]? " + answer = self._ask_user_yn(question, 'N') + if answer == 'y': + vppd = {} + for dit in other_devices.items(): + dvid = dit[0] + device = dit[1] + question = "Would you like to use device {} ".format(dvid) + question += "for VPP [y/N]? " + answer = self._ask_user_yn(question, 'n') + if answer == 'y': + vppd[dvid] = device + for dit in vppd.items(): + dvid = dit[0] + device = dit[1] + dpdk_devices[dvid] = device + del other_devices[dvid] + + def modify_devices(self): + """ + Modify the devices configuration, asking for the user for the values. 
+ + """ + + for i in self._nodes.items(): + node = i[1] + devices = node['devices'] + other_devices = devices['other_devices'] + kernel_devices = devices['kernel_devices'] + dpdk_devices = devices['dpdk_devices'] + + if other_devices: + self._modify_other_devices(node, other_devices, + kernel_devices, dpdk_devices) + + # Get the devices again for this node + self._get_device(node) + devices = node['devices'] + kernel_devices = devices['kernel_devices'] + dpdk_devices = devices['dpdk_devices'] + + klen = len(kernel_devices) + if klen > 0: + print "\nThese devices have kernel interfaces, but", + print "appear to be safe to use with VPP.\n" + VppPCIUtil.show_vpp_devices(kernel_devices) + question = "\nWould you like to use any of these " + question += "device(s) for VPP [y/N]? " + answer = self._ask_user_yn(question, 'n') + if answer == 'y': + vppd = {} + for dit in kernel_devices.items(): + dvid = dit[0] + device = dit[1] + question = "Would you like to use device {} ". \ + format(dvid) + question += "for VPP [y/N]? " + answer = self._ask_user_yn(question, 'n') + if answer == 'y': + vppd[dvid] = device + for dit in vppd.items(): + dvid = dit[0] + device = dit[1] + dpdk_devices[dvid] = device + del kernel_devices[dvid] + + dlen = len(dpdk_devices) + if dlen > 0: + print "\nThese device(s) will be used by VPP.\n" + VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False) + question = "\nWould you like to remove any of " + question += "these device(s) [y/N]? " + answer = self._ask_user_yn(question, 'n') + if answer == 'y': + vppd = {} + for dit in dpdk_devices.items(): + dvid = dit[0] + device = dit[1] + question = "Would you like to remove {} [y/N]? ". \ + format(dvid) + answer = self._ask_user_yn(question, 'n') + if answer == 'y': + vppd[dvid] = device + for dit in vppd.items(): + dvid = dit[0] + device = dit[1] + driver = device['unused'][0] + VppPCIUtil.bind_vpp_device(node, driver, dvid) + kernel_devices[dvid] = device + del dpdk_devices[dvid] + + interfaces = {} + for dit in dpdk_devices.items(): + dvid = dit[0] + device = dit[1] + VppPCIUtil.vpp_create_interface(interfaces, dvid, device) + node['interfaces'] = interfaces + + print "\nThese device(s) will be used by VPP, please", + print "rerun this option if this is incorrect.\n" + VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False) + + self._update_auto_config() + self.updateconfig() + + def modify_huge_pages(self): + """ + Modify the huge page configuration, asking for the user for the values. + + """ + + for i in self._nodes.items(): + node = i[1] + + total = node['hugepages']['actual_total'] + free = node['hugepages']['free'] + size = node['hugepages']['size'] + memfree = node['hugepages']['memfree'].split(' ')[0] + hugesize = int(size.split(' ')[0]) + # The max number of huge pages should be no more than + # 70% of total free memory + maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES / 100) / hugesize + print "\nThere currently {} {} huge pages free.". \ + format(free, size) + question = "Do you want to reconfigure the number of " + question += "huge pages [y/N]? " + answer = self._ask_user_yn(question, 'n') + if answer == 'n': + node['hugepages']['total'] = total + continue + + print "\nThere currently a total of {} huge pages.". \ + format(total) + question = \ + "How many huge pages do you want [{} - {}][{}]? 
".\ + format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES) + answer = self._ask_user_range(question, 1024, maxpages, 1024) + node['hugepages']['total'] = str(answer) + + # Update auto-config.yaml + self._update_auto_config() + + # Rediscover just the hugepages + self.get_hugepages() + + def get_tcp_params(self): + """ + Get the tcp configuration + + """ + # maybe nothing to do here? + self.updateconfig() + + def acquire_tcp_params(self): + """ + Ask the user for TCP stack configuration parameters + + """ + + for i in self._nodes.items(): + node = i[1] + + question = "\nHow many active-open / tcp client sessions are expected " + question = question + "[0-10000000][0]? " + answer = self._ask_user_range(question, 0, 10000000, 0) + # Less than 10K is equivalent to 0 + if int(answer) < 10000: + answer = 0 + node['tcp']['active_open_sessions'] = answer + + question = "How many passive-open / tcp server sessions are expected " + question = question + "[0-10000000][0]? " + answer = self._ask_user_range(question, 0, 10000000, 0) + # Less than 10K is equivalent to 0 + if int(answer) < 10000: + answer = 0 + node['tcp']['passive_open_sessions'] = answer + + # Update auto-config.yaml + self._update_auto_config() + + # Rediscover tcp parameters + self.get_tcp_params() + + @staticmethod + def patch_qemu(node): + """ + Patch qemu with the correct patches. + + :param node: Node dictionary + :type node: dict + """ + + print '\nWe are patching the node "{}":\n'.format(node['host']) + QemuUtils.build_qemu(node, force_install=True, apply_patch=True) + + @staticmethod + def cpu_info(node): + """ + print the CPU information + + """ + + cpu = CpuUtils.get_cpu_info_per_node(node) + + item = 'Model name' + if item in cpu: + print "{:>20}: {}".format(item, cpu[item]) + item = 'CPU(s)' + if item in cpu: + print "{:>20}: {}".format(item, cpu[item]) + item = 'Thread(s) per core' + if item in cpu: + print "{:>20}: {}".format(item, cpu[item]) + item = 'Core(s) per socket' + if item in cpu: + print "{:>20}: {}".format(item, cpu[item]) + item = 'Socket(s)' + if item in cpu: + print "{:>20}: {}".format(item, cpu[item]) + item = 'NUMA node(s)' + numa_nodes = 0 + if item in cpu: + numa_nodes = int(cpu[item]) + for i in xrange(0, numa_nodes): + item = "NUMA node{} CPU(s)".format(i) + print "{:>20}: {}".format(item, cpu[item]) + item = 'CPU max MHz' + if item in cpu: + print "{:>20}: {}".format(item, cpu[item]) + item = 'CPU min MHz' + if item in cpu: + print "{:>20}: {}".format(item, cpu[item]) + + if node['cpu']['smt_enabled']: + smt = 'Enabled' + else: + smt = 'Disabled' + print "{:>20}: {}".format('SMT', smt) + + # VPP Threads + print "\nVPP Threads: (Name: Cpu Number)" + vpp_processes = cpu['vpp_processes'] + for i in vpp_processes.items(): + print " {:10}: {:4}".format(i[0], i[1]) + + @staticmethod + def device_info(node): + """ + Show the device information. 
+ + """ + + if 'cpu' in node and 'total_mbufs' in node['cpu']: + total_mbufs = node['cpu']['total_mbufs'] + if total_mbufs is not 0: + print "Total Number of Buffers: {}".format(total_mbufs) + + vpp = VppPCIUtil(node) + vpp.get_all_devices() + linkup_devs = vpp.get_link_up_devices() + if len(linkup_devs): + print ("\nDevices with link up (can not be used with VPP):") + vpp.show_vpp_devices(linkup_devs, show_header=False) + # for dev in linkup_devs: + # print (" " + dev) + kernel_devs = vpp.get_kernel_devices() + if len(kernel_devs): + print ("\nDevices bound to kernel drivers:") + vpp.show_vpp_devices(kernel_devs, show_header=False) + else: + print ("\nNo devices bound to kernel drivers") + + dpdk_devs = vpp.get_dpdk_devices() + if len(dpdk_devs): + print ("\nDevices bound to DPDK drivers:") + vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, + show_header=False) + else: + print ("\nNo devices bound to DPDK drivers") + + vpputl = VPPUtil() + interfaces = vpputl.get_hardware(node) + if interfaces == {}: + return + + print ("\nDevices in use by VPP:") + + if len(interfaces.items()) < 2: + print ("None") + return + + print "{:30} {:6} {:4} {:7} {:4} {:7}". \ + format('Name', 'Socket', 'RXQs', + 'RXDescs', 'TXQs', 'TXDescs') + for intf in sorted(interfaces.items()): + name = intf[0] + value = intf[1] + if name == 'local0': + continue + socket = rx_qs = rx_ds = tx_qs = tx_ds = '' + if 'cpu socket' in value: + socket = int(value['cpu socket']) + if 'rx queues' in value: + rx_qs = int(value['rx queues']) + if 'rx descs' in value: + rx_ds = int(value['rx descs']) + if 'tx queues' in value: + tx_qs = int(value['tx queues']) + if 'tx descs' in value: + tx_ds = int(value['tx descs']) + + print ("{:30} {:>6} {:>4} {:>7} {:>4} {:>7}". + format(name, socket, rx_qs, rx_ds, tx_qs, tx_ds)) + + @staticmethod + def hugepage_info(node): + """ + Show the huge page information. + + """ + + hpg = VppHugePageUtil(node) + hpg.show_huge_pages() + + @staticmethod + def min_system_resources(node): + """ + Check the system for basic minimum resources, return true if + there is enough. + + :returns: boolean + :rtype: dict + """ + + min_sys_res = True + + # CPUs + if 'layout' in node['cpu']: + total_cpus = len(node['cpu']['layout']) + if total_cpus < 2: + print "\nThere is only {} CPU(s) available on this system.".format(total_cpus) + print "This is not enough to run VPP." + min_sys_res = False + + # System Memory + if 'free' in node['hugepages'] and \ + 'memfree' in node['hugepages'] and \ + 'size' in node['hugepages']: + free = node['hugepages']['free'] + memfree = float(node['hugepages']['memfree'].split(' ')[0]) + hugesize = float(node['hugepages']['size'].split(' ')[0]) + + memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize + percentmemhugepages = (memhugepages / memfree) * 100 + if free is '0' and \ + percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES: + print "\nThe System has only {} of free memory.".format(int(memfree)) + print "You will not be able to allocate enough Huge Pages for VPP." 
+ min_sys_res = False + + return min_sys_res + + def sys_info(self): + """ + Print the system information + + """ + + for i in self._nodes.items(): + print "\n==============================" + name = i[0] + node = i[1] + + print "NODE: {}\n".format(name) + + # CPU + print "CPU:" + self.cpu_info(node) + + # Grub + print "\nGrub Command Line:" + if 'grub' in node: + print \ + " Current: {}".format( + node['grub']['current_cmdline']) + print \ + " Configured: {}".format( + node['grub']['default_cmdline']) + + # Huge Pages + print "\nHuge Pages:" + self.hugepage_info(node) + + # Devices + print "\nDevices:" + self.device_info(node) + + # Status + print "\nVPP Service Status:" + state, errors = VPPUtil.status(node) + print " {}".format(state) + for e in errors: + print " {}".format(e) + + # Minimum system resources + self.min_system_resources(node) + + print "\n==============================" diff --git a/extras/vpp_config/vpplib/CpuUtils.py b/extras/vpp_config/vpplib/CpuUtils.py new file mode 100644 index 00000000000..f5c23bc141a --- /dev/null +++ b/extras/vpp_config/vpplib/CpuUtils.py @@ -0,0 +1,287 @@ +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CPU utilities library.""" + +import re + +from vpplib.VPPUtil import VPPUtil + +__all__ = ["CpuUtils"] + + +class CpuUtils(object): + """CPU utilities""" + + # Number of threads per core. + NR_OF_THREADS = 2 + + @staticmethod + def __str2int(string): + """Conversion from string to integer, 0 in case of empty string. + + :param string: Input string. + :type string: str + :returns: Integer converted from string, 0 in case of ValueError. + :rtype: int + """ + try: + return int(string) + except ValueError: + return 0 + + @staticmethod + def is_smt_enabled(cpu_info): + """Uses CPU mapping to find out if SMT is enabled or not. If SMT is + enabled, the L1d,L1i,L2,L3 setting is the same for two processors. These + two processors are two threads of one core. + + :param cpu_info: CPU info, the output of "lscpu -p". + :type cpu_info: list + :returns: True if SMT is enabled, False if SMT is disabled. + :rtype: bool + """ + + cpu_mems = [item[-4:] for item in cpu_info] + cpu_mems_len = len(cpu_mems) / CpuUtils.NR_OF_THREADS + count = 0 + for cpu_mem in cpu_mems[:cpu_mems_len]: + if cpu_mem in cpu_mems[cpu_mems_len:]: + count += 1 + return bool(count == cpu_mems_len) + + @staticmethod + def get_cpu_layout_from_all_nodes(nodes): + """Retrieve cpu layout from all nodes, assuming all nodes + are Linux nodes. + + :param nodes: DICT__nodes from Topology.DICT__nodes. + :type nodes: dict + :raises RuntimeError: If the ssh command "lscpu -p" fails. 
+ """ + for node in nodes.values(): + cmd = "lscpu -p" + ret, stdout, stderr = VPPUtil.exec_command(cmd) + # parsing of "lscpu -p" output: + # # CPU,Core,Socket,Node,,L1d,L1i,L2,L3 + # 0,0,0,0,,0,0,0,0 + # 1,1,0,0,,1,1,1,0 + if ret != 0: + raise RuntimeError( + "Failed to execute ssh command, ret: {} err: {}".format( + ret, stderr)) + node['cpuinfo'] = list() + for line in stdout.split("\n"): + if line != '' and line[0] != "#": + node['cpuinfo'].append([CpuUtils.__str2int(x) for x in + line.split(",")]) + + @staticmethod + def cpu_node_count(node): + """Return count of numa nodes. + + :param node: Targeted node. + :type node: dict + :returns: Count of numa nodes. + :rtype: int + :raises RuntimeError: If node cpuinfo is not available. + """ + cpu_info = node.get("cpuinfo") + if cpu_info is not None: + return node["cpuinfo"][-1][3] + 1 + else: + raise RuntimeError("Node cpuinfo not available.") + + @staticmethod + def cpu_list_per_node(node, cpu_node, smt_used=False): + """Return node related list of CPU numbers. + + :param node: Node dictionary with cpuinfo. + :param cpu_node: Numa node number. + :param smt_used: True - we want to use SMT, otherwise false. + :type node: dict + :type cpu_node: int + :type smt_used: bool + :returns: List of cpu numbers related to numa from argument. + :rtype: list of int + :raises RuntimeError: If node cpuinfo is not available or if SMT is not + enabled. + """ + + cpu_node = int(cpu_node) + cpu_info = node.get("cpuinfo") + if cpu_info is None: + raise RuntimeError("Node cpuinfo not available.") + + smt_enabled = CpuUtils.is_smt_enabled(cpu_info) + if not smt_enabled and smt_used: + raise RuntimeError("SMT is not enabled.") + + cpu_list = [] + for cpu in cpu_info: + if cpu[3] == cpu_node: + cpu_list.append(cpu[0]) + + if not smt_enabled or smt_enabled and smt_used: + pass + + if smt_enabled and not smt_used: + cpu_list_len = len(cpu_list) + cpu_list = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS] + + return cpu_list + + @staticmethod + def cpu_slice_of_list_per_node(node, cpu_node, skip_cnt=0, cpu_cnt=0, + smt_used=False): + """Return string of node related list of CPU numbers. + + :param node: Node dictionary with cpuinfo. + :param cpu_node: Numa node number. + :param skip_cnt: Skip first "skip_cnt" CPUs. + :param cpu_cnt: Count of cpus to return, if 0 then return all. + :param smt_used: True - we want to use SMT, otherwise false. + :type node: dict + :type cpu_node: int + :type skip_cnt: int + :type cpu_cnt: int + :type smt_used: bool + :returns: Cpu numbers related to numa from argument. + :rtype: list + :raises RuntimeError: If we require more cpus than available. + """ + + cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used) + + cpu_list_len = len(cpu_list) + if cpu_cnt + skip_cnt > cpu_list_len: + raise RuntimeError("cpu_cnt + skip_cnt > length(cpu list).") + + if cpu_cnt == 0: + cpu_cnt = cpu_list_len - skip_cnt + + if smt_used: + cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS] + cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:] + cpu_list = [cpu for cpu in cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]] + cpu_list_ex = [cpu for cpu in + cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]] + cpu_list.extend(cpu_list_ex) + else: + cpu_list = [cpu for cpu in cpu_list[skip_cnt:skip_cnt + cpu_cnt]] + + return cpu_list + + @staticmethod + def cpu_list_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",", + smt_used=False): + """Return string of node related list of CPU numbers. + + :param node: Node dictionary with cpuinfo. 
+ :param cpu_node: Numa node number. + :param skip_cnt: Skip first "skip_cnt" CPUs. + :param cpu_cnt: Count of cpus to return, if 0 then return all. + :param sep: Separator, default: 1,2,3,4,.... + :param smt_used: True - we want to use SMT, otherwise false. + :type node: dict + :type cpu_node: int + :type skip_cnt: int + :type cpu_cnt: int + :type sep: str + :type smt_used: bool + :returns: Cpu numbers related to numa from argument. + :rtype: str + """ + + cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node, + skip_cnt=skip_cnt, + cpu_cnt=cpu_cnt, + smt_used=smt_used) + return sep.join(str(cpu) for cpu in cpu_list) + + @staticmethod + def cpu_range_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-", + smt_used=False): + """Return string of node related range of CPU numbers, e.g. 0-4. + + :param node: Node dictionary with cpuinfo. + :param cpu_node: Numa node number. + :param skip_cnt: Skip first "skip_cnt" CPUs. + :param cpu_cnt: Count of cpus to return, if 0 then return all. + :param sep: Separator, default: "-". + :param smt_used: True - we want to use SMT, otherwise false. + :type node: dict + :type cpu_node: int + :type skip_cnt: int + :type cpu_cnt: int + :type sep: str + :type smt_used: bool + :returns: String of node related range of CPU numbers. + :rtype: str + """ + + cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node, + skip_cnt=skip_cnt, + cpu_cnt=cpu_cnt, + smt_used=smt_used) + if smt_used: + cpu_list_len = len(cpu_list) + cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS] + cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:] + cpu_range = "{}{}{},{}{}{}".format(cpu_list_0[0], sep, + cpu_list_0[-1], + cpu_list_1[0], sep, + cpu_list_1[-1]) + else: + cpu_range = "{}{}{}".format(cpu_list[0], sep, cpu_list[-1]) + + return cpu_range + + @staticmethod + def get_cpu_info_per_node(node): + """Return node related list of CPU numbers. + + :param node: Node dictionary with cpuinfo. + :type node: dict + :returns: Important CPU information. + :rtype: dict + """ + + cmd = "lscpu" + ret, stdout, stderr = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError("lscpu command failed on node {} {}." + .format(node['host'], stderr)) + + cpuinfo = {} + lines = stdout.split('\n') + for line in lines: + if line != '': + linesplit = re.split(r':\s+', line) + cpuinfo[linesplit[0]] = linesplit[1] + + cmd = "cat /proc/*/task/*/stat | awk '{print $1" "$2" "$39}'" + ret, stdout, stderr = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError("cat command failed on node {} {}." + .format(node['host'], stderr)) + + vpp_processes = {} + vpp_lines = re.findall(r'\w+\(vpp_\w+\)\w+', stdout) + for line in vpp_lines: + linesplit = re.split(r'\w+\(', line)[1].split(')') + vpp_processes[linesplit[0]] = linesplit[1] + + cpuinfo['vpp_processes'] = vpp_processes + + return cpuinfo diff --git a/extras/vpp_config/vpplib/QemuUtils.py b/extras/vpp_config/vpplib/QemuUtils.py new file mode 100644 index 00000000000..37a13e2afb3 --- /dev/null +++ b/extras/vpp_config/vpplib/QemuUtils.py @@ -0,0 +1,680 @@ +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""QEMU utilities library.""" + +from time import time, sleep +import json +import logging + +from vpplib.VPPUtil import VPPUtil +from vpplib.constants import Constants + + +class NodeType(object): + """Defines node types used in topology dictionaries.""" + # Device Under Test (this node has VPP running on it) + DUT = 'DUT' + # Traffic Generator (this node has traffic generator on it) + TG = 'TG' + # Virtual Machine (this node running on DUT node) + VM = 'VM' + + +class QemuUtils(object): + """QEMU utilities.""" + + def __init__(self, qemu_id=1): + self._qemu_id = qemu_id + # Path to QEMU binary + self._qemu_bin = '/usr/bin/qemu-system-x86_64' + # QEMU Machine Protocol socket + self._qmp_sock = '/tmp/qmp{0}.sock'.format(self._qemu_id) + # QEMU Guest Agent socket + self._qga_sock = '/tmp/qga{0}.sock'.format(self._qemu_id) + # QEMU PID file + self._pid_file = '/tmp/qemu{0}.pid'.format(self._qemu_id) + self._qemu_opt = {} + # Default 1 CPU. + self._qemu_opt['smp'] = '-smp 1,sockets=1,cores=1,threads=1' + # Daemonize the QEMU process after initialization. Default one + # management interface. + self._qemu_opt['options'] = '-cpu host -daemonize -enable-kvm ' \ + '-machine pc,accel=kvm,usb=off,mem-merge=off ' \ + '-net nic,macaddr=52:54:00:00:{0:02x}:ff -balloon none'\ + .format(self._qemu_id) + self._qemu_opt['ssh_fwd_port'] = 10021 + qemu_id + # Default serial console port + self._qemu_opt['serial_port'] = 4555 + qemu_id + # Default 512MB virtual RAM + self._qemu_opt['mem_size'] = 512 + # Default huge page mount point, required for Vhost-user interfaces. + self._qemu_opt['huge_mnt'] = '/mnt/huge' + # Default do not allocate huge pages. + self._qemu_opt['huge_allocate'] = False + # Default image for CSIT virl setup + self._qemu_opt['disk_image'] = '/var/lib/vm/vhost-nested.img' + # VM node info dict + self._vm_info = { + 'type': NodeType.VM, + 'port': self._qemu_opt['ssh_fwd_port'], + 'username': 'cisco', + 'password': 'cisco', + 'interfaces': {}, + } + # Virtio queue count + self._qemu_opt['queues'] = 1 + self._vhost_id = 0 + self._ssh = None + self._node = None + self._socks = [self._qmp_sock, self._qga_sock] + + def qemu_set_bin(self, path): + """Set binary path for QEMU. + + :param path: Absolute path in filesystem. + :type path: str + """ + self._qemu_bin = path + + def qemu_set_smp(self, cpus, cores, threads, sockets): + """Set SMP option for QEMU. + + :param cpus: Number of CPUs. + :param cores: Number of CPU cores on one socket. + :param threads: Number of threads on one CPU core. + :param sockets: Number of discrete sockets in the system. + :type cpus: int + :type cores: int + :type threads: int + :type sockets: int + """ + self._qemu_opt['smp'] = '-smp {},cores={},threads={},sockets={}'.format( + cpus, cores, threads, sockets) + + def qemu_set_ssh_fwd_port(self, fwd_port): + """Set host port for guest SSH forwarding. + + :param fwd_port: Port number on host for guest SSH forwarding. + :type fwd_port: int + """ + self._qemu_opt['ssh_fwd_port'] = fwd_port + self._vm_info['port'] = fwd_port + + def qemu_set_serial_port(self, port): + """Set serial console port. 
+ + :param port: Serial console port. + :type port: int + """ + self._qemu_opt['serial_port'] = port + + def qemu_set_mem_size(self, mem_size): + """Set virtual RAM size. + + :param mem_size: RAM size in Mega Bytes. + :type mem_size: int + """ + self._qemu_opt['mem_size'] = int(mem_size) + + def qemu_set_huge_mnt(self, huge_mnt): + """Set hugefile mount point. + + :param huge_mnt: System hugefile mount point. + :type huge_mnt: int + """ + self._qemu_opt['huge_mnt'] = huge_mnt + + def qemu_set_huge_allocate(self): + """Set flag to allocate more huge pages if needed.""" + self._qemu_opt['huge_allocate'] = True + + def qemu_set_disk_image(self, disk_image): + """Set disk image. + + :param disk_image: Path of the disk image. + :type disk_image: str + """ + self._qemu_opt['disk_image'] = disk_image + + def qemu_set_affinity(self, *host_cpus): + """Set qemu affinity by getting thread PIDs via QMP and taskset to list + of CPU cores. + + :param host_cpus: List of CPU cores. + :type host_cpus: list + """ + qemu_cpus = self._qemu_qmp_exec('query-cpus')['return'] + + if len(qemu_cpus) != len(host_cpus): + logging.debug('Host CPU count {0}, Qemu Thread count {1}'.format( + len(host_cpus), len(qemu_cpus))) + raise ValueError('Host CPU count must match Qemu Thread count') + + for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus): + cmd = 'taskset -pc {0} {1}'.format(host_cpu, qemu_cpu['thread_id']) + (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd) + if int(ret_code) != 0: + logging.debug('Set affinity failed {0}'.format(stderr)) + raise RuntimeError('Set affinity failed on {0}'.format( + self._node['host'])) + + def qemu_set_scheduler_policy(self): + """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU + processes. + + :raises RuntimeError: Set scheduler policy failed. + """ + qemu_cpus = self._qemu_qmp_exec('query-cpus')['return'] + + for qemu_cpu in qemu_cpus: + cmd = 'chrt -r -p 1 {0}'.format(qemu_cpu['thread_id']) + (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd) + if int(ret_code) != 0: + logging.debug('Set SCHED_RR failed {0}'.format(stderr)) + raise RuntimeError('Set SCHED_RR failed on {0}'.format( + self._node['host'])) + + def qemu_set_node(self, node): + """Set node to run QEMU on. + + :param node: Node to run QEMU on. + :type node: dict + """ + self._node = node + self._vm_info['host'] = node['host'] + + def qemu_add_vhost_user_if(self, socket, server=True, mac=None): + """Add Vhost-user interface. + + :param socket: Path of the unix socket. + :param server: If True the socket shall be a listening socket. + :param mac: Vhost-user interface MAC address (optional, otherwise is + used auto-generated MAC 52:54:00:00:xx:yy). + :type socket: str + :type server: bool + :type mac: str + """ + self._vhost_id += 1 + # Create unix socket character device. + chardev = ' -chardev socket,id=char{0},path={1}'.format(self._vhost_id, + socket) + if server is True: + chardev += ',server' + self._qemu_opt['options'] += chardev + # Create Vhost-user network backend. + netdev = (' -netdev vhost-user,id=vhost{0},chardev=char{0},queues={1}' + .format(self._vhost_id, self._qemu_opt['queues'])) + self._qemu_opt['options'] += netdev + # If MAC is not specified use auto-generated MAC address based on + # template 52:54:00:00:<qemu_id>:<vhost_id>, e.g. 
vhost1 MAC of QEMU + # with ID 1 is 52:54:00:00:01:01 + if mac is None: + mac = '52:54:00:00:{0:02x}:{1:02x}'.\ + format(self._qemu_id, self._vhost_id) + extend_options = 'mq=on,csum=off,gso=off,guest_tso4=off,'\ + 'guest_tso6=off,guest_ecn=off,mrg_rxbuf=off' + # Create Virtio network device. + device = ' -device virtio-net-pci,netdev=vhost{0},mac={1},{2}'.format( + self._vhost_id, mac, extend_options) + self._qemu_opt['options'] += device + # Add interface MAC and socket to the node dict + if_data = {'mac_address': mac, 'socket': socket} + if_name = 'vhost{}'.format(self._vhost_id) + self._vm_info['interfaces'][if_name] = if_data + # Add socket to the socket list + self._socks.append(socket) + + def _qemu_qmp_exec(self, cmd): + """Execute QMP command. + + QMP is JSON based protocol which allows to control QEMU instance. + + :param cmd: QMP command to execute. + :type cmd: str + :return: Command output in python representation of JSON format. The + { "return": {} } response is QMP's success response. An error + response will contain the "error" keyword instead of "return". + """ + # To enter command mode, the qmp_capabilities command must be issued. + qmp_cmd = 'echo "{ \\"execute\\": \\"qmp_capabilities\\" }' \ + '{ \\"execute\\": \\"' + cmd + \ + '\\" }" | sudo -S socat - UNIX-CONNECT:' + self._qmp_sock + + (ret_code, stdout, stderr) = self._ssh.exec_command(qmp_cmd) + if int(ret_code) != 0: + logging.debug('QMP execute failed {0}'.format(stderr)) + raise RuntimeError('QMP execute "{0}"' + ' failed on {1}'.format(cmd, self._node['host'])) + logging.debug(stdout) + # Skip capabilities negotiation messages. + out_list = stdout.splitlines() + if len(out_list) < 3: + raise RuntimeError('Invalid QMP output on {0}'.format( + self._node['host'])) + return json.loads(out_list[2]) + + def _qemu_qga_flush(self): + """Flush the QGA parser state + """ + qga_cmd = '(printf "\xFF"; sleep 1) | sudo -S socat - UNIX-CONNECT:' + \ + self._qga_sock + # TODO: probably need something else + (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd) + if int(ret_code) != 0: + logging.debug('QGA execute failed {0}'.format(stderr)) + raise RuntimeError('QGA execute "{0}" ' + 'failed on {1}'.format(qga_cmd, + self._node['host'])) + logging.debug(stdout) + if not stdout: + return {} + return json.loads(stdout.split('\n', 1)[0]) + + def _qemu_qga_exec(self, cmd): + """Execute QGA command. + + QGA provide access to a system-level agent via standard QMP commands. + + :param cmd: QGA command to execute. + :type cmd: str + """ + qga_cmd = '(echo "{ \\"execute\\": \\"' + \ + cmd + \ + '\\" }"; sleep 1) | sudo -S socat - UNIX-CONNECT:' + \ + self._qga_sock + (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd) + if int(ret_code) != 0: + logging.debug('QGA execute failed {0}'.format(stderr)) + raise RuntimeError('QGA execute "{0}"' + ' failed on {1}'.format(cmd, self._node['host'])) + logging.debug(stdout) + if not stdout: + return {} + return json.loads(stdout.split('\n', 1)[0]) + + def _wait_until_vm_boot(self, timeout=60): + """Wait until QEMU VM is booted. + + Ping QEMU guest agent each 5s until VM booted or timeout. + + :param timeout: Waiting timeout in seconds (optional, default 60s). 
+ :type timeout: int + """ + start = time() + while True: + if time() - start > timeout: + raise RuntimeError('timeout, VM {0} not booted on {1}'.format( + self._qemu_opt['disk_image'], self._node['host'])) + out = None + try: + self._qemu_qga_flush() + out = self._qemu_qga_exec('guest-ping') + except ValueError: + logging.debug('QGA guest-ping unexpected output {}'.format(out)) + # Empty output - VM not booted yet + if not out: + sleep(5) + # Non-error return - VM booted + elif out.get('return') is not None: + break + # Skip error and wait + elif out.get('error') is not None: + sleep(5) + else: + # If there is an unexpected output from QGA guest-info, try + # again until timeout. + logging.debug('QGA guest-ping unexpected output {}'.format(out)) + + logging.debug('VM {0} booted on {1}'.format(self._qemu_opt['disk_image'], + self._node['host'])) + + def _update_vm_interfaces(self): + """Update interface names in VM node dict.""" + # Send guest-network-get-interfaces command via QGA, output example: + # {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"}, + # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]} + out = self._qemu_qga_exec('guest-network-get-interfaces') + interfaces = out.get('return') + mac_name = {} + if not interfaces: + raise RuntimeError('Get VM {0} interface list failed on {1}'.format( + self._qemu_opt['disk_image'], self._node['host'])) + # Create MAC-name dict + for interface in interfaces: + if 'hardware-address' not in interface: + continue + mac_name[interface['hardware-address']] = interface['name'] + # Match interface by MAC and save interface name + for interface in self._vm_info['interfaces'].values(): + mac = interface.get('mac_address') + if_name = mac_name.get(mac) + if if_name is None: + logging.debug('Interface name for MAC {} not found'.format(mac)) + else: + interface['name'] = if_name + + def _huge_page_check(self, allocate=False): + """Huge page check.""" + huge_mnt = self._qemu_opt.get('huge_mnt') + mem_size = self._qemu_opt.get('mem_size') + + # Get huge pages information + huge_size = self._get_huge_page_size() + huge_free = self._get_huge_page_free(huge_size) + huge_total = self._get_huge_page_total(huge_size) + + # Check if memory reqested by qemu is available on host + if (mem_size * 1024) > (huge_free * huge_size): + # If we want to allocate hugepage dynamically + if allocate: + mem_needed = abs((huge_free * huge_size) - (mem_size * 1024)) + huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total + max_map_count = huge_to_allocate*4 + # Increase maximum number of memory map areas a process may have + cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format( + max_map_count) + (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd) + # Increase hugepage count + cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format( + huge_to_allocate) + (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd) + if int(ret_code) != 0: + logging.debug('Mount huge pages failed {0}'.format(stderr)) + raise RuntimeError('Mount huge pages failed on {0}'.format( + self._node['host'])) + # If we do not want to allocate dynamicaly end with error + else: + raise RuntimeError( + 'Not enough free huge pages: {0}, ' + '{1} MB'.format(huge_free, huge_free * huge_size) + ) + # Check if huge pages mount point exist + has_huge_mnt = False + (_, output, _) = self._ssh.exec_command('cat /proc/mounts') + for line in output.splitlines(): + # Try to find something like: + # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0 + 
mount = line.split() + if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt: + has_huge_mnt = True + break + # If huge page mount point not exist create one + if not has_huge_mnt: + cmd = 'mkdir -p {0}'.format(huge_mnt) + (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd) + if int(ret_code) != 0: + logging.debug('Create mount dir failed: {0}'.format(stderr)) + raise RuntimeError('Create mount dir failed on {0}'.format( + self._node['host'])) + cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format( + huge_mnt) + (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd) + if int(ret_code) != 0: + logging.debug('Mount huge pages failed {0}'.format(stderr)) + raise RuntimeError('Mount huge pages failed on {0}'.format( + self._node['host'])) + + def _get_huge_page_size(self): + """Get default size of huge pages in system. + + :returns: Default size of free huge pages in system. + :rtype: int + :raises: RuntimeError if reading failed for three times. + """ + # TODO: remove to dedicated library + cmd_huge_size = "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'" + for _ in range(3): + (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_size) + if ret == 0: + try: + huge_size = int(out) + except ValueError: + logging.debug('Reading huge page size information failed') + else: + break + else: + raise RuntimeError('Getting huge page size information failed.') + return huge_size + + def _get_huge_page_free(self, huge_size): + """Get total number of huge pages in system. + + :param huge_size: Size of hugepages. + :type huge_size: int + :returns: Number of free huge pages in system. + :rtype: int + :raises: RuntimeError if reading failed for three times. + """ + # TODO: add numa aware option + # TODO: remove to dedicated library + cmd_huge_free = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\ + 'free_hugepages'.format(huge_size) + for _ in range(3): + (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_free) + if ret == 0: + try: + huge_free = int(out) + except ValueError: + logging.debug('Reading free huge pages information failed') + else: + break + else: + raise RuntimeError('Getting free huge pages information failed.') + return huge_free + + def _get_huge_page_total(self, huge_size): + """Get total number of huge pages in system. + + :param huge_size: Size of hugepages. + :type huge_size: int + :returns: Total number of huge pages in system. + :rtype: int + :raises: RuntimeError if reading failed for three times. + """ + # TODO: add numa aware option + # TODO: remove to dedicated library + cmd_huge_total = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\ + 'nr_hugepages'.format(huge_size) + for _ in range(3): + (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_total) + if ret == 0: + try: + huge_total = int(out) + except ValueError: + logging.debug('Reading total huge pages information failed') + else: + break + else: + raise RuntimeError('Getting total huge pages information failed.') + return huge_total + + def qemu_start(self): + """Start QEMU and wait until VM boot. + + :return: VM node info. + :rtype: dict + .. note:: First set at least node to run QEMU on. + .. warning:: Starts only one VM on the node. 
+ """ + # SSH forwarding + ssh_fwd = '-net user,hostfwd=tcp::{0}-:22'.format( + self._qemu_opt.get('ssh_fwd_port')) + # Memory and huge pages + mem = '-object memory-backend-file,id=mem,size={0}M,mem-path={1},' \ + 'share=on -m {0} -numa node,memdev=mem'.format( + self._qemu_opt.get('mem_size'), self._qemu_opt.get('huge_mnt')) + + # By default check only if hugepages are available. + # If 'huge_allocate' is set to true try to allocate as well. + self._huge_page_check(allocate=self._qemu_opt.get('huge_allocate')) + + # Disk option + drive = '-drive file={0},format=raw,cache=none,if=virtio'.format( + self._qemu_opt.get('disk_image')) + # Setup QMP via unix socket + qmp = '-qmp unix:{0},server,nowait'.format(self._qmp_sock) + # Setup serial console + serial = '-chardev socket,host=127.0.0.1,port={0},id=gnc0,server,' \ + 'nowait -device isa-serial,chardev=gnc0'.format( + self._qemu_opt.get('serial_port')) + # Setup QGA via chardev (unix socket) and isa-serial channel + qga = '-chardev socket,path={0},server,nowait,id=qga0 ' \ + '-device isa-serial,chardev=qga0'.format(self._qga_sock) + # Graphic setup + graphic = '-monitor none -display none -vga none' + # PID file + pid = '-pidfile {}'.format(self._pid_file) + + # Run QEMU + cmd = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'.format( + self._qemu_bin, self._qemu_opt.get('smp'), mem, ssh_fwd, + self._qemu_opt.get('options'), + drive, qmp, serial, qga, graphic, pid) + (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd, timeout=300) + if int(ret_code) != 0: + logging.debug('QEMU start failed {0}'.format(stderr)) + raise RuntimeError('QEMU start failed on {0}'.format( + self._node['host'])) + logging.debug('QEMU running') + # Wait until VM boot + try: + self._wait_until_vm_boot() + except RuntimeError: + self.qemu_kill_all() + self.qemu_clear_socks() + raise + # Update interface names in VM node dict + self._update_vm_interfaces() + # Return VM node dict + return self._vm_info + + def qemu_quit(self): + """Quit the QEMU emulator.""" + out = self._qemu_qmp_exec('quit') + err = out.get('error') + if err is not None: + raise RuntimeError('QEMU quit failed on {0}, error: {1}'.format( + self._node['host'], json.dumps(err))) + + def qemu_system_powerdown(self): + """Power down the system (if supported).""" + out = self._qemu_qmp_exec('system_powerdown') + err = out.get('error') + if err is not None: + raise RuntimeError( + 'QEMU system powerdown failed on {0}, ' + 'error: {1}'.format(self._node['host'], json.dumps(err)) + ) + + def qemu_system_reset(self): + """Reset the system.""" + out = self._qemu_qmp_exec('system_reset') + err = out.get('error') + if err is not None: + raise RuntimeError( + 'QEMU system reset failed on {0}, ' + 'error: {1}'.format(self._node['host'], json.dumps(err))) + + def qemu_kill(self): + """Kill qemu process.""" + # Note: in QEMU start phase there are 3 QEMU processes because we + # daemonize QEMU + self._ssh.exec_command_sudo('chmod +r {}'.format(self._pid_file)) + self._ssh.exec_command_sudo('kill -SIGKILL $(cat {})' + .format(self._pid_file)) + # Delete PID file + cmd = 'rm -f {}'.format(self._pid_file) + self._ssh.exec_command_sudo(cmd) + + def qemu_kill_all(self, node=None): + """Kill all qemu processes on DUT node if specified. + + :param node: Node to kill all QEMU processes on. 
+ :type node: dict + """ + if node: + self.qemu_set_node(node) + self._ssh.exec_command_sudo('pkill -SIGKILL qemu') + + def qemu_clear_socks(self): + """Remove all sockets created by QEMU.""" + # If serial console port still open kill process + cmd = 'fuser -k {}/tcp'.format(self._qemu_opt.get('serial_port')) + self._ssh.exec_command_sudo(cmd) + # Delete all created sockets + for sock in self._socks: + cmd = 'rm -f {}'.format(sock) + self._ssh.exec_command_sudo(cmd) + + def qemu_system_status(self): + """Return current VM status. + + VM should be in following status: + + - debug: QEMU running on a debugger + - finish-migrate: paused to finish the migration process + - inmigrate: waiting for an incoming migration + - internal-error: internal error has occurred + - io-error: the last IOP has failed + - paused: paused + - postmigrate: paused following a successful migrate + - prelaunch: QEMU was started with -S and guest has not started + - restore-vm: paused to restore VM state + - running: actively running + - save-vm: paused to save the VM state + - shutdown: shut down (and -no-shutdown is in use) + - suspended: suspended (ACPI S3) + - watchdog: watchdog action has been triggered + - guest-panicked: panicked as a result of guest OS panic + + :return: VM status. + :rtype: str + """ + out = self._qemu_qmp_exec('query-status') + ret = out.get('return') + if ret is not None: + return ret.get('status') + else: + err = out.get('error') + raise RuntimeError( + 'QEMU query-status failed on {0}, ' + 'error: {1}'.format(self._node['host'], json.dumps(err))) + + @staticmethod + def build_qemu(node, force_install=False, apply_patch=False): + """Build QEMU from sources. + + :param node: Node to build QEMU on. + :param force_install: If True, then remove previous build. + :param apply_patch: If True, then apply patches from qemu_patches dir. + :type node: dict + :type force_install: bool + :type apply_patch: bool + :raises: RuntimeError if building QEMU failed. + """ + + directory = ' --directory={0}'.format(Constants.QEMU_INSTALL_DIR) + version = ' --version={0}'.format(Constants.QEMU_INSTALL_VERSION) + force = ' --force' if force_install else '' + patch = ' --patch' if apply_patch else '' + + (ret_code, stdout, stderr) = VPPUtil. \ + exec_command( + "sudo -E sh -c '{0}/{1}/qemu_build.sh{2}{3}{4}{5}'". + format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH, + version, directory, force, patch), 1000) + + if int(ret_code) != 0: + logging.debug('QEMU build failed {0}'.format(stdout + stderr)) + raise RuntimeError('QEMU build failed on {0}'.format(node['host'])) diff --git a/extras/vpp_config/vpplib/VPPUtil.py b/extras/vpp_config/vpplib/VPPUtil.py new file mode 100644 index 00000000000..350b7759a03 --- /dev/null +++ b/extras/vpp_config/vpplib/VPPUtil.py @@ -0,0 +1,662 @@ +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
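The VPPUtil module that begins here is built around a small exec_command() wrapper that most of the other helpers in this patch reuse. Below is a minimal, illustrative sketch of the (return_code, stdout, stderr) contract it provides; the 'uname -r' command is an arbitrary placeholder, not something the patch itself runs:

from vpplib.VPPUtil import VPPUtil

# exec_command() runs the command locally through a shell, logs each line of
# stdout/stderr as it is read, and returns the collected output to the caller.
(ret, out, err) = VPPUtil.exec_command('uname -r')   # placeholder command
if ret != 0:
    print('command failed: {}'.format(err))
else:
    print(out.strip())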
+ +"""VPP util library""" +import logging +import re +import subprocess +import platform + +from collections import Counter + +# VPP_VERSION = '1707' +VPP_VERSION = '1710' + + +class VPPUtil(object): + """General class for any VPP related methods/functions.""" + + @staticmethod + def exec_command(cmd, timeout=None): + """Execute a command on the local node. + + :param cmd: Command to run locally. + :param timeout: Timeout value + :type cmd: str + :type timeout: int + :return return_code, stdout, stderr + :rtype: tuple(int, str, str) + """ + + logging.info(" Local Command: {}".format(cmd)) + out = '' + err = '' + prc = subprocess.Popen(cmd, shell=True, bufsize=1, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + with prc.stdout: + for line in iter(prc.stdout.readline, b''): + logging.info(" {}".format(line.strip('\n'))) + out += line + + with prc.stderr: + for line in iter(prc.stderr.readline, b''): + logging.warn(" {}".format(line.strip('\n'))) + err += line + + ret = prc.wait() + + return ret, out, err + + def _autoconfig_backup_file(self, filename): + """ + Create a backup file. + + :param filename: The file to backup + :type filename: str + """ + + # Does a copy of the file exist, if not create one + ofile = filename + '.orig' + (ret, stdout, stderr) = self.exec_command('ls {}'.format(ofile)) + if ret != 0: + logging.debug(stderr) + if stdout.strip('\n') != ofile: + cmd = 'sudo cp {} {}'.format(filename, ofile) + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + logging.debug(stderr) + + def _install_vpp_pkg_ubuntu(self, node, pkg): + """ + Install the VPP packages + + :param node: Node dictionary + :param pkg: The vpp packages + :type node: dict + :type pkg: string + """ + + cmd = 'apt-get -y install {}'.format(pkg) + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {} {}'.format( + cmd, node['host'], stdout, stderr)) + + def _install_vpp_pkg_centos(self, node, pkg): + """ + Install the VPP packages + + :param node: Node dictionary + :param pkg: The vpp packages + :type node: dict + :type pkg: string + """ + + cmd = 'yum -y install {}'.format(pkg) + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {} {}'.format( + cmd, node['host'], stdout, stderr)) + + def _install_vpp_ubuntu(self, node, fdio_release=VPP_VERSION, + ubuntu_version='xenial'): + """ + Install the VPP packages + + :param node: Node dictionary with cpuinfo. 
+ :param fdio_release: VPP release number + :param ubuntu_version: Ubuntu Version + :type node: dict + :type fdio_release: string + :type ubuntu_version: string + """ + + # Modify the sources list + sfile = '/etc/apt/sources.list.d/99fd.io.list' + + # Backup the sources list + self._autoconfig_backup_file(sfile) + + # Remove the current file + cmd = 'rm {}'.format(sfile) + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + logging.debug('{} failed on node {} {}'.format( + cmd, + node['host'], + stderr)) + + reps = 'deb [trusted=yes] https://nexus.fd.io/content/' + reps += 'repositories/fd.io.stable.{}.ubuntu.{}.main/ ./\n' \ + .format(fdio_release, ubuntu_version) + + cmd = 'echo "{0}" | sudo tee {1}'.format(reps, sfile) + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {}'.format( + cmd, + node['host'], + stderr)) + + # Install the package + cmd = 'apt-get -y update' + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} apt-get update failed on node {} {}'.format( + cmd, + node['host'], + stderr)) + + self._install_vpp_pkg_ubuntu(node, 'vpp-lib') + self._install_vpp_pkg_ubuntu(node, 'vpp') + self._install_vpp_pkg_ubuntu(node, 'vpp-plugins') + self._install_vpp_pkg_ubuntu(node, 'vpp-dpdk-dkms') + self._install_vpp_pkg_ubuntu(node, 'vpp-dpdk-dev') + self._install_vpp_pkg_ubuntu(node, 'vpp-api-python') + self._install_vpp_pkg_ubuntu(node, 'vpp-api-java') + self._install_vpp_pkg_ubuntu(node, 'vpp-api-lua') + self._install_vpp_pkg_ubuntu(node, 'vpp-dev') + self._install_vpp_pkg_ubuntu(node, 'vpp-dbg') + + def _install_vpp_centos(self, node, fdio_release=VPP_VERSION, + centos_version='centos7'): + """ + Install the VPP packages + + :param node: Node dictionary with cpuinfo. + :param fdio_release: VPP release number + :param centos_version: Ubuntu Version + :type node: dict + :type fdio_release: string + :type centos_version: string + """ + + # Modify the sources list + sfile = '/etc/yum.repos.d/fdio-release.repo' + + # Backup the sources list + self._autoconfig_backup_file(sfile) + + # Remove the current file + cmd = 'rm {}'.format(sfile) + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + logging.debug('{} failed on node {} {}'.format( + cmd, + node['host'], + stderr)) + + reps = '[fdio-stable-{}]\n'.format(fdio_release) + reps += 'name=fd.io stable/{} branch latest merge\n'.format(fdio_release) + reps += 'baseurl=https://nexus.fd.io/content/repositories/fd.io.stable.{}.{}/\n'.\ + format(fdio_release, centos_version) + reps += 'enabled=1\n' + reps += 'gpgcheck=0' + + cmd = 'echo "{0}" | sudo tee {1}'.format(reps, sfile) + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {}'.format( + cmd, + node['host'], + stderr)) + + # Install the packages + + self._install_vpp_pkg_centos(node, 'vpp-lib') + self._install_vpp_pkg_centos(node, 'vpp') + self._install_vpp_pkg_centos(node, 'vpp-plugins') + # jadfix Check with Ole + # self._install_vpp_pkg_centos(node, 'vpp-dpdk-devel') + self._install_vpp_pkg_centos(node, 'vpp-api-python') + self._install_vpp_pkg_centos(node, 'vpp-api-java') + self._install_vpp_pkg_centos(node, 'vpp-api-lua') + self._install_vpp_pkg_centos(node, 'vpp-devel') + + def install_vpp(self, node): + """ + Install the VPP packages + + :param node: Node dictionary with cpuinfo. 
+ :type node: dict + """ + distro = self.get_linux_distro() + if distro[0] == 'Ubuntu': + self._install_vpp_ubuntu(node) + elif distro[0] == 'CentOS Linux': + logging.info("Install CentOS") + self._install_vpp_centos(node) + else: + return + + def _uninstall_vpp_pkg_ubuntu(self, node, pkg): + """ + Uninstall the VPP packages + + :param node: Node dictionary + :param pkg: The vpp packages + :type node: dict + :type pkg: string + """ + cmd = 'dpkg --purge {}'.format(pkg) + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {} {}'.format( + cmd, node['host'], stdout, stderr)) + + def _uninstall_vpp_pkg_centos(self, node, pkg): + """ + Uninstall the VPP packages + + :param node: Node dictionary + :param pkg: The vpp packages + :type node: dict + :type pkg: string + """ + cmd = 'yum -y remove {}'.format(pkg) + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {} {}'.format( + cmd, node['host'], stdout, stderr)) + + def _uninstall_vpp_ubuntu(self, node): + """ + Uninstall the VPP packages + + :param node: Node dictionary with cpuinfo. + :type node: dict + """ + pkgs = self.get_installed_vpp_pkgs() + + if len(pkgs) > 0: + if 'version' in pkgs[0]: + logging.info("Uninstall Ubuntu Packages") + self._uninstall_vpp_pkg_ubuntu(node, 'vpp-api-python') + self._uninstall_vpp_pkg_ubuntu(node, 'vpp-api-java') + self._uninstall_vpp_pkg_ubuntu(node, 'vpp-api-lua') + self._uninstall_vpp_pkg_ubuntu(node, 'vpp-plugins') + self._uninstall_vpp_pkg_ubuntu(node, 'vpp-dpdk-dev') + self._uninstall_vpp_pkg_ubuntu(node, 'vpp-dpdk-dkms') + self._uninstall_vpp_pkg_ubuntu(node, 'vpp-dev') + self._uninstall_vpp_pkg_ubuntu(node, 'vpp-dbg') + self._uninstall_vpp_pkg_ubuntu(node, 'vpp') + self._uninstall_vpp_pkg_ubuntu(node, 'vpp-lib') + else: + logging.info("Uninstall locally installed Ubuntu Packages") + for pkg in pkgs: + self._uninstall_vpp_pkg_ubuntu(node, pkg['name']) + else: + logging.error("There are no Ubuntu packages installed") + + def _uninstall_vpp_centos(self, node): + """ + Uninstall the VPP packages + + :param node: Node dictionary with cpuinfo. + :type node: dict + """ + + pkgs = self.get_installed_vpp_pkgs() + + if len(pkgs) > 0: + if 'version' in pkgs[0]: + logging.info("Uninstall CentOS Packages") + self._uninstall_vpp_pkg_centos(node, 'vpp-api-python') + self._uninstall_vpp_pkg_centos(node, 'vpp-api-java') + self._uninstall_vpp_pkg_centos(node, 'vpp-api-lua') + self._uninstall_vpp_pkg_centos(node, 'vpp-plugins') + self._uninstall_vpp_pkg_centos(node, 'vpp-dpdk-devel') + self._uninstall_vpp_pkg_centos(node, 'vpp-devel') + self._uninstall_vpp_pkg_centos(node, 'vpp') + self._uninstall_vpp_pkg_centos(node, 'vpp-lib') + else: + logging.info("Uninstall locally installed CentOS Packages") + for pkg in pkgs: + self._uninstall_vpp_pkg_centos(node, pkg['name']) + else: + logging.error("There are no CentOS packages installed") + + def uninstall_vpp(self, node): + """ + Uninstall the VPP packages + + :param node: Node dictionary with cpuinfo. + :type node: dict + """ + distro = self.get_linux_distro() + if distro[0] == 'Ubuntu': + self._uninstall_vpp_ubuntu(node) + elif distro[0] == 'CentOS Linux': + logging.info("Uninstall CentOS") + self._uninstall_vpp_centos(node) + else: + return + + def show_vpp_settings(self, *additional_cmds): + """ + Print default VPP settings. In case others are needed, can be + accepted as next parameters (each setting one parameter), preferably + in form of a string. 
+ + :param additional_cmds: Additional commands that the vpp should print + settings for. + :type additional_cmds: tuple + """ + def_setting_tb_displayed = { + 'IPv6 FIB': 'ip6 fib', + 'IPv4 FIB': 'ip fib', + 'Interface IP': 'int addr', + 'Interfaces': 'int', + 'ARP': 'ip arp', + 'Errors': 'err' + } + + if additional_cmds: + for cmd in additional_cmds: + def_setting_tb_displayed['Custom Setting: {}'.format(cmd)] \ + = cmd + + for _, value in def_setting_tb_displayed.items(): + self.exec_command('vppctl sh {}'.format(value)) + + @staticmethod + def get_hardware(node): + """ + Get the VPP hardware information and return it in a + dictionary + + :param node: VPP node. + :type node: dict + :returns: Dictionary containing improtant VPP information + :rtype: dictionary + """ + + interfaces = {} + cmd = 'vppctl show hard' + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + return interfaces + + lines = stdout.split('\n') + if len(lines[0]) is not 0: + if lines[0].split(' ')[0] == 'FileNotFoundError': + return interfaces + + for line in lines: + if len(line) is 0: + continue + + # If the first character is not whitespace + # create a new interface + if len(re.findall(r'\s', line[0])) is 0: + spl = line.split() + name = spl[0] + interfaces[name] = {} + interfaces[name]['index'] = spl[1] + interfaces[name]['state'] = spl[2] + + # Ethernet address + rfall = re.findall(r'Ethernet address', line) + if rfall: + spl = line.split() + interfaces[name]['mac'] = spl[2] + + # Carrier + rfall = re.findall(r'carrier', line) + if rfall: + spl = line.split('carrier ') + interfaces[name]['carrier'] = spl[1] + + # Socket + rfall = re.findall(r'cpu socket', line) + if rfall: + spl = line.split('cpu socket ') + interfaces[name]['cpu socket'] = spl[1] + + # Queues and Descriptors + rfall = re.findall(r'rx queues', line) + if rfall: + spl = line.split(',') + interfaces[name]['rx queues'] = spl[0].lstrip(' ').split(' ')[2] + interfaces[name]['rx descs'] = spl[1].split(' ')[3] + interfaces[name]['tx queues'] = spl[2].split(' ')[3] + interfaces[name]['tx descs'] = spl[3].split(' ')[3] + + return interfaces + + def _get_installed_vpp_pkgs_ubuntu(self): + """ + Get the VPP hardware information and return it in a + dictionary + + :returns: List of the packages installed + :rtype: list + """ + + pkgs = [] + cmd = 'dpkg -l | grep vpp' + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + return pkgs + + lines = stdout.split('\n') + for line in lines: + items = line.split() + if len(items) < 2: + continue + pkg = {'name': items[1], 'version': items[2]} + pkgs.append(pkg) + + return pkgs + + def _get_installed_vpp_pkgs_centos(self): + """ + Get the VPP hardware information and return it in a + dictionary + + :returns: List of the packages installed + :rtype: list + """ + + pkgs = [] + cmd = 'rpm -qa | grep vpp' + (ret, stdout, stderr) = self.exec_command(cmd) + if ret != 0: + return pkgs + + lines = stdout.split('\n') + for line in lines: + if len(line) == 0: + continue + + items = line.split() + if len(items) < 2: + pkg = {'name': items[0]} + else: + pkg = {'name': items[1], 'version': items[2]} + + pkgs.append(pkg) + + return pkgs + + def get_installed_vpp_pkgs(self): + """ + Get the VPP hardware information and return it in a + dictionary + + :returns: List of the packages installed + :rtype: list + """ + + distro = self.get_linux_distro() + if distro[0] == 'Ubuntu': + pkgs = self._get_installed_vpp_pkgs_ubuntu() + elif distro[0] == 'CentOS Linux': + pkgs = self._get_installed_vpp_pkgs_centos() + 
else: + return [] + + return pkgs + + @staticmethod + def get_interfaces_numa_node(node, *iface_keys): + """Get numa node on which are located most of the interfaces. + + Return numa node with highest count of interfaces provided as arguments. + Return 0 if the interface does not have numa_node information available. + If all interfaces have unknown location (-1), then return 0. + If most of interfaces have unknown location (-1), but there are + some interfaces with known location, then return the second most + location of the provided interfaces. + + :param node: Node from DICT__nodes. + :param iface_keys: Interface keys for lookup. + :type node: dict + :type iface_keys: strings + """ + numa_list = [] + for if_key in iface_keys: + try: + numa_list.append(node['interfaces'][if_key].get('numa_node')) + except KeyError: + pass + + numa_cnt_mc = Counter(numa_list).most_common() + numa_cnt_mc_len = len(numa_cnt_mc) + if numa_cnt_mc_len > 0 and numa_cnt_mc[0][0] != -1: + return numa_cnt_mc[0][0] + elif numa_cnt_mc_len > 1 and numa_cnt_mc[0][0] == -1: + return numa_cnt_mc[1][0] + + return 0 + + @staticmethod + def start(node): + """ + + Starts vpp for a given node + + :param node: VPP node. + :type node: dict + """ + + cmd = 'service vpp start' + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {} {}'. + format(cmd, node['host'], + stdout, stderr)) + + @staticmethod + def stop(node): + """ + + Stops vpp for a given node + + :param node: VPP node. + :type node: dict + """ + + cmd = 'service vpp stop' + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {} {}'. + format(cmd, node['host'], + stdout, stderr)) + + @staticmethod + def status(node): + """ + + Gets VPP status + + :param: node + :type node: dict + :returns: status, errors + :rtype: tuple(str, list) + """ + errors = [] + vutil = VPPUtil() + pkgs = vutil.get_installed_vpp_pkgs() + if len(pkgs) == 0: + return "Not Installed", errors + + cmd = 'service vpp status' + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + + # Get the active status + state = re.findall(r'Active:[\w (\)]+', stdout)[0].split(' ') + if len(state) > 2: + statestr = "{} {}".format(state[1], state[2]) + else: + statestr = "Invalid" + + # For now we won't look for DPDK errors + # lines = stdout.split('\n') + # for line in lines: + # if 'EAL' in line or \ + # 'FAILURE' in line or \ + # 'failed' in line or \ + # 'Failed' in line: + # errors.append(line.lstrip(' ')) + + return statestr, errors + + @staticmethod + def get_linux_distro(): + """ + Get the linux distribution and check if it is supported + + :returns: linux distro, None if the distro is not supported + :rtype: list + """ + + distro = platform.linux_distribution() + if distro[0] == 'Ubuntu' or \ + distro[0] == 'CentOS Linux' or \ + distro[:26] == 'Linux Distribution Red Hat': + return distro + else: + raise RuntimeError('Linux Distribution {} is not supported'.format(distro[0])) + + @staticmethod + def version(): + """ + + Gets VPP Version information + + :returns: version + :rtype: dict + """ + + version = {} + cmd = 'vppctl show version verbose' + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + return version + + lines = stdout.split('\n') + if len(lines[0]) is not 0: + if lines[0].split(' ')[0] == 'FileNotFoundError': + return version + + for line in lines: + if len(line) is 0: + continue + dct = line.split(':') + version[dct[0]] = dct[1].lstrip(' ') + + return version diff 
--git a/extras/vpp_config/vpplib/VppGrubUtil.py b/extras/vpp_config/vpplib/VppGrubUtil.py new file mode 100644 index 00000000000..4aac427c22a --- /dev/null +++ b/extras/vpp_config/vpplib/VppGrubUtil.py @@ -0,0 +1,236 @@ +# Copyright (c) 2016 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""VPP Grub Utility Library."""
+
+import re
+
+from vpplib.VPPUtil import VPPUtil
+
+__all__ = ['VppGrubUtil']
+
+
+class VppGrubUtil(object):
+    """VPP Grub Utilities."""
+
+    def _get_current_cmdline(self):
+        """
+        Return the current grub cmdline, read from /proc/cmdline.
+
+        :returns: The current grub cmdline
+        :rtype: string
+        """
+
+        # Get the current kernel cmdline from /proc/cmdline
+        cmd = 'sudo cat /proc/cmdline'
+        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
+        if ret != 0:
+            raise RuntimeError('{} failed on node {} {} {}'.
+                               format(cmd, self._node['host'],
+                                      stdout, stderr))
+
+        self._current_cmdline = stdout.strip('\n')
+
+    def _get_default_cmdline(self):
+        """
+        Return the default grub cmdline from the grub config file.
+
+        :returns: The default grub cmdline
+        :rtype: string
+        """
+
+        # Get the default grub cmdline
+        rootdir = self._node['rootdir']
+        gfile = self._node['cpu']['grub_config_file']
+        grubcmdline = self._node['cpu']['grubcmdline']
+        cmd = 'cat {}'.format(rootdir + gfile)
+        (ret, stdout, stderr) = VPPUtil.exec_command(cmd)
+        if ret != 0:
+            raise RuntimeError('{} failed on node {} {}'.
+ format(cmd, self._node['host'], stderr)) + + # Get the Default Linux command line, ignoring commented lines + lines = stdout.split('\n') + for line in lines: + if line == '' or line[0] == '#': + continue + ldefault = re.findall(r'{}=.+'.format(grubcmdline), line) + if ldefault: + self._default_cmdline = ldefault[0] + break + + def get_current_cmdline(self): + """ + Returns the saved grub cmdline + + :returns: The saved grub cmdline + :rtype: string + """ + return self._current_cmdline + + def get_default_cmdline(self): + """ + Returns the default grub cmdline + + :returns: The default grub cmdline + :rtype: string + """ + return self._default_cmdline + + def create_cmdline(self, isolated_cpus): + """ + Create the new grub cmdline + + :param isolated_cpus: The isolated cpu string + :type isolated_cpus: string + :returns: The command line + :rtype: string + """ + grubcmdline = self._node['cpu']['grubcmdline'] + cmdline = self._default_cmdline + value = cmdline.split('{}='.format(grubcmdline))[1] + value = value.rstrip('"').lstrip('"') + + iommu = re.findall(r'iommu=\w+', value) + pstate = re.findall(r'intel_pstate=\w+', value) + # If there is already some iommu commands set, leave them, + # if not use ours + if iommu == [] and pstate == []: + value = '{} intel_pstate=disable'.format(value) + + # Replace isolcpus with ours + isolcpus = re.findall(r'isolcpus=[\w+\-,]+', value) + if not isolcpus: + if isolated_cpus != '': + value = "{} isolcpus={}".format(value, isolated_cpus) + else: + if isolated_cpus != '': + value = re.sub(r'isolcpus=[\w+\-,]+', + 'isolcpus={}'.format(isolated_cpus), + value) + else: + value = re.sub(r'isolcpus=[\w+\-,]+', '', value) + + nohz = re.findall(r'nohz_full=[\w+\-,]+', value) + if not nohz: + if isolated_cpus != '': + value = "{} nohz_full={}".format(value, isolated_cpus) + else: + if isolated_cpus != '': + value = re.sub(r'nohz_full=[\w+\-,]+', + 'nohz_full={}'.format(isolated_cpus), + value) + else: + value = re.sub(r'nohz_full=[\w+\-,]+', '', value) + + rcu = re.findall(r'rcu_nocbs=[\w+\-,]+', value) + if not rcu: + if isolated_cpus != '': + value = "{} rcu_nocbs={}".format(value, isolated_cpus) + else: + if isolated_cpus != '': + value = re.sub(r'rcu_nocbs=[\w+\-,]+', + 'rcu_nocbs={}'.format(isolated_cpus), + value) + else: + value = re.sub(r'rcu_nocbs=[\w+\-,]+', '', value) + + value = value.lstrip(' ').rstrip(' ') + cmdline = '{}="{}"'.format(grubcmdline, value) + return cmdline + + def apply_cmdline(self, node, isolated_cpus): + """ + Apply cmdline to the default grub file + + :param node: Node dictionary with cpuinfo. + :param isolated_cpus: The isolated cpu string + :type node: dict + :type isolated_cpus: string + :return The vpp cmdline + :rtype string + """ + + vpp_cmdline = self.create_cmdline(isolated_cpus) + if vpp_cmdline == '': + return vpp_cmdline + + # Update grub + # Save the original file + rootdir = node['rootdir'] + grubcmdline = node['cpu']['grubcmdline'] + ofilename = rootdir + node['cpu']['grub_config_file'] + '.orig' + filename = rootdir + node['cpu']['grub_config_file'] + + # Write the output file + # Does a copy of the original file exist, if not create one + (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofilename)) + if ret != 0: + if stdout.strip('\n') != ofilename: + cmd = 'sudo cp {} {}'.format(filename, ofilename) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {}'. 
+ format(cmd, self._node['host'], stderr)) + + # Get the contents of the current grub config file + cmd = 'cat {}'.format(filename) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {}'.format( + cmd, + self._node['host'], + stderr)) + + # Write the new contents + # Get the Default Linux command line, ignoring commented lines + content = "" + lines = stdout.split('\n') + for line in lines: + if line == '': + content += line + '\n' + continue + if line[0] == '#': + content += line + '\n' + continue + + ldefault = re.findall(r'{}=.+'.format(grubcmdline), line) + if ldefault: + content += vpp_cmdline + '\n' + else: + content += line + '\n' + + content = content.replace(r"`", r"\`") + content = content.rstrip('\n') + cmd = "sudo cat > {0} << EOF\n{1}\n".format(filename, content) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {}'.format( + cmd, + self._node['host'], + stderr)) + + return vpp_cmdline + + def __init__(self, node): + distro = VPPUtil.get_linux_distro() + if distro[0] == 'Ubuntu': + node['cpu']['grubcmdline'] = 'GRUB_CMDLINE_LINUX_DEFAULT' + else: + node['cpu']['grubcmdline'] = 'GRUB_CMDLINE_LINUX' + + self._node = node + self._current_cmdline = "" + self._default_cmdline = "" + self._get_current_cmdline() + self._get_default_cmdline() diff --git a/extras/vpp_config/vpplib/VppHugePageUtil.py b/extras/vpp_config/vpplib/VppHugePageUtil.py new file mode 100644 index 00000000000..43df72a455a --- /dev/null +++ b/extras/vpp_config/vpplib/VppHugePageUtil.py @@ -0,0 +1,122 @@ +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""VPP Huge Page Utilities""" + +import re + +from vpplib.VPPUtil import VPPUtil + +# VPP Huge page File +DEFAULT_VPP_HUGE_PAGE_CONFIG_FILENAME = "/etc/vpp/80-vpp.conf" +VPP_HUGEPAGE_CONFIG = """ +vm.nr_hugepages={nr_hugepages} +vm.max_map_count={max_map_count} +vm.hugetlb_shm_group=0 +kernel.shmmax={shmmax} +""" + + +class VppHugePageUtil(object): + """ + Huge Page Utilities + """ + def hugepages_dryrun_apply(self): + """ + Apply the huge page configuration + + """ + + node = self._node + hugepages = node['hugepages'] + + vpp_hugepage_config = VPP_HUGEPAGE_CONFIG.format( + nr_hugepages=hugepages['total'], + max_map_count=hugepages['max_map_count'], + shmmax=hugepages['shmax']) + + rootdir = node['rootdir'] + filename = rootdir + node['hugepages']['hugepage_config_file'] + + cmd = 'echo "{0}" | sudo tee {1}'.\ + format(vpp_hugepage_config, filename) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {} {}'. 
+ format(cmd, node['host'], + stdout, stderr)) + + def get_actual_huge_pages(self): + """ + Get the current huge page configuration + + :returns the hugepage total, hugepage free, hugepage size, + total memory, and total memory free + :rtype: tuple + """ + + # Get the memory information using /proc/meminfo + cmd = 'sudo cat /proc/meminfo' + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError( + '{} failed on node {} {} {}'.format( + cmd, self._node['host'], + stdout, stderr)) + + total = re.findall(r'HugePages_Total:\s+\w+', stdout) + free = re.findall(r'HugePages_Free:\s+\w+', stdout) + size = re.findall(r'Hugepagesize:\s+\w+\s+\w+', stdout) + memtotal = re.findall(r'MemTotal:\s+\w+\s+\w+', stdout) + memfree = re.findall(r'MemFree:\s+\w+\s+\w+', stdout) + + total = total[0].split(':')[1].lstrip() + free = free[0].split(':')[1].lstrip() + size = size[0].split(':')[1].lstrip() + memtotal = memtotal[0].split(':')[1].lstrip() + memfree = memfree[0].split(':')[1].lstrip() + return total, free, size, memtotal, memfree + + def show_huge_pages(self): + """ + Print the current huge page configuration + + """ + + node = self._node + hugepages = node['hugepages'] + print " {:30}: {}".format("Total System Memory", + hugepages['memtotal']) + print " {:30}: {}".format("Total Free Memory", + hugepages['memfree']) + print " {:30}: {}".format("Actual Huge Page Total", + hugepages['actual_total']) + print " {:30}: {}".format("Configured Huge Page Total", + hugepages['total']) + print " {:30}: {}".format("Huge Pages Free", hugepages['free']) + print " {:30}: {}".format("Huge Page Size", hugepages['size']) + + def get_huge_page_config(self): + """ + Returns the huge page config. + + :returns: The map max count and shmmax + """ + + total = self._node['hugepages']['total'] + max_map_count = int(total) * 2 + 1024 + shmmax = int(total) * 2 * 1024 * 1024 + return max_map_count, shmmax + + def __init__(self, node): + self._node = node diff --git a/extras/vpp_config/vpplib/VppPCIUtil.py b/extras/vpp_config/vpplib/VppPCIUtil.py new file mode 100644 index 00000000000..829d66ae374 --- /dev/null +++ b/extras/vpp_config/vpplib/VppPCIUtil.py @@ -0,0 +1,330 @@ +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
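A rough usage sketch for the VppPCIUtil class defined below, assuming a hypothetical node dictionary. Only the 'host' and 'rootdir' keys are read by this class; the rootdir value is a placeholder (the dpdk-devbind.py path from DPDK_SCRIPT is appended to it), and the driver name and PCI address in the last line are example values:

from vpplib.VppPCIUtil import VppPCIUtil

node = {'host': 'localhost', 'rootdir': '/usr/local'}   # placeholder root; DPDK_SCRIPT is appended to it

pci = VppPCIUtil(node)
pci.get_all_devices()        # runs dpdk-devbind.py --status and sorts devices into kernel/DPDK/other groups
VppPCIUtil.show_vpp_devices(pci.get_kernel_devices())
# Rebind a NIC from its kernel driver to a DPDK-compatible driver (example values):
# VppPCIUtil.bind_vpp_device(node, 'uio_pci_generic', '0000:02:00.0')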
+ +"""VPP PCI Utility libraries""" + +import re + +from vpplib.VPPUtil import VPPUtil + +DPDK_SCRIPT = "/vpp/vpp-config/scripts/dpdk-devbind.py" + +# PCI Device id regular expresssion +PCI_DEV_ID_REGEX = '[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+.[0-9A-Fa-f]+' + + +class VppPCIUtil(object): + """ + PCI Utilities + + """ + + @staticmethod + def _create_device_list(device_string): + """ + Returns a list of PCI devices + + :param device_string: The devices string from dpdk_devbind + :returns: The device list + :rtype: dictionary + """ + + devices = {} + + ids = re.findall(PCI_DEV_ID_REGEX, device_string) + descriptions = re.findall(r'\'([\s\S]*?)\'', device_string) + unused = re.findall(r'unused=[\w,]+', device_string) + + for i, j in enumerate(ids): + device = {'description': descriptions[i]} + if unused: + device['unused'] = unused[i].split('=')[1].split(',') + + cmd = 'ls /sys/bus/pci/devices/{}/driver/module/drivers'. \ + format(ids[i]) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret == 0: + device['driver'] = stdout.split(':')[1].rstrip('\n') + + cmd = 'cat /sys/bus/pci/devices/{}/numa_node'.format(ids[i]) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed {} {}'. + format(cmd, stderr, stdout)) + numa_node = stdout.rstrip('\n') + if numa_node == '-1': + device['numa_node'] = '0' + else: + device['numa_node'] = numa_node + + interfaces = [] + device['interfaces'] = [] + cmd = 'ls /sys/bus/pci/devices/{}/net'.format(ids[i]) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret == 0: + interfaces = stdout.rstrip('\n').split() + device['interfaces'] = interfaces + + l2_addrs = [] + for intf in interfaces: + cmd = 'cat /sys/bus/pci/devices/{}/net/{}/address'.format( + ids[i], intf) + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed {} {}'. 
+ format(cmd, stderr, stdout)) + + l2_addrs.append(stdout.rstrip('\n')) + + device['l2addr'] = l2_addrs + + devices[ids[i]] = device + + return devices + + def __init__(self, node): + self._node = node + self._dpdk_devices = {} + self._kernel_devices = {} + self._other_devices = {} + self._crypto_dpdk_devices = {} + self._crypto_kernel_devices = {} + self._crypto_other_devices = {} + self._link_up_devices = {} + + def get_all_devices(self): + """ + Returns a list of all the devices + + """ + + node = self._node + rootdir = node['rootdir'] + dpdk_script = rootdir + DPDK_SCRIPT + cmd = dpdk_script + ' --status' + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {}'.format( + cmd, + node['host'], + stderr)) + + # Get the network devices using the DPDK + # First get everything after using DPDK + stda = stdout.split('Network devices using DPDK-compatible driver')[1] + # Then get everything before using kernel driver + using_dpdk = stda.split('Network devices using kernel driver')[0] + self._dpdk_devices = self._create_device_list(using_dpdk) + + # Get the network devices using the kernel + stda = stdout.split('Network devices using kernel driver')[1] + using_kernel = stda.split('Other network devices')[0] + self._kernel_devices = self._create_device_list(using_kernel) + + # Get the other network devices + stda = stdout.split('Other network devices')[1] + other = stda.split('Crypto devices using DPDK-compatible driver')[0] + self._other_devices = self._create_device_list(other) + + # Get the crypto devices using the DPDK + stda = stdout.split('Crypto devices using DPDK-compatible driver')[1] + crypto_using_dpdk = stda.split('Crypto devices using kernel driver')[0] + self._crypto_dpdk_devices = self._create_device_list( + crypto_using_dpdk) + + # Get the network devices using the kernel + stda = stdout.split('Crypto devices using kernel driver')[1] + crypto_using_kernel = stda.split('Other crypto devices')[0] + self._crypto_kernel_devices = self._create_device_list( + crypto_using_kernel) + + # Get the other network devices + crypto_other = stdout.split('Other crypto devices')[1] + self._crypto_other_devices = self._create_device_list(crypto_other) + + # Get the devices used by the kernel + for devk in self._kernel_devices.items(): + dvid = devk[0] + device = devk[1] + for i in device['interfaces']: + cmd = "ip addr show " + i + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {}'.format( + cmd, + node['host'], + stderr)) + lstate = re.findall(r'state \w+', stdout)[0].split(' ')[1] + + # Take care of the links that are UP + if lstate == 'UP': + device['linkup'] = True + self._link_up_devices[dvid] = device + + for devl in self._link_up_devices.items(): + dvid = devl[0] + del self._kernel_devices[dvid] + + def get_dpdk_devices(self): + """ + Returns a list the dpdk devices + + """ + return self._dpdk_devices + + def get_kernel_devices(self): + """ + Returns a list the kernel devices + + """ + return self._kernel_devices + + def get_other_devices(self): + """ + Returns a list the other devices + + """ + return self._other_devices + + def get_crypto_dpdk_devices(self): + """ + Returns a list the crypto dpdk devices + + """ + return self._crypto_dpdk_devices + + def get_crypto_kernel_devices(self): + """ + Returns a list the crypto kernel devices + + """ + return self._crypto_kernel_devices + + def get_crypto_other_devices(self): + """ + Returns a list the crypto other devices + + """ 
+ return self._crypto_other_devices + + def get_link_up_devices(self): + """ + Returns a list the link up devices + + """ + return self._link_up_devices + + @staticmethod + def vpp_create_interface(interfaces, device_id, device): + """ + Create an interface using the device is and device + + """ + + name = 'port' + str(len(interfaces)) + interfaces[name] = {} + interfaces[name]['pci_address'] = device_id + interfaces[name]['numa_node'] = device['numa_node'] + if 'l2addr' in device: + l2_addrs = device['l2addr'] + for i, j in enumerate(l2_addrs): + if i > 0: + mname = 'mac_address' + str(i + 1) + interfaces[name][mname] = l2_addrs[i] + else: + interfaces[name]['mac_address'] = l2_addrs[i] + + @staticmethod + def show_vpp_devices(devices, show_interfaces=True, show_header=True): + """ + show the vpp devices specified in the argument + + :param devices: A list of devices + :param show_interfaces: show the kernel information + :type devices: dict + :type show_interfaces: bool + """ + + + if show_interfaces: + header = "{:15} {:25} {:50}".format("PCI ID", + "Kernel Interface(s)", + "Description") + else: + header = "{:15} {:50}".format("PCI ID", + "Description") + dashseparator = ("-" * (len(header) - 2)) + + + if show_header == True: + print header + print dashseparator + for dit in devices.items(): + dvid = dit[0] + device = dit[1] + if show_interfaces: + interfaces = device['interfaces'] + interface = '' + for i, j in enumerate(interfaces): + if i > 0: + interface += ',' + interfaces[i] + else: + interface = interfaces[i] + + print "{:15} {:25} {:50}".format( + dvid, interface, device['description']) + else: + print "{:15} {:50}".format( + dvid, device['description']) + + @staticmethod + def unbind_vpp_device(node, device_id): + """ + unbind the device specified + + :param node: Node dictionary with cpuinfo. + :param device_id: The device id + :type node: dict + :type device_id: string + """ + + + rootdir = node['rootdir'] + dpdk_script = rootdir + DPDK_SCRIPT + cmd = dpdk_script + ' -u ' + ' ' + device_id + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {} {}'.format( + cmd, node['host'], + stdout, stderr)) + + @staticmethod + def bind_vpp_device(node, driver, device_id): + """ + bind the device specified + + :param node: Node dictionary with cpuinfo. + :param driver: The driver + :param device_id: The device id + :type node: dict + :type driver: string + :type device_id: string + """ + + rootdir = node['rootdir'] + dpdk_script = rootdir + DPDK_SCRIPT + cmd = dpdk_script + ' -b ' + driver + ' ' + device_id + (ret, stdout, stderr) = VPPUtil.exec_command(cmd) + if ret != 0: + raise RuntimeError('{} failed on node {} {} {}'.format( + cmd, node['host'], stdout, stderr)) diff --git a/extras/vpp_config/vpplib/__init__.py b/extras/vpp_config/vpplib/__init__.py new file mode 100644 index 00000000000..54b42722be7 --- /dev/null +++ b/extras/vpp_config/vpplib/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +__init__ file for directory lib +""" diff --git a/extras/vpp_config/vpplib/constants.py b/extras/vpp_config/vpplib/constants.py new file mode 100644 index 00000000000..051a21cf023 --- /dev/null +++ b/extras/vpp_config/vpplib/constants.py @@ -0,0 +1,48 @@ +# Copyright (c) 2016 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Constants used in CSIT.""" + + +class Constants(object): + """Constants used in CSIT.""" + + # OpenVPP testing directory location at topology nodes + REMOTE_FW_DIR = '/tmp/openvpp-testing' + + # shell scripts location + RESOURCES_LIB_SH = 'resources/libraries/bash' + + # vat templates location + RESOURCES_TPL_VAT = 'resources/templates/vat' + + # OpenVPP VAT binary name + VAT_BIN_NAME = 'vpp_api_test' + + # QEMU version to install + QEMU_INSTALL_VERSION = 'qemu-2.5.0' + + # QEMU install directory + QEMU_INSTALL_DIR = '/opt/qemu-2.5.0' + + # Honeycomb directory location at topology nodes: + REMOTE_HC_DIR = '/opt/honeycomb' + + # Honeycomb persistence files location + REMOTE_HC_PERSIST = '/var/lib/honeycomb/persist' + + # Honeycomb templates location + RESOURCES_TPL_HC = 'resources/templates/honeycomb' + + # ODL Client Restconf listener port + ODL_PORT = 8181 |
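Finally, a hedged end-to-end sketch of how the vpplib helpers in this patch fit together when called from a configuration driver. The node dictionary is hypothetical and carries only the keys these classes actually read; the grub config path and the isolated-CPU list passed to create_cmdline() are arbitrary examples:

from vpplib.VPPUtil import VPPUtil
from vpplib.VppHugePageUtil import VppHugePageUtil
from vpplib.VppGrubUtil import VppGrubUtil

# Hypothetical node dictionary; 'rootdir' is prepended to config file paths.
node = {
    'host': 'localhost',
    'rootdir': '',
    'cpu': {'grub_config_file': '/etc/default/grub'},
}

(state, errors) = VPPUtil.status(node)     # e.g. ('active (running)', []) or ('Not Installed', [])
print('VPP status: {}'.format(state))

hugepages = VppHugePageUtil(node)
(total, free, size, memtotal, memfree) = hugepages.get_actual_huge_pages()
print('huge pages: {} total, {} free, page size {}'.format(total, free, size))

grub = VppGrubUtil(node)                   # picks GRUB_CMDLINE_LINUX or GRUB_CMDLINE_LINUX_DEFAULT per distro
print(grub.create_cmdline('1-3,5-7'))      # isolate cores 1-3 and 5-7 via isolcpus/nohz_full/rcu_nocbs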