path: root/extras/vpp_config/vpplib
author     Klement Sekera <klement.sekera@gmail.com>    2022-04-26 19:02:15 +0200
committer  Ole Trøan <otroan@employees.org>             2022-05-10 18:52:08 +0000
commit     d9b0c6fbf7aa5bd9af84264105b39c82028a4a29 (patch)
tree       4f786cfd8ebc2443cb11e11b74c8657204068898 /extras/vpp_config/vpplib
parent     f90348bcb4afd0af2611cefc43b17ef3042b511c (diff)
tests: replace pycodestyle with black
Drop pycodestyle for code style checking in favor of black. Black is a much faster, stable, PEP8-compliant code style checker that also offers automatic formatting. It aims to be very stable and to produce the smallest possible diffs, and it is used by many projects, small and big. Running checkstyle with black takes a few seconds and produces terse output, so test-checkstyle-diff is no longer necessary.

Expand the scope of checkstyle to all python files in the repo, replacing test-checkstyle with checkstyle-python. Also, fixstyle-python is now available for automatic style formatting.

Note: the python virtualenv has been consolidated into test/Makefile and test/requirements*.txt, which will eventually be moved to a central location. This is required to simplify the automated generation of docker executor images in the CI.

Type: improvement
Change-Id: I022a326603485f58585e879ac0f697fceefbc9c8
Signed-off-by: Klement Sekera <klement.sekera@gmail.com>
Signed-off-by: Dave Wallace <dwallacelf@gmail.com>
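For orientation, the hunks below are black's mechanical reformatting of the vpplib modules; no behavior changes. The two dominant patterns are quote normalization and the replacement of backslash continuations with parenthesized wrapping. A condensed, illustrative pairing drawn from the AutoConfig.py hunks (assembled here for comparison, not a single contiguous hunk):

    # Before (pycodestyle era): single quotes, backslash line continuation
    cmd = 'sudo cp {} {}'.format(filename, ofile)
    node['cpu']['total_vpp_cpus'] = \
        self._nodes[key]['cpu']['total_vpp_cpus']

    # After black: double quotes, parenthesized wrapping with trailing indices
    cmd = "sudo cp {} {}".format(filename, ofile)
    node["cpu"]["total_vpp_cpus"] = self._nodes[key]["cpu"][
        "total_vpp_cpus"
    ]

The new make targets named above (checkstyle-python, fixstyle-python) presumably wrap black's check-only and in-place modes respectively (black provides a --check flag for the former); the Makefile wiring itself lies outside the diffstat shown here.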
Diffstat (limited to 'extras/vpp_config/vpplib')
-rw-r--r--  extras/vpp_config/vpplib/AutoConfig.py       1205
-rw-r--r--  extras/vpp_config/vpplib/CpuUtils.py           88
-rw-r--r--  extras/vpp_config/vpplib/QemuUtils.py         484
-rw-r--r--  extras/vpp_config/vpplib/VPPUtil.py           435
-rw-r--r--  extras/vpp_config/vpplib/VppGrubUtil.py       146
-rw-r--r--  extras/vpp_config/vpplib/VppHugePageUtil.py    76
-rw-r--r--  extras/vpp_config/vpplib/VppPCIUtil.py        165
-rw-r--r--  extras/vpp_config/vpplib/constants.py          18
8 files changed, 1389 insertions, 1228 deletions
diff --git a/extras/vpp_config/vpplib/AutoConfig.py b/extras/vpp_config/vpplib/AutoConfig.py
index 62f18e27929..9a79039f69e 100644
--- a/extras/vpp_config/vpplib/AutoConfig.py
+++ b/extras/vpp_config/vpplib/AutoConfig.py
@@ -41,9 +41,9 @@ MIN_SYSTEM_CPUS = 2
MIN_TOTAL_HUGE_PAGES = 1024
MAX_PERCENT_FOR_HUGE_PAGES = 70
-IPERFVM_XML = 'configs/iperf-vm.xml'
-IPERFVM_IMAGE = 'images/xenial-mod.img'
-IPERFVM_ISO = 'configs/cloud-config.iso'
+IPERFVM_XML = "configs/iperf-vm.xml"
+IPERFVM_IMAGE = "images/xenial-mod.img"
+IPERFVM_ISO = "configs/cloud-config.iso"
class AutoConfig(object):
@@ -90,12 +90,12 @@ class AutoConfig(object):
"""
# Does a copy of the file exist, if not create one
- ofile = filename + '.orig'
- (ret, stdout, stderr) = VPPUtil.exec_command('ls {}'.format(ofile))
+ ofile = filename + ".orig"
+ (ret, stdout, stderr) = VPPUtil.exec_command("ls {}".format(ofile))
if ret != 0:
logging.debug(stderr)
- if stdout.strip('\n') != ofile:
- cmd = 'sudo cp {} {}'.format(filename, ofile)
+ if stdout.strip("\n") != ofile:
+ cmd = "sudo cp {} {}".format(filename, ofile)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
logging.debug(stderr)
@@ -114,14 +114,14 @@ class AutoConfig(object):
while True:
answer = input("Please enter the IPv4 Address [n.n.n.n/n]: ")
try:
- ipinput = answer.split('/')
+ ipinput = answer.split("/")
ipaddr = ip_address(ipinput[0])
if len(ipinput) > 1:
- plen = answer.split('/')[1]
+ plen = answer.split("/")[1]
else:
answer = input("Please enter the netmask [n.n.n.n]: ")
plen = ip_address(answer).netmask_bits()
- return '{}/{}'.format(ipaddr, plen)
+ return "{}/{}".format(ipaddr, plen)
except ValueError:
print("Please enter a valid IPv4 address.")
@@ -145,18 +145,22 @@ class AutoConfig(object):
while True:
answer = input(question)
- if answer == '':
+ if answer == "":
answer = default
break
- if re.findall(r'[0-9+]', answer):
+ if re.findall(r"[0-9+]", answer):
if int(answer) in range(first, last + 1):
break
else:
- print("Please a value between {} and {} or Return.".
- format(first, last))
+ print(
+ "Please a value between {} and {} or Return.".format(
+ first, last
+ )
+ )
else:
- print("Please a number between {} and {} or Return.".
- format(first, last))
+ print(
+ "Please a number between {} and {} or Return.".format(first, last)
+ )
return int(answer)
@@ -175,12 +179,12 @@ class AutoConfig(object):
input_valid = False
default = default.lower()
- answer = ''
+ answer = ""
while not input_valid:
answer = input(question)
- if answer == '':
+ if answer == "":
answer = default
- if re.findall(r'[YyNn]', answer):
+ if re.findall(r"[YyNn]", answer):
input_valid = True
answer = answer[0].lower()
else:
@@ -196,36 +200,40 @@ class AutoConfig(object):
# Get the Topology, from the topology layout file
topo = {}
- with open(self._autoconfig_filename, 'r') as stream:
+ with open(self._autoconfig_filename, "r") as stream:
try:
topo = yaml.load(stream)
- if 'metadata' in topo:
- self._metadata = topo['metadata']
+ if "metadata" in topo:
+ self._metadata = topo["metadata"]
except yaml.YAMLError as exc:
raise RuntimeError(
"Couldn't read the Auto config file {}.".format(
- self._autoconfig_filename, exc))
+ self._autoconfig_filename, exc
+ )
+ )
- systemfile = self._rootdir + self._metadata['system_config_file']
+ systemfile = self._rootdir + self._metadata["system_config_file"]
if self._clean is False and os.path.isfile(systemfile):
- with open(systemfile, 'r') as sysstream:
+ with open(systemfile, "r") as sysstream:
try:
systopo = yaml.load(sysstream)
- if 'nodes' in systopo:
- self._nodes = systopo['nodes']
+ if "nodes" in systopo:
+ self._nodes = systopo["nodes"]
except yaml.YAMLError as sysexc:
raise RuntimeError(
"Couldn't read the System config file {}.".format(
- systemfile, sysexc))
+ systemfile, sysexc
+ )
+ )
else:
# Get the nodes from Auto Config
- if 'nodes' in topo:
- self._nodes = topo['nodes']
+ if "nodes" in topo:
+ self._nodes = topo["nodes"]
# Set the root directory in all the nodes
for i in self._nodes.items():
node = i[1]
- node['rootdir'] = self._rootdir
+ node["rootdir"] = self._rootdir
def updateconfig(self):
"""
@@ -236,11 +244,11 @@ class AutoConfig(object):
"""
# Initialize the yaml data
- ydata = {'metadata': self._metadata, 'nodes': self._nodes}
+ ydata = {"metadata": self._metadata, "nodes": self._nodes}
# Write the system config file
- filename = self._rootdir + self._metadata['system_config_file']
- with open(filename, 'w') as yamlfile:
+ filename = self._rootdir + self._metadata["system_config_file"]
+ with open(filename, "w") as yamlfile:
yaml.dump(ydata, yamlfile)
def _update_auto_config(self):
@@ -252,11 +260,11 @@ class AutoConfig(object):
# Initialize the yaml data
nodes = {}
- with open(self._autoconfig_filename, 'r') as stream:
+ with open(self._autoconfig_filename, "r") as stream:
try:
ydata = yaml.load(stream)
- if 'nodes' in ydata:
- nodes = ydata['nodes']
+ if "nodes" in ydata:
+ nodes = ydata["nodes"]
except yaml.YAMLError as exc:
print(exc)
return
@@ -266,41 +274,45 @@ class AutoConfig(object):
node = i[1]
# Interfaces
- node['interfaces'] = {}
- for item in self._nodes[key]['interfaces'].items():
+ node["interfaces"] = {}
+ for item in self._nodes[key]["interfaces"].items():
port = item[0]
interface = item[1]
- node['interfaces'][port] = {}
- addr = '{}'.format(interface['pci_address'])
- node['interfaces'][port]['pci_address'] = addr
- if 'mac_address' in interface:
- node['interfaces'][port]['mac_address'] = \
- interface['mac_address']
-
- if 'total_other_cpus' in self._nodes[key]['cpu']:
- node['cpu']['total_other_cpus'] = \
- self._nodes[key]['cpu']['total_other_cpus']
- if 'total_vpp_cpus' in self._nodes[key]['cpu']:
- node['cpu']['total_vpp_cpus'] = \
- self._nodes[key]['cpu']['total_vpp_cpus']
- if 'reserve_vpp_main_core' in self._nodes[key]['cpu']:
- node['cpu']['reserve_vpp_main_core'] = \
- self._nodes[key]['cpu']['reserve_vpp_main_core']
+ node["interfaces"][port] = {}
+ addr = "{}".format(interface["pci_address"])
+ node["interfaces"][port]["pci_address"] = addr
+ if "mac_address" in interface:
+ node["interfaces"][port]["mac_address"] = interface["mac_address"]
+
+ if "total_other_cpus" in self._nodes[key]["cpu"]:
+ node["cpu"]["total_other_cpus"] = self._nodes[key]["cpu"][
+ "total_other_cpus"
+ ]
+ if "total_vpp_cpus" in self._nodes[key]["cpu"]:
+ node["cpu"]["total_vpp_cpus"] = self._nodes[key]["cpu"][
+ "total_vpp_cpus"
+ ]
+ if "reserve_vpp_main_core" in self._nodes[key]["cpu"]:
+ node["cpu"]["reserve_vpp_main_core"] = self._nodes[key]["cpu"][
+ "reserve_vpp_main_core"
+ ]
# TCP
- if 'active_open_sessions' in self._nodes[key]['tcp']:
- node['tcp']['active_open_sessions'] = \
- self._nodes[key]['tcp']['active_open_sessions']
- if 'passive_open_sessions' in self._nodes[key]['tcp']:
- node['tcp']['passive_open_sessions'] = \
- self._nodes[key]['tcp']['passive_open_sessions']
+ if "active_open_sessions" in self._nodes[key]["tcp"]:
+ node["tcp"]["active_open_sessions"] = self._nodes[key]["tcp"][
+ "active_open_sessions"
+ ]
+ if "passive_open_sessions" in self._nodes[key]["tcp"]:
+ node["tcp"]["passive_open_sessions"] = self._nodes[key]["tcp"][
+ "passive_open_sessions"
+ ]
# Huge pages
- node['hugepages']['total'] = self._nodes[key]['hugepages']['total']
+ node["hugepages"]["total"] = self._nodes[key]["hugepages"]["total"]
# Write the auto config config file
- with open(self._autoconfig_filename, 'w') as yamlfile:
+ with open(self._autoconfig_filename, "w") as yamlfile:
yaml.dump(ydata, yamlfile)
def apply_huge_pages(self):
@@ -325,28 +337,28 @@ class AutoConfig(object):
"""
# Get main core
- cpu = '\n'
- if 'vpp_main_core' in node['cpu']:
- vpp_main_core = node['cpu']['vpp_main_core']
+ cpu = "\n"
+ if "vpp_main_core" in node["cpu"]:
+ vpp_main_core = node["cpu"]["vpp_main_core"]
else:
vpp_main_core = 0
if vpp_main_core != 0:
- cpu += ' main-core {}\n'.format(vpp_main_core)
+ cpu += " main-core {}\n".format(vpp_main_core)
# Get workers
- vpp_workers = node['cpu']['vpp_workers']
+ vpp_workers = node["cpu"]["vpp_workers"]
vpp_worker_len = len(vpp_workers)
if vpp_worker_len > 0:
- vpp_worker_str = ''
+ vpp_worker_str = ""
for i, worker in enumerate(vpp_workers):
if i > 0:
- vpp_worker_str += ','
+ vpp_worker_str += ","
if worker[0] == worker[1]:
vpp_worker_str += "{}".format(worker[0])
else:
vpp_worker_str += "{}-{}".format(worker[0], worker[1])
- cpu += ' corelist-workers {}\n'.format(vpp_worker_str)
+ cpu += " corelist-workers {}\n".format(vpp_worker_str)
return cpu
@@ -359,41 +371,41 @@ class AutoConfig(object):
:type node: dict
"""
- devices = ''
- ports_per_numa = node['cpu']['ports_per_numa']
+ devices = ""
+ ports_per_numa = node["cpu"]["ports_per_numa"]
for item in ports_per_numa.items():
value = item[1]
- interfaces = value['interfaces']
+ interfaces = value["interfaces"]
# if 0 was specified for the number of vpp workers, use 1 queue
num_rx_queues = None
num_tx_queues = None
- if 'rx_queues' in value:
- num_rx_queues = value['rx_queues']
- if 'tx_queues' in value:
- num_tx_queues = value['tx_queues']
+ if "rx_queues" in value:
+ num_rx_queues = value["rx_queues"]
+ if "tx_queues" in value:
+ num_tx_queues = value["tx_queues"]
num_rx_desc = None
num_tx_desc = None
# Create the devices string
for interface in interfaces:
- pci_address = interface['pci_address']
+ pci_address = interface["pci_address"]
pci_address = pci_address.lstrip("'").rstrip("'")
- devices += '\n'
- devices += ' dev {} {{ \n'.format(pci_address)
+ devices += "\n"
+ devices += " dev {} {{ \n".format(pci_address)
if num_rx_queues:
- devices += ' num-rx-queues {}\n'.format(num_rx_queues)
+ devices += " num-rx-queues {}\n".format(num_rx_queues)
else:
- devices += ' num-rx-queues {}\n'.format(1)
+ devices += " num-rx-queues {}\n".format(1)
if num_tx_queues:
- devices += ' num-tx-queues {}\n'.format(num_tx_queues)
+ devices += " num-tx-queues {}\n".format(num_tx_queues)
if num_rx_desc:
- devices += ' num-rx-desc {}\n'.format(num_rx_desc)
+ devices += " num-rx-desc {}\n".format(num_rx_desc)
if num_tx_desc:
- devices += ' num-tx-desc {}\n'.format(num_tx_desc)
- devices += ' }'
+ devices += " num-tx-desc {}\n".format(num_tx_desc)
+ devices += " }"
return devices
@@ -405,20 +417,25 @@ class AutoConfig(object):
:param node: Node dictionary with cpuinfo.
:type node: dict
"""
- buffers = ''
- total_mbufs = node['cpu']['total_mbufs']
+ buffers = ""
+ total_mbufs = node["cpu"]["total_mbufs"]
# If the total mbufs is not 0 or less than the default, set num-bufs
logging.debug("Total mbufs: {}".format(total_mbufs))
if total_mbufs != 0 and total_mbufs > 16384:
- buffers += ' buffers-per-numa {}'.format(total_mbufs)
+ buffers += " buffers-per-numa {}".format(total_mbufs)
return buffers
@staticmethod
- def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end,
- total_vpp_workers,
- reserve_vpp_main_core):
+ def _calc_vpp_workers(
+ node,
+ vpp_workers,
+ numa_node,
+ other_cpus_end,
+ total_vpp_workers,
+ reserve_vpp_main_core,
+ ):
"""
Calculate the VPP worker information
@@ -440,7 +457,7 @@ class AutoConfig(object):
"""
# Can we fit the workers in one of these slices
- cpus = node['cpu']['cpus_per_node'][numa_node]
+ cpus = node["cpu"]["cpus_per_node"][numa_node]
for cpu in cpus:
start = cpu[0]
end = cpu[1]
@@ -454,7 +471,7 @@ class AutoConfig(object):
if workers_end <= end:
if reserve_vpp_main_core:
- node['cpu']['vpp_main_core'] = start - 1
+ node["cpu"]["vpp_main_core"] = start - 1
reserve_vpp_main_core = False
if total_vpp_workers:
vpp_workers.append((start, workers_end))
@@ -462,15 +479,14 @@ class AutoConfig(object):
# We still need to reserve the main core
if reserve_vpp_main_core:
- node['cpu']['vpp_main_core'] = other_cpus_end + 1
+ node["cpu"]["vpp_main_core"] = other_cpus_end + 1
return reserve_vpp_main_core
@staticmethod
- def _calc_desc_and_queues(total_numa_nodes,
- total_ports_per_numa,
- total_rx_queues,
- ports_per_numa_value):
+ def _calc_desc_and_queues(
+ total_numa_nodes, total_ports_per_numa, total_rx_queues, ports_per_numa_value
+ ):
"""
Calculate the number of descriptors and queues
@@ -494,8 +510,10 @@ class AutoConfig(object):
# Get the descriptor entries
desc_entries = 1024
- ports_per_numa_value['rx_queues'] = rx_queues
- total_mbufs = ((rx_queues * desc_entries) + (tx_queues * desc_entries)) * total_ports_per_numa
+ ports_per_numa_value["rx_queues"] = rx_queues
+ total_mbufs = (
+ (rx_queues * desc_entries) + (tx_queues * desc_entries)
+ ) * total_ports_per_numa
return total_mbufs
@@ -515,12 +533,12 @@ class AutoConfig(object):
ports_per_numa = {}
for item in interfaces.items():
i = item[1]
- if i['numa_node'] not in ports_per_numa:
- ports_per_numa[i['numa_node']] = {'interfaces': []}
- ports_per_numa[i['numa_node']]['interfaces'].append(i)
+ if i["numa_node"] not in ports_per_numa:
+ ports_per_numa[i["numa_node"]] = {"interfaces": []}
+ ports_per_numa[i["numa_node"]]["interfaces"].append(i)
else:
- ports_per_numa[i['numa_node']]['interfaces'].append(i)
- node['cpu']['ports_per_numa'] = ports_per_numa
+ ports_per_numa[i["numa_node"]]["interfaces"].append(i)
+ node["cpu"]["ports_per_numa"] = ports_per_numa
return ports_per_numa
@@ -536,24 +554,24 @@ class AutoConfig(object):
node = i[1]
# get total number of nic ports
- interfaces = node['interfaces']
+ interfaces = node["interfaces"]
# Make a list of ports by numa node
ports_per_numa = self._create_ports_per_numa(node, interfaces)
# Get the number of cpus to skip, we never use the first cpu
other_cpus_start = 1
- other_cpus_end = other_cpus_start + node['cpu']['total_other_cpus'] - 1
+ other_cpus_end = other_cpus_start + node["cpu"]["total_other_cpus"] - 1
other_workers = None
if other_cpus_end != 0:
other_workers = (other_cpus_start, other_cpus_end)
- node['cpu']['other_workers'] = other_workers
+ node["cpu"]["other_workers"] = other_workers
# Allocate the VPP main core and workers
vpp_workers = []
- reserve_vpp_main_core = node['cpu']['reserve_vpp_main_core']
- total_vpp_cpus = node['cpu']['total_vpp_cpus']
- total_rx_queues = node['cpu']['total_rx_queues']
+ reserve_vpp_main_core = node["cpu"]["reserve_vpp_main_core"]
+ total_vpp_cpus = node["cpu"]["total_vpp_cpus"]
+ total_rx_queues = node["cpu"]["total_rx_queues"]
# If total_vpp_cpus is 0 or is less than the numa nodes with ports
# then we shouldn't get workers
@@ -572,14 +590,21 @@ class AutoConfig(object):
# Get the number of descriptors and queues
mbufs = self._calc_desc_and_queues(
len(ports_per_numa),
- len(value['interfaces']), total_rx_queues, value)
+ len(value["interfaces"]),
+ total_rx_queues,
+ value,
+ )
total_mbufs += mbufs
# Get the VPP workers
reserve_vpp_main_core = self._calc_vpp_workers(
- node, vpp_workers, numa_node,
- other_cpus_end, total_workers_node,
- reserve_vpp_main_core)
+ node,
+ vpp_workers,
+ numa_node,
+ other_cpus_end,
+ total_workers_node,
+ reserve_vpp_main_core,
+ )
total_mbufs *= 2.5
total_mbufs = int(total_mbufs)
@@ -587,8 +612,8 @@ class AutoConfig(object):
total_mbufs = 0
# Save the info
- node['cpu']['vpp_workers'] = vpp_workers
- node['cpu']['total_mbufs'] = total_mbufs
+ node["cpu"]["vpp_workers"] = vpp_workers
+ node["cpu"]["total_mbufs"] = total_mbufs
# Write the config
self.updateconfig()
@@ -602,54 +627,55 @@ class AutoConfig(object):
:type node: dict
"""
- active_open_sessions = node['tcp']['active_open_sessions']
+ active_open_sessions = node["tcp"]["active_open_sessions"]
aos = int(active_open_sessions)
- passive_open_sessions = node['tcp']['passive_open_sessions']
+ passive_open_sessions = node["tcp"]["passive_open_sessions"]
pos = int(passive_open_sessions)
# Generate the api-segment gid vpp sheit in any case
if (aos + pos) == 0:
- tcp = '\n'.join([
+ tcp = "\n".join(["api-segment {", " gid vpp", "}"])
+ return tcp.rstrip("\n")
+
+ tcp = "\n".join(
+ [
+ "# TCP stack-related configuration parameters",
+ "# expecting {:d} client sessions, {:d} server sessions\n".format(
+ aos, pos
+ ),
+ "heapsize 4g\n",
"api-segment {",
- " gid vpp",
- "}"
- ])
- return tcp.rstrip('\n')
-
- tcp = '\n'.join([
- "# TCP stack-related configuration parameters",
- "# expecting {:d} client sessions, {:d} server sessions\n".format(
- aos, pos),
- "heapsize 4g\n",
- "api-segment {",
- " global-size 2000M",
- " api-size 1G",
- "}\n",
-
- "session {",
- " event-queue-length {:d}".format(aos + pos),
- " preallocated-sessions {:d}".format(aos + pos),
- " v4-session-table-buckets {:d}".format((aos + pos) // 4),
- " v4-session-table-memory 3g\n"
- ])
+ " global-size 2000M",
+ " api-size 1G",
+ "}\n",
+ "session {",
+ " event-queue-length {:d}".format(aos + pos),
+ " preallocated-sessions {:d}".format(aos + pos),
+ " v4-session-table-buckets {:d}".format((aos + pos) // 4),
+ " v4-session-table-memory 3g\n",
+ ]
+ )
if aos > 0:
- tcp = tcp + " v4-halfopen-table-buckets {:d}".format(
- (aos + pos) // 4) + "\n"
+ tcp = (
+ tcp + " v4-halfopen-table-buckets {:d}".format((aos + pos) // 4) + "\n"
+ )
tcp = tcp + " v4-halfopen-table-memory 3g\n"
- tcp = tcp + " local-endpoints-table-buckets {:d}".format(
- (aos + pos) // 4) + "\n"
+ tcp = (
+ tcp
+ + " local-endpoints-table-buckets {:d}".format((aos + pos) // 4)
+ + "\n"
+ )
tcp = tcp + " local-endpoints-table-memory 3g\n"
tcp = tcp + "}\n\n"
tcp = tcp + "tcp {\n"
tcp = tcp + " preallocated-connections {:d}".format(aos + pos) + "\n"
if aos > 0:
- tcp = tcp + " preallocated-half-open-connections {:d}".format(
- aos) + "\n"
+ tcp = tcp + " preallocated-half-open-connections {:d}".format(aos) + "\n"
tcp = tcp + "}\n\n"
- return tcp.rstrip('\n')
+ return tcp.rstrip("\n")
def apply_vpp_startup(self):
"""
@@ -662,8 +688,8 @@ class AutoConfig(object):
node = i[1]
# Get the startup file
- rootdir = node['rootdir']
- sfile = rootdir + node['vpp']['startup_config_file']
+ rootdir = node["rootdir"]
+ sfile = rootdir + node["vpp"]["startup_config_file"]
# Get the buffers
devices = self._apply_vpp_devices(node)
@@ -680,27 +706,22 @@ class AutoConfig(object):
self._autoconfig_backup_file(sfile)
# Get the template
- tfile = sfile + '.template'
- (ret, stdout, stderr) = \
- VPPUtil.exec_command('cat {}'.format(tfile))
+ tfile = sfile + ".template"
+ (ret, stdout, stderr) = VPPUtil.exec_command("cat {}".format(tfile))
if ret != 0:
- raise RuntimeError('Executing cat command failed to node {}'.
- format(node['host']))
- startup = stdout.format(cpu=cpu,
- buffers=buffers,
- devices=devices,
- tcp=tcp)
-
- (ret, stdout, stderr) = \
- VPPUtil.exec_command('rm {}'.format(sfile))
+ raise RuntimeError(
+ "Executing cat command failed to node {}".format(node["host"])
+ )
+ startup = stdout.format(cpu=cpu, buffers=buffers, devices=devices, tcp=tcp)
+
+ (ret, stdout, stderr) = VPPUtil.exec_command("rm {}".format(sfile))
if ret != 0:
logging.debug(stderr)
cmd = "sudo cat > {0} << EOF\n{1}\n".format(sfile, startup)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('Writing config failed node {}'.
- format(node['host']))
+ raise RuntimeError("Writing config failed node {}".format(node["host"]))
def apply_grub_cmdline(self):
"""
@@ -712,10 +733,10 @@ class AutoConfig(object):
node = i[1]
# Get the isolated CPUs
- other_workers = node['cpu']['other_workers']
- vpp_workers = node['cpu']['vpp_workers']
- if 'vpp_main_core' in node['cpu']:
- vpp_main_core = node['cpu']['vpp_main_core']
+ other_workers = node["cpu"]["other_workers"]
+ vpp_workers = node["cpu"]["vpp_workers"]
+ if "vpp_main_core" in node["cpu"]:
+ vpp_main_core = node["cpu"]["vpp_main_core"]
else:
vpp_main_core = 0
all_workers = []
@@ -724,12 +745,12 @@ class AutoConfig(object):
if vpp_main_core != 0:
all_workers += [(vpp_main_core, vpp_main_core)]
all_workers += vpp_workers
- isolated_cpus = ''
+ isolated_cpus = ""
for idx, worker in enumerate(all_workers):
if worker is None:
continue
if idx > 0:
- isolated_cpus += ','
+ isolated_cpus += ","
if worker[0] == worker[1]:
isolated_cpus += "{}".format(worker[0])
else:
@@ -737,11 +758,10 @@ class AutoConfig(object):
vppgrb = VppGrubUtil(node)
current_cmdline = vppgrb.get_current_cmdline()
- if 'grub' not in node:
- node['grub'] = {}
- node['grub']['current_cmdline'] = current_cmdline
- node['grub']['default_cmdline'] = \
- vppgrb.apply_cmdline(node, isolated_cpus)
+ if "grub" not in node:
+ node["grub"] = {}
+ node["grub"]["current_cmdline"] = current_cmdline
+ node["grub"]["default_cmdline"] = vppgrb.apply_cmdline(node, isolated_cpus)
self.updateconfig()
@@ -756,14 +776,14 @@ class AutoConfig(object):
hpg = VppHugePageUtil(node)
max_map_count, shmmax = hpg.get_huge_page_config()
- node['hugepages']['max_map_count'] = max_map_count
- node['hugepages']['shmax'] = shmmax
+ node["hugepages"]["max_map_count"] = max_map_count
+ node["hugepages"]["shmax"] = shmmax
total, free, size, memtotal, memfree = hpg.get_actual_huge_pages()
- node['hugepages']['actual_total'] = total
- node['hugepages']['free'] = free
- node['hugepages']['size'] = size
- node['hugepages']['memtotal'] = memtotal
- node['hugepages']['memfree'] = memfree
+ node["hugepages"]["actual_total"] = total
+ node["hugepages"]["free"] = free
+ node["hugepages"]["size"] = size
+ node["hugepages"]["memtotal"] = memtotal
+ node["hugepages"]["memfree"] = memfree
self.updateconfig()
@@ -782,14 +802,14 @@ class AutoConfig(object):
# Get the total number of isolated CPUs
current_iso_cpus = 0
- iso_cpur = re.findall(r'isolcpus=[\w+\-,]+', current_cmdline)
+ iso_cpur = re.findall(r"isolcpus=[\w+\-,]+", current_cmdline)
iso_cpurl = len(iso_cpur)
if iso_cpurl > 0:
iso_cpu_str = iso_cpur[0]
- iso_cpu_str = iso_cpu_str.split('=')[1]
- iso_cpul = iso_cpu_str.split(',')
+ iso_cpu_str = iso_cpu_str.split("=")[1]
+ iso_cpul = iso_cpu_str.split(",")
for iso_cpu in iso_cpul:
- isocpuspl = iso_cpu.split('-')
+ isocpuspl = iso_cpu.split("-")
if len(isocpuspl) == 1:
current_iso_cpus += 1
else:
@@ -800,11 +820,11 @@ class AutoConfig(object):
else:
current_iso_cpus += second - first
- if 'grub' not in node:
- node['grub'] = {}
- node['grub']['current_cmdline'] = current_cmdline
- node['grub']['default_cmdline'] = default_cmdline
- node['grub']['current_iso_cpus'] = current_iso_cpus
+ if "grub" not in node:
+ node["grub"] = {}
+ node["grub"]["current_cmdline"] = current_cmdline
+ node["grub"]["default_cmdline"] = default_cmdline
+ node["grub"]["current_iso_cpus"] = current_iso_cpus
self.updateconfig()
@@ -822,11 +842,11 @@ class AutoConfig(object):
vpp.get_all_devices()
# Save the device information
- node['devices'] = {}
- node['devices']['dpdk_devices'] = vpp.get_dpdk_devices()
- node['devices']['kernel_devices'] = vpp.get_kernel_devices()
- node['devices']['other_devices'] = vpp.get_other_devices()
- node['devices']['linkup_devices'] = vpp.get_link_up_devices()
+ node["devices"] = {}
+ node["devices"]["dpdk_devices"] = vpp.get_dpdk_devices()
+ node["devices"]["kernel_devices"] = vpp.get_kernel_devices()
+ node["devices"]["other_devices"] = vpp.get_other_devices()
+ node["devices"]["linkup_devices"] = vpp.get_link_up_devices()
def get_devices_per_node(self):
"""
@@ -856,20 +876,25 @@ class AutoConfig(object):
:rtype: list
"""
- cmd = 'lscpu -p'
+ cmd = "lscpu -p"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {}'.
- format(cmd, node['host'], stderr))
+ raise RuntimeError(
+ "{} failed on node {} {}".format(cmd, node["host"], stderr)
+ )
pcpus = []
- lines = stdout.split('\n')
+ lines = stdout.split("\n")
for line in lines:
- if line == '' or line[0] == '#':
+ if line == "" or line[0] == "#":
continue
- linesplit = line.split(',')
- layout = {'cpu': linesplit[0], 'core': linesplit[1],
- 'socket': linesplit[2], 'node': linesplit[3]}
+ linesplit = line.split(",")
+ layout = {
+ "cpu": linesplit[0],
+ "core": linesplit[1],
+ "socket": linesplit[2],
+ "node": linesplit[3],
+ }
# cpu, core, socket, node
pcpus.append(layout)
@@ -890,14 +915,14 @@ class AutoConfig(object):
# Get the cpu layout
layout = self.get_cpu_layout(node)
- node['cpu']['layout'] = layout
+ node["cpu"]["layout"] = layout
- cpuinfo = node['cpuinfo']
+ cpuinfo = node["cpuinfo"]
smt_enabled = CpuUtils.is_smt_enabled(cpuinfo)
- node['cpu']['smt_enabled'] = smt_enabled
+ node["cpu"]["smt_enabled"] = smt_enabled
# We don't want to write the cpuinfo
- node['cpuinfo'] = ""
+ node["cpuinfo"] = ""
# Write the config
self.updateconfig()
@@ -932,46 +957,59 @@ class AutoConfig(object):
:type numa_nodes: list
"""
- print("\nYour system has {} core(s) and {} Numa Nodes.".
- format(total_cpus, len(numa_nodes)))
- print("To begin, we suggest not reserving any cores for "
- "VPP or other processes.")
- print("Then to improve performance start reserving cores and "
- "adding queues as needed.")
+ print(
+ "\nYour system has {} core(s) and {} Numa Nodes.".format(
+ total_cpus, len(numa_nodes)
+ )
+ )
+ print(
+ "To begin, we suggest not reserving any cores for "
+ "VPP or other processes."
+ )
+ print(
+ "Then to improve performance start reserving cores and "
+ "adding queues as needed."
+ )
# Leave 1 for the general system
total_cpus -= 1
max_vpp_cpus = min(total_cpus, 4)
total_vpp_cpus = 0
if max_vpp_cpus > 0:
- question = "\nHow many core(s) shall we reserve for " \
- "VPP [0-{}][0]? ".format(max_vpp_cpus)
+ question = (
+ "\nHow many core(s) shall we reserve for "
+ "VPP [0-{}][0]? ".format(max_vpp_cpus)
+ )
total_vpp_cpus = self._ask_user_range(question, 0, max_vpp_cpus, 0)
- node['cpu']['total_vpp_cpus'] = total_vpp_cpus
+ node["cpu"]["total_vpp_cpus"] = total_vpp_cpus
total_other_cpus = 0
max_other_cores = total_cpus - total_vpp_cpus
if max_other_cores > 0:
- question = 'How many core(s) do you want to reserve for ' \
- 'processes other than VPP? [0-{}][0]? '. format(str(max_other_cores))
+ question = (
+ "How many core(s) do you want to reserve for "
+ "processes other than VPP? [0-{}][0]? ".format(str(max_other_cores))
+ )
total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
- node['cpu']['total_other_cpus'] = total_other_cpus
+ node["cpu"]["total_other_cpus"] = total_other_cpus
max_main_cpus = total_cpus - total_vpp_cpus - total_other_cpus
reserve_vpp_main_core = False
if max_main_cpus > 0:
question = "Should we reserve 1 core for the VPP Main thread? "
question += "[y/N]? "
- answer = self._ask_user_yn(question, 'n')
- if answer == 'y':
+ answer = self._ask_user_yn(question, "n")
+ if answer == "y":
reserve_vpp_main_core = True
- node['cpu']['reserve_vpp_main_core'] = reserve_vpp_main_core
- node['cpu']['vpp_main_core'] = 0
+ node["cpu"]["reserve_vpp_main_core"] = reserve_vpp_main_core
+ node["cpu"]["vpp_main_core"] = 0
- question = "How many RX queues per port shall we use for " \
- "VPP [1-4][1]? ".format(max_vpp_cpus)
+ question = (
+ "How many RX queues per port shall we use for "
+ "VPP [1-4][1]? ".format(max_vpp_cpus)
+ )
total_rx_queues = self._ask_user_range(question, 1, 4, 1)
- node['cpu']['total_rx_queues'] = total_rx_queues
+ node["cpu"]["total_rx_queues"] = total_rx_queues
def modify_cpu(self, ask_questions=True):
"""
@@ -995,50 +1033,50 @@ class AutoConfig(object):
# Assume the number of cpus per slice is always the same as the
# first slice
- first_node = '0'
+ first_node = "0"
for cpu in cpu_layout:
- if cpu['node'] != first_node:
+ if cpu["node"] != first_node:
break
total_cpus_per_slice += 1
# Get the total number of cpus, cores, and numa nodes from the
# cpu layout
for cpul in cpu_layout:
- numa_node = cpul['node']
- core = cpul['core']
- cpu = cpul['cpu']
+ numa_node = cpul["node"]
+ core = cpul["core"]
+ cpu = cpul["cpu"]
total_cpus += 1
if numa_node not in cpus_per_node:
cpus_per_node[numa_node] = []
cpuperslice = int(cpu) % total_cpus_per_slice
if cpuperslice == 0:
- cpus_per_node[numa_node].append((int(cpu), int(cpu) +
- total_cpus_per_slice - 1))
+ cpus_per_node[numa_node].append(
+ (int(cpu), int(cpu) + total_cpus_per_slice - 1)
+ )
if numa_node not in numa_nodes:
numa_nodes.append(numa_node)
if core not in cores:
cores.append(core)
- node['cpu']['cpus_per_node'] = cpus_per_node
+ node["cpu"]["cpus_per_node"] = cpus_per_node
# Ask the user some questions
if ask_questions and total_cpus >= 4:
self._modify_cpu_questions(node, total_cpus, numa_nodes)
# Populate the interfaces with the numa node
- if 'interfaces' in node:
- ikeys = node['interfaces'].keys()
+ if "interfaces" in node:
+ ikeys = node["interfaces"].keys()
VPPUtil.get_interfaces_numa_node(node, *tuple(ikeys))
# We don't want to write the cpuinfo
- node['cpuinfo'] = ""
+ node["cpuinfo"] = ""
# Write the configs
self._update_auto_config()
self.updateconfig()
- def _modify_other_devices(self, node,
- other_devices, kernel_devices, dpdk_devices):
+ def _modify_other_devices(self, node, other_devices, kernel_devices, dpdk_devices):
"""
Modify the devices configuration, asking for the user for the values.
@@ -1046,31 +1084,31 @@ class AutoConfig(object):
odevices_len = len(other_devices)
if odevices_len > 0:
- print("\nThese device(s) are currently NOT being used "
- "by VPP or the OS.\n")
+ print(
+ "\nThese device(s) are currently NOT being used " "by VPP or the OS.\n"
+ )
VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
question = "\nWould you like to give any of these devices"
question += " back to the OS [Y/n]? "
- answer = self._ask_user_yn(question, 'Y')
- if answer == 'y':
+ answer = self._ask_user_yn(question, "Y")
+ if answer == "y":
vppd = {}
for dit in other_devices.items():
dvid = dit[0]
device = dit[1]
- question = "Would you like to use device {} for". \
- format(dvid)
+ question = "Would you like to use device {} for".format(dvid)
question += " the OS [y/N]? "
- answer = self._ask_user_yn(question, 'n')
- if answer == 'y':
- if 'unused' in device and len(
- device['unused']) != 0 and \
- device['unused'][0] != '':
- driver = device['unused'][0]
- ret = VppPCIUtil.bind_vpp_device(
- node, driver, dvid)
+ answer = self._ask_user_yn(question, "n")
+ if answer == "y":
+ if (
+ "unused" in device
+ and len(device["unused"]) != 0
+ and device["unused"][0] != ""
+ ):
+ driver = device["unused"][0]
+ ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
if ret:
- logging.debug(
- 'Could not bind device {}'.format(dvid))
+ logging.debug("Could not bind device {}".format(dvid))
else:
vppd[dvid] = device
for dit in vppd.items():
@@ -1081,34 +1119,35 @@ class AutoConfig(object):
odevices_len = len(other_devices)
if odevices_len > 0:
- print("\nThese device(s) are still NOT being used "
- "by VPP or the OS.\n")
+ print("\nThese device(s) are still NOT being used " "by VPP or the OS.\n")
VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)
question = "\nWould you like use any of these for VPP [y/N]? "
- answer = self._ask_user_yn(question, 'N')
- if answer == 'y':
+ answer = self._ask_user_yn(question, "N")
+ if answer == "y":
vppd = {}
for dit in other_devices.items():
dvid = dit[0]
device = dit[1]
question = "Would you like to use device {} ".format(dvid)
question += "for VPP [y/N]? "
- answer = self._ask_user_yn(question, 'n')
- if answer == 'y':
+ answer = self._ask_user_yn(question, "n")
+ if answer == "y":
vppd[dvid] = device
for dit in vppd.items():
dvid = dit[0]
device = dit[1]
- if 'unused' in device and len(device['unused']) != 0 and \
- device['unused'][0] != '':
- driver = device['unused'][0]
+ if (
+ "unused" in device
+ and len(device["unused"]) != 0
+ and device["unused"][0] != ""
+ ):
+ driver = device["unused"][0]
logging.debug(
- 'Binding device {} to driver {}'.format(dvid,
- driver))
+ "Binding device {} to driver {}".format(dvid, driver)
+ )
ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
if ret:
- logging.debug(
- 'Could not bind device {}'.format(dvid))
+ logging.debug("Could not bind device {}".format(dvid))
else:
dpdk_devices[dvid] = device
del other_devices[dvid]
@@ -1121,22 +1160,23 @@ class AutoConfig(object):
for i in self._nodes.items():
node = i[1]
- devices = node['devices']
- all_devices = devices['other_devices']
- all_devices.update(devices['dpdk_devices'])
- all_devices.update(devices['kernel_devices'])
+ devices = node["devices"]
+ all_devices = devices["other_devices"]
+ all_devices.update(devices["dpdk_devices"])
+ all_devices.update(devices["kernel_devices"])
current_ifcs = {}
interfaces = {}
- if 'interfaces' in node:
- current_ifcs = node['interfaces']
+ if "interfaces" in node:
+ current_ifcs = node["interfaces"]
if current_ifcs:
for ifc in current_ifcs.values():
- dvid = ifc['pci_address']
+ dvid = ifc["pci_address"]
if dvid in all_devices:
- VppPCIUtil.vpp_create_interface(interfaces, dvid,
- all_devices[dvid])
- node['interfaces'] = interfaces
+ VppPCIUtil.vpp_create_interface(
+ interfaces, dvid, all_devices[dvid]
+ )
+ node["interfaces"] = interfaces
self.updateconfig()
@@ -1148,86 +1188,98 @@ class AutoConfig(object):
for i in self._nodes.items():
node = i[1]
- devices = node['devices']
- other_devices = devices['other_devices']
- kernel_devices = devices['kernel_devices']
- dpdk_devices = devices['dpdk_devices']
+ devices = node["devices"]
+ other_devices = devices["other_devices"]
+ kernel_devices = devices["kernel_devices"]
+ dpdk_devices = devices["dpdk_devices"]
if other_devices:
- self._modify_other_devices(node, other_devices,
- kernel_devices, dpdk_devices)
+ self._modify_other_devices(
+ node, other_devices, kernel_devices, dpdk_devices
+ )
# Get the devices again for this node
self._get_device(node)
- devices = node['devices']
- kernel_devices = devices['kernel_devices']
- dpdk_devices = devices['dpdk_devices']
+ devices = node["devices"]
+ kernel_devices = devices["kernel_devices"]
+ dpdk_devices = devices["dpdk_devices"]
klen = len(kernel_devices)
if klen > 0:
print("\nThese devices are safe to be used with VPP.\n")
VppPCIUtil.show_vpp_devices(kernel_devices)
- question = "\nWould you like to use any of these " \
- "device(s) for VPP [y/N]? "
- answer = self._ask_user_yn(question, 'n')
- if answer == 'y':
+ question = (
+ "\nWould you like to use any of these " "device(s) for VPP [y/N]? "
+ )
+ answer = self._ask_user_yn(question, "n")
+ if answer == "y":
vppd = {}
for dit in kernel_devices.items():
dvid = dit[0]
device = dit[1]
question = "Would you like to use device {} ".format(dvid)
question += "for VPP [y/N]? "
- answer = self._ask_user_yn(question, 'n')
- if answer == 'y':
+ answer = self._ask_user_yn(question, "n")
+ if answer == "y":
vppd[dvid] = device
for dit in vppd.items():
dvid = dit[0]
device = dit[1]
- if 'unused' in device and len(
- device['unused']) != 0 and device['unused'][0] != '':
- driver = device['unused'][0]
- question = "Would you like to bind the driver {} for {} [y/N]? ".format(driver, dvid)
- answer = self._ask_user_yn(question, 'n')
- if answer == 'y':
- logging.debug('Binding device {} to driver {}'.format(dvid, driver))
+ if (
+ "unused" in device
+ and len(device["unused"]) != 0
+ and device["unused"][0] != ""
+ ):
+ driver = device["unused"][0]
+ question = "Would you like to bind the driver {} for {} [y/N]? ".format(
+ driver, dvid
+ )
+ answer = self._ask_user_yn(question, "n")
+ if answer == "y":
+ logging.debug(
+ "Binding device {} to driver {}".format(
+ dvid, driver
+ )
+ )
ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
if ret:
- logging.debug('Could not bind device {}'.format(dvid))
+ logging.debug(
+ "Could not bind device {}".format(dvid)
+ )
dpdk_devices[dvid] = device
del kernel_devices[dvid]
dlen = len(dpdk_devices)
if dlen > 0:
print("\nThese device(s) are already using DPDK.\n")
- VppPCIUtil.show_vpp_devices(dpdk_devices,
- show_interfaces=False)
+ VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)
question = "\nWould you like to remove any of "
question += "these device(s) [y/N]? "
- answer = self._ask_user_yn(question, 'n')
- if answer == 'y':
+ answer = self._ask_user_yn(question, "n")
+ if answer == "y":
vppdl = {}
for dit in dpdk_devices.items():
dvid = dit[0]
device = dit[1]
- question = "Would you like to remove {} [y/N]? ". \
- format(dvid)
- answer = self._ask_user_yn(question, 'n')
- if answer == 'y':
+ question = "Would you like to remove {} [y/N]? ".format(dvid)
+ answer = self._ask_user_yn(question, "n")
+ if answer == "y":
vppdl[dvid] = device
for dit in vppdl.items():
dvid = dit[0]
device = dit[1]
- if 'unused' in device and len(
- device['unused']) != 0 and device['unused'][0] != '':
- driver = device['unused'][0]
+ if (
+ "unused" in device
+ and len(device["unused"]) != 0
+ and device["unused"][0] != ""
+ ):
+ driver = device["unused"][0]
logging.debug(
- 'Binding device {} to driver {}'.format(
- dvid, driver))
- ret = VppPCIUtil.bind_vpp_device(node, driver,
- dvid)
+ "Binding device {} to driver {}".format(dvid, driver)
+ )
+ ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)
if ret:
- logging.debug(
- 'Could not bind device {}'.format(dvid))
+ logging.debug("Could not bind device {}".format(dvid))
else:
kernel_devices[dvid] = device
del dpdk_devices[dvid]
@@ -1237,7 +1289,7 @@ class AutoConfig(object):
dvid = dit[0]
device = dit[1]
VppPCIUtil.vpp_create_interface(interfaces, dvid, device)
- node['interfaces'] = interfaces
+ node["interfaces"] = interfaces
self._update_auto_config()
self.updateconfig()
@@ -1251,29 +1303,27 @@ class AutoConfig(object):
for i in self._nodes.items():
node = i[1]
- total = node['hugepages']['actual_total']
- free = node['hugepages']['free']
- size = node['hugepages']['size']
- memfree = node['hugepages']['memfree'].split(' ')[0]
- hugesize = int(size.split(' ')[0])
+ total = node["hugepages"]["actual_total"]
+ free = node["hugepages"]["free"]
+ size = node["hugepages"]["size"]
+ memfree = node["hugepages"]["memfree"].split(" ")[0]
+ hugesize = int(size.split(" ")[0])
# The max number of huge pages should be no more than
# 70% of total free memory
maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // hugesize
- print("\nThere currently {} {} huge pages free.".format(
- free, size))
- question = "Do you want to reconfigure the number of " \
- "huge pages [y/N]? "
- answer = self._ask_user_yn(question, 'n')
- if answer == 'n':
- node['hugepages']['total'] = total
+ print("\nThere currently {} {} huge pages free.".format(free, size))
+ question = "Do you want to reconfigure the number of " "huge pages [y/N]? "
+ answer = self._ask_user_yn(question, "n")
+ if answer == "n":
+ node["hugepages"]["total"] = total
continue
- print("\nThere currently a total of {} huge pages.".
- format(total))
- question = "How many huge pages do you want [{} - {}][{}]? ". \
- format(MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES)
+ print("\nThere currently a total of {} huge pages.".format(total))
+ question = "How many huge pages do you want [{} - {}][{}]? ".format(
+ MIN_TOTAL_HUGE_PAGES, maxpages, MIN_TOTAL_HUGE_PAGES
+ )
answer = self._ask_user_range(question, 1024, maxpages, 1024)
- node['hugepages']['total'] = str(answer)
+ node["hugepages"]["total"] = str(answer)
# Update auto-config.yaml
self._update_auto_config()
@@ -1298,21 +1348,25 @@ class AutoConfig(object):
for i in self._nodes.items():
node = i[1]
- question = "\nHow many active-open / tcp client sessions are " \
- "expected [0-10000000][0]? "
+ question = (
+ "\nHow many active-open / tcp client sessions are "
+ "expected [0-10000000][0]? "
+ )
answer = self._ask_user_range(question, 0, 10000000, 0)
# Less than 10K is equivalent to 0
if int(answer) < 10000:
answer = 0
- node['tcp']['active_open_sessions'] = answer
+ node["tcp"]["active_open_sessions"] = answer
- question = "How many passive-open / tcp server sessions are " \
- "expected [0-10000000][0]? "
+ question = (
+ "How many passive-open / tcp server sessions are "
+ "expected [0-10000000][0]? "
+ )
answer = self._ask_user_range(question, 0, 10000000, 0)
# Less than 10K is equivalent to 0
if int(answer) < 10000:
answer = 0
- node['tcp']['passive_open_sessions'] = answer
+ node["tcp"]["passive_open_sessions"] = answer
# Update auto-config.yaml
self._update_auto_config()
@@ -1329,7 +1383,7 @@ class AutoConfig(object):
:type node: dict
"""
- print('\nWe are patching the node "{}":\n'.format(node['host']))
+ print('\nWe are patching the node "{}":\n'.format(node["host"]))
QemuUtils.build_qemu(node, force_install=True, apply_patch=True)
@staticmethod
@@ -1341,44 +1395,44 @@ class AutoConfig(object):
cpu = CpuUtils.get_cpu_info_per_node(node)
- item = 'Model name'
+ item = "Model name"
if item in cpu:
print("{:>20}: {}".format(item, cpu[item]))
- item = 'CPU(s)'
+ item = "CPU(s)"
if item in cpu:
print("{:>20}: {}".format(item, cpu[item]))
- item = 'Thread(s) per core'
+ item = "Thread(s) per core"
if item in cpu:
print("{:>20}: {}".format(item, cpu[item]))
- item = 'Core(s) per socket'
+ item = "Core(s) per socket"
if item in cpu:
print("{:>20}: {}".format(item, cpu[item]))
- item = 'Socket(s)'
+ item = "Socket(s)"
if item in cpu:
print("{:>20}: {}".format(item, cpu[item]))
- item = 'NUMA node(s)'
+ item = "NUMA node(s)"
numa_nodes = 0
if item in cpu:
numa_nodes = int(cpu[item])
for i in range(0, numa_nodes):
item = "NUMA node{} CPU(s)".format(i)
print("{:>20}: {}".format(item, cpu[item]))
- item = 'CPU max MHz'
+ item = "CPU max MHz"
if item in cpu:
print("{:>20}: {}".format(item, cpu[item]))
- item = 'CPU min MHz'
+ item = "CPU min MHz"
if item in cpu:
print("{:>20}: {}".format(item, cpu[item]))
- if node['cpu']['smt_enabled']:
- smt = 'Enabled'
+ if node["cpu"]["smt_enabled"]:
+ smt = "Enabled"
else:
- smt = 'Disabled'
- print("{:>20}: {}".format('SMT', smt))
+ smt = "Disabled"
+ print("{:>20}: {}".format("SMT", smt))
# VPP Threads
print("\nVPP Threads: (Name: Cpu Number)")
- vpp_processes = cpu['vpp_processes']
+ vpp_processes = cpu["vpp_processes"]
for i in vpp_processes.items():
print(" {:10}: {:4}".format(i[0], i[1]))
@@ -1389,8 +1443,8 @@ class AutoConfig(object):
"""
- if 'cpu' in node and 'total_mbufs' in node['cpu']:
- total_mbufs = node['cpu']['total_mbufs']
+ if "cpu" in node and "total_mbufs" in node["cpu"]:
+ total_mbufs = node["cpu"]["total_mbufs"]
if total_mbufs != 0:
print("Total Number of Buffers: {}".format(total_mbufs))
@@ -1412,16 +1466,14 @@ class AutoConfig(object):
dpdk_devs = vpp.get_dpdk_devices()
if len(dpdk_devs):
print("\nDevices bound to DPDK drivers:")
- vpp.show_vpp_devices(dpdk_devs, show_interfaces=True,
- show_header=False)
+ vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)
else:
print("\nNo devices bound to DPDK drivers")
other_devs = vpp.get_other_devices()
if len(other_devs):
print("\nDevices not bound to Kernel or DPDK drivers:")
- vpp.show_vpp_devices(other_devs, show_interfaces=True,
- show_header=False)
+ vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)
else:
print("\nNo devices not bound to Kernel or DPDK drivers")
@@ -1436,28 +1488,33 @@ class AutoConfig(object):
print("None")
return
- print("{:30} {:4} {:4} {:7} {:4} {:7}".
- format('Name', 'Numa', 'RXQs',
- 'RXDescs', 'TXQs', 'TXDescs'))
+ print(
+ "{:30} {:4} {:4} {:7} {:4} {:7}".format(
+ "Name", "Numa", "RXQs", "RXDescs", "TXQs", "TXDescs"
+ )
+ )
for intf in sorted(interfaces.items()):
name = intf[0]
value = intf[1]
- if name == 'local0':
+ if name == "local0":
continue
- numa = rx_qs = rx_ds = tx_qs = tx_ds = ''
- if 'numa' in value:
- numa = int(value['numa'])
- if 'rx queues' in value:
- rx_qs = int(value['rx queues'])
- if 'rx descs' in value:
- rx_ds = int(value['rx descs'])
- if 'tx queues' in value:
- tx_qs = int(value['tx queues'])
- if 'tx descs' in value:
- tx_ds = int(value['tx descs'])
-
- print("{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".
- format(name, numa, rx_qs, rx_ds, tx_qs, tx_ds))
+ numa = rx_qs = rx_ds = tx_qs = tx_ds = ""
+ if "numa" in value:
+ numa = int(value["numa"])
+ if "rx queues" in value:
+ rx_qs = int(value["rx queues"])
+ if "rx descs" in value:
+ rx_ds = int(value["rx descs"])
+ if "tx queues" in value:
+ tx_qs = int(value["tx queues"])
+ if "tx descs" in value:
+ tx_ds = int(value["tx descs"])
+
+ print(
+ "{:30} {:>4} {:>4} {:>7} {:>4} {:>7}".format(
+ name, numa, rx_qs, rx_ds, tx_qs, tx_ds
+ )
+ )
@staticmethod
def hugepage_info(node):
@@ -1476,7 +1533,7 @@ class AutoConfig(object):
:returns: boolean
"""
- if 'interfaces' in node and len(node['interfaces']):
+ if "interfaces" in node and len(node["interfaces"]):
return True
else:
return False
@@ -1493,30 +1550,33 @@ class AutoConfig(object):
min_sys_res = True
# CPUs
- if 'layout' in node['cpu']:
- total_cpus = len(node['cpu']['layout'])
+ if "layout" in node["cpu"]:
+ total_cpus = len(node["cpu"]["layout"])
if total_cpus < 2:
- print("\nThere is only {} CPU(s) available on this system. "
- "This is not enough to run VPP.".format(total_cpus))
+ print(
+ "\nThere is only {} CPU(s) available on this system. "
+ "This is not enough to run VPP.".format(total_cpus)
+ )
min_sys_res = False
# System Memory
- if 'free' in node['hugepages'] and \
- 'memfree' in node['hugepages'] and \
- 'size' in node['hugepages']:
- free = node['hugepages']['free']
- memfree = float(node['hugepages']['memfree'].split(' ')[0])
- hugesize = float(node['hugepages']['size'].split(' ')[0])
+ if (
+ "free" in node["hugepages"]
+ and "memfree" in node["hugepages"]
+ and "size" in node["hugepages"]
+ ):
+ free = node["hugepages"]["free"]
+ memfree = float(node["hugepages"]["memfree"].split(" ")[0])
+ hugesize = float(node["hugepages"]["size"].split(" ")[0])
memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize
percentmemhugepages = (memhugepages / memfree) * 100
- if free is '0' and \
- percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
+ if free is "0" and percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:
print(
"\nThe System has only {} of free memory. You will not "
"be able to allocate enough Huge Pages for VPP.".format(
- int(
- memfree))
+ int(memfree)
+ )
)
min_sys_res = False
@@ -1541,11 +1601,9 @@ class AutoConfig(object):
# Grub
print("\nGrub Command Line:")
- if 'grub' in node:
- print(" Current: {}".format(
- node['grub']['current_cmdline']))
- print(" Configured: {}".format(
- node['grub']['default_cmdline']))
+ if "grub" in node:
+ print(" Current: {}".format(node["grub"]["current_cmdline"]))
+ print(" Configured: {}".format(node["grub"]["default_cmdline"]))
# Huge Pages
print("\nHuge Pages:")
@@ -1586,17 +1644,18 @@ class AutoConfig(object):
interfaces_with_ip = []
for intf in sorted(interfaces.items()):
name = intf[0]
- if name == 'local0':
+ if name == "local0":
continue
- question = "Would you like add address to " \
- "interface {} [Y/n]? ".format(name)
- answer = self._ask_user_yn(question, 'y')
- if answer == 'y':
+ question = "Would you like add address to " "interface {} [Y/n]? ".format(
+ name
+ )
+ answer = self._ask_user_yn(question, "y")
+ if answer == "y":
address = {}
addr = self._ask_user_ipv4()
- address['name'] = name
- address['addr'] = addr
+ address["name"] = name
+ address["addr"] = addr
interfaces_with_ip.append(address)
return interfaces_with_ip
@@ -1618,40 +1677,37 @@ class AutoConfig(object):
for items in sorted(current_ints.items()):
name = items[0]
value = items[1]
- if 'address' not in value:
- address = 'Not Set'
+ if "address" not in value:
+ address = "Not Set"
else:
- address = value['address']
- print("{:30} {:20} {:10}".format(name, address,
- value['state']))
- question = "\nWould you like to keep this configuration " \
- "[Y/n]? "
- answer = self._ask_user_yn(question, 'y')
- if answer == 'y':
+ address = value["address"]
+ print("{:30} {:20} {:10}".format(name, address, value["state"]))
+ question = "\nWould you like to keep this configuration " "[Y/n]? "
+ answer = self._ask_user_yn(question, "y")
+ if answer == "y":
continue
else:
- print("\nThere are currently no interfaces with IP "
- "addresses.")
+ print("\nThere are currently no interfaces with IP " "addresses.")
# Create a script that add the ip addresses to the interfaces
# and brings the interfaces up
ints_with_addrs = self._ipv4_interface_setup_questions(node)
- content = ''
+ content = ""
for ints in ints_with_addrs:
- name = ints['name']
- addr = ints['addr']
- setipstr = 'set int ip address {} {}\n'.format(name, addr)
- setintupstr = 'set int state {} up\n'.format(name)
+ name = ints["name"]
+ addr = ints["addr"]
+ setipstr = "set int ip address {} {}\n".format(name, addr)
+ setintupstr = "set int state {} up\n".format(name)
content += setipstr + setintupstr
# Write the content to the script
- rootdir = node['rootdir']
- filename = rootdir + '/vpp/vpp-config/scripts/set_int_ipv4_and_up'
- with open(filename, 'w+') as sfile:
+ rootdir = node["rootdir"]
+ filename = rootdir + "/vpp/vpp-config/scripts/set_int_ipv4_and_up"
+ with open(filename, "w+") as sfile:
sfile.write(content)
# Execute the script
- cmd = 'vppctl exec {}'.format(filename)
+ cmd = "vppctl exec {}".format(filename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
logging.debug(stderr)
@@ -1679,12 +1735,13 @@ class AutoConfig(object):
# First delete all the Virtual interfaces
for intf in sorted(interfaces.items()):
name = intf[0]
- if name[:7] == 'Virtual':
- cmd = 'vppctl delete vhost-user {}'.format(name)
+ if name[:7] == "Virtual":
+ cmd = "vppctl delete vhost-user {}".format(name)
(ret, stdout, stderr) = vpputl.exec_command(cmd)
if ret != 0:
- logging.debug('{} failed on node {} {}'.format(
- cmd, node['host'], stderr))
+ logging.debug(
+ "{} failed on node {} {}".format(cmd, node["host"], stderr)
+ )
# Create a virtual interface, for each interface the user wants to use
interfaces = vpputl.get_hardware(node)
@@ -1694,36 +1751,38 @@ class AutoConfig(object):
inum = 1
for intf in sorted(interfaces.items()):
name = intf[0]
- if name == 'local0':
+ if name == "local0":
continue
- question = "Would you like connect this interface {} to " \
- "the VM [Y/n]? ".format(name)
- answer = self._ask_user_yn(question, 'y')
- if answer == 'y':
- sockfilename = '/var/run/vpp/{}.sock'.format(
- name.replace('/', '_'))
+ question = (
+ "Would you like connect this interface {} to "
+ "the VM [Y/n]? ".format(name)
+ )
+ answer = self._ask_user_yn(question, "y")
+ if answer == "y":
+ sockfilename = "/var/run/vpp/{}.sock".format(name.replace("/", "_"))
if os.path.exists(sockfilename):
os.remove(sockfilename)
- cmd = 'vppctl create vhost-user socket {} server'.format(
- sockfilename)
+ cmd = "vppctl create vhost-user socket {} server".format(sockfilename)
(ret, stdout, stderr) = vpputl.exec_command(cmd)
if ret != 0:
raise RuntimeError(
- "Couldn't execute the command {}, {}.".format(cmd,
- stderr))
- vintname = stdout.rstrip('\r\n')
+ "Couldn't execute the command {}, {}.".format(cmd, stderr)
+ )
+ vintname = stdout.rstrip("\r\n")
- cmd = 'chmod 777 {}'.format(sockfilename)
+ cmd = "chmod 777 {}".format(sockfilename)
(ret, stdout, stderr) = vpputl.exec_command(cmd)
if ret != 0:
raise RuntimeError(
- "Couldn't execute the command {}, {}.".format(cmd,
- stderr))
-
- interface = {'name': name,
- 'virtualinterface': '{}'.format(vintname),
- 'bridge': '{}'.format(inum)}
+ "Couldn't execute the command {}, {}.".format(cmd, stderr)
+ )
+
+ interface = {
+ "name": name,
+ "virtualinterface": "{}".format(vintname),
+ "bridge": "{}".format(inum),
+ }
inum += 1
interfaces_with_virtual_interfaces.append(interface)
@@ -1743,49 +1802,58 @@ class AutoConfig(object):
print("\nThis the current bridge configuration:")
VPPUtil.show_bridge(node)
question = "\nWould you like to keep this configuration [Y/n]? "
- answer = self._ask_user_yn(question, 'y')
- if answer == 'y':
+ answer = self._ask_user_yn(question, "y")
+ if answer == "y":
continue
# Create a script that builds a bridge configuration with
# physical interfaces and virtual interfaces
ints_with_vints = self._create_vints_questions(node)
- content = ''
+ content = ""
for intf in ints_with_vints:
- vhoststr = '\n'.join([
- 'comment { The following command creates the socket }',
- 'comment { and returns a virtual interface }',
- 'comment {{ create vhost-user socket '
- '/var/run/vpp/sock{}.sock server }}\n'.format(
- intf['bridge'])
- ])
-
- setintdnstr = 'set interface state {} down\n'.format(
- intf['name'])
-
- setintbrstr = 'set interface l2 bridge {} {}\n'.format(
- intf['name'], intf['bridge'])
- setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
- intf['virtualinterface'], intf['bridge'])
+ vhoststr = "\n".join(
+ [
+ "comment { The following command creates the socket }",
+ "comment { and returns a virtual interface }",
+ "comment {{ create vhost-user socket "
+ "/var/run/vpp/sock{}.sock server }}\n".format(intf["bridge"]),
+ ]
+ )
+
+ setintdnstr = "set interface state {} down\n".format(intf["name"])
+
+ setintbrstr = "set interface l2 bridge {} {}\n".format(
+ intf["name"], intf["bridge"]
+ )
+ setvintbrstr = "set interface l2 bridge {} {}\n".format(
+ intf["virtualinterface"], intf["bridge"]
+ )
# set interface state VirtualEthernet/0/0/0 up
- setintvststr = 'set interface state {} up\n'.format(
- intf['virtualinterface'])
+ setintvststr = "set interface state {} up\n".format(
+ intf["virtualinterface"]
+ )
# set interface state VirtualEthernet/0/0/0 down
- setintupstr = 'set interface state {} up\n'.format(
- intf['name'])
-
- content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
+ setintupstr = "set interface state {} up\n".format(intf["name"])
+
+ content += (
+ vhoststr
+ + setintdnstr
+ + setintbrstr
+ + setvintbrstr
+ + setintvststr
+ + setintupstr
+ )
# Write the content to the script
- rootdir = node['rootdir']
- filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
- with open(filename, 'w+') as sfile:
+ rootdir = node["rootdir"]
+ filename = rootdir + "/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp"
+ with open(filename, "w+") as sfile:
sfile.write(content)
# Execute the script
- cmd = 'vppctl exec {}'.format(filename)
+ cmd = "vppctl exec {}".format(filename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
logging.debug(stderr)
@@ -1813,12 +1881,13 @@ class AutoConfig(object):
# First delete all the Virtual interfaces
for intf in sorted(interfaces.items()):
name = intf[0]
- if name[:7] == 'Virtual':
- cmd = 'vppctl delete vhost-user {}'.format(name)
+ if name[:7] == "Virtual":
+ cmd = "vppctl delete vhost-user {}".format(name)
(ret, stdout, stderr) = vpputl.exec_command(cmd)
if ret != 0:
- logging.debug('{} failed on node {} {}'.format(
- cmd, node['host'], stderr))
+ logging.debug(
+ "{} failed on node {} {}".format(cmd, node["host"], stderr)
+ )
# Create a virtual interface, for each interface the user wants to use
interfaces = vpputl.get_hardware(node)
@@ -1828,39 +1897,45 @@ class AutoConfig(object):
inum = 1
while True:
- print('\nPlease pick one interface to connect to the iperf VM.')
+ print("\nPlease pick one interface to connect to the iperf VM.")
for intf in sorted(interfaces.items()):
name = intf[0]
- if name == 'local0':
+ if name == "local0":
continue
- question = "Would you like connect this interface {} to " \
- "the VM [y/N]? ".format(name)
- answer = self._ask_user_yn(question, 'n')
- if answer == 'y':
- self._sockfilename = '/var/run/vpp/{}.sock'.format(
- name.replace('/', '_'))
+ question = (
+ "Would you like connect this interface {} to "
+ "the VM [y/N]? ".format(name)
+ )
+ answer = self._ask_user_yn(question, "n")
+ if answer == "y":
+ self._sockfilename = "/var/run/vpp/{}.sock".format(
+ name.replace("/", "_")
+ )
if os.path.exists(self._sockfilename):
os.remove(self._sockfilename)
- cmd = 'vppctl create vhost-user socket {} server'.format(
- self._sockfilename)
+ cmd = "vppctl create vhost-user socket {} server".format(
+ self._sockfilename
+ )
(ret, stdout, stderr) = vpputl.exec_command(cmd)
if ret != 0:
raise RuntimeError(
- "Couldn't execute the command {}, {}.".format(
- cmd, stderr))
- vintname = stdout.rstrip('\r\n')
+ "Couldn't execute the command {}, {}.".format(cmd, stderr)
+ )
+ vintname = stdout.rstrip("\r\n")
- cmd = 'chmod 777 {}'.format(self._sockfilename)
+ cmd = "chmod 777 {}".format(self._sockfilename)
(ret, stdout, stderr) = vpputl.exec_command(cmd)
if ret != 0:
raise RuntimeError(
- "Couldn't execute the command {}, {}.".format(
- cmd, stderr))
-
- interface = {'name': name,
- 'virtualinterface': '{}'.format(vintname),
- 'bridge': '{}'.format(inum)}
+ "Couldn't execute the command {}, {}.".format(cmd, stderr)
+ )
+
+ interface = {
+ "name": name,
+ "virtualinterface": "{}".format(vintname),
+ "bridge": "{}".format(inum),
+ }
inum += 1
interfaces_with_virtual_interfaces.append(interface)
return interfaces_with_virtual_interfaces
@@ -1879,52 +1954,62 @@ class AutoConfig(object):
print("\nThis the current bridge configuration:")
ifaces = VPPUtil.show_bridge(node)
question = "\nWould you like to keep this configuration [Y/n]? "
- answer = self._ask_user_yn(question, 'y')
- if answer == 'y':
- self._sockfilename = '/var/run/vpp/{}.sock'.format(
- ifaces[0]['name'].replace('/', '_'))
+ answer = self._ask_user_yn(question, "y")
+ if answer == "y":
+ self._sockfilename = "/var/run/vpp/{}.sock".format(
+ ifaces[0]["name"].replace("/", "_")
+ )
if os.path.exists(self._sockfilename):
continue
# Create a script that builds a bridge configuration with
# physical interfaces and virtual interfaces
ints_with_vints = self._iperf_vm_questions(node)
- content = ''
+ content = ""
for intf in ints_with_vints:
- vhoststr = '\n'.join([
- 'comment { The following command creates the socket }',
- 'comment { and returns a virtual interface }',
- 'comment {{ create vhost-user socket '
- '/var/run/vpp/sock{}.sock server }}\n'.format(
- intf['bridge'])
- ])
-
- setintdnstr = 'set interface state {} down\n'.format(
- intf['name'])
-
- setintbrstr = 'set interface l2 bridge {} {}\n'.format(
- intf['name'], intf['bridge'])
- setvintbrstr = 'set interface l2 bridge {} {}\n'.format(
- intf['virtualinterface'], intf['bridge'])
+ vhoststr = "\n".join(
+ [
+ "comment { The following command creates the socket }",
+ "comment { and returns a virtual interface }",
+ "comment {{ create vhost-user socket "
+ "/var/run/vpp/sock{}.sock server }}\n".format(intf["bridge"]),
+ ]
+ )
+
+ setintdnstr = "set interface state {} down\n".format(intf["name"])
+
+ setintbrstr = "set interface l2 bridge {} {}\n".format(
+ intf["name"], intf["bridge"]
+ )
+ setvintbrstr = "set interface l2 bridge {} {}\n".format(
+ intf["virtualinterface"], intf["bridge"]
+ )
# set interface state VirtualEthernet/0/0/0 up
- setintvststr = 'set interface state {} up\n'.format(
- intf['virtualinterface'])
+ setintvststr = "set interface state {} up\n".format(
+ intf["virtualinterface"]
+ )
# set interface state VirtualEthernet/0/0/0 down
- setintupstr = 'set interface state {} up\n'.format(
- intf['name'])
-
- content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
+ setintupstr = "set interface state {} up\n".format(intf["name"])
+
+ content += (
+ vhoststr
+ + setintdnstr
+ + setintbrstr
+ + setvintbrstr
+ + setintvststr
+ + setintupstr
+ )
# Write the content to the script
- rootdir = node['rootdir']
- filename = rootdir + '/vpp/vpp-config/scripts/create_iperf_vm'
- with open(filename, 'w+') as sfile:
+ rootdir = node["rootdir"]
+ filename = rootdir + "/vpp/vpp-config/scripts/create_iperf_vm"
+ with open(filename, "w+") as sfile:
sfile.write(content)
# Execute the script
- cmd = 'vppctl exec {}'.format(filename)
+ cmd = "vppctl exec {}".format(filename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
logging.debug(stderr)
@@ -1943,21 +2028,22 @@ class AutoConfig(object):
:type name: str
"""
- cmd = 'virsh list'
+ cmd = "virsh list"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
logging.debug(stderr)
raise RuntimeError(
- "Couldn't execute the command {} : {}".format(cmd, stderr))
+ "Couldn't execute the command {} : {}".format(cmd, stderr)
+ )
if re.findall(name, stdout):
- cmd = 'virsh destroy {}'.format(name)
+ cmd = "virsh destroy {}".format(name)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
logging.debug(stderr)
raise RuntimeError(
- "Couldn't execute the command {} : {}".format(
- cmd, stderr))
+ "Couldn't execute the command {} : {}".format(cmd, stderr)
+ )
def create_iperf_vm(self, vmname):
"""
@@ -1968,36 +2054,39 @@ class AutoConfig(object):
# Read the iperf VM template file
distro = VPPUtil.get_linux_distro()
- if distro[0] == 'Ubuntu':
- tfilename = \
- '{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template'.format(
- self._rootdir)
+ if distro[0] == "Ubuntu":
+ tfilename = "{}/vpp/vpp-config/configs/iperf-ubuntu.xml.template".format(
+ self._rootdir
+ )
else:
- tfilename = \
- '{}/vpp/vpp-config/configs/iperf-centos.xml.template'.format(
- self._rootdir)
+ tfilename = "{}/vpp/vpp-config/configs/iperf-centos.xml.template".format(
+ self._rootdir
+ )
- with open(tfilename, 'r') as tfile:
+ with open(tfilename, "r") as tfile:
tcontents = tfile.read()
tfile.close()
# Add the variables
- imagename = '{}/vpp/vpp-config/{}'.format(
- self._rootdir, IPERFVM_IMAGE)
- isoname = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_ISO)
- tcontents = tcontents.format(vmname=vmname, imagename=imagename,
- isoname=isoname,
- vhostsocketname=self._sockfilename)
+ imagename = "{}/vpp/vpp-config/{}".format(self._rootdir, IPERFVM_IMAGE)
+ isoname = "{}/vpp/vpp-config/{}".format(self._rootdir, IPERFVM_ISO)
+ tcontents = tcontents.format(
+ vmname=vmname,
+ imagename=imagename,
+ isoname=isoname,
+ vhostsocketname=self._sockfilename,
+ )
# Write the xml
- ifilename = '{}/vpp/vpp-config/{}'.format(self._rootdir, IPERFVM_XML)
- with open(ifilename, 'w+') as ifile:
+ ifilename = "{}/vpp/vpp-config/{}".format(self._rootdir, IPERFVM_XML)
+ with open(ifilename, "w+") as ifile:
ifile.write(tcontents)
ifile.close()
- cmd = 'virsh create {}'.format(ifilename)
+ cmd = "virsh create {}".format(ifilename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
logging.debug(stderr)
raise RuntimeError(
- "Couldn't execute the command {} : {}".format(cmd, stderr))
+ "Couldn't execute the command {} : {}".format(cmd, stderr)
+ )
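Note on the AutoConfig.py hunks above: the changes are formatting-only. Black normalizes string quotes to double quotes, wraps long .format() calls (here one argument per line), and groups concatenated script fragments inside parentheses instead of a single long expression. A minimal sketch, using a hypothetical interface record that is not part of the patch, showing that the wrapped form assembles exactly the same vppctl script text as the old one-line form:

    # Hypothetical interface record; the keys mirror the ones used in the hunks above.
    intf = {"name": "TenGigabitEthernet0/8/0", "virtualinterface": "VirtualEthernet0/0/0", "bridge": 1}

    # Pre-black style: single quotes, everything on one long line.
    old = 'set interface l2 bridge {} {}\n'.format(intf['name'], intf['bridge']) + \
          'set interface state {} up\n'.format(intf['virtualinterface'])

    # Black style: double quotes, the pieces summed inside parentheses.
    new = (
        "set interface l2 bridge {} {}\n".format(intf["name"], intf["bridge"])
        + "set interface state {} up\n".format(intf["virtualinterface"])
    )

    assert old == new  # formatting only; the generated script content is unchanged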
diff --git a/extras/vpp_config/vpplib/CpuUtils.py b/extras/vpp_config/vpplib/CpuUtils.py
index 23f418d33be..f6ba3d74746 100644
--- a/extras/vpp_config/vpplib/CpuUtils.py
+++ b/extras/vpp_config/vpplib/CpuUtils.py
@@ -78,13 +78,14 @@ class CpuUtils(object):
# 1,1,0,0,,1,1,1,0
if ret != 0:
raise RuntimeError(
- "Failed to execute ssh command, ret: {} err: {}".format(
- ret, stderr))
- node['cpuinfo'] = list()
+ "Failed to execute ssh command, ret: {} err: {}".format(ret, stderr)
+ )
+ node["cpuinfo"] = list()
for line in stdout.split("\n"):
- if line != '' and line[0] != "#":
- node['cpuinfo'].append([CpuUtils.__str2int(x) for x in
- line.split(",")])
+ if line != "" and line[0] != "#":
+ node["cpuinfo"].append(
+ [CpuUtils.__str2int(x) for x in line.split(",")]
+ )
@staticmethod
def cpu_node_count(node):
@@ -137,13 +138,14 @@ class CpuUtils(object):
if smt_enabled and not smt_used:
cpu_list_len = len(cpu_list)
- cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
+ cpu_list = cpu_list[: cpu_list_len // CpuUtils.NR_OF_THREADS]
return cpu_list
@staticmethod
- def cpu_slice_of_list_per_node(node, cpu_node, skip_cnt=0, cpu_cnt=0,
- smt_used=False):
+ def cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False
+ ):
"""Return string of node related list of CPU numbers.
:param node: Node dictionary with cpuinfo.
@@ -171,20 +173,20 @@ class CpuUtils(object):
cpu_cnt = cpu_list_len - skip_cnt
if smt_used:
- cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
- cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
- cpu_list = [cpu for cpu in cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]]
- cpu_list_ex = [cpu for cpu in
- cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]]
+ cpu_list_0 = cpu_list[: cpu_list_len // CpuUtils.NR_OF_THREADS]
+ cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS :]
+ cpu_list = [cpu for cpu in cpu_list_0[skip_cnt : skip_cnt + cpu_cnt]]
+ cpu_list_ex = [cpu for cpu in cpu_list_1[skip_cnt : skip_cnt + cpu_cnt]]
cpu_list.extend(cpu_list_ex)
else:
- cpu_list = [cpu for cpu in cpu_list[skip_cnt:skip_cnt + cpu_cnt]]
+ cpu_list = [cpu for cpu in cpu_list[skip_cnt : skip_cnt + cpu_cnt]]
return cpu_list
@staticmethod
- def cpu_list_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",",
- smt_used=False):
+ def cpu_list_per_node_str(
+ node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",", smt_used=False
+ ):
"""Return string of node related list of CPU numbers.
:param node: Node dictionary with cpuinfo.
@@ -203,15 +205,15 @@ class CpuUtils(object):
:rtype: str
"""
- cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=cpu_cnt,
- smt_used=smt_used)
+ cpu_list = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt, smt_used=smt_used
+ )
return sep.join(str(cpu) for cpu in cpu_list)
@staticmethod
- def cpu_range_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-",
- smt_used=False):
+ def cpu_range_per_node_str(
+ node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-", smt_used=False
+ ):
"""Return string of node related range of CPU numbers, e.g. 0-4.
:param node: Node dictionary with cpuinfo.
@@ -230,18 +232,16 @@ class CpuUtils(object):
:rtype: str
"""
- cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=cpu_cnt,
- smt_used=smt_used)
+ cpu_list = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt, smt_used=smt_used
+ )
if smt_used:
cpu_list_len = len(cpu_list)
- cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
- cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
- cpu_range = "{}{}{},{}{}{}".format(cpu_list_0[0], sep,
- cpu_list_0[-1],
- cpu_list_1[0], sep,
- cpu_list_1[-1])
+ cpu_list_0 = cpu_list[: cpu_list_len // CpuUtils.NR_OF_THREADS]
+ cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS :]
+ cpu_range = "{}{}{},{}{}{}".format(
+ cpu_list_0[0], sep, cpu_list_0[-1], cpu_list_1[0], sep, cpu_list_1[-1]
+ )
else:
cpu_range = "{}{}{}".format(cpu_list[0], sep, cpu_list[-1])
@@ -260,28 +260,30 @@ class CpuUtils(object):
cmd = "lscpu"
ret, stdout, stderr = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError("lscpu command failed on node {} {}."
- .format(node['host'], stderr))
+ raise RuntimeError(
+ "lscpu command failed on node {} {}.".format(node["host"], stderr)
+ )
cpuinfo = {}
- lines = stdout.split('\n')
+ lines = stdout.split("\n")
for line in lines:
- if line != '':
- linesplit = re.split(r':\s+', line)
+ if line != "":
+ linesplit = re.split(r":\s+", line)
cpuinfo[linesplit[0]] = linesplit[1]
cmd = "cat /proc/*/task/*/stat | awk '{print $1" "$2" "$39}'"
ret, stdout, stderr = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError("cat command failed on node {} {}."
- .format(node['host'], stderr))
+ raise RuntimeError(
+ "cat command failed on node {} {}.".format(node["host"], stderr)
+ )
vpp_processes = {}
- vpp_lines = re.findall(r'\w+\(vpp_\w+\)\w+', stdout)
+ vpp_lines = re.findall(r"\w+\(vpp_\w+\)\w+", stdout)
for line in vpp_lines:
- linesplit = re.split(r'\w+\(', line)[1].split(')')
+ linesplit = re.split(r"\w+\(", line)[1].split(")")
vpp_processes[linesplit[0]] = linesplit[1]
- cpuinfo['vpp_processes'] = vpp_processes
+ cpuinfo["vpp_processes"] = vpp_processes
return cpuinfo
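Note on the CpuUtils.py hunks: most of the diff is slice spacing. Black follows the PEP 8 rule of treating the slice colon as an operator and adds spaces around it when either bound is an expression, e.g. cpu_list[skip_cnt : skip_cnt + cpu_cnt]. A small sketch with hypothetical CPU numbers (not taken from the patch) confirming the two spellings select the same elements:

    NR_OF_THREADS = 2  # mirrors the CpuUtils constant referenced in the hunks above
    cpu_list = [8, 9, 10, 11, 24, 25, 26, 27]
    skip_cnt, cpu_cnt = 1, 2

    # Half of the list (physical cores only, when SMT is enabled but unused).
    assert cpu_list[:len(cpu_list) // NR_OF_THREADS] == cpu_list[: len(cpu_list) // NR_OF_THREADS]

    # Skip/count slice as used by cpu_slice_of_list_per_node().
    assert cpu_list[skip_cnt:skip_cnt + cpu_cnt] == cpu_list[skip_cnt : skip_cnt + cpu_cnt]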
diff --git a/extras/vpp_config/vpplib/QemuUtils.py b/extras/vpp_config/vpplib/QemuUtils.py
index 0b7e08b12d8..e1da7ae72bf 100644
--- a/extras/vpp_config/vpplib/QemuUtils.py
+++ b/extras/vpp_config/vpplib/QemuUtils.py
@@ -12,7 +12,7 @@
# limitations under the License.
"""QEMU utilities library."""
-from __future__ import absolute_import, division
+from __future__ import absolute_import, division
from time import time, sleep
import json
@@ -24,12 +24,13 @@ from vpplib.constants import Constants
class NodeType(object):
"""Defines node types used in topology dictionaries."""
+
# Device Under Test (this node has VPP running on it)
- DUT = 'DUT'
+ DUT = "DUT"
# Traffic Generator (this node has traffic generator on it)
- TG = 'TG'
+ TG = "TG"
# Virtual Machine (this node running on DUT node)
- VM = 'VM'
+ VM = "VM"
class QemuUtils(object):
@@ -39,43 +40,46 @@ class QemuUtils(object):
def __init__(self, qemu_id=1):
self._qemu_id = qemu_id
# Path to QEMU binary
- self._qemu_bin = '/usr/bin/qemu-system-x86_64'
+ self._qemu_bin = "/usr/bin/qemu-system-x86_64"
# QEMU Machine Protocol socket
- self._qmp_sock = '/tmp/qmp{0}.sock'.format(self._qemu_id)
+ self._qmp_sock = "/tmp/qmp{0}.sock".format(self._qemu_id)
# QEMU Guest Agent socket
- self._qga_sock = '/tmp/qga{0}.sock'.format(self._qemu_id)
+ self._qga_sock = "/tmp/qga{0}.sock".format(self._qemu_id)
# QEMU PID file
- self._pid_file = '/tmp/qemu{0}.pid'.format(self._qemu_id)
+ self._pid_file = "/tmp/qemu{0}.pid".format(self._qemu_id)
self._qemu_opt = {}
# Default 1 CPU.
- self._qemu_opt['smp'] = '-smp 1,sockets=1,cores=1,threads=1'
+ self._qemu_opt["smp"] = "-smp 1,sockets=1,cores=1,threads=1"
# Daemonize the QEMU process after initialization. Default one
# management interface.
- self._qemu_opt['options'] = '-cpu host -daemonize -enable-kvm ' \
- '-machine pc,accel=kvm,usb=off,mem-merge=off ' \
- '-net nic,macaddr=52:54:00:00:{0:02x}:ff -balloon none'\
- .format(self._qemu_id)
- self._qemu_opt['ssh_fwd_port'] = 10021 + qemu_id
+ self._qemu_opt["options"] = (
+ "-cpu host -daemonize -enable-kvm "
+ "-machine pc,accel=kvm,usb=off,mem-merge=off "
+ "-net nic,macaddr=52:54:00:00:{0:02x}:ff -balloon none".format(
+ self._qemu_id
+ )
+ )
+ self._qemu_opt["ssh_fwd_port"] = 10021 + qemu_id
# Default serial console port
- self._qemu_opt['serial_port'] = 4555 + qemu_id
+ self._qemu_opt["serial_port"] = 4555 + qemu_id
# Default 512MB virtual RAM
- self._qemu_opt['mem_size'] = 512
+ self._qemu_opt["mem_size"] = 512
# Default huge page mount point, required for Vhost-user interfaces.
- self._qemu_opt['huge_mnt'] = '/mnt/huge'
+ self._qemu_opt["huge_mnt"] = "/mnt/huge"
# Default do not allocate huge pages.
- self._qemu_opt['huge_allocate'] = False
+ self._qemu_opt["huge_allocate"] = False
# Default image for CSIT virl setup
- self._qemu_opt['disk_image'] = '/var/lib/vm/vhost-nested.img'
+ self._qemu_opt["disk_image"] = "/var/lib/vm/vhost-nested.img"
# VM node info dict
self._vm_info = {
- 'type': NodeType.VM,
- 'port': self._qemu_opt['ssh_fwd_port'],
- 'username': 'cisco',
- 'password': 'cisco',
- 'interfaces': {},
+ "type": NodeType.VM,
+ "port": self._qemu_opt["ssh_fwd_port"],
+ "username": "cisco",
+ "password": "cisco",
+ "interfaces": {},
}
# Virtio queue count
- self._qemu_opt['queues'] = 1
+ self._qemu_opt["queues"] = 1
self._vhost_id = 0
self._ssh = None
self._node = None
@@ -101,9 +105,9 @@ class QemuUtils(object):
:type threads: int
:type sockets: int
"""
- self._qemu_opt['smp'] = \
- '-smp {},cores={},threads={},sockets={}'.format(
- cpus, cores, threads, sockets)
+ self._qemu_opt["smp"] = "-smp {},cores={},threads={},sockets={}".format(
+ cpus, cores, threads, sockets
+ )
def qemu_set_ssh_fwd_port(self, fwd_port):
"""Set host port for guest SSH forwarding.
@@ -111,8 +115,8 @@ class QemuUtils(object):
:param fwd_port: Port number on host for guest SSH forwarding.
:type fwd_port: int
"""
- self._qemu_opt['ssh_fwd_port'] = fwd_port
- self._vm_info['port'] = fwd_port
+ self._qemu_opt["ssh_fwd_port"] = fwd_port
+ self._vm_info["port"] = fwd_port
def qemu_set_serial_port(self, port):
"""Set serial console port.
@@ -120,7 +124,7 @@ class QemuUtils(object):
:param port: Serial console port.
:type port: int
"""
- self._qemu_opt['serial_port'] = port
+ self._qemu_opt["serial_port"] = port
def qemu_set_mem_size(self, mem_size):
"""Set virtual RAM size.
@@ -128,7 +132,7 @@ class QemuUtils(object):
:param mem_size: RAM size in Mega Bytes.
:type mem_size: int
"""
- self._qemu_opt['mem_size'] = int(mem_size)
+ self._qemu_opt["mem_size"] = int(mem_size)
def qemu_set_huge_mnt(self, huge_mnt):
"""Set hugefile mount point.
@@ -136,11 +140,11 @@ class QemuUtils(object):
:param huge_mnt: System hugefile mount point.
:type huge_mnt: int
"""
- self._qemu_opt['huge_mnt'] = huge_mnt
+ self._qemu_opt["huge_mnt"] = huge_mnt
def qemu_set_huge_allocate(self):
"""Set flag to allocate more huge pages if needed."""
- self._qemu_opt['huge_allocate'] = True
+ self._qemu_opt["huge_allocate"] = True
def qemu_set_disk_image(self, disk_image):
"""Set disk image.
@@ -148,7 +152,7 @@ class QemuUtils(object):
:param disk_image: Path of the disk image.
:type disk_image: str
"""
- self._qemu_opt['disk_image'] = disk_image
+ self._qemu_opt["disk_image"] = disk_image
def qemu_set_affinity(self, *host_cpus):
"""Set qemu affinity by getting thread PIDs via QMP and taskset to list
@@ -157,36 +161,41 @@ class QemuUtils(object):
:param host_cpus: List of CPU cores.
:type host_cpus: list
"""
- qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']
+ qemu_cpus = self._qemu_qmp_exec("query-cpus")["return"]
if len(qemu_cpus) != len(host_cpus):
- logging.debug('Host CPU count {0}, Qemu Thread count {1}'.format(
- len(host_cpus), len(qemu_cpus)))
- raise ValueError('Host CPU count must match Qemu Thread count')
+ logging.debug(
+ "Host CPU count {0}, Qemu Thread count {1}".format(
+ len(host_cpus), len(qemu_cpus)
+ )
+ )
+ raise ValueError("Host CPU count must match Qemu Thread count")
for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
- cmd = 'taskset -pc {0} {1}'.format(host_cpu, qemu_cpu['thread_id'])
+ cmd = "taskset -pc {0} {1}".format(host_cpu, qemu_cpu["thread_id"])
(ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- logging.debug('Set affinity failed {0}'.format(stderr))
- raise RuntimeError('Set affinity failed on {0}'.format(
- self._node['host']))
+ logging.debug("Set affinity failed {0}".format(stderr))
+ raise RuntimeError(
+ "Set affinity failed on {0}".format(self._node["host"])
+ )
def qemu_set_scheduler_policy(self):
"""Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
- processes.
+ processes.
- :raises RuntimeError: Set scheduler policy failed.
+ :raises RuntimeError: Set scheduler policy failed.
"""
- qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']
+ qemu_cpus = self._qemu_qmp_exec("query-cpus")["return"]
for qemu_cpu in qemu_cpus:
- cmd = 'chrt -r -p 1 {0}'.format(qemu_cpu['thread_id'])
+ cmd = "chrt -r -p 1 {0}".format(qemu_cpu["thread_id"])
(ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- logging.debug('Set SCHED_RR failed {0}'.format(stderr))
- raise RuntimeError('Set SCHED_RR failed on {0}'.format(
- self._node['host']))
+ logging.debug("Set SCHED_RR failed {0}".format(stderr))
+ raise RuntimeError(
+ "Set SCHED_RR failed on {0}".format(self._node["host"])
+ )
def qemu_set_node(self, node):
"""Set node to run QEMU on.
@@ -195,7 +204,7 @@ class QemuUtils(object):
:type node: dict
"""
self._node = node
- self._vm_info['host'] = node['host']
+ self._vm_info["host"] = node["host"]
def qemu_add_vhost_user_if(self, socket, server=True, mac=None):
"""Add Vhost-user interface.
@@ -210,31 +219,33 @@ class QemuUtils(object):
"""
self._vhost_id += 1
# Create unix socket character device.
- chardev = ' -chardev socket,id=char{0},path={1}'.format(self._vhost_id,
- socket)
+ chardev = " -chardev socket,id=char{0},path={1}".format(self._vhost_id, socket)
if server is True:
- chardev += ',server'
- self._qemu_opt['options'] += chardev
+ chardev += ",server"
+ self._qemu_opt["options"] += chardev
# Create Vhost-user network backend.
- netdev = (' -netdev vhost-user,id=vhost{0},chardev=char{0},queues={1}'
- .format(self._vhost_id, self._qemu_opt['queues']))
- self._qemu_opt['options'] += netdev
+ netdev = " -netdev vhost-user,id=vhost{0},chardev=char{0},queues={1}".format(
+ self._vhost_id, self._qemu_opt["queues"]
+ )
+ self._qemu_opt["options"] += netdev
# If MAC is not specified use auto-generated MAC address based on
# template 52:54:00:00:<qemu_id>:<vhost_id>, e.g. vhost1 MAC of QEMU
# with ID 1 is 52:54:00:00:01:01
if mac is None:
- mac = '52:54:00:00:{0:02x}:{1:02x}'.\
- format(self._qemu_id, self._vhost_id)
- extend_options = 'mq=on,csum=off,gso=off,guest_tso4=off,'\
- 'guest_tso6=off,guest_ecn=off,mrg_rxbuf=off'
+ mac = "52:54:00:00:{0:02x}:{1:02x}".format(self._qemu_id, self._vhost_id)
+ extend_options = (
+ "mq=on,csum=off,gso=off,guest_tso4=off,"
+ "guest_tso6=off,guest_ecn=off,mrg_rxbuf=off"
+ )
# Create Virtio network device.
- device = ' -device virtio-net-pci,netdev=vhost{0},mac={1},{2}'.format(
- self._vhost_id, mac, extend_options)
- self._qemu_opt['options'] += device
+ device = " -device virtio-net-pci,netdev=vhost{0},mac={1},{2}".format(
+ self._vhost_id, mac, extend_options
+ )
+ self._qemu_opt["options"] += device
# Add interface MAC and socket to the node dict
- if_data = {'mac_address': mac, 'socket': socket}
- if_name = 'vhost{}'.format(self._vhost_id)
- self._vm_info['interfaces'][if_name] = if_data
+ if_data = {"mac_address": mac, "socket": socket}
+ if_name = "vhost{}".format(self._vhost_id)
+ self._vm_info["interfaces"][if_name] = if_data
# Add socket to the socket list
self._socks.append(socket)
@@ -250,41 +261,44 @@ class QemuUtils(object):
response will contain the "error" keyword instead of "return".
"""
# To enter command mode, the qmp_capabilities command must be issued.
- qmp_cmd = 'echo "{ \\"execute\\": \\"qmp_capabilities\\" }' \
- '{ \\"execute\\": \\"' + cmd + \
- '\\" }" | sudo -S socat - UNIX-CONNECT:' + self._qmp_sock
+ qmp_cmd = (
+ 'echo "{ \\"execute\\": \\"qmp_capabilities\\" }'
+ '{ \\"execute\\": \\"'
+ + cmd
+ + '\\" }" | sudo -S socat - UNIX-CONNECT:'
+ + self._qmp_sock
+ )
(ret_code, stdout, stderr) = self._ssh.exec_command(qmp_cmd)
if int(ret_code) != 0:
- logging.debug('QMP execute failed {0}'.format(stderr))
- raise RuntimeError('QMP execute "{0}"'
- ' failed on {1}'.format(
- cmd, self._node['host']))
+ logging.debug("QMP execute failed {0}".format(stderr))
+ raise RuntimeError(
+ 'QMP execute "{0}"' " failed on {1}".format(cmd, self._node["host"])
+ )
logging.debug(stdout)
# Skip capabilities negotiation messages.
out_list = stdout.splitlines()
if len(out_list) < 3:
- raise RuntimeError('Invalid QMP output on {0}'.format(
- self._node['host']))
+ raise RuntimeError("Invalid QMP output on {0}".format(self._node["host"]))
return json.loads(out_list[2])
def _qemu_qga_flush(self):
- """Flush the QGA parser state
- """
- qga_cmd = '(printf "\xFF"; sleep 1) | ' \
- 'sudo -S socat - UNIX-CONNECT:' + \
- self._qga_sock
+ """Flush the QGA parser state"""
+ qga_cmd = (
+ '(printf "\xFF"; sleep 1) | '
+ "sudo -S socat - UNIX-CONNECT:" + self._qga_sock
+ )
# TODO: probably need something else
(ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
if int(ret_code) != 0:
- logging.debug('QGA execute failed {0}'.format(stderr))
- raise RuntimeError('QGA execute "{0}" '
- 'failed on {1}'.format(qga_cmd,
- self._node['host']))
+ logging.debug("QGA execute failed {0}".format(stderr))
+ raise RuntimeError(
+ 'QGA execute "{0}" ' "failed on {1}".format(qga_cmd, self._node["host"])
+ )
logging.debug(stdout)
if not stdout:
return {}
- return json.loads(stdout.split('\n', 1)[0])
+ return json.loads(stdout.split("\n", 1)[0])
def _qemu_qga_exec(self, cmd):
"""Execute QGA command.
@@ -294,20 +308,22 @@ class QemuUtils(object):
:param cmd: QGA command to execute.
:type cmd: str
"""
- qga_cmd = '(echo "{ \\"execute\\": \\"' + \
- cmd + \
- '\\" }"; sleep 1) | sudo -S socat - UNIX-CONNECT:' + \
- self._qga_sock
+ qga_cmd = (
+ '(echo "{ \\"execute\\": \\"'
+ + cmd
+ + '\\" }"; sleep 1) | sudo -S socat - UNIX-CONNECT:'
+ + self._qga_sock
+ )
(ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
if int(ret_code) != 0:
- logging.debug('QGA execute failed {0}'.format(stderr))
- raise RuntimeError('QGA execute "{0}"'
- ' failed on {1}'.format(
- cmd, self._node['host']))
+ logging.debug("QGA execute failed {0}".format(stderr))
+ raise RuntimeError(
+ 'QGA execute "{0}"' " failed on {1}".format(cmd, self._node["host"])
+ )
logging.debug(stdout)
if not stdout:
return {}
- return json.loads(stdout.split('\n', 1)[0])
+ return json.loads(stdout.split("\n", 1)[0])
def _wait_until_vm_boot(self, timeout=60):
"""Wait until QEMU VM is booted.
@@ -320,65 +336,69 @@ class QemuUtils(object):
start = time()
while True:
if time() - start > timeout:
- raise RuntimeError('timeout, VM {0} not booted on {1}'.format(
- self._qemu_opt['disk_image'], self._node['host']))
+ raise RuntimeError(
+ "timeout, VM {0} not booted on {1}".format(
+ self._qemu_opt["disk_image"], self._node["host"]
+ )
+ )
out = None
try:
self._qemu_qga_flush()
- out = self._qemu_qga_exec('guest-ping')
+ out = self._qemu_qga_exec("guest-ping")
except ValueError:
- logging.debug(
- 'QGA guest-ping unexpected output {}'.format(out))
+ logging.debug("QGA guest-ping unexpected output {}".format(out))
# Empty output - VM not booted yet
if not out:
sleep(5)
# Non-error return - VM booted
- elif out.get('return') is not None:
+ elif out.get("return") is not None:
break
# Skip error and wait
- elif out.get('error') is not None:
+ elif out.get("error") is not None:
sleep(5)
else:
# If there is an unexpected output from QGA guest-info, try
# again until timeout.
- logging.debug(
- 'QGA guest-ping unexpected output {}'.format(out))
+ logging.debug("QGA guest-ping unexpected output {}".format(out))
logging.debug(
- 'VM {0} booted on {1}'.format(self._qemu_opt['disk_image'],
- self._node['host']))
+ "VM {0} booted on {1}".format(
+ self._qemu_opt["disk_image"], self._node["host"]
+ )
+ )
def _update_vm_interfaces(self):
"""Update interface names in VM node dict."""
# Send guest-network-get-interfaces command via QGA, output example:
# {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"},
# {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]}
- out = self._qemu_qga_exec('guest-network-get-interfaces')
- interfaces = out.get('return')
+ out = self._qemu_qga_exec("guest-network-get-interfaces")
+ interfaces = out.get("return")
mac_name = {}
if not interfaces:
raise RuntimeError(
- 'Get VM {0} interface list failed on {1}'.format(
- self._qemu_opt['disk_image'], self._node['host']))
+ "Get VM {0} interface list failed on {1}".format(
+ self._qemu_opt["disk_image"], self._node["host"]
+ )
+ )
# Create MAC-name dict
for interface in interfaces:
- if 'hardware-address' not in interface:
+ if "hardware-address" not in interface:
continue
- mac_name[interface['hardware-address']] = interface['name']
+ mac_name[interface["hardware-address"]] = interface["name"]
# Match interface by MAC and save interface name
- for interface in self._vm_info['interfaces'].values():
- mac = interface.get('mac_address')
+ for interface in self._vm_info["interfaces"].values():
+ mac = interface.get("mac_address")
if_name = mac_name.get(mac)
if if_name is None:
- logging.debug(
- 'Interface name for MAC {} not found'.format(mac))
+ logging.debug("Interface name for MAC {} not found".format(mac))
else:
- interface['name'] = if_name
+ interface["name"] = if_name
def _huge_page_check(self, allocate=False):
"""Huge page check."""
- huge_mnt = self._qemu_opt.get('huge_mnt')
- mem_size = self._qemu_opt.get('mem_size')
+ huge_mnt = self._qemu_opt.get("huge_mnt")
+ mem_size = self._qemu_opt.get("mem_size")
# Get huge pages information
huge_size = self._get_huge_page_size()
@@ -391,55 +411,55 @@ class QemuUtils(object):
if allocate:
mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
huge_to_allocate = ((mem_needed // huge_size) * 2) + huge_total
- max_map_count = huge_to_allocate*4
+ max_map_count = huge_to_allocate * 4
# Increase maximum number of memory map areas a
# process may have
- cmd = \
- 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
- max_map_count)
+ cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
+ max_map_count
+ )
(ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
# Increase hugepage count
- cmd = \
- 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
- huge_to_allocate)
+ cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
+ huge_to_allocate
+ )
(ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- logging.debug(
- 'Mount huge pages failed {0}'.format(stderr))
+ logging.debug("Mount huge pages failed {0}".format(stderr))
raise RuntimeError(
- 'Mount huge pages failed on {0}'.format(
- self._node['host']))
+ "Mount huge pages failed on {0}".format(self._node["host"])
+ )
# If we do not want to allocate dynamicaly end with error
else:
raise RuntimeError(
- 'Not enough free huge pages: {0}, '
- '{1} MB'.format(huge_free, huge_free * huge_size)
+ "Not enough free huge pages: {0}, "
+ "{1} MB".format(huge_free, huge_free * huge_size)
)
# Check if huge pages mount point exist
has_huge_mnt = False
- (_, output, _) = self._ssh.exec_command('cat /proc/mounts')
+ (_, output, _) = self._ssh.exec_command("cat /proc/mounts")
for line in output.splitlines():
# Try to find something like:
# none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
mount = line.split()
- if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
+ if mount[2] == "hugetlbfs" and mount[1] == huge_mnt:
has_huge_mnt = True
break
# If huge page mount point not exist create one
if not has_huge_mnt:
- cmd = 'mkdir -p {0}'.format(huge_mnt)
+ cmd = "mkdir -p {0}".format(huge_mnt)
(ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- logging.debug('Create mount dir failed: {0}'.format(stderr))
- raise RuntimeError('Create mount dir failed on {0}'.format(
- self._node['host']))
- cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
- huge_mnt)
+ logging.debug("Create mount dir failed: {0}".format(stderr))
+ raise RuntimeError(
+ "Create mount dir failed on {0}".format(self._node["host"])
+ )
+ cmd = "mount -t hugetlbfs -o pagesize=2048k none {0}".format(huge_mnt)
(ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- logging.debug('Mount huge pages failed {0}'.format(stderr))
- raise RuntimeError('Mount huge pages failed on {0}'.format(
- self._node['host']))
+ logging.debug("Mount huge pages failed {0}".format(stderr))
+ raise RuntimeError(
+ "Mount huge pages failed on {0}".format(self._node["host"])
+ )
def _get_huge_page_size(self):
"""Get default size of huge pages in system.
@@ -456,11 +476,11 @@ class QemuUtils(object):
try:
huge_size = int(out)
except ValueError:
- logging.debug('Reading huge page size information failed')
+ logging.debug("Reading huge page size information failed")
else:
break
else:
- raise RuntimeError('Getting huge page size information failed.')
+ raise RuntimeError("Getting huge page size information failed.")
return huge_size
def _get_huge_page_free(self, huge_size):
@@ -474,20 +494,21 @@ class QemuUtils(object):
"""
# TODO: add numa aware option
# TODO: remove to dedicated library
- cmd_huge_free = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\
- 'free_hugepages'.format(huge_size)
+ cmd_huge_free = (
+ "cat /sys/kernel/mm/hugepages/hugepages-{0}kB/"
+ "free_hugepages".format(huge_size)
+ )
for _ in range(3):
(ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_free)
if ret == 0:
try:
huge_free = int(out)
except ValueError:
- logging.debug(
- 'Reading free huge pages information failed')
+ logging.debug("Reading free huge pages information failed")
else:
break
else:
- raise RuntimeError('Getting free huge pages information failed.')
+ raise RuntimeError("Getting free huge pages information failed.")
return huge_free
def _get_huge_page_total(self, huge_size):
@@ -501,20 +522,21 @@ class QemuUtils(object):
"""
# TODO: add numa aware option
# TODO: remove to dedicated library
- cmd_huge_total = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\
- 'nr_hugepages'.format(huge_size)
+ cmd_huge_total = (
+ "cat /sys/kernel/mm/hugepages/hugepages-{0}kB/"
+ "nr_hugepages".format(huge_size)
+ )
for _ in range(3):
(ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_total)
if ret == 0:
try:
huge_total = int(out)
except ValueError:
- logging.debug(
- 'Reading total huge pages information failed')
+ logging.debug("Reading total huge pages information failed")
else:
break
else:
- raise RuntimeError('Getting total huge pages information failed.')
+ raise RuntimeError("Getting total huge pages information failed.")
return huge_total
def qemu_start(self):
@@ -526,45 +548,63 @@ class QemuUtils(object):
.. warning:: Starts only one VM on the node.
"""
# SSH forwarding
- ssh_fwd = '-net user,hostfwd=tcp::{0}-:22'.format(
- self._qemu_opt.get('ssh_fwd_port'))
+ ssh_fwd = "-net user,hostfwd=tcp::{0}-:22".format(
+ self._qemu_opt.get("ssh_fwd_port")
+ )
# Memory and huge pages
- mem = '-object memory-backend-file,id=mem,size={0}M,mem-path={1},' \
- 'share=on -m {0} -numa node,memdev=mem'.format(
- self._qemu_opt.get('mem_size'), self._qemu_opt.get('huge_mnt'))
+ mem = (
+ "-object memory-backend-file,id=mem,size={0}M,mem-path={1},"
+ "share=on -m {0} -numa node,memdev=mem".format(
+ self._qemu_opt.get("mem_size"), self._qemu_opt.get("huge_mnt")
+ )
+ )
# By default check only if hugepages are available.
# If 'huge_allocate' is set to true try to allocate as well.
- self._huge_page_check(allocate=self._qemu_opt.get('huge_allocate'))
+ self._huge_page_check(allocate=self._qemu_opt.get("huge_allocate"))
# Disk option
- drive = '-drive file={0},format=raw,cache=none,if=virtio'.format(
- self._qemu_opt.get('disk_image'))
+ drive = "-drive file={0},format=raw,cache=none,if=virtio".format(
+ self._qemu_opt.get("disk_image")
+ )
# Setup QMP via unix socket
- qmp = '-qmp unix:{0},server,nowait'.format(self._qmp_sock)
+ qmp = "-qmp unix:{0},server,nowait".format(self._qmp_sock)
# Setup serial console
- serial = '-chardev socket,host=127.0.0.1,port={0},id=gnc0,server,' \
- 'nowait -device isa-serial,chardev=gnc0'.format(
- self._qemu_opt.get('serial_port'))
+ serial = (
+ "-chardev socket,host=127.0.0.1,port={0},id=gnc0,server,"
+ "nowait -device isa-serial,chardev=gnc0".format(
+ self._qemu_opt.get("serial_port")
+ )
+ )
# Setup QGA via chardev (unix socket) and isa-serial channel
- qga = '-chardev socket,path={0},server,nowait,id=qga0 ' \
- '-device isa-serial,chardev=qga0'.format(self._qga_sock)
+ qga = (
+ "-chardev socket,path={0},server,nowait,id=qga0 "
+ "-device isa-serial,chardev=qga0".format(self._qga_sock)
+ )
# Graphic setup
- graphic = '-monitor none -display none -vga none'
+ graphic = "-monitor none -display none -vga none"
# PID file
- pid = '-pidfile {}'.format(self._pid_file)
+ pid = "-pidfile {}".format(self._pid_file)
# Run QEMU
- cmd = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'.format(
- self._qemu_bin, self._qemu_opt.get('smp'), mem, ssh_fwd,
- self._qemu_opt.get('options'),
- drive, qmp, serial, qga, graphic, pid)
+ cmd = "{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}".format(
+ self._qemu_bin,
+ self._qemu_opt.get("smp"),
+ mem,
+ ssh_fwd,
+ self._qemu_opt.get("options"),
+ drive,
+ qmp,
+ serial,
+ qga,
+ graphic,
+ pid,
+ )
(ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd, timeout=300)
if int(ret_code) != 0:
- logging.debug('QEMU start failed {0}'.format(stderr))
- raise RuntimeError('QEMU start failed on {0}'.format(
- self._node['host']))
- logging.debug('QEMU running')
+ logging.debug("QEMU start failed {0}".format(stderr))
+ raise RuntimeError("QEMU start failed on {0}".format(self._node["host"]))
+ logging.debug("QEMU running")
# Wait until VM boot
try:
self._wait_until_vm_boot()
@@ -579,40 +619,43 @@ class QemuUtils(object):
def qemu_quit(self):
"""Quit the QEMU emulator."""
- out = self._qemu_qmp_exec('quit')
- err = out.get('error')
+ out = self._qemu_qmp_exec("quit")
+ err = out.get("error")
if err is not None:
- raise RuntimeError('QEMU quit failed on {0}, error: {1}'.format(
- self._node['host'], json.dumps(err)))
+ raise RuntimeError(
+ "QEMU quit failed on {0}, error: {1}".format(
+ self._node["host"], json.dumps(err)
+ )
+ )
def qemu_system_powerdown(self):
"""Power down the system (if supported)."""
- out = self._qemu_qmp_exec('system_powerdown')
- err = out.get('error')
+ out = self._qemu_qmp_exec("system_powerdown")
+ err = out.get("error")
if err is not None:
raise RuntimeError(
- 'QEMU system powerdown failed on {0}, '
- 'error: {1}'.format(self._node['host'], json.dumps(err))
+ "QEMU system powerdown failed on {0}, "
+ "error: {1}".format(self._node["host"], json.dumps(err))
)
def qemu_system_reset(self):
"""Reset the system."""
- out = self._qemu_qmp_exec('system_reset')
- err = out.get('error')
+ out = self._qemu_qmp_exec("system_reset")
+ err = out.get("error")
if err is not None:
raise RuntimeError(
- 'QEMU system reset failed on {0}, '
- 'error: {1}'.format(self._node['host'], json.dumps(err)))
+ "QEMU system reset failed on {0}, "
+ "error: {1}".format(self._node["host"], json.dumps(err))
+ )
def qemu_kill(self):
"""Kill qemu process."""
# Note: in QEMU start phase there are 3 QEMU processes because we
# daemonize QEMU
- self._ssh.exec_command_sudo('chmod +r {}'.format(self._pid_file))
- self._ssh.exec_command_sudo('kill -SIGKILL $(cat {})'
- .format(self._pid_file))
+ self._ssh.exec_command_sudo("chmod +r {}".format(self._pid_file))
+ self._ssh.exec_command_sudo("kill -SIGKILL $(cat {})".format(self._pid_file))
# Delete PID file
- cmd = 'rm -f {}'.format(self._pid_file)
+ cmd = "rm -f {}".format(self._pid_file)
self._ssh.exec_command_sudo(cmd)
def qemu_kill_all(self, node=None):
@@ -623,16 +666,16 @@ class QemuUtils(object):
"""
if node:
self.qemu_set_node(node)
- self._ssh.exec_command_sudo('pkill -SIGKILL qemu')
+ self._ssh.exec_command_sudo("pkill -SIGKILL qemu")
def qemu_clear_socks(self):
"""Remove all sockets created by QEMU."""
# If serial console port still open kill process
- cmd = 'fuser -k {}/tcp'.format(self._qemu_opt.get('serial_port'))
+ cmd = "fuser -k {}/tcp".format(self._qemu_opt.get("serial_port"))
self._ssh.exec_command_sudo(cmd)
# Delete all created sockets
for sock in self._socks:
- cmd = 'rm -f {}'.format(sock)
+ cmd = "rm -f {}".format(sock)
self._ssh.exec_command_sudo(cmd)
def qemu_system_status(self):
@@ -659,15 +702,16 @@ class QemuUtils(object):
:return: VM status.
:rtype: str
"""
- out = self._qemu_qmp_exec('query-status')
- ret = out.get('return')
+ out = self._qemu_qmp_exec("query-status")
+ ret = out.get("return")
if ret is not None:
- return ret.get('status')
+ return ret.get("status")
else:
- err = out.get('error')
+ err = out.get("error")
raise RuntimeError(
- 'QEMU query-status failed on {0}, '
- 'error: {1}'.format(self._node['host'], json.dumps(err)))
+ "QEMU query-status failed on {0}, "
+ "error: {1}".format(self._node["host"], json.dumps(err))
+ )
@staticmethod
def build_qemu(node, force_install=False, apply_patch=False):
@@ -682,17 +726,23 @@ class QemuUtils(object):
:raises: RuntimeError if building QEMU failed.
"""
- directory = ' --directory={0}'.format(Constants.QEMU_INSTALL_DIR)
- version = ' --version={0}'.format(Constants.QEMU_INSTALL_VERSION)
- force = ' --force' if force_install else ''
- patch = ' --patch' if apply_patch else ''
-
- (ret_code, stdout, stderr) = VPPUtil. \
- exec_command(
- "sudo -E sh -c '{0}/{1}/qemu_build.sh{2}{3}{4}{5}'".
- format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH,
- version, directory, force, patch), 1000)
+ directory = " --directory={0}".format(Constants.QEMU_INSTALL_DIR)
+ version = " --version={0}".format(Constants.QEMU_INSTALL_VERSION)
+ force = " --force" if force_install else ""
+ patch = " --patch" if apply_patch else ""
+
+ (ret_code, stdout, stderr) = VPPUtil.exec_command(
+ "sudo -E sh -c '{0}/{1}/qemu_build.sh{2}{3}{4}{5}'".format(
+ Constants.REMOTE_FW_DIR,
+ Constants.RESOURCES_LIB_SH,
+ version,
+ directory,
+ force,
+ patch,
+ ),
+ 1000,
+ )
if int(ret_code) != 0:
- logging.debug('QEMU build failed {0}'.format(stdout + stderr))
- raise RuntimeError('QEMU build failed on {0}'.format(node['host']))
+ logging.debug("QEMU build failed {0}".format(stdout + stderr))
+ raise RuntimeError("QEMU build failed on {0}".format(node["host"]))
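Note on the QemuUtils.py hunks: the most visible change is that backslash-continued, implicitly concatenated command strings become parenthesized groups. A minimal sketch with a hypothetical qemu_id (not part of the patch) showing that the assembled QEMU option string is byte-for-byte identical either way:

    qemu_id = 1

    # Pre-black style: backslash continuations; .format() applies to the whole
    # implicitly concatenated literal.
    old = '-cpu host -daemonize -enable-kvm ' \
          '-machine pc,accel=kvm,usb=off,mem-merge=off ' \
          '-net nic,macaddr=52:54:00:00:{0:02x}:ff -balloon none'\
          .format(qemu_id)

    # Black style: the same literals grouped in parentheses, double quoted.
    new = (
        "-cpu host -daemonize -enable-kvm "
        "-machine pc,accel=kvm,usb=off,mem-merge=off "
        "-net nic,macaddr=52:54:00:00:{0:02x}:ff -balloon none".format(qemu_id)
    )

    assert old == new  # option string unchanged; only the source layout differs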
diff --git a/extras/vpp_config/vpplib/VPPUtil.py b/extras/vpp_config/vpplib/VPPUtil.py
index 97747a31ca5..711f1032d96 100644
--- a/extras/vpp_config/vpplib/VPPUtil.py
+++ b/extras/vpp_config/vpplib/VPPUtil.py
@@ -23,15 +23,53 @@ from collections import Counter
import distro
-ubuntu_pkgs = {'release': ['vpp', 'vpp-plugin-core', 'vpp-plugin-dpdk', 'vpp-api-python', 'python3-vpp-api',
- 'vpp-dbg', 'vpp-dev', 'vpp-ext-deps'],
- 'master': ['vpp', 'vpp-plugin-core', 'vpp-plugin-dpdk', 'vpp-api-python', 'python3-vpp-api',
- 'vpp-dbg', 'vpp-dev', 'vpp-ext-deps']}
-
-centos_pkgs = {'release': ['vpp', 'vpp-selinux-policy', 'vpp-plugins', 'vpp-api-lua',
- 'vpp-api-python', 'vpp-debuginfo', 'vpp-devel', 'libvpp0', 'vpp-ext-deps'],
- 'master': ['vpp', 'vpp-selinux-policy', 'vpp-plugins', 'vpp-api-lua',
- 'vpp-api-python', 'vpp-debuginfo', 'vpp-devel', 'libvpp0', 'vpp-ext-deps']}
+ubuntu_pkgs = {
+ "release": [
+ "vpp",
+ "vpp-plugin-core",
+ "vpp-plugin-dpdk",
+ "vpp-api-python",
+ "python3-vpp-api",
+ "vpp-dbg",
+ "vpp-dev",
+ "vpp-ext-deps",
+ ],
+ "master": [
+ "vpp",
+ "vpp-plugin-core",
+ "vpp-plugin-dpdk",
+ "vpp-api-python",
+ "python3-vpp-api",
+ "vpp-dbg",
+ "vpp-dev",
+ "vpp-ext-deps",
+ ],
+}
+
+centos_pkgs = {
+ "release": [
+ "vpp",
+ "vpp-selinux-policy",
+ "vpp-plugins",
+ "vpp-api-lua",
+ "vpp-api-python",
+ "vpp-debuginfo",
+ "vpp-devel",
+ "libvpp0",
+ "vpp-ext-deps",
+ ],
+ "master": [
+ "vpp",
+ "vpp-selinux-policy",
+ "vpp-plugins",
+ "vpp-api-lua",
+ "vpp-api-python",
+ "vpp-debuginfo",
+ "vpp-devel",
+ "libvpp0",
+ "vpp-ext-deps",
+ ],
+}
class VPPUtil(object):
@@ -50,19 +88,23 @@ class VPPUtil(object):
"""
logging.info(" Local Command: {}".format(cmd))
- out = ''
- err = ''
- prc = subprocess.Popen(cmd, shell=True, bufsize=1,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ out = ""
+ err = ""
+ prc = subprocess.Popen(
+ cmd,
+ shell=True,
+ bufsize=1,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
with prc.stdout:
lines = prc.stdout.readlines()
for line in lines:
if type(line) != str:
line = line.decode()
- logging.info(" {}".format(line.strip('\n')))
+ logging.info(" {}".format(line.strip("\n")))
out += line
with prc.stderr:
@@ -70,7 +112,7 @@ class VPPUtil(object):
for line in lines:
if type(line) != str:
line = line.decode()
- logging.warning(" {}".format(line.strip('\n')))
+ logging.warning(" {}".format(line.strip("\n")))
err += line
ret = prc.wait()
@@ -86,17 +128,17 @@ class VPPUtil(object):
"""
# Does a copy of the file exist, if not create one
- ofile = filename + '.orig'
- (ret, stdout, stderr) = self.exec_command('ls {}'.format(ofile))
+ ofile = filename + ".orig"
+ (ret, stdout, stderr) = self.exec_command("ls {}".format(ofile))
if ret != 0:
logging.debug(stderr)
- if stdout.strip('\n') != ofile:
- cmd = 'sudo cp {} {}'.format(filename, ofile)
+ if stdout.strip("\n") != ofile:
+ cmd = "sudo cp {} {}".format(filename, ofile)
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
logging.debug(stderr)
- def _install_vpp_ubuntu(self, node, branch, ubuntu_version='xenial'):
+ def _install_vpp_ubuntu(self, node, branch, ubuntu_version="xenial"):
"""
Install the VPP packages
@@ -109,49 +151,49 @@ class VPPUtil(object):
"""
# Modify the sources list
- sfile = '/etc/apt/sources.list.d/99fd.io.list'
+ sfile = "/etc/apt/sources.list.d/99fd.io.list"
# Backup the sources list
self._autoconfig_backup_file(sfile)
- reps = 'deb [trusted=yes] https://packagecloud.io/fdio/'
- reps += '{}/ubuntu {} main\n'.format(branch, ubuntu_version)
+ reps = "deb [trusted=yes] https://packagecloud.io/fdio/"
+ reps += "{}/ubuntu {} main\n".format(branch, ubuntu_version)
- with open(sfile, 'w') as sfd:
+ with open(sfile, "w") as sfd:
sfd.write(reps)
sfd.close()
# Add the key
- key = requests.get(
- 'https://packagecloud.io/fdio/{}/gpgkey'.format(branch))
+ key = requests.get("https://packagecloud.io/fdio/{}/gpgkey".format(branch))
cmd = 'echo "{}" | apt-key add -'.format(key.content.decode(key.encoding))
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {}'.format(
- cmd,
- node['host'],
- stderr))
+ raise RuntimeError(
+ "{} failed on node {} {}".format(cmd, node["host"], stderr)
+ )
# Install the package
- cmd = 'apt-get -y update'
+ cmd = "apt-get -y update"
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} apt-get update failed on node {} {}'.format(
- cmd,
- node['host'],
- stderr))
+ raise RuntimeError(
+ "{} apt-get update failed on node {} {}".format(
+ cmd, node["host"], stderr
+ )
+ )
# Get the package list
- pkgstr = ''
+ pkgstr = ""
for ps in ubuntu_pkgs[branch]:
- pkgstr += ps + ' '
+ pkgstr += ps + " "
- cmd = 'apt-get -y install {}'.format(pkgstr)
+ cmd = "apt-get -y install {}".format(pkgstr)
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.format(
- cmd, node['host'], stdout, stderr))
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
def _install_vpp_centos(self, node, branch):
"""
@@ -164,95 +206,82 @@ class VPPUtil(object):
"""
# Be sure the correct system packages are installed
- cmd = 'yum -y update'
+ cmd = "yum -y update"
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- logging.debug('{} failed on node {} {}'.format(
- cmd,
- node['host'],
- stderr))
+ logging.debug("{} failed on node {} {}".format(cmd, node["host"], stderr))
- cmd = 'yum -y install pygpgme yum-utils'
+ cmd = "yum -y install pygpgme yum-utils"
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- logging.debug('{} failed on node {} {}'.format(
- cmd,
- node['host'],
- stderr))
+ logging.debug("{} failed on node {} {}".format(cmd, node["host"], stderr))
# Modify the sources list
- sfile = '/etc/yum.repos.d/fdio-release.repo'
+ sfile = "/etc/yum.repos.d/fdio-release.repo"
# Backup the sources list
self._autoconfig_backup_file(sfile)
# Remove the current file
- cmd = 'rm {}'.format(sfile)
+ cmd = "rm {}".format(sfile)
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- logging.debug('{} failed on node {} {}'.format(
- cmd,
- node['host'],
- stderr))
+ logging.debug("{} failed on node {} {}".format(cmd, node["host"], stderr))
# Get the file contents
- reps = '\n'.join([
- '[fdio_{}]'.format(branch),
- 'name=fdio_{}'.format(branch),
- 'baseurl=https://packagecloud.io/fdio/{}/el/7/$basearch'.format(
- branch),
- 'repo_gpgcheck=1',
- 'gpgcheck=0',
- 'enabled=1',
- 'gpgkey=https://packagecloud.io/fdio/{}/gpgkey'.format(branch),
- 'sslverify=1',
- 'sslcacert=/etc/pki/tls/certs/ca-bundle.crt',
- 'metadata_expire=300\n',
- '[fdio_{}-source]'.format(branch),
- 'name=fdio_release-{}'.format(branch),
- 'baseurl=https://packagecloud.io/fdio/{}/el/7/SRPMS'.format(
- branch),
- 'repo_gpgcheck=1',
- 'gpgcheck=0',
- 'enabled=1',
- 'gpgkey=https://packagecloud.io/fdio/{}/gpgkey'.format(branch),
- 'sslverify =1',
- 'sslcacert=/etc/pki/tls/certs/ca-bundle.crt',
- 'metadata_expire=300\n'
- ])
- with open(sfile, 'w') as sfd:
+ reps = "\n".join(
+ [
+ "[fdio_{}]".format(branch),
+ "name=fdio_{}".format(branch),
+ "baseurl=https://packagecloud.io/fdio/{}/el/7/$basearch".format(branch),
+ "repo_gpgcheck=1",
+ "gpgcheck=0",
+ "enabled=1",
+ "gpgkey=https://packagecloud.io/fdio/{}/gpgkey".format(branch),
+ "sslverify=1",
+ "sslcacert=/etc/pki/tls/certs/ca-bundle.crt",
+ "metadata_expire=300\n",
+ "[fdio_{}-source]".format(branch),
+ "name=fdio_release-{}".format(branch),
+ "baseurl=https://packagecloud.io/fdio/{}/el/7/SRPMS".format(branch),
+ "repo_gpgcheck=1",
+ "gpgcheck=0",
+ "enabled=1",
+ "gpgkey=https://packagecloud.io/fdio/{}/gpgkey".format(branch),
+ "sslverify =1",
+ "sslcacert=/etc/pki/tls/certs/ca-bundle.crt",
+ "metadata_expire=300\n",
+ ]
+ )
+ with open(sfile, "w") as sfd:
sfd.write(reps)
sfd.close()
# Update the fdio repo
- cmd = 'yum clean all'
+ cmd = "yum clean all"
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- logging.debug('{} failed on node {} {}'.format(
- cmd,
- node['host'],
- stderr))
+ logging.debug("{} failed on node {} {}".format(cmd, node["host"], stderr))
- cmd = "yum -q makecache -y --disablerepo='*' " \
- "--enablerepo='fdio_{}'".format(branch)
+ cmd = "yum -q makecache -y --disablerepo='*' " "--enablerepo='fdio_{}'".format(
+ branch
+ )
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- logging.debug('{} failed on node {} {}'.format(
- cmd,
- node['host'],
- stderr))
+ logging.debug("{} failed on node {} {}".format(cmd, node["host"], stderr))
# Get the package list
- pkgstr = ''
+ pkgstr = ""
for ps in centos_pkgs[branch]:
- pkgstr += ps + ' '
+ pkgstr += ps + " "
- cmd = 'yum -y install {}'.format(pkgstr)
+ cmd = "yum -y install {}".format(pkgstr)
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.format(
- cmd, node['host'], stdout, stderr))
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
def install_vpp(self, node, branch):
"""
@@ -266,10 +295,10 @@ class VPPUtil(object):
"""
distro = self.get_linux_distro()
logging.info(" {}".format(distro[0]))
- if distro[0] == 'Ubuntu':
+ if distro[0] == "Ubuntu":
logging.info("Install Ubuntu")
self._install_vpp_ubuntu(node, branch, ubuntu_version=distro[2])
- elif distro[0] == 'CentOS Linux':
+ elif distro[0] == "CentOS Linux":
logging.info("Install CentOS")
self._install_vpp_centos(node, branch)
else:
@@ -286,17 +315,18 @@ class VPPUtil(object):
"""
# get the package list
- pkgstr = ''
+ pkgstr = ""
pkgs = self.get_installed_vpp_pkgs()
for pkg in pkgs:
- pkgname = pkg['name']
- pkgstr += pkgname + ' '
+ pkgname = pkg["name"]
+ pkgstr += pkgname + " "
- cmd = 'dpkg --purge {}'.format(pkgstr)
+ cmd = "dpkg --purge {}".format(pkgstr)
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.format(
- cmd, node['host'], stdout, stderr))
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
def _uninstall_vpp_centos(self, node):
"""
@@ -306,18 +336,19 @@ class VPPUtil(object):
:type node: dict
"""
- pkgstr = ''
+ pkgstr = ""
pkgs = self.get_installed_vpp_pkgs()
for pkg in pkgs:
- pkgname = pkg['name']
- pkgstr += pkgname + ' '
+ pkgname = pkg["name"]
+ pkgstr += pkgname + " "
logging.info("Uninstalling {}".format(pkgstr))
- cmd = 'yum -y remove {}'.format(pkgstr)
+ cmd = "yum -y remove {}".format(pkgstr)
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.format(
- cmd, node['host'], stdout, stderr))
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
def uninstall_vpp(self, node):
"""
@@ -330,10 +361,10 @@ class VPPUtil(object):
# First stop VPP
self.stop(node)
distro = self.get_linux_distro()
- if distro[0] == 'Ubuntu':
+ if distro[0] == "Ubuntu":
logging.info("Uninstall Ubuntu")
self._uninstall_vpp_ubuntu(node)
- elif distro[0] == 'CentOS Linux':
+ elif distro[0] == "CentOS Linux":
logging.info("Uninstall CentOS")
self._uninstall_vpp_centos(node)
else:
@@ -352,21 +383,20 @@ class VPPUtil(object):
:type additional_cmds: tuple
"""
def_setting_tb_displayed = {
- 'IPv6 FIB': 'ip6 fib',
- 'IPv4 FIB': 'ip fib',
- 'Interface IP': 'int addr',
- 'Interfaces': 'int',
- 'ARP': 'ip arp',
- 'Errors': 'err'
+ "IPv6 FIB": "ip6 fib",
+ "IPv4 FIB": "ip fib",
+ "Interface IP": "int addr",
+ "Interfaces": "int",
+ "ARP": "ip arp",
+ "Errors": "err",
}
if additional_cmds:
for cmd in additional_cmds:
- def_setting_tb_displayed['Custom Setting: {}'.format(cmd)] \
- = cmd
+ def_setting_tb_displayed["Custom Setting: {}".format(cmd)] = cmd
for _, value in def_setting_tb_displayed.items():
- self.exec_command('vppctl sh {}'.format(value))
+ self.exec_command("vppctl sh {}".format(value))
@staticmethod
def get_vms(node):
@@ -397,32 +427,32 @@ class VPPUtil(object):
:rtype: dictionary
"""
interfaces = {}
- cmd = 'vppctl show int addr'
+ cmd = "vppctl show int addr"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
return interfaces
- lines = stdout.split('\n')
+ lines = stdout.split("\n")
if len(lines[0]) != 0:
- if lines[0].split(' ')[0] == 'FileNotFoundError':
+ if lines[0].split(" ")[0] == "FileNotFoundError":
return interfaces
- name = ''
+ name = ""
for line in lines:
if len(line) == 0:
continue
# If the first character is not whitespace
# create a new interface
- if len(re.findall(r'\s', line[0])) == 0:
+ if len(re.findall(r"\s", line[0])) == 0:
spl = line.split()
name = spl[0]
- if name == 'local0':
+ if name == "local0":
continue
interfaces[name] = {}
- interfaces[name]['state'] = spl[1].lstrip('(').rstrip('):\r')
+ interfaces[name]["state"] = spl[1].lstrip("(").rstrip("):\r")
else:
- interfaces[name]['address'] = line.lstrip(' ').rstrip('\r')
+ interfaces[name]["address"] = line.lstrip(" ").rstrip("\r")
return interfaces
@@ -439,14 +469,14 @@ class VPPUtil(object):
"""
interfaces = {}
- cmd = 'vppctl show hard'
+ cmd = "vppctl show hard"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
return interfaces
- lines = stdout.split('\n')
+ lines = stdout.split("\n")
if len(lines[0]) != 0:
- if lines[0].split(' ')[0] == 'FileNotFoundError':
+ if lines[0].split(" ")[0] == "FileNotFoundError":
return interfaces
for line in lines:
@@ -455,46 +485,46 @@ class VPPUtil(object):
# If the first character is not whitespace
# create a new interface
- if len(re.findall(r'\s', line[0])) == 0:
+ if len(re.findall(r"\s", line[0])) == 0:
spl = line.split()
name = spl[0]
interfaces[name] = {}
- interfaces[name]['index'] = spl[1]
- interfaces[name]['state'] = spl[2]
+ interfaces[name]["index"] = spl[1]
+ interfaces[name]["state"] = spl[2]
# Ethernet address
- rfall = re.findall(r'Ethernet address', line)
+ rfall = re.findall(r"Ethernet address", line)
if rfall:
spl = line.split()
- interfaces[name]['mac'] = spl[2]
+ interfaces[name]["mac"] = spl[2]
# Carrier
- rfall = re.findall(r'carrier', line)
+ rfall = re.findall(r"carrier", line)
if rfall:
- spl = line.split('carrier ')
- interfaces[name]['carrier'] = spl[1]
+ spl = line.split("carrier ")
+ interfaces[name]["carrier"] = spl[1]
# Socket
- spl = ''
- rfall = re.findall(r'numa \d+', line)
+ spl = ""
+ rfall = re.findall(r"numa \d+", line)
if rfall:
spl = rfall[0].split()
- interfaces[name]['numa'] = rfall[0].split()[1]
+ interfaces[name]["numa"] = rfall[0].split()[1]
# Queues and Descriptors
- rfall = re.findall(r'rx\: queues \d+', line)
+ rfall = re.findall(r"rx\: queues \d+", line)
if rfall:
- interfaces[name]['rx queues'] = rfall[0].split()[2]
- rdesc = re.findall(r'desc \d+', line)
+ interfaces[name]["rx queues"] = rfall[0].split()[2]
+ rdesc = re.findall(r"desc \d+", line)
if rdesc:
- interfaces[name]['rx descs'] = rdesc[0].split()[1]
+ interfaces[name]["rx descs"] = rdesc[0].split()[1]
- rfall = re.findall(r'tx\: queues \d+', line)
+ rfall = re.findall(r"tx\: queues \d+", line)
if rfall:
- interfaces[name]['tx queues'] = rfall[0].split()[2]
- rdesc = re.findall(r'desc \d+', line)
+ interfaces[name]["tx queues"] = rfall[0].split()[2]
+ rdesc = re.findall(r"desc \d+", line)
if rdesc:
- interfaces[name]['tx descs'] = rdesc[0].split()[1]
+ interfaces[name]["tx descs"] = rdesc[0].split()[1]
return interfaces
@@ -508,17 +538,17 @@ class VPPUtil(object):
"""
pkgs = []
- cmd = 'dpkg -l | grep vpp'
+ cmd = "dpkg -l | grep vpp"
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
return pkgs
- lines = stdout.split('\n')
+ lines = stdout.split("\n")
for line in lines:
items = line.split()
if len(items) < 2:
continue
- pkg = {'name': items[1], 'version': items[2]}
+ pkg = {"name": items[1], "version": items[2]}
pkgs.append(pkg)
return pkgs
@@ -533,21 +563,21 @@ class VPPUtil(object):
"""
pkgs = []
- cmd = 'rpm -qa | grep vpp'
+ cmd = "rpm -qa | grep vpp"
(ret, stdout, stderr) = self.exec_command(cmd)
if ret != 0:
return pkgs
- lines = stdout.split('\n')
+ lines = stdout.split("\n")
for line in lines:
if len(line) == 0:
continue
items = line.split()
if len(items) < 2:
- pkg = {'name': items[0]}
+ pkg = {"name": items[0]}
else:
- pkg = {'name': items[1], 'version': items[2]}
+ pkg = {"name": items[1], "version": items[2]}
pkgs.append(pkg)
@@ -563,9 +593,9 @@ class VPPUtil(object):
"""
distro = self.get_linux_distro()
- if distro[0] == 'Ubuntu':
+ if distro[0] == "Ubuntu":
pkgs = self._get_installed_vpp_pkgs_ubuntu()
- elif distro[0] == 'CentOS Linux':
+ elif distro[0] == "CentOS Linux":
pkgs = self._get_installed_vpp_pkgs_centos()
else:
pkgs = self._get_installed_vpp_pkgs_centos()
@@ -594,7 +624,7 @@ class VPPUtil(object):
numa_list = []
for if_key in iface_keys:
try:
- numa_list.append(node['interfaces'][if_key].get('numa_node'))
+ numa_list.append(node["interfaces"][if_key].get("numa_node"))
except KeyError:
pass
@@ -617,12 +647,12 @@ class VPPUtil(object):
:type node: dict
"""
- cmd = 'service vpp restart'
+ cmd = "service vpp restart"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.
- format(cmd, node['host'],
- stdout, stderr))
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
@staticmethod
def start(node):
@@ -634,12 +664,12 @@ class VPPUtil(object):
:type node: dict
"""
- cmd = 'service vpp start'
+ cmd = "service vpp start"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.
- format(cmd, node['host'],
- stdout, stderr))
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
@staticmethod
def stop(node):
@@ -651,12 +681,12 @@ class VPPUtil(object):
:type node: dict
"""
- cmd = 'service vpp stop'
+ cmd = "service vpp stop"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- logging.debug('{} failed on node {} {} {}'.
- format(cmd, node['host'],
- stdout, stderr))
+ logging.debug(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
# noinspection RegExpRedundantEscape
@staticmethod
@@ -676,11 +706,11 @@ class VPPUtil(object):
if len(pkgs) == 0:
return "Not Installed", errors
- cmd = 'service vpp status'
+ cmd = "service vpp status"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
# Get the active status
- state = re.findall(r'Active:[\w (\)]+', stdout)[0].split(' ')
+ state = re.findall(r"Active:[\w (\)]+", stdout)[0].split(" ")
if len(state) > 2:
statestr = "{} {}".format(state[1], state[2])
else:
@@ -707,13 +737,10 @@ class VPPUtil(object):
"""
dist = distro.linux_distribution()
- if dist[0] == 'Ubuntu' or \
- dist[0] == 'CentOS Linux' or \
- dist[:7] == 'Red Hat':
+ if dist[0] == "Ubuntu" or dist[0] == "CentOS Linux" or dist[:7] == "Red Hat":
return dist
else:
- raise RuntimeError(
- 'Linux Distribution {} is not supported'.format(dist[0]))
+ raise RuntimeError("Linux Distribution {} is not supported".format(dist[0]))
@staticmethod
def version():
@@ -726,21 +753,21 @@ class VPPUtil(object):
"""
version = {}
- cmd = 'vppctl show version verbose'
+ cmd = "vppctl show version verbose"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
return version
- lines = stdout.split('\n')
+ lines = stdout.split("\n")
if len(lines[0]) != 0:
- if lines[0].split(' ')[0] == 'FileNotFoundError':
+ if lines[0].split(" ")[0] == "FileNotFoundError":
return version
for line in lines:
if len(line) == 0:
continue
- dct = line.split(':')
- version[dct[0]] = dct[1].lstrip(' ')
+ dct = line.split(":")
+ version[dct[0]] = dct[1].lstrip(" ")
return version
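
version() above builds its dict by splitting each output line on ":". A sketch against a canned string rather than live "vppctl show version verbose" output; the values are made up. Note, as an observation only, that a value containing a colon would be truncated by this split-and-take-second-field approach.

SAMPLE = "Version: 22.06-rc0\nCompiled by: vpp\nCompile location: /w/workspace"

version = {}
for line in SAMPLE.split("\n"):
    if len(line) == 0:
        continue
    dct = line.split(":")
    version[dct[0]] = dct[1].lstrip(" ")

print(version)
# {'Version': '22.06-rc0', 'Compiled by': 'vpp', 'Compile location': '/w/workspace'}
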
@@ -755,38 +782,40 @@ class VPPUtil(object):
"""
ifaces = []
- cmd = 'vppctl show bridge'
+ cmd = "vppctl show bridge"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.
- format(cmd, node['host'],
- stdout, stderr))
- lines = stdout.split('\r\n')
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
+ lines = stdout.split("\r\n")
bridges = []
for line in lines:
- if line == 'no bridge-domains in use':
+ if line == "no bridge-domains in use":
print(line)
return ifaces
if len(line) == 0:
continue
- lspl = line.lstrip(' ').split()
- if lspl[0] != 'BD-ID':
+ lspl = line.lstrip(" ").split()
+ if lspl[0] != "BD-ID":
bridges.append(lspl[0])
for bridge in bridges:
- cmd = 'vppctl show bridge {} detail'.format(bridge)
+ cmd = "vppctl show bridge {} detail".format(bridge)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.
- format(cmd, node['host'],
- stdout, stderr))
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(
+ cmd, node["host"], stdout, stderr
+ )
+ )
- lines = stdout.split('\r\n')
+ lines = stdout.split("\r\n")
for line in lines:
- iface = re.findall(r'[a-zA-z]+\d+/\d+/\d+', line)
+ iface = re.findall(r"[a-zA-z]+\d+/\d+/\d+", line)
if len(iface):
- ifcidx = {'name': iface[0], 'index': line.split()[1]}
+ ifcidx = {"name": iface[0], "index": line.split()[1]}
ifaces.append(ifcidx)
print(stdout)
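
show_bridge() above finds bridge member interfaces with a name regex and takes an index from the second column of each matching line. A sketch on one canned "show bridge ... detail"-style line; the regex is copied verbatim from the hunk, the line itself is made up.

import re

line = "  GigabitEthernet0/8/0.100   3     1      0    -      *    none"
iface = re.findall(r"[a-zA-z]+\d+/\d+/\d+", line)
if len(iface):
    ifcidx = {"name": iface[0], "index": line.split()[1]}
    print(ifcidx)  # {'name': 'GigabitEthernet0/8/0', 'index': '3'}
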
diff --git a/extras/vpp_config/vpplib/VppGrubUtil.py b/extras/vpp_config/vpplib/VppGrubUtil.py
index f17efd8a868..976b20019c4 100644
--- a/extras/vpp_config/vpplib/VppGrubUtil.py
+++ b/extras/vpp_config/vpplib/VppGrubUtil.py
@@ -17,11 +17,11 @@ import re
from vpplib.VPPUtil import VPPUtil
-__all__ = ['VppGrubUtil']
+__all__ = ["VppGrubUtil"]
class VppGrubUtil(object):
- """ VPP Grub Utilities."""
+ """VPP Grub Utilities."""
def _get_current_cmdline(self):
"""
@@ -32,14 +32,14 @@ class VppGrubUtil(object):
"""
# Get the memory information using /proc/meminfo
- cmd = 'sudo cat /proc/cmdline'
+ cmd = "sudo cat /proc/cmdline"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} on node {} {} {}'.
- format(cmd, self._node['host'],
- stdout, stderr))
+ raise RuntimeError(
+ "{} on node {} {} {}".format(cmd, self._node["host"], stdout, stderr)
+ )
- self._current_cmdline = stdout.strip('\n')
+ self._current_cmdline = stdout.strip("\n")
def _get_default_cmdline(self):
"""
@@ -50,21 +50,24 @@ class VppGrubUtil(object):
"""
# Get the default grub cmdline
- rootdir = self._node['rootdir']
- gfile = self._node['cpu']['grub_config_file']
- grubcmdline = self._node['cpu']['grubcmdline']
- cmd = 'cat {}'.format(rootdir + gfile)
+ rootdir = self._node["rootdir"]
+ gfile = self._node["cpu"]["grub_config_file"]
+ grubcmdline = self._node["cpu"]["grubcmdline"]
+ cmd = "cat {}".format(rootdir + gfile)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} Executing failed on node {} {}'.
- format(cmd, self._node['host'], stderr))
+ raise RuntimeError(
+ "{} Executing failed on node {} {}".format(
+ cmd, self._node["host"], stderr
+ )
+ )
# Get the Default Linux command line, ignoring commented lines
- lines = stdout.split('\n')
+ lines = stdout.split("\n")
for line in lines:
- if line == '' or line[0] == '#':
+ if line == "" or line[0] == "#":
continue
- ldefault = re.findall(r'{}=.+'.format(grubcmdline), line)
+ ldefault = re.findall(r"{}=.+".format(grubcmdline), line)
if ldefault:
self._default_cmdline = ldefault[0]
break
@@ -96,9 +99,9 @@ class VppGrubUtil(object):
:returns: The command line
:rtype: string
"""
- grubcmdline = self._node['cpu']['grubcmdline']
+ grubcmdline = self._node["cpu"]["grubcmdline"]
cmdline = self._default_cmdline
- value = cmdline.split('{}='.format(grubcmdline))[1]
+ value = cmdline.split("{}=".format(grubcmdline))[1]
value = value.rstrip('"').lstrip('"')
# jadfix intel_pstate=disable sometimes cause networks to
@@ -111,43 +114,43 @@ class VppGrubUtil(object):
# value = '{} intel_pstate=disable'.format(value)
# Replace isolcpus with ours
- isolcpus = re.findall(r'isolcpus=[\w+\-,]+', value)
+ isolcpus = re.findall(r"isolcpus=[\w+\-,]+", value)
if not isolcpus:
- if isolated_cpus != '':
+ if isolated_cpus != "":
value = "{} isolcpus={}".format(value, isolated_cpus)
else:
- if isolated_cpus != '':
- value = re.sub(r'isolcpus=[\w+\-,]+',
- 'isolcpus={}'.format(isolated_cpus),
- value)
+ if isolated_cpus != "":
+ value = re.sub(
+ r"isolcpus=[\w+\-,]+", "isolcpus={}".format(isolated_cpus), value
+ )
else:
- value = re.sub(r'isolcpus=[\w+\-,]+', '', value)
+ value = re.sub(r"isolcpus=[\w+\-,]+", "", value)
- nohz = re.findall(r'nohz_full=[\w+\-,]+', value)
+ nohz = re.findall(r"nohz_full=[\w+\-,]+", value)
if not nohz:
- if isolated_cpus != '':
+ if isolated_cpus != "":
value = "{} nohz_full={}".format(value, isolated_cpus)
else:
- if isolated_cpus != '':
- value = re.sub(r'nohz_full=[\w+\-,]+',
- 'nohz_full={}'.format(isolated_cpus),
- value)
+ if isolated_cpus != "":
+ value = re.sub(
+ r"nohz_full=[\w+\-,]+", "nohz_full={}".format(isolated_cpus), value
+ )
else:
- value = re.sub(r'nohz_full=[\w+\-,]+', '', value)
+ value = re.sub(r"nohz_full=[\w+\-,]+", "", value)
- rcu = re.findall(r'rcu_nocbs=[\w+\-,]+', value)
+ rcu = re.findall(r"rcu_nocbs=[\w+\-,]+", value)
if not rcu:
- if isolated_cpus != '':
+ if isolated_cpus != "":
value = "{} rcu_nocbs={}".format(value, isolated_cpus)
else:
- if isolated_cpus != '':
- value = re.sub(r'rcu_nocbs=[\w+\-,]+',
- 'rcu_nocbs={}'.format(isolated_cpus),
- value)
+ if isolated_cpus != "":
+ value = re.sub(
+ r"rcu_nocbs=[\w+\-,]+", "rcu_nocbs={}".format(isolated_cpus), value
+ )
else:
- value = re.sub(r'rcu_nocbs=[\w+\-,]+', '', value)
+ value = re.sub(r"rcu_nocbs=[\w+\-,]+", "", value)
- value = value.lstrip(' ').rstrip(' ')
+ value = value.lstrip(" ").rstrip(" ")
cmdline = '{}="{}"'.format(grubcmdline, value)
return cmdline
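
The create_cmdline() hunks above carry the substance of this file: swapping isolcpus, nohz_full and rcu_nocbs values in or out of the GRUB command line. A minimal sketch of just the isolcpus branch on made-up values; the set_isolcpus wrapper is an assumption for illustration, not a function in the patch.

import re

def set_isolcpus(value, isolated_cpus):
    """Mirror of the isolcpus branch in create_cmdline() above (sketch only)."""
    isolcpus = re.findall(r"isolcpus=[\w+\-,]+", value)
    if not isolcpus:
        if isolated_cpus != "":
            value = "{} isolcpus={}".format(value, isolated_cpus)
    else:
        if isolated_cpus != "":
            value = re.sub(
                r"isolcpus=[\w+\-,]+", "isolcpus={}".format(isolated_cpus), value
            )
        else:
            value = re.sub(r"isolcpus=[\w+\-,]+", "", value)
    return value.lstrip(" ").rstrip(" ")

print(set_isolcpus("quiet splash isolcpus=1-3", "2-7"))  # quiet splash isolcpus=2-7
print(set_isolcpus("quiet splash", "2-7"))               # quiet splash isolcpus=2-7
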
@@ -167,69 +170,68 @@ class VppGrubUtil(object):
if len(vpp_cmdline):
# Update grub
# Save the original file
- rootdir = node['rootdir']
- grubcmdline = node['cpu']['grubcmdline']
- ofilename = rootdir + node['cpu']['grub_config_file'] + '.orig'
- filename = rootdir + node['cpu']['grub_config_file']
+ rootdir = node["rootdir"]
+ grubcmdline = node["cpu"]["grubcmdline"]
+ ofilename = rootdir + node["cpu"]["grub_config_file"] + ".orig"
+ filename = rootdir + node["cpu"]["grub_config_file"]
# Write the output file
# Does a copy of the original file exist, if not create one
- (ret, stdout, stderr) = VPPUtil.exec_command(
- 'ls {}'.format(ofilename))
+ (ret, stdout, stderr) = VPPUtil.exec_command("ls {}".format(ofilename))
if ret != 0:
- if stdout.strip('\n') != ofilename:
- cmd = 'sudo cp {} {}'.format(filename, ofilename)
+ if stdout.strip("\n") != ofilename:
+ cmd = "sudo cp {} {}".format(filename, ofilename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {}'.
- format(cmd, self._node['host'],
- stderr))
+ raise RuntimeError(
+ "{} failed on node {} {}".format(
+ cmd, self._node["host"], stderr
+ )
+ )
# Get the contents of the current grub config file
- cmd = 'cat {}'.format(filename)
+ cmd = "cat {}".format(filename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {}'.format(
- cmd,
- self._node['host'],
- stderr))
+ raise RuntimeError(
+ "{} failed on node {} {}".format(cmd, self._node["host"], stderr)
+ )
# Write the new contents
# Get the Default Linux command line, ignoring commented lines
content = ""
- lines = stdout.split('\n')
+ lines = stdout.split("\n")
for line in lines:
- if line == '':
- content += line + '\n'
+ if line == "":
+ content += line + "\n"
continue
- if line[0] == '#':
- content += line + '\n'
+ if line[0] == "#":
+ content += line + "\n"
continue
- ldefault = re.findall(r'{}=.+'.format(grubcmdline), line)
+ ldefault = re.findall(r"{}=.+".format(grubcmdline), line)
if ldefault:
- content += vpp_cmdline + '\n'
+ content += vpp_cmdline + "\n"
else:
- content += line + '\n'
+ content += line + "\n"
content = content.replace(r"`", r"\`")
- content = content.rstrip('\n')
+ content = content.rstrip("\n")
cmd = "sudo cat > {0} << EOF\n{1}\n".format(filename, content)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {}'.format(
- cmd,
- self._node['host'],
- stderr))
+ raise RuntimeError(
+ "{} failed on node {} {}".format(cmd, self._node["host"], stderr)
+ )
return vpp_cmdline
def __init__(self, node):
distro = VPPUtil.get_linux_distro()
- if distro[0] == 'Ubuntu':
- node['cpu']['grubcmdline'] = 'GRUB_CMDLINE_LINUX_DEFAULT'
+ if distro[0] == "Ubuntu":
+ node["cpu"]["grubcmdline"] = "GRUB_CMDLINE_LINUX_DEFAULT"
else:
- node['cpu']['grubcmdline'] = 'GRUB_CMDLINE_LINUX'
+ node["cpu"]["grubcmdline"] = "GRUB_CMDLINE_LINUX"
self._node = node
self._current_cmdline = ""
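
apply_vpp_cmdline() above rewrites only the line matching the grub cmdline variable, passing comments and blank lines through untouched. A compressed sketch of that loop on a made-up config text; the sample config and replacement value are assumptions, not taken from the patch.

import re

grubcmdline = "GRUB_CMDLINE_LINUX_DEFAULT"
vpp_cmdline = 'GRUB_CMDLINE_LINUX_DEFAULT="quiet isolcpus=1-3 nohz_full=1-3 rcu_nocbs=1-3"'
old = (
    "# grub defaults\n"
    "GRUB_DEFAULT=0\n"
    "\n"
    'GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"\n'
)

content = ""
for line in old.split("\n"):
    if line == "" or line[0] == "#":
        content += line + "\n"  # keep blanks and comments as-is
        continue
    if re.findall(r"{}=.+".format(grubcmdline), line):
        content += vpp_cmdline + "\n"  # replace the matching cmdline line
    else:
        content += line + "\n"

print(content.rstrip("\n"))
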
diff --git a/extras/vpp_config/vpplib/VppHugePageUtil.py b/extras/vpp_config/vpplib/VppHugePageUtil.py
index 3a632828883..48991090f04 100644
--- a/extras/vpp_config/vpplib/VppHugePageUtil.py
+++ b/extras/vpp_config/vpplib/VppHugePageUtil.py
@@ -33,6 +33,7 @@ class VppHugePageUtil(object):
"""
Huge Page Utilities
"""
+
def hugepages_dryrun_apply(self):
"""
Apply the huge page configuration
@@ -40,23 +41,23 @@ class VppHugePageUtil(object):
"""
node = self._node
- hugepages = node['hugepages']
+ hugepages = node["hugepages"]
vpp_hugepage_config = VPP_HUGEPAGE_CONFIG.format(
- nr_hugepages=hugepages['total'],
- max_map_count=hugepages['max_map_count'],
- shmmax=hugepages['shmax'])
+ nr_hugepages=hugepages["total"],
+ max_map_count=hugepages["max_map_count"],
+ shmmax=hugepages["shmax"],
+ )
- rootdir = node['rootdir']
- filename = rootdir + node['hugepages']['hugepage_config_file']
+ rootdir = node["rootdir"]
+ filename = rootdir + node["hugepages"]["hugepage_config_file"]
- cmd = 'echo "{0}" | sudo tee {1}'.\
- format(vpp_hugepage_config, filename)
+ cmd = 'echo "{0}" | sudo tee {1}'.format(vpp_hugepage_config, filename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.
- format(cmd, node['host'],
- stdout, stderr))
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
def get_actual_huge_pages(self):
"""
@@ -68,25 +69,26 @@ class VppHugePageUtil(object):
"""
# Get the memory information using /proc/meminfo
- cmd = 'sudo cat /proc/meminfo'
+ cmd = "sudo cat /proc/meminfo"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError(
- '{} failed on node {} {} {}'.format(
- cmd, self._node['host'],
- stdout, stderr))
-
- total = re.findall(r'HugePages_Total:\s+\w+', stdout)
- free = re.findall(r'HugePages_Free:\s+\w+', stdout)
- size = re.findall(r'Hugepagesize:\s+\w+\s+\w+', stdout)
- memtotal = re.findall(r'MemTotal:\s+\w+\s+\w+', stdout)
- memfree = re.findall(r'MemFree:\s+\w+\s+\w+', stdout)
-
- total = total[0].split(':')[1].lstrip()
- free = free[0].split(':')[1].lstrip()
- size = size[0].split(':')[1].lstrip()
- memtotal = memtotal[0].split(':')[1].lstrip()
- memfree = memfree[0].split(':')[1].lstrip()
+ "{} failed on node {} {} {}".format(
+ cmd, self._node["host"], stdout, stderr
+ )
+ )
+
+ total = re.findall(r"HugePages_Total:\s+\w+", stdout)
+ free = re.findall(r"HugePages_Free:\s+\w+", stdout)
+ size = re.findall(r"Hugepagesize:\s+\w+\s+\w+", stdout)
+ memtotal = re.findall(r"MemTotal:\s+\w+\s+\w+", stdout)
+ memfree = re.findall(r"MemFree:\s+\w+\s+\w+", stdout)
+
+ total = total[0].split(":")[1].lstrip()
+ free = free[0].split(":")[1].lstrip()
+ size = size[0].split(":")[1].lstrip()
+ memtotal = memtotal[0].split(":")[1].lstrip()
+ memfree = memfree[0].split(":")[1].lstrip()
return total, free, size, memtotal, memfree
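
get_actual_huge_pages() above scrapes /proc/meminfo with anchored regexes and strips the label off each match. Two of those extractions on a canned snippet; the numbers are made up and the snippet is abbreviated.

import re

MEMINFO = (
    "MemTotal:       16304004 kB\n"
    "HugePages_Total:    1024\n"
    "HugePages_Free:     1024\n"
    "Hugepagesize:       2048 kB"
)

total = re.findall(r"HugePages_Total:\s+\w+", MEMINFO)[0].split(":")[1].lstrip()
size = re.findall(r"Hugepagesize:\s+\w+\s+\w+", MEMINFO)[0].split(":")[1].lstrip()
print(total, size)  # 1024 2048 kB
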
def show_huge_pages(self):
@@ -96,17 +98,13 @@ class VppHugePageUtil(object):
"""
node = self._node
- hugepages = node['hugepages']
- print (" {:30}: {}".format("Total System Memory",
- hugepages['memtotal']))
- print (" {:30}: {}".format("Total Free Memory",
- hugepages['memfree']))
- print (" {:30}: {}".format("Actual Huge Page Total",
- hugepages['actual_total']))
- print (" {:30}: {}".format("Configured Huge Page Total",
- hugepages['total']))
- print (" {:30}: {}".format("Huge Pages Free", hugepages['free']))
- print (" {:30}: {}".format("Huge Page Size", hugepages['size']))
+ hugepages = node["hugepages"]
+ print(" {:30}: {}".format("Total System Memory", hugepages["memtotal"]))
+ print(" {:30}: {}".format("Total Free Memory", hugepages["memfree"]))
+ print(" {:30}: {}".format("Actual Huge Page Total", hugepages["actual_total"]))
+ print(" {:30}: {}".format("Configured Huge Page Total", hugepages["total"]))
+ print(" {:30}: {}".format("Huge Pages Free", hugepages["free"]))
+ print(" {:30}: {}".format("Huge Page Size", hugepages["size"]))
def get_huge_page_config(self):
"""
@@ -115,7 +113,7 @@ class VppHugePageUtil(object):
:returns: The map max count and shmmax
"""
- total = self._node['hugepages']['total']
+ total = self._node["hugepages"]["total"]
max_map_count = int(total) * 2 + 1024
shmmax = int(total) * 2 * 1024 * 1024
return max_map_count, shmmax
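
get_huge_page_config() above derives the map count and shmmax values from the configured huge page total. What the formulas yield for a hypothetical total of 1024 pages (the total is an example, not a recommendation from the patch):

total = 1024  # hypothetical configured huge page total
max_map_count = int(total) * 2 + 1024  # 3072
shmmax = int(total) * 2 * 1024 * 1024  # 2147483648
print(max_map_count, shmmax)
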
diff --git a/extras/vpp_config/vpplib/VppPCIUtil.py b/extras/vpp_config/vpplib/VppPCIUtil.py
index ceda46f97b9..032a262c21c 100644
--- a/extras/vpp_config/vpplib/VppPCIUtil.py
+++ b/extras/vpp_config/vpplib/VppPCIUtil.py
@@ -23,7 +23,7 @@ from vpplib.VPPUtil import VPPUtil
DPDK_SCRIPT = "/vpp/vpp-config/scripts/dpdk-devbind.py"
# PCI Device id regular expresssion
-PCI_DEV_ID_REGEX = '[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+.[0-9A-Fa-f]+'
+PCI_DEV_ID_REGEX = "[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+.[0-9A-Fa-f]+"
class VppPCIUtil(object):
@@ -45,51 +45,47 @@ class VppPCIUtil(object):
devices = {}
ids = re.findall(PCI_DEV_ID_REGEX, device_string)
- descriptions = re.findall(r'\'([\s\S]*?)\'', device_string)
- unused = re.findall(r'unused=\w+|unused=', device_string)
+ descriptions = re.findall(r"\'([\s\S]*?)\'", device_string)
+ unused = re.findall(r"unused=\w+|unused=", device_string)
for i, j in enumerate(ids):
- device = {'description': descriptions[i]}
+ device = {"description": descriptions[i]}
if unused:
- device['unused'] = unused[i].split('=')[1].split(',')
+ device["unused"] = unused[i].split("=")[1].split(",")
- cmd = 'ls /sys/bus/pci/devices/{}/driver/module/drivers'. \
- format(ids[i])
+ cmd = "ls /sys/bus/pci/devices/{}/driver/module/drivers".format(ids[i])
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret == 0:
- device['driver'] = stdout.split(':')[1].rstrip('\n')
+ device["driver"] = stdout.split(":")[1].rstrip("\n")
- cmd = 'cat /sys/bus/pci/devices/{}/numa_node'.format(ids[i])
+ cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(ids[i])
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed {} {}'.
- format(cmd, stderr, stdout))
- numa_node = stdout.rstrip('\n')
- if numa_node == '-1':
- device['numa_node'] = '0'
+ raise RuntimeError("{} failed {} {}".format(cmd, stderr, stdout))
+ numa_node = stdout.rstrip("\n")
+ if numa_node == "-1":
+ device["numa_node"] = "0"
else:
- device['numa_node'] = numa_node
+ device["numa_node"] = numa_node
interfaces = []
- device['interfaces'] = []
- cmd = 'ls /sys/bus/pci/devices/{}/net'.format(ids[i])
+ device["interfaces"] = []
+ cmd = "ls /sys/bus/pci/devices/{}/net".format(ids[i])
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret == 0:
- interfaces = stdout.rstrip('\n').split()
- device['interfaces'] = interfaces
+ interfaces = stdout.rstrip("\n").split()
+ device["interfaces"] = interfaces
l2_addrs = []
for intf in interfaces:
- cmd = 'cat /sys/bus/pci/devices/{}/net/{}/address'.format(
- ids[i], intf)
+ cmd = "cat /sys/bus/pci/devices/{}/net/{}/address".format(ids[i], intf)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed {} {}'.
- format(cmd, stderr, stdout))
+ raise RuntimeError("{} failed {} {}".format(cmd, stderr, stdout))
- l2_addrs.append(stdout.rstrip('\n'))
+ l2_addrs.append(stdout.rstrip("\n"))
- device['l2addr'] = l2_addrs
+ device["l2addr"] = l2_addrs
devices[ids[i]] = device
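
_create_device_list() above walks dpdk-devbind output with three regexes: PCI IDs, quoted descriptions, and the unused= driver list. The same extraction on one canned device line; the device itself is made up.

import re

PCI_DEV_ID_REGEX = "[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+.[0-9A-Fa-f]+"
line = "0000:00:19.0 'Ethernet Connection I217-LM' if=eno1 drv=e1000e unused=igb_uio"

ids = re.findall(PCI_DEV_ID_REGEX, line)
descriptions = re.findall(r"\'([\s\S]*?)\'", line)
unused = re.findall(r"unused=\w+|unused=", line)
device = {"description": descriptions[0], "unused": unused[0].split("=")[1].split(",")}
print(ids[0], device)
# 0000:00:19.0 {'description': 'Ethernet Connection I217-LM', 'unused': ['igb_uio']}
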
@@ -112,66 +108,62 @@ class VppPCIUtil(object):
"""
node = self._node
- rootdir = node['rootdir']
+ rootdir = node["rootdir"]
dpdk_script = rootdir + DPDK_SCRIPT
- cmd = dpdk_script + ' --status'
+ cmd = dpdk_script + " --status"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {}'.format(
- cmd,
- node['host'],
- stderr))
+ raise RuntimeError(
+ "{} failed on node {} {}".format(cmd, node["host"], stderr)
+ )
# Get the network devices using the DPDK
# First get everything after using DPDK
- stda = stdout.split('Network devices using DPDK-compatible driver')[1]
+ stda = stdout.split("Network devices using DPDK-compatible driver")[1]
# Then get everything before using kernel driver
- using_dpdk = stda.split('Network devices using kernel driver')[0]
+ using_dpdk = stda.split("Network devices using kernel driver")[0]
self._dpdk_devices = self._create_device_list(using_dpdk)
# Get the network devices using the kernel
- stda = stdout.split('Network devices using kernel driver')[1]
- using_kernel = stda.split('Other network devices')[0]
+ stda = stdout.split("Network devices using kernel driver")[1]
+ using_kernel = stda.split("Other network devices")[0]
self._kernel_devices = self._create_device_list(using_kernel)
# Get the other network devices
- stda = stdout.split('Other network devices')[1]
- other = stda.split('Crypto devices using DPDK-compatible driver')[0]
+ stda = stdout.split("Other network devices")[1]
+ other = stda.split("Crypto devices using DPDK-compatible driver")[0]
self._other_devices = self._create_device_list(other)
# Get the crypto devices using the DPDK
- stda = stdout.split('Crypto devices using DPDK-compatible driver')[1]
- crypto_using_dpdk = stda.split('Crypto devices using kernel driver')[0]
- self._crypto_dpdk_devices = self._create_device_list(
- crypto_using_dpdk)
+ stda = stdout.split("Crypto devices using DPDK-compatible driver")[1]
+ crypto_using_dpdk = stda.split("Crypto devices using kernel driver")[0]
+ self._crypto_dpdk_devices = self._create_device_list(crypto_using_dpdk)
# Get the network devices using the kernel
- stda = stdout.split('Crypto devices using kernel driver')[1]
- crypto_using_kernel = stda.split('Other crypto devices')[0]
- self._crypto_kernel_devices = self._create_device_list(
- crypto_using_kernel)
+ stda = stdout.split("Crypto devices using kernel driver")[1]
+ crypto_using_kernel = stda.split("Other crypto devices")[0]
+ self._crypto_kernel_devices = self._create_device_list(crypto_using_kernel)
# Get the other network devices
- crypto_other = stdout.split('Other crypto devices')[1]
+ crypto_other = stdout.split("Other crypto devices")[1]
self._crypto_other_devices = self._create_device_list(crypto_other)
# Get the devices used by the kernel
for devk in self._kernel_devices.items():
dvid = devk[0]
device = devk[1]
- for i in device['interfaces']:
+ for i in device["interfaces"]:
cmd = "ip addr show " + i
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {}'.format(
- cmd,
- node['host'],
- stderr))
- lstate = re.findall(r'state \w+', stdout)[0].split(' ')[1]
+ raise RuntimeError(
+ "{} failed on node {} {}".format(cmd, node["host"], stderr)
+ )
+ lstate = re.findall(r"state \w+", stdout)[0].split(" ")[1]
# Take care of the links that are UP
- if lstate == 'UP':
- device['linkup'] = True
+ if lstate == "UP":
+ device["linkup"] = True
self._link_up_devices[dvid] = device
for devl in self._link_up_devices.items():
@@ -234,18 +226,18 @@ class VppPCIUtil(object):
"""
- name = 'port' + str(len(interfaces))
+ name = "port" + str(len(interfaces))
interfaces[name] = {}
- interfaces[name]['pci_address'] = device_id
- interfaces[name]['numa_node'] = device['numa_node']
- if 'l2addr' in device:
- l2_addrs = device['l2addr']
+ interfaces[name]["pci_address"] = device_id
+ interfaces[name]["numa_node"] = device["numa_node"]
+ if "l2addr" in device:
+ l2_addrs = device["l2addr"]
for i, j in enumerate(l2_addrs):
if i > 0:
- mname = 'mac_address' + str(i + 1)
+ mname = "mac_address" + str(i + 1)
interfaces[name][mname] = l2_addrs[i]
else:
- interfaces[name]['mac_address'] = l2_addrs[i]
+ interfaces[name]["mac_address"] = l2_addrs[i]
@staticmethod
def show_vpp_devices(devices, show_interfaces=True, show_header=True):
@@ -261,34 +253,33 @@ class VppPCIUtil(object):
"""
if show_interfaces:
- header = "{:15} {:25} {:50}".format("PCI ID",
- "Kernel Interface(s)",
- "Description")
+ header = "{:15} {:25} {:50}".format(
+ "PCI ID", "Kernel Interface(s)", "Description"
+ )
else:
- header = "{:15} {:50}".format("PCI ID",
- "Description")
- dashseparator = ("-" * (len(header) - 2))
+ header = "{:15} {:50}".format("PCI ID", "Description")
+ dashseparator = "-" * (len(header) - 2)
if show_header is True:
- print (header)
- print (dashseparator)
+ print(header)
+ print(dashseparator)
for dit in devices.items():
dvid = dit[0]
device = dit[1]
if show_interfaces:
- interfaces = device['interfaces']
- interface = ''
+ interfaces = device["interfaces"]
+ interface = ""
for i, j in enumerate(interfaces):
if i > 0:
- interface += ',' + interfaces[i]
+ interface += "," + interfaces[i]
else:
interface = interfaces[i]
- print ("{:15} {:25} {:50}".format(
- dvid, interface, device['description']))
+ print(
+ "{:15} {:25} {:50}".format(dvid, interface, device["description"])
+ )
else:
- print ("{:15} {:50}".format(
- dvid, device['description']))
+ print("{:15} {:50}".format(dvid, device["description"]))
@staticmethod
def unbind_vpp_device(node, device_id):
@@ -301,14 +292,14 @@ class VppPCIUtil(object):
:type device_id: string
"""
- rootdir = node['rootdir']
+ rootdir = node["rootdir"]
dpdk_script = rootdir + DPDK_SCRIPT
- cmd = dpdk_script + ' -u ' + ' ' + device_id
+ cmd = dpdk_script + " -u " + " " + device_id
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- raise RuntimeError('{} failed on node {} {} {}'.format(
- cmd, node['host'],
- stdout, stderr))
+ raise RuntimeError(
+ "{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
+ )
@staticmethod
def bind_vpp_device(node, driver, device_id):
@@ -324,14 +315,14 @@ class VppPCIUtil(object):
:returns ret: Command return code
"""
- rootdir = node['rootdir']
+ rootdir = node["rootdir"]
dpdk_script = rootdir + DPDK_SCRIPT
- cmd = dpdk_script + ' -b ' + driver + ' ' + device_id
+ cmd = dpdk_script + " -b " + driver + " " + device_id
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
- logging.error('{} failed on node {}'.format(
- cmd, node['host'], stdout, stderr))
- logging.error('{} {}'.format(
- stdout, stderr))
+ logging.error(
+ "{} failed on node {}".format(cmd, node["host"], stdout, stderr)
+ )
+ logging.error("{} {}".format(stdout, stderr))
return ret
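
get_all_devices() above slices the dpdk-devbind --status text into sections by splitting on the section headings. A sketch on a canned, abbreviated status string; the device lines are made up and the real output carries more sections than shown here.

STATUS = (
    "Network devices using DPDK-compatible driver\n"
    "0000:00:09.0 'Virtio network device' drv=uio_pci_generic unused=\n"
    "Network devices using kernel driver\n"
    "0000:00:03.0 'Virtio network device' if=ens3 drv=virtio-pci unused=\n"
    "Other network devices\n"
    "<none>\n"
)

stda = STATUS.split("Network devices using DPDK-compatible driver")[1]
using_dpdk = stda.split("Network devices using kernel driver")[0]
stda = STATUS.split("Network devices using kernel driver")[1]
using_kernel = stda.split("Other network devices")[0]
print(using_dpdk.strip())  # the DPDK-bound device line
print(using_kernel.strip())  # the kernel-bound device line
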
diff --git a/extras/vpp_config/vpplib/constants.py b/extras/vpp_config/vpplib/constants.py
index 051a21cf023..63428b0c4d4 100644
--- a/extras/vpp_config/vpplib/constants.py
+++ b/extras/vpp_config/vpplib/constants.py
@@ -18,31 +18,31 @@ class Constants(object):
"""Constants used in CSIT."""
# OpenVPP testing directory location at topology nodes
- REMOTE_FW_DIR = '/tmp/openvpp-testing'
+ REMOTE_FW_DIR = "/tmp/openvpp-testing"
# shell scripts location
- RESOURCES_LIB_SH = 'resources/libraries/bash'
+ RESOURCES_LIB_SH = "resources/libraries/bash"
# vat templates location
- RESOURCES_TPL_VAT = 'resources/templates/vat'
+ RESOURCES_TPL_VAT = "resources/templates/vat"
# OpenVPP VAT binary name
- VAT_BIN_NAME = 'vpp_api_test'
+ VAT_BIN_NAME = "vpp_api_test"
# QEMU version to install
- QEMU_INSTALL_VERSION = 'qemu-2.5.0'
+ QEMU_INSTALL_VERSION = "qemu-2.5.0"
# QEMU install directory
- QEMU_INSTALL_DIR = '/opt/qemu-2.5.0'
+ QEMU_INSTALL_DIR = "/opt/qemu-2.5.0"
# Honeycomb directory location at topology nodes:
- REMOTE_HC_DIR = '/opt/honeycomb'
+ REMOTE_HC_DIR = "/opt/honeycomb"
# Honeycomb persistence files location
- REMOTE_HC_PERSIST = '/var/lib/honeycomb/persist'
+ REMOTE_HC_PERSIST = "/var/lib/honeycomb/persist"
# Honeycomb templates location
- RESOURCES_TPL_HC = 'resources/templates/honeycomb'
+ RESOURCES_TPL_HC = "resources/templates/honeycomb"
# ODL Client Restconf listener port
ODL_PORT = 8181