Diffstat (limited to 'resources/libraries/python/KubernetesUtils.py')
-rw-r--r--  resources/libraries/python/KubernetesUtils.py  330
1 file changed, 165 insertions(+), 165 deletions(-)
diff --git a/resources/libraries/python/KubernetesUtils.py b/resources/libraries/python/KubernetesUtils.py
index 029d635c72..d0d72a39a1 100644
--- a/resources/libraries/python/KubernetesUtils.py
+++ b/resources/libraries/python/KubernetesUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,25 +13,27 @@
"""Library to control Kubernetes kubectl."""
+from functools import reduce
+from io import open
from time import sleep
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.topology import NodeType
-from resources.libraries.python.ssh import SSH, exec_cmd_no_error
from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.ssh import SSH, exec_cmd_no_error
+from resources.libraries.python.topology import NodeType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
-__all__ = ["KubernetesUtils"]
+__all__ = [u"KubernetesUtils"]
# Maximum number of retries to check if PODs are running or deleted.
MAX_RETRY = 48
-class KubernetesUtils(object):
+
+class KubernetesUtils:
"""Kubernetes utilities class."""
def __init__(self):
"""Initialize KubernetesUtils class."""
- pass
@staticmethod
def load_docker_image_on_node(node, image_path):
@@ -43,20 +45,18 @@ class KubernetesUtils(object):
:type image_path: str
:raises RuntimeError: If loading image failed on node.
"""
- command = 'docker load -i {image_path}'.\
- format(image_path=image_path)
- message = 'Failed to load Docker image on {node}.'.\
- format(node=node['host'])
- exec_cmd_no_error(node, command, timeout=240, sudo=True,
- message=message)
-
- command = "docker rmi $(sudo docker images -f 'dangling=true' -q)".\
- format(image_path=image_path)
- message = 'Failed to clean Docker images on {node}.'.\
- format(node=node['host'])
+ command = f"docker load -i {image_path}"
+ message = f"Failed to load Docker image on {node[u'host']}."
+ exec_cmd_no_error(
+ node, command, timeout=240, sudo=True, message=message
+ )
+
+ command = u"docker rmi $(sudo docker images -f 'dangling=true' -q)"
+ message = f"Failed to clean Docker images on {node[u'host']}."
try:
- exec_cmd_no_error(node, command, timeout=240, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=240, sudo=True, message=message
+ )
except RuntimeError:
pass
@@ -70,7 +70,7 @@ class KubernetesUtils(object):
:type image_path: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.load_docker_image_on_node(node, image_path)
@staticmethod
@@ -84,16 +84,17 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- cmd = '{dir}/{lib}/k8s_setup.sh deploy_calico'\
- .format(dir=Constants.REMOTE_FW_DIR,
- lib=Constants.RESOURCES_LIB_SH)
- (ret_code, _, _) = ssh.exec_command(cmd, timeout=240)
+ cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}/" \
+ f"k8s_setup.sh deploy_calico"
+ ret_code, _, _ = ssh.exec_command(cmd, timeout=240)
if int(ret_code) != 0:
- raise RuntimeError('Failed to setup Kubernetes on {node}.'
- .format(node=node['host']))
+ raise RuntimeError(
+ "Failed to setup Kubernetes on {node[u'host']}."
+ )
- KubernetesUtils.wait_for_kubernetes_pods_on_node(node,
- nspace='kube-system')
+ KubernetesUtils.wait_for_kubernetes_pods_on_node(
+ node, nspace=u"kube-system"
+ )
@staticmethod
def setup_kubernetes_on_all_duts(nodes):
@@ -103,7 +104,7 @@ class KubernetesUtils(object):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.setup_kubernetes_on_node(node)
@staticmethod
@@ -117,13 +118,14 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- cmd = '{dir}/{lib}/k8s_setup.sh destroy'\
- .format(dir=Constants.REMOTE_FW_DIR,
- lib=Constants.RESOURCES_LIB_SH)
- (ret_code, _, _) = ssh.exec_command(cmd, timeout=120)
+ cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}/" \
+ f"k8s_setup.sh destroy"
+
+ ret_code, _, _ = ssh.exec_command(cmd, timeout=120)
if int(ret_code) != 0:
- raise RuntimeError('Failed to destroy Kubernetes on {node}.'
- .format(node=node['host']))
+ raise RuntimeError(
+ f"Failed to destroy Kubernetes on {node[u'host']}."
+ )
@staticmethod
def destroy_kubernetes_on_all_duts(nodes):
@@ -133,7 +135,7 @@ class KubernetesUtils(object):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.destroy_kubernetes_on_node(node)
@staticmethod
@@ -151,19 +153,20 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- fqn_file = '{tpl}/{yaml}'.format(tpl=Constants.RESOURCES_TPL_K8S,
- yaml=yaml_file)
+ fqn_file = f"{Constants.RESOURCES_TPL_K8S}/{yaml_file}"
with open(fqn_file, 'r') as src_file:
stream = src_file.read()
- data = reduce(lambda a, kv: a.replace(*kv), kwargs.iteritems(),
- stream)
- cmd = 'cat <<EOF | kubectl apply -f - \n{data}\nEOF'.format(
- data=data)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ data = reduce(
+ lambda a, kv: a.replace(*kv), list(kwargs.items()), stream
+ )
+ cmd = f"cat <<EOF | kubectl apply -f - \n{data}\nEOF"
+
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Failed to apply Kubernetes template {yaml} '
- 'on {node}.'.format(yaml=yaml_file,
- node=node['host']))
+ raise RuntimeError(
+ f"Failed to apply Kubernetes template {yaml_file} "
+ f"on {node[u'host']}."
+ )
@staticmethod
def apply_kubernetes_resource_on_all_duts(nodes, yaml_file, **kwargs):
@@ -177,10 +180,10 @@ class KubernetesUtils(object):
:type kwargs: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- KubernetesUtils.apply_kubernetes_resource_on_node(node,
- yaml_file,
- **kwargs)
+ if node[u"type"] == NodeType.DUT:
+ KubernetesUtils.apply_kubernetes_resource_on_node(
+ node, yaml_file, **kwargs
+ )
@staticmethod
def create_kubernetes_cm_from_file_on_node(node, nspace, name, **kwargs):
@@ -199,21 +202,21 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
-
- from_file = '{0}'.format(' '.join('--from-file={0}={1} '\
- .format(key, kwargs[key]) for key in kwargs))
+ nspace = f"-n {nspace}" if nspace else u""
+ from_file = u" ".join(
+ f"--from-file={key}={kwargs[key]} " for key in kwargs
+ )
+ cmd = f"kubectl create {nspace} configmap {name} {from_file}"
- cmd = 'kubectl create {nspace} configmap {name} {from_file}'\
- .format(nspace=nspace, name=name, from_file=from_file)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Failed to create Kubernetes ConfigMap '
- 'on {node}.'.format(node=node['host']))
+ raise RuntimeError(
+ f"Failed to create Kubernetes ConfigMap on {node[u'host']}."
+ )
@staticmethod
- def create_kubernetes_cm_from_file_on_all_duts(nodes, nspace, name,
- **kwargs):
+ def create_kubernetes_cm_from_file_on_all_duts(
+ nodes, nspace, name, **kwargs):
"""Create Kubernetes ConfigMap from file on all DUTs.
:param nodes: Topology nodes.
@@ -226,15 +229,14 @@ class KubernetesUtils(object):
:param kwargs: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- KubernetesUtils.create_kubernetes_cm_from_file_on_node(node,
- nspace,
- name,
- **kwargs)
+ if node[u"type"] == NodeType.DUT:
+ KubernetesUtils.create_kubernetes_cm_from_file_on_node(
+ node, nspace, name, **kwargs
+ )
@staticmethod
- def delete_kubernetes_resource_on_node(node, nspace, name=None,
- rtype='po,cm,deploy,rs,rc,svc'):
+ def delete_kubernetes_resource_on_node(
+ node, nspace, name=None, rtype=u"po,cm,deploy,rs,rc,svc"):
"""Delete Kubernetes resource on node.
:param node: DUT node.
@@ -251,27 +253,28 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- name = '{name}'.format(name=name) if name else '--all'
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
+ name = f"{name}" if name else u"--all"
+ nspace = f"-n {nspace}" if nspace else u""
+ cmd = f"kubectl delete {nspace} {rtype} {name}"
- cmd = 'kubectl delete {nspace} {rtype} {name}'\
- .format(nspace=nspace, rtype=rtype, name=name)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=120)
if int(ret_code) != 0:
- raise RuntimeError('Failed to delete Kubernetes resources '
- 'on {node}.'.format(node=node['host']))
+ raise RuntimeError(
+ f"Failed to delete Kubernetes resources on {node[u'host']}."
+ )
- cmd = 'kubectl get {nspace} pods --no-headers'\
- .format(nspace=nspace)
+ cmd = f"kubectl get {nspace} pods --no-headers"
for _ in range(MAX_RETRY):
- (ret_code, stdout, stderr) = ssh.exec_command_sudo(cmd)
+ ret_code, stdout, stderr = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Failed to retrieve Kubernetes resources on '
- '{node}.'.format(node=node['host']))
- if name == '--all':
+ raise RuntimeError(
+ f"Failed to retrieve Kubernetes resources "
+ f"on {node[u'host']}."
+ )
+ if name == u"--all":
ready = False
for line in stderr.splitlines():
- if 'No resources found.' in line:
+ if u"No resources found." in line:
ready = True
if ready:
break
@@ -279,9 +282,10 @@ class KubernetesUtils(object):
ready = False
for line in stdout.splitlines():
try:
- state = line.split()[1].split('/')
- ready = True if 'Running' in line and\
- state == state[::-1] else False
+ state = line.split()[1].split(u"/")
+ ready = bool(
+ u"Running" in line and state == state[::-1]
+ )
if not ready:
break
except (ValueError, IndexError):
@@ -290,12 +294,13 @@ class KubernetesUtils(object):
break
sleep(5)
else:
- raise RuntimeError('Failed to delete Kubernetes resources on '
- '{node}.'.format(node=node['host']))
+ raise RuntimeError(
+ f"Failed to delete Kubernetes resources on {node[u'host']}."
+ )
@staticmethod
- def delete_kubernetes_resource_on_all_duts(nodes, nspace, name=None,
- rtype='po,cm,deploy,rs,rc,svc'):
+ def delete_kubernetes_resource_on_all_duts(
+ nodes, nspace, name=None, rtype=u"po,cm,deploy,rs,rc,svc"):
"""Delete all Kubernetes resource on all DUTs.
:param nodes: Topology nodes.
@@ -308,9 +313,10 @@ class KubernetesUtils(object):
:type name: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- KubernetesUtils.delete_kubernetes_resource_on_node(node, nspace,
- name, rtype)
+ if node[u"type"] == NodeType.DUT:
+ KubernetesUtils.delete_kubernetes_resource_on_node(
+ node, nspace, name, rtype
+ )
@staticmethod
def describe_kubernetes_resource_on_node(node, nspace):
@@ -324,9 +330,9 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
+ nspace = f"-n {nspace}" if nspace else u""
+ cmd = f"kubectl describe {nspace} all"
- cmd = 'kubectl describe {nspace} all'.format(nspace=nspace)
ssh.exec_command_sudo(cmd)
@staticmethod
@@ -339,9 +345,10 @@ class KubernetesUtils(object):
:type nspace: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- KubernetesUtils.describe_kubernetes_resource_on_node(node,
- nspace)
+ if node[u"type"] == NodeType.DUT:
+ KubernetesUtils.describe_kubernetes_resource_on_node(
+ node, nspace
+ )
@staticmethod
def get_kubernetes_logs_on_node(node, nspace):
@@ -355,15 +362,16 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
+ nspace = f"-n {nspace}" if nspace else u""
+ cmd = f"for p in $(kubectl get pods {nspace} " \
+ f"-o jsonpath='{{.items[*].metadata.name}}'); do echo $p; " \
+ f"kubectl logs {nspace} $p; done"
- cmd = "for p in $(kubectl get pods {nspace} -o jsonpath="\
- "'{{.items[*].metadata.name}}'); do echo $p; kubectl logs "\
- "{nspace} $p; done".format(nspace=nspace)
ssh.exec_command(cmd)
- cmd = "kubectl exec {nspace} etcdv3 -- etcdctl --endpoints "\
- "\"localhost:22379\" get \"/\" --prefix=true".format(nspace=nspace)
+ cmd = f"kubectl exec {nspace} etcdv3 -- etcdctl " \
+ f"--endpoints \"localhost:22379\" get \"/\" --prefix=true"
+
ssh.exec_command(cmd)
@staticmethod
@@ -376,7 +384,7 @@ class KubernetesUtils(object):
:type nspace: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.get_kubernetes_logs_on_node(node, nspace)
@staticmethod
@@ -392,20 +400,19 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace \
- else '--all-namespaces'
+ nspace = f"-n {nspace}" if nspace else u"--all-namespaces"
+ cmd = f"kubectl get {nspace} pods --no-headers"
- cmd = 'kubectl get {nspace} pods --no-headers' \
- .format(nspace=nspace)
for _ in range(MAX_RETRY):
- (ret_code, stdout, _) = ssh.exec_command_sudo(cmd)
+ ret_code, stdout, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) == 0:
ready = False
for line in stdout.splitlines():
try:
- state = line.split()[1].split('/')
- ready = True if 'Running' in line and \
- state == state[::-1] else False
+ state = line.split()[1].split(u"/")
+ ready = bool(
+ u"Running" in line and state == state[::-1]
+ )
if not ready:
break
except (ValueError, IndexError):
@@ -414,8 +421,9 @@ class KubernetesUtils(object):
break
sleep(5)
else:
- raise RuntimeError('Kubernetes PODs are not running on {node}.'
- .format(node=node['host']))
+ raise RuntimeError(
+ f"Kubernetes PODs are not running on {node[u'host']}."
+ )
@staticmethod
def wait_for_kubernetes_pods_on_all_duts(nodes, nspace):
@@ -427,7 +435,7 @@ class KubernetesUtils(object):
:type nspace: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.wait_for_kubernetes_pods_on_node(node, nspace)
@staticmethod
@@ -440,9 +448,9 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- cmd = '{dir}/{lib}/k8s_setup.sh affinity_non_vpp'\
- .format(dir=Constants.REMOTE_FW_DIR,
- lib=Constants.RESOURCES_LIB_SH)
+ cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}/" \
+ f"k8s_setup.sh affinity_non_vpp"
+
ssh.exec_command(cmd)
@staticmethod
@@ -453,7 +461,7 @@ class KubernetesUtils(object):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.set_kubernetes_pods_affinity_on_node(node)
@staticmethod
@@ -463,44 +471,40 @@ class KubernetesUtils(object):
:param kwargs: Key-value pairs used to create configuration.
:param kwargs: dict
"""
- smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
-
- cpuset_cpus = \
- CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
- cpu_node=kwargs['cpu_node'],
- skip_cnt=2,
- cpu_cnt=kwargs['phy_cores'],
- smt_used=smt_used)
- cpuset_main = \
- CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
- cpu_node=kwargs['cpu_node'],
- skip_cnt=1,
- cpu_cnt=1,
- smt_used=smt_used)
+ smt_used = CpuUtils.is_smt_enabled(kwargs[u"node"][u"cpuinfo"])
+
+ cpuset_cpus = CpuUtils.cpu_slice_of_list_per_node(
+ node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], skip_cnt=2,
+ cpu_cnt=kwargs[u"phy_cores"], smt_used=smt_used
+ )
+ cpuset_main = CpuUtils.cpu_slice_of_list_per_node(
+ node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], skip_cnt=1,
+ cpu_cnt=1, smt_used=smt_used
+ )
# Create config instance
vpp_config = VppConfigGenerator()
- vpp_config.set_node(kwargs['node'])
- vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
+ vpp_config.set_node(kwargs[u"node"])
+ vpp_config.add_unix_cli_listen(value=u"0.0.0.0:5002")
vpp_config.add_unix_nodaemon()
vpp_config.add_socksvr()
- vpp_config.add_heapsize('4G')
- vpp_config.add_ip_heap_size('4G')
- vpp_config.add_ip6_heap_size('4G')
- vpp_config.add_ip6_hash_buckets('2000000')
- if not kwargs['jumbo']:
+ vpp_config.add_heapsize(u"4G")
+ vpp_config.add_ip_heap_size(u"4G")
+ vpp_config.add_ip6_heap_size(u"4G")
+ vpp_config.add_ip6_hash_buckets(u"2000000")
+ if not kwargs[u"jumbo"]:
vpp_config.add_dpdk_no_multi_seg()
vpp_config.add_dpdk_no_tx_checksum_offload()
- vpp_config.add_dpdk_dev_default_rxq(kwargs['rxq_count_int'])
- vpp_config.add_dpdk_dev(kwargs['if1'], kwargs['if2'])
- vpp_config.add_buffers_per_numa(kwargs['buffers_per_numa'])
+ vpp_config.add_dpdk_dev_default_rxq(kwargs[u"rxq_count_int"])
+ vpp_config.add_dpdk_dev(kwargs[u"if1"], kwargs[u"if2"])
+ vpp_config.add_buffers_per_numa(kwargs[u"buffers_per_numa"])
# We will pop first core from list to be main core
vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
# if this is not only core in list, the rest will be used as workers.
if cpuset_cpus:
- corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+ corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
- vpp_config.write_config(filename=kwargs['filename'])
+ vpp_config.write_config(filename=kwargs[u"filename"])
@staticmethod
def create_kubernetes_vnf_startup_config(**kwargs):
@@ -509,32 +513,28 @@ class KubernetesUtils(object):
:param kwargs: Key-value pairs used to create configuration.
:param kwargs: dict
"""
- smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
- skip_cnt = kwargs['cpu_skip'] + (kwargs['i'] - 1) * \
- (kwargs['phy_cores'] - 1)
- cpuset_cpus = \
- CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
- cpu_node=kwargs['cpu_node'],
- skip_cnt=skip_cnt,
- cpu_cnt=kwargs['phy_cores']-1,
- smt_used=smt_used)
- cpuset_main = \
- CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
- cpu_node=kwargs['cpu_node'],
- skip_cnt=1,
- cpu_cnt=1,
- smt_used=smt_used)
+ smt_used = CpuUtils.is_smt_enabled(kwargs[u"node"][u"cpuinfo"])
+ skip_cnt = kwargs[u"cpu_skip"] + (kwargs[u"i"] - 1) * \
+ (kwargs[u"phy_cores"] - 1)
+ cpuset_cpus = CpuUtils.cpu_slice_of_list_per_node(
+ node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"],
+ skip_cnt=skip_cnt, cpu_cnt=kwargs[u"phy_cores"]-1, smt_used=smt_used
+ )
+ cpuset_main = CpuUtils.cpu_slice_of_list_per_node(
+ node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], skip_cnt=1,
+ cpu_cnt=1, smt_used=smt_used
+ )
# Create config instance
vpp_config = VppConfigGenerator()
- vpp_config.set_node(kwargs['node'])
- vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
+ vpp_config.set_node(kwargs[u"node"])
+ vpp_config.add_unix_cli_listen(value=u"0.0.0.0:5002")
vpp_config.add_unix_nodaemon()
vpp_config.add_socksvr()
# We will pop first core from list to be main core
vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
# if this is not only core in list, the rest will be used as workers.
if cpuset_cpus:
- corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+ corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
- vpp_config.add_plugin('disable', 'dpdk_plugin.so')
- vpp_config.write_config(filename=kwargs['filename'])
+ vpp_config.add_plugin(u"disable", [u"dpdk_plugin.so"])
+ vpp_config.write_config(filename=kwargs[u"filename"])
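
Note (not part of the patch): `apply_kubernetes_resource_on_node()` fills in YAML templates by chaining `str.replace()` over the keyword arguments — the Python 2 `kwargs.iteritems()` becomes `list(kwargs.items())` fed to `functools.reduce` — and then pipes the result to `kubectl apply -f -` through a heredoc. A minimal sketch of that substitution step outside the library; the template text and placeholder names below are made up for illustration:

    # Illustrative only: the template-fill pattern used by
    # apply_kubernetes_resource_on_node(), shown standalone.
    from functools import reduce

    stream = u"apiVersion: v1\nkind: Pod\nmetadata:\n  name: $$POD_NAME$$\n"
    kwargs = {u"$$POD_NAME$$": u"vswitch-vpp"}  # hypothetical placeholder/value

    # Each (placeholder, value) pair is applied as str.replace() in turn.
    data = reduce(lambda a, kv: a.replace(*kv), list(kwargs.items()), stream)

    # The library then feeds the rendered YAML to kubectl over a heredoc.
    cmd = f"cat <<EOF | kubectl apply -f - \n{data}\nEOF"
    print(cmd)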
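The other piece of logic the conversion keeps intact is the POD readiness test in `wait_for_kubernetes_pods_on_node()` and `delete_kubernetes_resource_on_node()`: a line of `kubectl get pods --no-headers` output counts as ready only if it contains `Running` and its READY column (`ready/total`) has matching counts, which the library checks with the reversal trick `state == state[::-1]`. A standalone sketch of that per-line check; the helper name and sample output are illustrative, not part of the library:

    # Illustrative only: the readiness test kept by the converted code.
    def pods_ready(kubectl_output):
        """Return True if every pod line reports Running and READY n/n."""
        for line in kubectl_output.splitlines():
            try:
                # Column 1 is READY, e.g. "2/2"; equal halves mean all
                # containers in the POD are up (["2", "2"] is a palindrome).
                state = line.split()[1].split(u"/")
                if not (u"Running" in line and state == state[::-1]):
                    return False
            except (ValueError, IndexError):
                return False
        return True

    # Example with hypothetical kubectl output:
    sample = u"etcdv3   1/1   Running   0   2m\nvswitch-vpp   2/2   Running   0   1m"
    assert pods_ready(sample)
    assert not pods_ready(u"vswitch-vpp   1/2   Running   0   1m")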