aboutsummaryrefslogtreecommitdiffstats
path: root/resources
diff options
context:
space:
mode:
Diffstat (limited to 'resources')
-rwxr-xr-xresources/libraries/bash/k8s_setup.sh54
-rw-r--r--resources/libraries/python/ContainerUtils.py117
-rw-r--r--resources/libraries/python/InterfaceUtil.py41
-rw-r--r--resources/libraries/python/KubernetesUtils.py372
-rw-r--r--resources/libraries/python/VppConfigGenerator.py91
-rw-r--r--resources/libraries/python/constants.py3
-rw-r--r--resources/libraries/robot/performance/performance_setup.robot4
-rw-r--r--resources/libraries/robot/performance/performance_utils.robot1
-rw-r--r--resources/libraries/robot/shared/default.robot2
-rw-r--r--resources/templates/kubernetes/calico_v2.4.1.yaml387
-rw-r--r--resources/templates/kubernetes/csit.yaml4
-rw-r--r--resources/templates/kubernetes/etcd.yaml25
-rw-r--r--resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml206
-rw-r--r--resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml206
-rw-r--r--resources/templates/kubernetes/kafka.yaml22
-rw-r--r--resources/tools/scripts/topo_container_copy.py132
16 files changed, 1561 insertions, 106 deletions
diff --git a/resources/libraries/bash/k8s_setup.sh b/resources/libraries/bash/k8s_setup.sh
new file mode 100755
index 0000000000..0649c711c6
--- /dev/null
+++ b/resources/libraries/bash/k8s_setup.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -xo pipefail
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+K8S_CALICO="${SCRIPT_DIR}/../../templates/kubernetes/calico_v2.4.1.yaml"
+K8S_CSIT="${SCRIPT_DIR}/../../templates/kubernetes/csit.yaml"
+
+trap "sudo kubeadm reset && sudo rm -rf $HOME/.kube" ERR
+
+# Revert any changes made to this host by 'kubeadm init' or 'kubeadm join'
+sudo kubeadm reset && sudo rm -rf $HOME/.kube || \
+ { echo "Failed to reset kubeadm"; exit 1; }
+
+# Set up the Kubernetes master
+sudo -E kubeadm init --token-ttl 0 --pod-network-cidr=192.168.0.0/16 || \
+ { echo "Failed to init kubeadm"; exit 1; }
+
+# Make cgroup non-exclusive for CPU and MEM
+sudo cgset -r cpuset.cpu_exclusive=0 /kubepods
+sudo cgset -r cpuset.mem_exclusive=0 /kubepods
+
+rm -rf $HOME/.kube
+mkdir -p $HOME/.kube
+sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+# Apply resources
+kubectl apply -f ${K8S_CALICO} || \
+ { echo "Failed to apply Calico resources"; exit 1; }
+kubectl apply -f ${K8S_CSIT} || \
+ { echo "Failed to apply CSIT resource"; exit 1; }
+
+# Update the taints
+kubectl taint nodes --all node-role.kubernetes.io/master- || \
+ { echo "Failed to taint nodes"; exit 1; }
+
+# Dump Kubernetes objects ...
+kubectl get all --all-namespaces
+
+echo Kubernetes is ready
diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py
index fb2695fe44..b56fb0dc24 100644
--- a/resources/libraries/python/ContainerUtils.py
+++ b/resources/libraries/python/ContainerUtils.py
@@ -12,7 +12,7 @@
# limitations under the License.
# Bug workaround in pylint for abstract classes.
-#pylint: disable=W0223
+# pylint: disable=W0223
"""Library to manipulate Containers."""
@@ -93,11 +93,12 @@ class ContainerManager(object):
def construct_containers(self, **kwargs):
"""Construct 1..N container(s) on node with specified name.
+
Ordinal number is automatically added to the name of container as
suffix.
- :param kwargs: Name of container.
- :param kwargs: str
+ :param kwargs: Named parameters.
+    :type kwargs: dict
"""
name = kwargs['name']
for i in range(kwargs['count']):
@@ -311,7 +312,6 @@ class ContainerEngine(object):
# Create config instance
vpp_config = VppConfigGenerator()
vpp_config.set_node(self.container.node)
- vpp_config.set_config_filename(config_filename)
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
vpp_config.add_unix_exec('/tmp/running.exec')
@@ -326,15 +326,15 @@ class ContainerEngine(object):
self.execute('mkdir -p /etc/vpp/')
self.execute('echo "{c}" | tee {f}'
.format(c=vpp_config.get_config_str(),
- f=vpp_config.get_config_filename()))
+ f=config_filename))
- def create_vpp_exec_config(self, vat_template_file, **args):
+ def create_vpp_exec_config(self, vat_template_file, **kwargs):
"""Create VPP exec configuration on container.
:param vat_template_file: File name of a VAT template script.
- :param args: Parameters for VAT script.
+ :param kwargs: Parameters for VAT script.
:type vat_template_file: str
- :type args: dict
+ :type kwargs: dict
"""
vat_file_path = '{p}/{f}'.format(p=Constants.RESOURCES_TPL_VAT,
f=vat_template_file)
@@ -342,7 +342,7 @@ class ContainerEngine(object):
with open(vat_file_path, 'r') as template_file:
cmd_template = template_file.readlines()
for line_tmpl in cmd_template:
- vat_cmd = line_tmpl.format(**args)
+ vat_cmd = line_tmpl.format(**kwargs)
self.execute('echo "{c}" >> /tmp/running.exec'
.format(c=vat_cmd.replace('\n', '')))
@@ -354,6 +354,28 @@ class ContainerEngine(object):
"""Check if container is present."""
raise NotImplementedError
+ def _configure_cgroup(self, name):
+ """Configure the control group associated with a container.
+
+ :param name: Name of cgroup.
+ :type name: str
+ :raises RuntimeError: If applying cgroup settings via cgset failed.
+ """
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'cgcreate -g cpuset:/{name}'.format(name=name))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to copy cgroup settings from root.')
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to apply cgroup settings.')
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to apply cgroup settings.')
+
class LXC(ContainerEngine):
"""LXC implementation."""
@@ -363,8 +385,7 @@ class LXC(ContainerEngine):
super(LXC, self).__init__()
def acquire(self, force=True):
- """Acquire a privileged system object where configuration is stored and
- where user information can be stored.
+ """Acquire a privileged system object where configuration is stored.
:param force: If a container exists, destroy it and create a new
container.
@@ -398,6 +419,7 @@ class LXC(ContainerEngine):
if int(ret) != 0:
raise RuntimeError('Failed to write {c.name} config.'
.format(c=self.container))
+ self._configure_cgroup('lxc')
def create(self):
"""Create/deploy an application inside a container on system.
@@ -415,13 +437,25 @@ class LXC(ContainerEngine):
raise RuntimeError('Failed to start container {c.name}.'
.format(c=self.container))
self._lxc_wait('RUNNING')
- self._lxc_cgroup(state_object='cpuset.cpus',
- value=cpuset_cpus)
+
+ # Workaround for LXC to be able to allocate all cpus including isolated.
+ cmd = 'cgset --copy-from / lxc/'
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to copy cgroup to LXC')
+
+ cmd = 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'\
+ .format(c=self.container, cpus=cpuset_cpus)
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to set cpuset.cpus to container '
+ '{c.name}.'.format(c=self.container))
def execute(self, command):
- """Start a process inside a running container. Runs the specified
- command inside the container specified by name. The container has to
- be running already.
+ """Start a process inside a running container.
+
+ Runs the specified command inside the container specified by name. The
+ container has to be running already.
:param command: Command to run inside container.
:type command: str
@@ -530,33 +564,6 @@ class LXC(ContainerEngine):
raise RuntimeError('Failed to wait for state "{s}" of container '
'{c.name}.'.format(s=state, c=self.container))
- def _lxc_cgroup(self, state_object, value=''):
- """Manage the control group associated with a container.
-
- :param state_object: Specify the state object name.
- :param value: Specify the value to assign to the state object. If empty,
- then action is GET, otherwise is action SET.
- :type state_object: str
- :type value: str
- :raises RuntimeError: If getting/setting state of a container failed.
- """
- cmd = 'lxc-cgroup --name {c.name} {s} {v}'\
- .format(c=self.container, s=state_object, v=value)
-
- ret, _, _ = self.container.ssh.exec_command_sudo(
- 'cgset --copy-from / lxc')
- if int(ret) != 0:
- raise RuntimeError('Failed to copy cgroup settings from root.')
-
- ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
- if int(ret) != 0:
- if value:
- raise RuntimeError('Failed to set {s} of container {c.name}.'
- .format(s=state_object, c=self.container))
- else:
- raise RuntimeError('Failed to get {s} of container {c.name}.'
- .format(s=state_object, c=self.container))
-
class Docker(ContainerEngine):
"""Docker implementation."""
@@ -584,6 +591,7 @@ class Docker(ContainerEngine):
if int(ret) != 0:
raise RuntimeError('Failed to create container {c.name}.'
.format(c=self.container))
+ self._configure_cgroup('docker')
def create(self):
"""Create/deploy container.
@@ -613,7 +621,7 @@ class Docker(ContainerEngine):
cmd = 'docker run '\
'--privileged --detach --interactive --tty --rm '\
- '--cgroup-parent lxc {cpuset_cpus} {cpuset_mems} {publish} '\
+ '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
'{env} {volume} --name {container.name} {container.image} '\
'{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
container=self.container, command=command,
@@ -627,9 +635,10 @@ class Docker(ContainerEngine):
self.info()
def execute(self, command):
- """Start a process inside a running container. Runs the specified
- command inside the container specified by name. The container has to
- be running already.
+ """Start a process inside a running container.
+
+ Runs the specified command inside the container specified by name. The
+ container has to be running already.
:param command: Command to run inside container.
:type command: str
@@ -731,12 +740,26 @@ class Container(object):
pass
def __getattr__(self, attr):
+ """Get attribute custom implementation.
+
+ :param attr: Attribute to get.
+ :type attr: str
+ :returns: Attribute value or None.
+ :rtype: any
+ """
try:
return self.__dict__[attr]
except KeyError:
return None
def __setattr__(self, attr, value):
+ """Set attribute custom implementation.
+
+ :param attr: Attribute to set.
+ :param value: Value to set.
+ :type attr: str
+ :type value: any
+ """
try:
# Check if attribute exists
self.__dict__[attr]
diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 71d36c1c9d..795bb52933 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -450,6 +450,47 @@ class InterfaceUtil(object):
interface_dump_json)
@staticmethod
+ def update_nic_interface_names(node):
+ """Update interface names based on nic type and PCI address.
+
+ This method updates interface names in the same format as VPP does.
+
+ :param node: Node dictionary.
+ :type node: dict
+ """
+ for ifc in node['interfaces'].values():
+ if_pci = ifc['pci_address'].replace('.', ':').split(':')
+ bus = '{:x}'.format(int(if_pci[1], 16))
+ dev = '{:x}'.format(int(if_pci[2], 16))
+ fun = '{:x}'.format(int(if_pci[3], 16))
+ loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun)
+ if ifc['model'] == 'Intel-XL710':
+ ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
+ elif ifc['model'] == 'Intel-X710':
+ ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+ elif ifc['model'] == 'Intel-X520-DA2':
+ ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+ elif ifc['model'] == 'Cisco-VIC-1385':
+ ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
+ elif ifc['model'] == 'Cisco-VIC-1227':
+ ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+ else:
+ ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc)
+
+ @staticmethod
+ def update_nic_interface_names_on_all_duts(nodes):
+ """Update interface names based on nic type and PCI address on all DUTs.
+
+ This method updates interface names in the same format as VPP does.
+
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ InterfaceUtil.update_nic_interface_names(node)
+
+ @staticmethod
def update_tg_interface_data_on_node(node):
"""Update interface name for TG/linux node in DICT__nodes.
diff --git a/resources/libraries/python/KubernetesUtils.py b/resources/libraries/python/KubernetesUtils.py
new file mode 100644
index 0000000000..5faa056ddc
--- /dev/null
+++ b/resources/libraries/python/KubernetesUtils.py
@@ -0,0 +1,372 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Library to control Kubernetes kubectl."""
+
+import time
+import yaml
+
+from resources.libraries.python.constants import Constants
+from resources.libraries.python.topology import NodeType
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
+
+__all__ = ["KubernetesUtils"]
+
+
+class KubernetesUtils(object):
+ """Kubernetes utilities class."""
+
+ def __init__(self):
+ """Initialize KubernetesUtils class."""
+ pass
+
+ @staticmethod
+ def setup_kubernetes_on_node(node):
+ """Set up Kubernetes on node.
+
+ :param node: DUT node.
+ :type node: dict
+ :raises RuntimeError: If Kubernetes setup failed on node.
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ cmd = '{dir}/{lib}/k8s_setup.sh '.format(dir=Constants.REMOTE_FW_DIR,
+ lib=Constants.RESOURCES_LIB_SH)
+ (ret_code, _, _) = ssh.exec_command(cmd, timeout=120)
+ if int(ret_code) != 0:
+ raise RuntimeError('Failed to setup Kubernetes on {node}.'
+ .format(node=node['host']))
+
+ @staticmethod
+ def setup_kubernetes_on_all_duts(nodes):
+ """Set up Kubernetes on all DUTs.
+
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ KubernetesUtils.setup_kubernetes_on_node(node)
+
+ @staticmethod
+ def apply_kubernetes_resource_on_node(node, yaml_file, **kwargs):
+ """Apply Kubernetes resource on node.
+
+ :param node: DUT node.
+ :param yaml_file: YAML configuration file.
+ :param kwargs: Key-value pairs to replace in YAML template.
+ :type node: dict
+ :type yaml_file: str
+ :type kwargs: dict
+ :raises RuntimeError: If applying Kubernetes template failed.
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ stream = file('{tpl}/{yaml}'.format(tpl=Constants.RESOURCES_TPL_K8S,
+ yaml=yaml_file), 'r')
+
+ for data in yaml.load_all(stream):
+ data = reduce(lambda a, kv: a.replace(*kv), kwargs.iteritems(),
+ yaml.dump(data, default_flow_style=False))
+            # Workaround to avoid using RAW string annotated with | in YAML as
+ # library + bash is misinterpreting spaces.
+ data = data.replace('.conf:\n', '.conf: |\n')
+ cmd = 'cat <<EOF | kubectl apply -f - \n{data}\nEOF'.format(
+ data=data)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ if int(ret_code) != 0:
+ raise RuntimeError('Failed to apply Kubernetes template {yaml} '
+ 'on {node}.'.format(yaml=yaml_file,
+ node=node['host']))
+
+ @staticmethod
+ def apply_kubernetes_resource_on_all_duts(nodes, yaml_file, **kwargs):
+ """Apply Kubernetes resource on all DUTs.
+
+ :param nodes: Topology nodes.
+ :param yaml_file: YAML configuration file.
+ :param kwargs: Key-value pairs to replace in YAML template.
+ :type nodes: dict
+ :type yaml_file: str
+ :type kwargs: dict
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ KubernetesUtils.apply_kubernetes_resource_on_node(node,
+ yaml_file,
+ **kwargs)
+
+ @staticmethod
+ def create_kubernetes_cm_from_file_on_node(node, name, key, src_file):
+ """Create Kubernetes ConfigMap from file on node.
+
+ :param node: DUT node.
+ :param name: ConfigMap name.
+ :param key: Key (destination file).
+ :param src_file: Source file.
+ :type node: dict
+ :type name: str
+ :type key: str
+ :type src_file: str
+ :raises RuntimeError: If creating Kubernetes ConfigMap failed.
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ cmd = 'kubectl create -n csit configmap {name} --from-file={key}='\
+ '{src_file}'.format(name=name, key=key, src_file=src_file)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ if int(ret_code) != 0:
+ raise RuntimeError('Failed to create Kubernetes ConfigMap {name} '
+ 'on {node}.'.format(name=name,
+ node=node['host']))
+
+ @staticmethod
+ def create_kubernetes_cm_from_file_on_all_duts(nodes, name, key, src_file):
+ """Create Kubernetes ConfigMap from file on all DUTs.
+
+ :param nodes: Topology nodes.
+ :param name: ConfigMap name.
+ :param key: Key (destination file).
+ :param src_file: Source file.
+ :type nodes: dict
+ :type name: str
+ :type key: str
+ :type src_file: str
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ KubernetesUtils.create_kubernetes_cm_from_file_on_node(node,
+ name,
+ key,
+ src_file)
+
+ @staticmethod
+ def delete_kubernetes_resource_on_node(node, rtype='po,cm', name=None):
+ """Delete Kubernetes resource on node.
+
+ :param node: DUT node.
+ :param rtype: Kubernetes resource type.
+ :param name: Name of resource.
+ :type node: dict
+ :type rtype: str
+ :type name: str
+ :raises RuntimeError: If deleting Kubernetes resource failed.
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ name = '{name}'.format(name=name) if name else '--all'
+
+ cmd = 'kubectl delete -n csit {rtype} {name}'\
+ .format(rtype=rtype, name=name)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ if int(ret_code) != 0:
+ raise RuntimeError('Failed to delete Kubernetes resources in CSIT '
+ 'namespace on {node}.'.format(node=node['host']))
+
+ cmd = 'kubectl get -n csit pods --no-headers'
+ for _ in range(24):
+ (ret_code, stdout, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ if int(ret_code) == 0:
+ ready = True
+ for line in stdout.splitlines():
+ if 'No resources found.' not in line:
+ ready = False
+ if ready:
+ break
+ time.sleep(5)
+ else:
+ raise RuntimeError('Failed to delete Kubernetes resources in CSIT '
+ 'namespace on {node}.'.format(node=node['host']))
+
+ @staticmethod
+ def delete_kubernetes_resource_on_all_duts(nodes, rtype='po,cm', name=None):
+        """Delete all Kubernetes resources on all DUTs.
+
+ :param nodes: Topology nodes.
+ :param rtype: Kubernetes resource type.
+ :param name: Name of resource.
+ :type nodes: dict
+ :type rtype: str
+ :type name: str
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ KubernetesUtils.delete_kubernetes_resource_on_node(node, rtype,
+ name)
+
+ @staticmethod
+ def describe_kubernetes_resource_on_node(node, rtype='po,cm'):
+ """Describe Kubernetes resource on node.
+
+ :param node: DUT node.
+ :param rtype: Kubernetes resource type.
+ :type node: dict
+ :type rtype: str
+ :raises RuntimeError: If describing Kubernetes resource failed.
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ cmd = 'kubectl describe -n csit {rtype}'.format(rtype=rtype)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ if int(ret_code) != 0:
+ raise RuntimeError('Failed to describe Kubernetes resource on '
+ '{node}.'.format(node=node['host']))
+
+ @staticmethod
+ def describe_kubernetes_resource_on_all_duts(nodes, rtype='po,cm'):
+ """Describe Kubernetes resource on all DUTs.
+
+ :param nodes: Topology nodes.
+ :param rtype: Kubernetes resource type.
+ :type nodes: dict
+ :type rtype: str
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ KubernetesUtils.describe_kubernetes_resource_on_node(node,
+ rtype)
+
+ @staticmethod
+ def reset_kubernetes_on_node(node):
+ """Reset Kubernetes on node.
+
+ :param node: DUT node.
+ :type node: dict
+ :raises RuntimeError: If resetting Kubernetes failed.
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ cmd = 'kubeadm reset && rm -rf $HOME/.kube'
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ if int(ret_code) != 0:
+ raise RuntimeError('Failed to reset Kubernetes on {node}.'
+ .format(node=node['host']))
+
+ @staticmethod
+ def reset_kubernetes_on_all_duts(nodes):
+ """Reset Kubernetes on all DUTs.
+
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ KubernetesUtils.reset_kubernetes_on_node(node)
+
+ @staticmethod
+ def wait_for_kubernetes_pods_on_node(node):
+ """Wait for Kubernetes PODs to become in 'Running' state on node.
+
+ :param node: DUT node.
+ :type node: dict
+ :raises RuntimeError: If Kubernetes PODs are not ready.
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ cmd = 'kubectl get -n csit pods --no-headers'
+ for _ in range(48):
+ (ret_code, stdout, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ if int(ret_code) == 0:
+ ready = True
+ for line in stdout.splitlines():
+ if 'Running' not in line:
+ ready = False
+ if ready:
+ break
+ time.sleep(5)
+ else:
+ raise RuntimeError('Kubernetes PODs are not ready on {node}.'
+ .format(node=node['host']))
+
+ @staticmethod
+ def wait_for_kubernetes_pods_on_all_duts(nodes):
+ """Wait for Kubernetes PODs to become in Running state on all DUTs.
+
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ KubernetesUtils.wait_for_kubernetes_pods_on_node(node)
+
+ @staticmethod
+ def create_kubernetes_vswitch_startup_config(**kwargs):
+ """Create Kubernetes VSWITCH startup configuration.
+
+ :param kwargs: Key-value pairs used to create configuration.
+        :type kwargs: dict
+ """
+ cpuset_cpus = \
+ CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
+ cpu_node=kwargs['cpu_node'],
+ skip_cnt=kwargs['cpu_skip'],
+ cpu_cnt=kwargs['cpu_cnt'],
+ smt_used=kwargs['smt_used'])
+
+ # Create config instance
+ vpp_config = VppConfigGenerator()
+ vpp_config.set_node(kwargs['node'])
+ vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
+ vpp_config.add_unix_nodaemon()
+ vpp_config.add_dpdk_socketmem('1024,1024')
+ vpp_config.add_heapsize('3G')
+ vpp_config.add_ip6_hash_buckets('2000000')
+ vpp_config.add_ip6_heap_size('3G')
+ if kwargs['framesize'] < 1522:
+ vpp_config.add_dpdk_no_multi_seg()
+ vpp_config.add_dpdk_dev_default_rxq(kwargs['rxq'])
+ vpp_config.add_dpdk_dev(kwargs['if1'], kwargs['if2'])
+ # We will pop first core from list to be main core
+ vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
+ # if this is not only core in list, the rest will be used as workers.
+ if cpuset_cpus:
+ corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+ vpp_config.add_cpu_corelist_workers(corelist_workers)
+ vpp_config.apply_config(filename=kwargs['filename'], restart_vpp=False)
+
+ @staticmethod
+ def create_kubernetes_vnf_startup_config(**kwargs):
+ """Create Kubernetes VNF startup configuration.
+
+ :param kwargs: Key-value pairs used to create configuration.
+        :type kwargs: dict
+ """
+ cpuset_cpus = \
+ CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
+ cpu_node=kwargs['cpu_node'],
+ skip_cnt=kwargs['cpu_skip'],
+ cpu_cnt=kwargs['cpu_cnt'],
+ smt_used=kwargs['smt_used'])
+
+ # Create config instance
+ vpp_config = VppConfigGenerator()
+ vpp_config.set_node(kwargs['node'])
+ vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
+ vpp_config.add_unix_nodaemon()
+ # We will pop first core from list to be main core
+ vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
+ # if this is not only core in list, the rest will be used as workers.
+ if cpuset_cpus:
+ corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+ vpp_config.add_cpu_corelist_workers(corelist_workers)
+ vpp_config.add_plugin_disable('dpdk_plugin.so')
+ vpp_config.apply_config(filename=kwargs['filename'], restart_vpp=False)
diff --git a/resources/libraries/python/VppConfigGenerator.py b/resources/libraries/python/VppConfigGenerator.py
index 7bd0175b3d..69096491a2 100644
--- a/resources/libraries/python/VppConfigGenerator.py
+++ b/resources/libraries/python/VppConfigGenerator.py
@@ -22,10 +22,12 @@ from resources.libraries.python.topology import Topology
__all__ = ['VppConfigGenerator']
+
class VppConfigGenerator(object):
"""VPP Configuration File Generator."""
def __init__(self):
+ """Initialize library."""
# VPP Node to apply configuration on
self._node = ''
# VPP Hostname
@@ -36,8 +38,6 @@ class VppConfigGenerator(object):
self._vpp_config = ''
# VPP Service name
self._vpp_service_name = 'vpp'
- # VPP Configuration file path
- self._vpp_config_filename = '/etc/vpp/startup.conf'
def set_node(self, node):
"""Set DUT node.
@@ -46,29 +46,12 @@ class VppConfigGenerator(object):
:type node: dict
:raises RuntimeError: If Node type is not DUT.
"""
-
if node['type'] != NodeType.DUT:
raise RuntimeError('Startup config can only be applied to DUT'
'node.')
self._node = node
self._hostname = Topology.get_node_hostname(node)
- def set_config_filename(self, filename):
- """Set startup configuration filename.
-
- :param filename: Startup configuration filename.
- :type filename: str
- """
- self._vpp_config_filename = filename
-
- def get_config_filename(self):
- """Get startup configuration filename.
-
- :returns: Startup configuration filename.
- :rtype: str
- """
- return self._vpp_config_filename
-
def get_config_str(self):
"""Get dumped startup configuration in VPP config format.
@@ -88,11 +71,10 @@ class VppConfigGenerator(object):
:type value: str
:type path: list
"""
-
if len(path) == 1:
config[path[0]] = value
return
- if not config.has_key(path[0]):
+ if path[0] not in config:
config[path[0]] = {}
self.add_config_item(config[path[0]], value, path[1:])
@@ -100,9 +82,9 @@ class VppConfigGenerator(object):
"""Dump the startup configuration in VPP config format.
:param obj: Python Object to print.
- :param nested_level: Nested level for indentation.
+ :param level: Nested level for indentation.
:type obj: Obj
- :type nested_level: int
+ :type level: int
:returns: nothing
"""
indent = ' '
@@ -158,7 +140,6 @@ class VppConfigGenerator(object):
:type devices: tuple
:raises ValueError: If PCI address format is not valid.
"""
-
pattern = re.compile("^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:"
"[0-9A-Fa-f]{2}\\.[0-9A-Fa-f]$")
for device in devices:
@@ -219,7 +200,6 @@ class VppConfigGenerator(object):
path = ['dpdk', 'dev default', 'num-tx-desc']
self.add_config_item(self._nodeconfig, value, path)
-
def add_dpdk_socketmem(self, value):
"""Add DPDK socket memory configuration.
@@ -312,16 +292,21 @@ class VppConfigGenerator(object):
path = ['nat']
self.add_config_item(self._nodeconfig, value, path)
- def apply_config(self, waittime=5, retries=12):
+ def apply_config(self, filename='/etc/vpp/startup.conf', waittime=5,
+ retries=12, restart_vpp=True):
"""Generate and apply VPP configuration for node.
Use data from calls to this class to form a startup.conf file and
replace /etc/vpp/startup.conf with it on node.
+ :param filename: Startup configuration file name.
:param waittime: Time to wait for VPP to restart (default 5 seconds).
:param retries: Number of times (default 12) to re-try waiting.
+ :param restart_vpp: Whether to restart VPP.
+ :type filename: str
:type waittime: int
:type retries: int
+ :type restart_vpp: bool.
:raises RuntimeError: If writing config file failed, or restarting of
VPP failed.
"""
@@ -330,39 +315,37 @@ class VppConfigGenerator(object):
ssh = SSH()
ssh.connect(self._node)
- # We're using this "| sudo tee" construct because redirecting
- # a sudo's output ("sudo echo xxx > /path/to/file") does not
- # work on most platforms...
(ret, _, _) = \
- ssh.exec_command('echo "{0}" | sudo tee {1}'.
- format(self._vpp_config,
- self._vpp_config_filename))
+            ssh.exec_command('echo "{config}" | sudo tee {filename}'.
+ format(config=self._vpp_config,
+ filename=filename))
if ret != 0:
raise RuntimeError('Writing config file failed to node {}'.
format(self._hostname))
- # Instead of restarting, we'll do separate start and stop
- # actions. This way we don't care whether VPP was running
- # to begin with.
- ssh.exec_command('sudo service {} stop'
- .format(self._vpp_service_name))
- (ret, _, _) = \
- ssh.exec_command('sudo service {} start'
+ if restart_vpp:
+ # Instead of restarting, we'll do separate start and stop
+ # actions. This way we don't care whether VPP was running
+ # to begin with.
+ ssh.exec_command('sudo service {} stop'
.format(self._vpp_service_name))
- if ret != 0:
- raise RuntimeError('Restarting VPP failed on node {}'.
- format(self._hostname))
-
- # Sleep <waittime> seconds, up to <retry> times,
- # and verify if VPP is running.
- for _ in range(retries):
- time.sleep(waittime)
(ret, _, _) = \
- ssh.exec_command('echo show hardware-interfaces | '
- 'nc 0 5002 || echo "VPP not yet running"')
- if ret == 0:
- break
- else:
- raise RuntimeError('VPP failed to restart on node {}'.
- format(self._hostname))
+ ssh.exec_command('sudo service {} start'
+ .format(self._vpp_service_name))
+ if ret != 0:
+ raise RuntimeError('Restarting VPP failed on node {}'.
+ format(self._hostname))
+
+ # Sleep <waittime> seconds, up to <retry> times,
+ # and verify if VPP is running.
+ for _ in range(retries):
+ time.sleep(waittime)
+ (ret, _, _) = \
+ ssh.exec_command('echo show hardware-interfaces | '
+ 'nc 0 5002 || echo "VPP not yet running"')
+ if ret == 0:
+ break
+ else:
+ raise RuntimeError('VPP failed to restart on node {}'.
+ format(self._hostname))
diff --git a/resources/libraries/python/constants.py b/resources/libraries/python/constants.py
index a8d40a2a26..01a96a861b 100644
--- a/resources/libraries/python/constants.py
+++ b/resources/libraries/python/constants.py
@@ -35,6 +35,9 @@ class Constants(object):
# QEMU install directory
QEMU_INSTALL_DIR = '/opt/qemu-2.5.0'
+ # Kubernetes templates location
+ RESOURCES_TPL_K8S = 'resources/templates/kubernetes'
+
# Honeycomb directory location at topology nodes:
REMOTE_HC_DIR = '/opt/honeycomb'
diff --git a/resources/libraries/robot/performance/performance_setup.robot b/resources/libraries/robot/performance/performance_setup.robot
index 0dc7f78b45..9713c2268a 100644
--- a/resources/libraries/robot/performance/performance_setup.robot
+++ b/resources/libraries/robot/performance/performance_setup.robot
@@ -13,7 +13,6 @@
*** Settings ***
| Library | resources.libraries.python.DUTSetup
-| Library | resources.libraries.python.VhostUser
| Resource | resources/libraries/robot/performance/performance_configuration.robot
| Resource | resources/libraries/robot/performance/performance_utils.robot
| Documentation | Performance suite keywords - Suite and test setups and
@@ -242,7 +241,6 @@
| | ...
| | [Arguments] | ${topology_type} | ${nic_model}
| | ...
-| | Show vpp version on all DUTs | ${nodes}
| | Set variables in 2-node circular topology with DUT interface model
| | ... | ${nic_model}
| | Initialize traffic generator | ${tg} | ${tg_if1} | ${tg_if2}
@@ -269,7 +267,6 @@
| | [Arguments] | ${topology_type} | ${nic_model} | ${tg_if1_dest_mac}
| | ... | ${tg_if2_dest_mac}
| | ...
-| | Show vpp version on all DUTs | ${nodes}
| | Set variables in 2-node circular topology with DUT interface model
| | ... | ${nic_model}
| | Initialize traffic generator | ${tg} | ${tg_if1} | ${tg_if2}
@@ -294,7 +291,6 @@
| | ...
| | [Arguments] | ${topology_type} | ${nic_model}
| | ...
-| | Show vpp version on all DUTs | ${nodes}
| | Set variables in 3-node circular topology with DUT interface model
| | ... | ${nic_model}
| | Initialize traffic generator | ${tg} | ${tg_if1} | ${tg_if2}
diff --git a/resources/libraries/robot/performance/performance_utils.robot b/resources/libraries/robot/performance/performance_utils.robot
index 884bc4831e..6d6413d966 100644
--- a/resources/libraries/robot/performance/performance_utils.robot
+++ b/resources/libraries/robot/performance/performance_utils.robot
@@ -17,6 +17,7 @@
| Library | resources.libraries.python.NodePath
| Library | resources.libraries.python.DpdkUtil
| Library | resources.libraries.python.InterfaceUtil
+| Library | resources.libraries.python.KubernetesUtils
| Library | resources.libraries.python.VhostUser
| Library | resources.libraries.python.TrafficGenerator
| Library | resources.libraries.python.TrafficGenerator.TGDropRateSearchImpl
diff --git a/resources/libraries/robot/shared/default.robot b/resources/libraries/robot/shared/default.robot
index 250380dcc0..fa291bf1aa 100644
--- a/resources/libraries/robot/shared/default.robot
+++ b/resources/libraries/robot/shared/default.robot
@@ -401,4 +401,4 @@
| | ... | \| Start VPP Service on DUT \| ${nodes['DUT1']} \|
| | ...
| | [Arguments] | ${node}
-| | Start VPP Service | ${node} \ No newline at end of file
+| | Start VPP Service | ${node}
diff --git a/resources/templates/kubernetes/calico_v2.4.1.yaml b/resources/templates/kubernetes/calico_v2.4.1.yaml
new file mode 100644
index 0000000000..921e6923ca
--- /dev/null
+++ b/resources/templates/kubernetes/calico_v2.4.1.yaml
@@ -0,0 +1,387 @@
+# Calico Version v2.4.1
+# https://docs.projectcalico.org/v2.4/releases#v2.4.1
+# This manifest includes the following component versions:
+# calico/node:v2.4.1
+# calico/cni:v1.10.0
+# calico/kube-policy-controller:v0.7.0
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # The location of your etcd cluster. This uses the Service clusterIP
+ # defined below.
+ etcd_endpoints: "http://10.96.232.136:6666"
+
+ # Configure the Calico backend to use.
+ calico_backend: "bird"
+
+ # The CNI network configuration to install on each node.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.1.0",
+ "type": "calico",
+ "etcd_endpoints": "__ETCD_ENDPOINTS__",
+ "log_level": "info",
+ "mtu": 1500,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s",
+ "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
+ "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
+ },
+ "kubernetes": {
+ "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
+ }
+ }
+
+---
+
+# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
+# to force it to run on the master even when the master isn't schedulable, and uses
+# nodeSelector to ensure it only runs on the master.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: calico-etcd
+ namespace: kube-system
+ labels:
+ k8s-app: calico-etcd
+spec:
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-etcd
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ # Only run this pod on the master.
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ hostNetwork: true
+ containers:
+ - name: calico-etcd
+ image: quay.io/coreos/etcd:v3.1.10
+ env:
+ - name: CALICO_ETCD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ command: ["/bin/sh","-c"]
+ args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
+ volumeMounts:
+ - name: var-etcd
+ mountPath: /var/etcd
+ volumes:
+ - name: var-etcd
+ hostPath:
+ path: /var/etcd
+
+---
+
+# This manifest installs the Service which gets traffic to the Calico
+# etcd.
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: calico-etcd
+ name: calico-etcd
+ namespace: kube-system
+spec:
+ # Select the calico-etcd pod running on the master.
+ selector:
+ k8s-app: calico-etcd
+ # This ClusterIP needs to be known in advance, since we cannot rely
+ # on DNS to get access to etcd.
+ clusterIP: 10.96.232.136
+ ports:
+ - port: 6666
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ serviceAccountName: calico-cni-plugin
+ containers:
+ # Runs calico/node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: quay.io/calico/node:v2.4.1
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # Enable BGP. Disable to enforce policy only.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "kubeadm,bgp"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Configure the IP Pool from which Pod IPs will be chosen.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ - name: CALICO_IPV4POOL_IPIP
+ value: "always"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ value: "1440"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: ""
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9099
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ # This container installs the Calico CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: quay.io/calico/cni:v1.10.0
+ command: ["/install-cni.sh"]
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ volumes:
+ # Used by calico/node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+
+---
+
+# This manifest deploys the Calico policy controller on Kubernetes.
+# See https://github.com/projectcalico/k8s-policy
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+ labels:
+ k8s-app: calico-policy
+spec:
+ # The policy controller can only have a single active instance.
+ replicas: 1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+ labels:
+ k8s-app: calico-policy-controller
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ # The policy controller must run in the host network namespace so that
+ # it isn't governed by policy that would prevent it from working.
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ serviceAccountName: calico-policy-controller
+ containers:
+ - name: calico-policy-controller
+ image: quay.io/calico/kube-policy-controller:v0.7.0
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # The location of the Kubernetes API. Use the default Kubernetes
+ # service for API access.
+ - name: K8S_API
+ value: "https://kubernetes.default:443"
+ # Since we're running in the host namespace and might not have KubeDNS
+ # access, configure the container's /etc/hosts to resolve
+ # kubernetes.default to the correct service clusterIP.
+ - name: CONFIGURE_ETC_HOSTS
+ value: "true"
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-cni-plugin
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-cni-plugin
+subjects:
+- kind: ServiceAccount
+ name: calico-cni-plugin
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-cni-plugin
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-cni-plugin
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-policy-controller
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-policy-controller
+subjects:
+- kind: ServiceAccount
+ name: calico-policy-controller
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ - extensions
+ resources:
+ - pods
+ - namespaces
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-policy-controller
+ namespace: kube-system
diff --git a/resources/templates/kubernetes/csit.yaml b/resources/templates/kubernetes/csit.yaml
new file mode 100644
index 0000000000..4ae72063e7
--- /dev/null
+++ b/resources/templates/kubernetes/csit.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: csit
diff --git a/resources/templates/kubernetes/etcd.yaml b/resources/templates/kubernetes/etcd.yaml
new file mode 100644
index 0000000000..66c1a57fad
--- /dev/null
+++ b/resources/templates/kubernetes/etcd.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: etcdv3-server
+ namespace: csit
+spec:
+ hostNetwork: true
+ containers:
+ - image: quay.io/coreos/etcd:v3.0.16
+ name: etcdv3
+ command:
+ - /usr/local/bin/etcd
+ - --advertise-client-urls
+ - http://0.0.0.0:22379
+ - --listen-client-urls
+ - http://0.0.0.0:22379
+ - --listen-peer-urls
+ - http://0.0.0.0:22380
+ ports:
+ - containerPort: 22379
+ hostPort: 22379
+ name: serverport
+ env:
+ - name: ETCDCTL_API
+ value: "3"
diff --git a/resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml b/resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml
new file mode 100644
index 0000000000..726e9734ab
--- /dev/null
+++ b/resources/templates/kubernetes/eth-l2bdbase-eth-2memif-1vnf.yaml
@@ -0,0 +1,206 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sfc-controller-cfg
+ namespace: csit
+data:
+ etcd.conf:
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ sfc.conf:
+ sfc_controller_config_version: 1
+ description: $$TEST_NAME$$
+ host_entities:
+ - name: vswitch
+ sfc_entities:
+ - name: vswitch-vnf1
+ description: vswitch to VNF1 - memif
+ type: 3
+ elements:
+ - container: vswitch
+ port_label: $$VSWITCH_IF1$$
+ etcd_vpp_switch_key: vswitch
+ type: 5
+ - container: vnf1
+ port_label: port1
+ etcd_vpp_switch_key: vswitch
+ type: 2
+ - name: vnf1-vswitch
+ description: VNF1 to vswitch - memif
+ type: 3
+ elements:
+ - container: vswitch
+ port_label: $$VSWITCH_IF2$$
+ etcd_vpp_switch_key: vswitch
+ type: 5
+ - container: vnf1
+ port_label: port2
+ etcd_vpp_switch_key: vswitch
+ type: 2
+
+ vnf.conf:
+ vnf_plugin_config_version: 1
+ description: VNF config
+ vnf_entities:
+ - name: vnf1
+ container: vnf1
+ l2xconnects:
+ - port_labels:
+ - port1
+ - port2
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sfc-controller
+ namespace: csit
+spec:
+ containers:
+ - name: "sfc-controller"
+ image: prod_sfc_controller
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sfc-controller
+ - -etcdv3-config=/opt/sfc-controller/dev/etcd.conf
+ - -sfc-config=/opt/sfc-controller/dev/sfc.conf
+ - -vnf-config=/opt/sfc-controller/dev/vnf.conf
+ volumeMounts:
+ - name: controller-config
+ mountPath: /opt/sfc-controller/dev
+ volumes:
+ - name: controller-config
+ configMap:
+ name: sfc-controller-cfg
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vswitch-agent-cfg
+ namespace: csit
+data:
+ etcd.conf:
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ kafka.conf:
+ addrs:
+ - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vswitch-vpp
+ namespace: csit
+spec:
+ hostNetwork: true
+ containers:
+ - name: "vswitch"
+ image: prod_vpp_agent_shrink
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 5002
+ - containerPort: 9191
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 1
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ periodSeconds: 1
+ initialDelaySeconds: 15
+ env:
+ - name: MICROSERVICE_LABEL
+ value: vswitch
+ volumeMounts:
+ - name: vpp-config
+ mountPath: /etc/vpp
+ - name: agent-config
+ mountPath: /opt/vpp-agent/dev
+ - name: memif-sockets
+ mountPath: /tmp
+ volumes:
+ - name: vpp-config
+ configMap:
+ name: vswitch-vpp-cfg
+ - name: agent-config
+ configMap:
+ name: vswitch-agent-cfg
+ - name: memif-sockets
+ hostPath:
+ path: /tmp
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vnf-agent-cfg
+ namespace: csit
+data:
+ etcd.conf:
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ kafka.conf:
+ addrs:
+ - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vnf-vpp
+ namespace: csit
+spec:
+ containers:
+ - name: "vnf"
+ image: prod_vpp_agent_shrink
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 5002
+ - containerPort: 9191
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 1
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ initialDelaySeconds: 15
+ env:
+ - name: MICROSERVICE_LABEL
+ value: vnf1
+ volumeMounts:
+ - name: vpp-config
+ mountPath: /etc/vpp
+ - name: agent-config
+ mountPath: /opt/vpp-agent/dev
+ - name: memif-sockets
+ mountPath: /tmp
+ volumes:
+ - name: vpp-config
+ configMap:
+ name: vnf-vpp-cfg
+ - name: agent-config
+ configMap:
+ name: vnf-agent-cfg
+ - name: memif-sockets
+ hostPath:
+ path: /tmp
diff --git a/resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml b/resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml
new file mode 100644
index 0000000000..7514eeb181
--- /dev/null
+++ b/resources/templates/kubernetes/eth-l2xcbase-eth-2memif-1vnf.yaml
@@ -0,0 +1,206 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sfc-controller-cfg
+ namespace: csit
+data:
+ etcd.conf:
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ sfc.conf:
+ sfc_controller_config_version: 1
+ description: $$TEST_NAME$$
+ host_entities:
+ - name: vswitch
+ sfc_entities:
+ - name: vswitch-vnf1
+ description: vswitch to VNF1 - memif
+ type: 4
+ elements:
+ - container: vswitch
+ port_label: $$VSWITCH_IF1$$
+ etcd_vpp_switch_key: vswitch
+ type: 5
+ - container: vnf1
+ port_label: port1
+ etcd_vpp_switch_key: vswitch
+ type: 2
+ - name: vnf1-vswitch
+ description: VNF1 to vswitch - memif
+ type: 4
+ elements:
+ - container: vswitch
+ port_label: $$VSWITCH_IF2$$
+ etcd_vpp_switch_key: vswitch
+ type: 5
+ - container: vnf1
+ port_label: port2
+ etcd_vpp_switch_key: vswitch
+ type: 2
+
+ vnf.conf:
+ vnf_plugin_config_version: 1
+ description: VNF config
+ vnf_entities:
+ - name: vnf1
+ container: vnf1
+ l2xconnects:
+ - port_labels:
+ - port1
+ - port2
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sfc-controller
+ namespace: csit
+spec:
+ containers:
+ - name: "sfc-controller"
+ image: prod_sfc_controller
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sfc-controller
+ - -etcdv3-config=/opt/sfc-controller/dev/etcd.conf
+ - -sfc-config=/opt/sfc-controller/dev/sfc.conf
+ - -vnf-config=/opt/sfc-controller/dev/vnf.conf
+ volumeMounts:
+ - name: controller-config
+ mountPath: /opt/sfc-controller/dev
+ volumes:
+ - name: controller-config
+ configMap:
+ name: sfc-controller-cfg
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vswitch-agent-cfg
+ namespace: csit
+data:
+ etcd.conf:
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ kafka.conf:
+ addrs:
+ - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vswitch-vpp
+ namespace: csit
+spec:
+ hostNetwork: true
+ containers:
+ - name: "vswitch"
+ image: prod_vpp_agent_shrink
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 5002
+ - containerPort: 9191
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 1
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ periodSeconds: 1
+ initialDelaySeconds: 15
+ env:
+ - name: MICROSERVICE_LABEL
+ value: vswitch
+ volumeMounts:
+ - name: vpp-config
+ mountPath: /etc/vpp
+ - name: agent-config
+ mountPath: /opt/vpp-agent/dev
+ - name: memif-sockets
+ mountPath: /tmp
+ volumes:
+ - name: vpp-config
+ configMap:
+ name: vswitch-vpp-cfg
+ - name: agent-config
+ configMap:
+ name: vswitch-agent-cfg
+ - name: memif-sockets
+ hostPath:
+ path: /tmp
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vnf-agent-cfg
+ namespace: csit
+data:
+ etcd.conf:
+ insecure-transport: true
+ dial-timeout: 1000000000
+ endpoints:
+ - "172.17.0.1:22379"
+
+ kafka.conf:
+ addrs:
+ - "172.17.0.1:9092"
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vnf-vpp
+ namespace: csit
+spec:
+ containers:
+ - name: "vnf"
+ image: prod_vpp_agent_shrink
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 5002
+ - containerPort: 9191
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 1
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ initialDelaySeconds: 15
+ env:
+ - name: MICROSERVICE_LABEL
+ value: vnf1
+ volumeMounts:
+ - name: vpp-config
+ mountPath: /etc/vpp
+ - name: agent-config
+ mountPath: /opt/vpp-agent/dev
+ - name: memif-sockets
+ mountPath: /tmp
+ volumes:
+ - name: vpp-config
+ configMap:
+ name: vnf-vpp-cfg
+ - name: agent-config
+ configMap:
+ name: vnf-agent-cfg
+ - name: memif-sockets
+ hostPath:
+ path: /tmp
diff --git a/resources/templates/kubernetes/kafka.yaml b/resources/templates/kubernetes/kafka.yaml
new file mode 100644
index 0000000000..55d165f31b
--- /dev/null
+++ b/resources/templates/kubernetes/kafka.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kafka-server
+ namespace: csit
+spec:
+ hostNetwork: true
+ containers:
+ - image: spotify/kafka
+ name: kafka
+ ports:
+ - containerPort: 2181
+ hostPort: 2181
+ name: zookeeper
+ - containerPort: 9092
+ hostPort: 9092
+ name: kafka
+ env:
+ - name: ADVERTISED_HOST
+ value: "172.17.0.1"
+ - name: ADVERTISED_PORT
+ value: "9092"
diff --git a/resources/tools/scripts/topo_container_copy.py b/resources/tools/scripts/topo_container_copy.py
new file mode 100644
index 0000000000..d243182236
--- /dev/null
+++ b/resources/tools/scripts/topo_container_copy.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This script provides copying and loading of Docker container images.
+   All DUT nodes from the topology file are used as destinations."""
+
+import sys
+import argparse
+from yaml import load
+
+from resources.libraries.python.ssh import SSH
+
+
+def ssh_no_error(ssh, cmd, sudo=False):
+    """Execute a command over an ssh channel; log the output and raise
+    RuntimeError if the command fails.
+
+ :param ssh: SSH() object connected to a node.
+ :param cmd: Command line to execute on remote node.
+ :param sudo: Run command with sudo privileges.
+ :type ssh: SSH() object
+ :type cmd: str
+ :type sudo: bool
+ :returns: stdout from the SSH command.
+ :rtype: str
+    :raises RuntimeError: In case of unexpected ssh command failure.
+ """
+ if sudo:
+ ret, stdo, stde = ssh.exec_command_sudo(cmd, timeout=60)
+ else:
+ ret, stdo, stde = ssh.exec_command(cmd, timeout=60)
+
+ if ret != 0:
+ print('Command execution failed: "{}"'.format(cmd))
+ print('stdout: {0}'.format(stdo))
+ print('stderr: {0}'.format(stde))
+ raise RuntimeError('Unexpected ssh command failure')
+
+ return stdo
+
+
+def ssh_ignore_error(ssh, cmd, sudo=False):
+ """Execute a command over ssh channel, ignore errors.
+
+ :param ssh: SSH() object connected to a node.
+ :param cmd: Command line to execute on remote node.
+ :param sudo: Run command with sudo privileges.
+ :type ssh: SSH() object
+ :type cmd: str
+ :type sudo: bool
+ :returns: stdout from the SSH command.
+ :rtype: str
+ """
+ if sudo:
+ ret, stdo, stde = ssh.exec_command_sudo(cmd)
+ else:
+ ret, stdo, stde = ssh.exec_command(cmd)
+
+ if ret != 0:
+ print('Command execution failed: "{}"'.format(cmd))
+ print('stdout: {0}'.format(stdo))
+ print('stderr: {0}'.format(stde))
+
+ return stdo
+
+
+def main():
+    """Copy Docker images to all DUT nodes from the topology and load them."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-t", "--topo", required=True,
+ help="Topology file")
+ parser.add_argument("-d", "--directory", required=True,
+ help="Destination directory")
+ parser.add_argument("-i", "--images", required=False, nargs='+',
+ help="Images paths to copy")
+ parser.add_argument("-c", "--cancel", help="Cancel all",
+ action="store_true")
+
+ args = parser.parse_args()
+ topology_file = args.topo
+ images = args.images
+ directory = args.directory
+ cancel_all = args.cancel
+
+ work_file = open(topology_file)
+ topology = load(work_file.read())['nodes']
+
+ ssh = SSH()
+ for node in topology:
+ if topology[node]['type'] == "DUT":
+ print("###TI host: {host}".format(host=topology[node]['host']))
+ ssh.connect(topology[node])
+
+ if cancel_all:
+ # Remove destination directory on DUT
+ cmd = "rm -r {directory}".format(directory=directory)
+ stdout = ssh_ignore_error(ssh, cmd)
+ print("###TI {stdout}".format(stdout=stdout))
+
+ else:
+ # Create installation directory on DUT
+ cmd = "rm -r {directory}; mkdir {directory}"\
+ .format(directory=directory)
+ stdout = ssh_no_error(ssh, cmd)
+ print("###TI {stdout}".format(stdout=stdout))
+
+ # Copy images from local path to destination dir
+ for image in images:
+ print("###TI scp: {}".format(image))
+ ssh.scp(local_path=image, remote_path=directory)
+
+ # Load image to Docker.
+ cmd = "for f in {directory}/*.tar.gz; do zcat $f | "\
+ "sudo docker load; done".format(directory=directory)
+ stdout = ssh_no_error(ssh, cmd)
+ print("###TI {}".format(stdout))
+
+
+if __name__ == "__main__":
+ sys.exit(main())