Diffstat (limited to 'resources/libraries')
-rw-r--r--  resources/libraries/bash/config/config                            5
-rw-r--r--  resources/libraries/bash/config/defaults                         12
-rwxr-xr-x  resources/libraries/bash/k8s_setup.sh                            86
-rw-r--r--  resources/libraries/bash/shell/dpdk_utils.sh                     92
-rw-r--r--  resources/libraries/bash/shell/k8s_utils.sh                     125
-rw-r--r--  resources/libraries/bash/shell/qemu_utils.sh                     81
-rw-r--r--  resources/libraries/python/KubernetesUtils.py                   306
-rw-r--r--  resources/libraries/robot/performance/performance_setup.robot    26
8 files changed, 580 insertions(+), 153 deletions(-)
diff --git a/resources/libraries/bash/config/config b/resources/libraries/bash/config/config
new file mode 100644
index 0000000000..2b71edcc49
--- /dev/null
+++ b/resources/libraries/bash/config/config
@@ -0,0 +1,5 @@
+QEMU_INSTALL_DIR=/opt
+QEMU_INSTALL_VERSION=qemu-2.5.0
+
+DPDK_INSTALL_DIR=/opt
+DPDK_INSTALL_VERSION=dpdk-17.11
\ No newline at end of file
diff --git a/resources/libraries/bash/config/defaults b/resources/libraries/bash/config/defaults
new file mode 100644
index 0000000000..1547452982
--- /dev/null
+++ b/resources/libraries/bash/config/defaults
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+typeset -A cfg
+cfg=( # set default values in config array
+ [QEMU_INSTALL_DIR]="/opt"
+ [QEMU_INSTALL_VERSION]="qemu-2.5.0"
+ [DPDK_INSTALL_DIR]=/opt
+ [DPDK_INSTALL_VERSION]=dpdk-17.11
+ [K8S_CALICO]="${SCRIPT_DIR}/../../templates/kubernetes/calico_v2.6.3.yaml"
+ [K8S_CONTIV_VPP]="https://raw.githubusercontent.com/contiv/vpp/master/k8s/contiv-vpp.yaml"
+ [K8S_CSIT]="${SCRIPT_DIR}/../../templates/kubernetes/csit.yaml"
+)
\ No newline at end of file
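
Note: the associative array above only supplies defaults; k8s_setup.sh re-reads bash/config/config and overwrites any matching key line by line. A minimal standalone sketch of that override pattern (SCRIPT_DIR assumed to point at resources/libraries/bash):

    #!/bin/bash
    SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
    source ${SCRIPT_DIR}/config/defaults   # populates cfg[...] with defaults
    # Any KEY=VALUE line in config/config overrides the corresponding default
    while read line; do
        if echo $line | grep -F = &>/dev/null; then
            varname=$(echo "$line" | cut -d '=' -f 1)
            cfg[$varname]=$(echo "$line" | cut -d '=' -f 2-)
        fi
    done < ${SCRIPT_DIR}/config/config
    echo "Using ${cfg[DPDK_INSTALL_VERSION]} in ${cfg[DPDK_INSTALL_DIR]}"
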
diff --git a/resources/libraries/bash/k8s_setup.sh b/resources/libraries/bash/k8s_setup.sh
index 0649c711c6..f9f6c61ec7 100755
--- a/resources/libraries/bash/k8s_setup.sh
+++ b/resources/libraries/bash/k8s_setup.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -16,39 +16,59 @@ set -xo pipefail
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-K8S_CALICO="${SCRIPT_DIR}/../../templates/kubernetes/calico_v2.4.1.yaml"
-K8S_CSIT="${SCRIPT_DIR}/../../templates/kubernetes/csit.yaml"
+# Include
+source ${SCRIPT_DIR}/config/defaults
+source ${SCRIPT_DIR}/shell/dpdk_utils.sh
+source ${SCRIPT_DIR}/shell/k8s_utils.sh
-trap "sudo kubeadm reset && sudo rm -rf $HOME/.kube" ERR
+# Read configuration
+while read line
+do
+ if echo $line | grep -F = &>/dev/null
+ then
+ varname=$(echo "$line" | cut -d '=' -f 1)
+ cfg[$varname]=$(echo "$line" | cut -d '=' -f 2-)
+ fi
+done < ${SCRIPT_DIR}/config/config
-# Revert any changes made to this host by 'kubeadm init' or 'kubeadm join'
-sudo kubeadm reset && sudo rm -rf $HOME/.kube || \
- { echo "Failed to reset kubeadm"; exit 1; }
+trap "k8s_utils.destroy" ERR
-# Ret up the Kubernetes master
-sudo -E kubeadm init --token-ttl 0 --pod-network-cidr=192.168.0.0/16 || \
- { echo "Failed to init kubeadm"; exit 1; }
+case "$1" in
+ prepare)
+ # Revert any changes made to this host by 'kubeadm init'
+ k8s_utils.destroy
+ # Sets up the Kubernetes master
+ k8s_utils.prepare
+ ;;
+ deploy_calico)
+ # Revert any changes made to this host by 'kubeadm init'
+ k8s_utils.destroy
+ # Load kernel modules uio/uio_pci_generic
+ dpdk_utils.load_modules
+ # Sets up the Kubernetes master
+ k8s_utils.prepare "--pod-network-cidr=192.168.0.0/16"
+ # Apply resources
+ k8s_utils.calico_deploy ${cfg[K8S_CALICO]}
+ # Dump Kubernetes objects ...
+ k8s_utils.dump_all
+ ;;
+ affinity_non_vpp)
+ # Set affinity for all non VPP docker containers to CPU 0
+ k8s_utils.affinity_non_vpp
+ ;;
+ destroy)
+ # Revert any changes made to this host by 'kubeadm init'
+ k8s_utils.destroy
+ ;;
+ *)
+ echo "usage: $0 function"
+ echo "function:"
+ echo " prepare"
+ echo " deploy_calico"
+ echo " affinity_non_vpp"
+ echo " destroy"
+ exit 1
+esac
+shift
-# Make cgroup non-exclusive for CPU and MEM
-sudo cgset -r cpuset.cpu_exclusive=0 /kubepods
-sudo cgset -r cpuset.mem_exclusive=0 /kubepods
-
-rm -rf $HOME/.kube
-mkdir -p $HOME/.kube
-sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
-sudo chown $(id -u):$(id -g) $HOME/.kube/config
-
-# Apply resources
-kubectl apply -f ${K8S_CALICO} || \
- { echo "Failed to apply Calico resources"; exit 1; }
-kubectl apply -f ${K8S_CSIT} || \
- { echo "Failed to apply CSIT resource"; exit 1; }
-
-# Update the taints
-kubectl taint nodes --all node-role.kubernetes.io/master- || \
- { echo "Failed to taint nodes"; exit 1; }
-
-# Dump Kubernetes objects ...
-kubectl get all --all-namespaces
-
-echo Kubernetes is ready
+echo Kubernetes setup finished
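
With the dispatcher above, the script is driven by a single positional argument; a typical sequence on a DUT (assuming kubeadm, kubectl and cgroup-tools are already installed, which the patch does not verify) would be:

    # Clean slate, master init with Calico networking, then dump the objects
    ./resources/libraries/bash/k8s_setup.sh deploy_calico

    # Pin all non-VPP containers to CPU 0 once PODs are scheduled
    ./resources/libraries/bash/k8s_setup.sh affinity_non_vpp

    # Revert 'kubeadm init'/'kubeadm join' when the test run is finished
    ./resources/libraries/bash/k8s_setup.sh destroy
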
diff --git a/resources/libraries/bash/shell/dpdk_utils.sh b/resources/libraries/bash/shell/dpdk_utils.sh
new file mode 100644
index 0000000000..9f81aaabce
--- /dev/null
+++ b/resources/libraries/bash/shell/dpdk_utils.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function dpdk_utils.dpdk_delete {
+ # Deletes the DPDK directory
+ # DPDK install directory
+ dpdk_install_dir=$1
+ # DPDK install version
+ dpdk_install_ver=$2
+
+ [ -d ${dpdk_install_dir}/${dpdk_install_ver} ] && \
+ sudo rm -r ${dpdk_install_dir}/${dpdk_install_ver} && \
+ echo "${dpdk_install_dir}/${dpdk_install_ver} removed"
+}
+
+function dpdk_utils.dpdk_install {
+ # Downloads and installs DPDK
+ # DPDK install directory
+ dpdk_install_dir=$1
+ # DPDK install version
+ dpdk_install_ver=$2
+ # DPDK compile target
+ dpdk_target=x86_64-native-linuxapp-gcc
+ # Force install (if true then remove previous installation; default false)
+ force_install=${3:-false}
+
+ if [ "$force_install" = true ]; then
+ # Cleanup DPDK DIR
+ dpdk_utils.dpdk_delete ${dpdk_install_dir} ${dpdk_install_ver}
+ else
+ # Test if DPDK was installed previously
+ test -d ${dpdk_install_dir}/${dpdk_install_ver} && \
+ { echo "DPDK ${dpdk_install_ver} ready"; exit 0; }
+ fi
+
+ # Download the DPDK package if no local copy exists
+ if [ ! -f ${dpdk_install_dir}/${dpdk_install_ver}.tar.xz ]; then
+ sudo wget -e use_proxy=yes -P ${dpdk_install_dir} -q \
+ fast.dpdk.org/rel/${dpdk_install_ver}.tar.xz || \
+ { echo "Failed to download ${dpdk_install_ver}"; exit 1; }
+ fi
+
+ # Create DPDK install dir if not exists and extract
+ sudo mkdir -p ${dpdk_install_dir} || \
+ { echo "Failed to create ${dpdk_install_dir}"; exit 1; }
+ sudo tar -xJf ${dpdk_install_dir}/${dpdk_install_ver}.tar.xz \
+ -C ${dpdk_install_dir} || \
+ { echo "Failed to extract ${dpdk_install_ver}.tar.xz"; exit 1; }
+
+ cd ${dpdk_install_dir}/${dpdk_install_ver}
+
+ # Compile and install the DPDK
+ sudo make install T=${dpdk_target} -j DESTDIR=install || \
+ { echo "Installation of ${dpdk_install_ver} failed"; exit 1; }
+
+ echo "DPDK ${dpdk_install_ver} ready"
+}
+
+function dpdk_utils.load_modules {
+ # Loads kernel modules and bind interfaces to drivers
+ # Use igb_uio [true|false]
+ use_igb_uio=${1:-false}
+ # DPDK install directory
+ dpdk_install_dir=$2
+ # DPDK install version
+ dpdk_install_ver=$3
+
+ sudo modprobe uio
+ sudo modprobe uio_pci_generic
+
+ if [ "${use_igb_uio}" = true ]; then
+ sudo rmmod igb_uio
+ # Try to insert IGB_UIO module
+ sudo insmod ${dpdk_install_dir}/${dpdk_install_ver}/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+ # If failed then download/compile DPDK
+ if [ $? -ne 0 ]; then
+ dpdk_utils.dpdk_install ${dpdk_install_dir} ${dpdk_install_ver} true
+ sudo insmod ${dpdk_install_dir}/${dpdk_install_ver}/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+ fi
+ fi
+}
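
The DPDK helpers can also be used on their own; a usage sketch assuming the repository root as working directory and the defaults from bash/config/defaults:

    source resources/libraries/bash/shell/dpdk_utils.sh

    # Download, extract and build dpdk-17.11 under /opt, forcing a clean rebuild
    dpdk_utils.dpdk_install /opt dpdk-17.11 true

    # Load uio/uio_pci_generic only; pass true to additionally try igb_uio
    dpdk_utils.load_modules false /opt dpdk-17.11

Note that the helpers call exit on failure, so they are intended to run from a wrapper script (as k8s_setup.sh does) rather than an interactive shell.
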
diff --git a/resources/libraries/bash/shell/k8s_utils.sh b/resources/libraries/bash/shell/k8s_utils.sh
new file mode 100644
index 0000000000..55d3d815c9
--- /dev/null
+++ b/resources/libraries/bash/shell/k8s_utils.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function k8s_utils.destroy {
+ # Destroy existing Kubernetes deployment
+ kubectl drain $HOSTNAME --delete-local-data --force --ignore-daemonsets
+ kubectl delete node $HOSTNAME
+
+ # Revert any changes made to this host by 'kubeadm init' or 'kubeadm join'
+ sudo kubeadm reset && sudo rm -rf $HOME/.kube || \
+ { echo "Failed to reset kubeadm"; exit 1; }
+}
+
+function k8s_utils.prepare {
+ # Sets up the Kubernetes master
+
+ # Disable swap
+ sudo swapoff --all
+
+ # Set up the Kubernetes master
+ sudo -E kubeadm init --token-ttl 0 ${1} || \
+ { echo "Failed to init kubeadm"; exit 1; }
+
+ # Make cgroup non-exclusive for CPU and MEM
+ sudo cgset -r cpuset.cpu_exclusive=0 /kubepods
+ sudo cgset -r cpuset.mem_exclusive=0 /kubepods
+
+ rm -rf $HOME/.kube
+ mkdir -p $HOME/.kube
+ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+ sudo chown $(id -u):$(id -g) $HOME/.kube/config
+}
+
+function k8s_utils.taint {
+ # Updates the taints
+ kubectl taint nodes --all node-role.kubernetes.io/master- || \
+ { echo "Failed to taint nodes"; exit 1; }
+}
+
+function k8s_utils.calico_deploy {
+ # Calico yaml URL or file
+ k8s_calico=$1
+
+ # Apply resources
+ kubectl apply -f ${k8s_calico} || \
+ { echo "Failed to apply ${k8s_calico}"; exit 1; }
+
+ # Update the taints
+ k8s_utils.taint
+}
+
+function k8s_utils.contiv_vpp_deploy {
+ # Contiv yaml URL or file
+ k8s_contiv=$1
+ k8s_contiv_patch="kubecon.contiv-vpp-yaml-patch.diff"
+
+ # Pull the most recent Docker images
+ bash <(curl -s https://raw.githubusercontent.com/contiv/vpp/master/k8s/pull-images.sh)
+
+ # Apply resources
+ wget ${k8s_contiv}
+ patch contiv-vpp.yaml -i ${k8s_contiv_patch} -o - | kubectl apply -f - || \
+ { echo "Failed to apply Contiv resources"; exit 1; }
+ rm contiv-vpp.yaml
+
+ # Update the taints
+ k8s_utils.taint
+}
+
+function k8s_utils.cri_shim_install {
+ # Install the CRI Shim on host
+ sudo su root -c 'bash <(curl -s https://raw.githubusercontent.com/contiv/vpp/master/k8s/cri-install.sh)'
+}
+
+function k8s_utils.cri_shim_uninstall {
+ # Uninstall the CRI Shim on host
+ sudo su root -c 'bash <(curl -s https://raw.githubusercontent.com/contiv/vpp/master/k8s/cri-install.sh) --uninstall'
+}
+
+function k8s_utils.kube_proxy_install {
+ # Installing custom version of Kube-Proxy to enable Kubernetes services
+ bash <(curl -s https://raw.githubusercontent.com/contiv/vpp/master/k8s/proxy-install.sh)
+}
+
+function k8s_utils.apply {
+ # Resource yaml URL or file
+ k8s_resource=$1
+
+ # Apply resources
+ kubectl apply -f ${k8s_resource} || \
+ { echo "Failed to apply ${k8s_resource}"; exit 1; }
+}
+
+function k8s_utils.resource_delete {
+ # Resource yaml URL or file
+ k8s_resource=$1
+
+ # Delete resources
+ kubectl delete -f ${k8s_resource} || \
+ { echo "Failed to delete ${k8s_resource}"; exit 1; }
+}
+
+function k8s_utils.affinity_non_vpp {
+ # Set affinity for all non VPP docker containers to CPU 0
+ for i in `sudo docker ps --format "{{.ID}} {{.Names}}" | grep -v vpp | cut -d' ' -f1`; do
+ sudo docker update --cpuset-cpus 0 ${i}
+ done
+}
+
+function k8s_utils.dump_all {
+ # Dumps the kubernetes objects
+ kubectl get all --all-namespaces
+ kubectl describe nodes
+}
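
The same primitives can be driven without k8s_setup.sh; a minimal sketch using the template paths shipped in this patch (repository root assumed as working directory):

    source resources/libraries/bash/shell/k8s_utils.sh

    # Single-node master with a POD network CIDR, then Calico and the CSIT resources
    k8s_utils.prepare "--pod-network-cidr=192.168.0.0/16"
    k8s_utils.calico_deploy resources/templates/kubernetes/calico_v2.6.3.yaml
    k8s_utils.apply resources/templates/kubernetes/csit.yaml

    # Inspect what is running, then tear everything down again
    k8s_utils.dump_all
    k8s_utils.destroy
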
diff --git a/resources/libraries/bash/shell/qemu_utils.sh b/resources/libraries/bash/shell/qemu_utils.sh
new file mode 100644
index 0000000000..510d9f2838
--- /dev/null
+++ b/resources/libraries/bash/shell/qemu_utils.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function qemu_utils.qemu_delete {
+ # Deletes the QEMU directory
+ # QEMU install directory
+ qemu_install_dir=$1
+ # QEMU install version
+ qemu_install_ver=$2
+
+ [ -d ${qemu_install_dir}/${qemu_install_ver} ] && \
+ sudo rm -r ${qemu_install_dir}/${qemu_install_ver} && \
+ echo "${qemu_install_dir}/${qemu_install_ver} removed"
+}
+
+function qemu_utils.qemu_install {
+ # Downloads and installs QEMU
+ # QEMU install directory
+ qemu_install_dir=$1
+ # QEMU install version
+ qemu_install_ver=$2
+ # QEMU patch
+ qemu_patch=$3
+ # Force install (if true then remove previous installation; default false)
+ force_install=${4:-false}
+ # QEMU repo URL
+ qemu_package_url="http://download.qemu-project.org/${qemu_install_ver}.tar.xz"
+
+  if [ "$force_install" = true ]; then
+ # Cleanup QEMU dir
+ qemu_utils.qemu_delete $qemu_install_dir $qemu_install_ver
+ else
+ # Test if QEMU was installed previously
+    test -d ${qemu_install_dir}/${qemu_install_ver} && \
+      { echo "QEMU already installed: ${qemu_install_dir}/${qemu_install_ver}"; exit 0; }
+ fi
+
+ tmp_dir=$(mktemp -d) || \
+ { echo "Failed to create temporary working dir"; exit 1; }
+ trap "rm -r ${tmp_dir}" EXIT
+
+ # Download QEMU source code if no local copy exists
+ if [ ! -f /opt/${qemu_install_ver}.tar.xz ]; then
+ sudo wget -e use_proxy=yes -P /opt -q ${qemu_package_url} || \
+ { echo "Failed to download ${qemu_install_ver}"; exit 1; }
+ fi
+  tar --strip-components 1 -xvJf /opt/${qemu_install_ver}.tar.xz -C ${tmp_dir} || \
+    { echo "Failed to extract ${qemu_install_ver}.tar.xz"; exit 1; }
+
+ cd ${tmp_dir}
+ sudo mkdir -p ${qemu_install_dir} || \
+ { echo "Failed to create ${qemu_install_dir}"; exit 1; }
+
+ # Apply additional patches
+ if [ $qemu_patch ]
+ then
+ chmod +x ${SCRIPT_DIR}/qemu_patches/${qemu_install_ver}/*
+ run-parts --verbose --report ${SCRIPT_DIR}/qemu_patches/${qemu_install_ver}
+ fi
+
+ # Build
+ sudo ./configure --target-list=x86_64-softmmu --prefix=${qemu_install_dir}/${qemu_install_ver} || \
+ { echo "Failed to configure ${qemu_install_ver}"; exit 1; }
+ sudo make -j`nproc` || \
+ { echo "Failed to compile ${qemu_install_ver}"; exit 1; }
+ sudo make install || \
+ { echo "Failed to install ${qemu_install_ver}"; exit 1; }
+
+ echo "QEMU ${qemu_install_ver} ready"
+}
\ No newline at end of file
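
Usage sketch for the QEMU helper; the patch argument is optional and, when set, expects the patch series under ${SCRIPT_DIR}/qemu_patches/<version>/ on the calling side:

    source resources/libraries/bash/shell/qemu_utils.sh

    # Build qemu-2.5.0 into /opt/qemu-2.5.0, reusing an existing installation if present
    qemu_utils.qemu_install /opt qemu-2.5.0

    # Apply the per-version patch series and force a clean rebuild
    qemu_utils.qemu_install /opt qemu-2.5.0 true true
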
diff --git a/resources/libraries/python/KubernetesUtils.py b/resources/libraries/python/KubernetesUtils.py
index bcbb7f5dff..ceeab0b07e 100644
--- a/resources/libraries/python/KubernetesUtils.py
+++ b/resources/libraries/python/KubernetesUtils.py
@@ -13,8 +13,7 @@
"""Library to control Kubernetes kubectl."""
-import time
-import yaml
+from time import sleep
from resources.libraries.python.constants import Constants
from resources.libraries.python.topology import NodeType
@@ -24,6 +23,8 @@ from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
__all__ = ["KubernetesUtils"]
+# Maximum number of retries to check if PODs are running or deleted.
+MAX_RETRY = 48
class KubernetesUtils(object):
"""Kubernetes utilities class."""
@@ -43,9 +44,10 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- cmd = '{dir}/{lib}/k8s_setup.sh '.format(dir=Constants.REMOTE_FW_DIR,
- lib=Constants.RESOURCES_LIB_SH)
- (ret_code, _, _) = ssh.exec_command(cmd, timeout=120)
+ cmd = '{dir}/{lib}/k8s_setup.sh deploy_calico'\
+ .format(dir=Constants.REMOTE_FW_DIR,
+ lib=Constants.RESOURCES_LIB_SH)
+ (ret_code, _, _) = ssh.exec_command(cmd, timeout=240)
if int(ret_code) != 0:
raise RuntimeError('Failed to setup Kubernetes on {node}.'
.format(node=node['host']))
@@ -65,6 +67,36 @@ class KubernetesUtils(object):
KubernetesUtils.setup_kubernetes_on_node(node)
@staticmethod
+ def destroy_kubernetes_on_node(node):
+ """Destroy Kubernetes on node.
+
+ :param node: DUT node.
+ :type node: dict
+ :raises RuntimeError: If destroying Kubernetes failed.
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ cmd = '{dir}/{lib}/k8s_setup.sh destroy'\
+ .format(dir=Constants.REMOTE_FW_DIR,
+ lib=Constants.RESOURCES_LIB_SH)
+ (ret_code, _, _) = ssh.exec_command(cmd, timeout=120)
+ if int(ret_code) != 0:
+ raise RuntimeError('Failed to destroy Kubernetes on {node}.'
+ .format(node=node['host']))
+
+ @staticmethod
+ def destroy_kubernetes_on_all_duts(nodes):
+ """Destroy Kubernetes on all DUTs.
+
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ KubernetesUtils.destroy_kubernetes_on_node(node)
+
+ @staticmethod
def apply_kubernetes_resource_on_node(node, yaml_file, **kwargs):
"""Apply Kubernetes resource on node.
@@ -79,18 +111,15 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- stream = file('{tpl}/{yaml}'.format(tpl=Constants.RESOURCES_TPL_K8S,
- yaml=yaml_file), 'r')
-
- for data in yaml.load_all(stream):
+ fqn_file = '{tpl}/{yaml}'.format(tpl=Constants.RESOURCES_TPL_K8S,
+ yaml=yaml_file)
+ with open(fqn_file, 'r') as src_file:
+ stream = src_file.read()
data = reduce(lambda a, kv: a.replace(*kv), kwargs.iteritems(),
- yaml.dump(data, default_flow_style=False))
- # Workaround to avoid using RAW string anotated with | in YAML as
- # library + bash is misinterpreting spaces.
- data = data.replace('.conf:\n', '.conf: |\n')
+ stream)
cmd = 'cat <<EOF | kubectl apply -f - \n{data}\nEOF'.format(
data=data)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
raise RuntimeError('Failed to apply Kubernetes template {yaml} '
'on {node}.'.format(yaml=yaml_file,
@@ -114,141 +143,169 @@ class KubernetesUtils(object):
**kwargs)
@staticmethod
- def create_kubernetes_cm_from_file_on_node(node, name, key, src_file):
+ def create_kubernetes_cm_from_file_on_node(node, nspace, name, **kwargs):
"""Create Kubernetes ConfigMap from file on node.
:param node: DUT node.
+ :param nspace: Kubernetes namespace.
:param name: ConfigMap name.
- :param key: Key (destination file).
- :param src_file: Source file.
+ :param kwargs: Named parameters.
:type node: dict
+ :type nspace: str
:type name: str
- :type key: str
- :type src_file: str
+        :type kwargs: dict
:raises RuntimeError: If creating Kubernetes ConfigMap failed.
"""
ssh = SSH()
ssh.connect(node)
- cmd = 'kubectl create -n csit configmap {name} --from-file={key}='\
- '{src_file}'.format(name=name, key=key, src_file=src_file)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
+
+ from_file = '{0}'.format(' '.join('--from-file={0}={1} '\
+ .format(key, kwargs[key]) for key in kwargs))
+
+ cmd = 'kubectl create {nspace} configmap {name} {from_file}'\
+ .format(nspace=nspace, name=name, from_file=from_file)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Failed to create Kubernetes ConfigMap {name} '
- 'on {node}.'.format(name=name,
- node=node['host']))
+ raise RuntimeError('Failed to create Kubernetes ConfigMap '
+ 'on {node}.'.format(node=node['host']))
@staticmethod
- def create_kubernetes_cm_from_file_on_all_duts(nodes, name, key, src_file):
+ def create_kubernetes_cm_from_file_on_all_duts(nodes, nspace, name,
+ **kwargs):
"""Create Kubernetes ConfigMap from file on all DUTs.
:param nodes: Topology nodes.
+ :param nspace: Kubernetes namespace.
:param name: ConfigMap name.
- :param key: Key (destination file).
- :param src_file: Source file.
+ :param kwargs: Named parameters.
:type nodes: dict
+ :type nspace: str
:type name: str
- :type key: str
- :type src_file: str
+        :type kwargs: dict
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
KubernetesUtils.create_kubernetes_cm_from_file_on_node(node,
+ nspace,
name,
- key,
- src_file)
+ **kwargs)
@staticmethod
- def delete_kubernetes_resource_on_node(node, rtype='po,cm', name=None):
+ def delete_kubernetes_resource_on_node(node, nspace, name=None,
+ rtype='po,cm,deploy,rs,rc,svc'):
"""Delete Kubernetes resource on node.
:param node: DUT node.
+ :param nspace: Kubernetes namespace.
:param rtype: Kubernetes resource type.
- :param name: Name of resource.
+ :param name: Name of resource (Default: all).
:type node: dict
+ :type nspace: str
:type rtype: str
:type name: str
- :raises RuntimeError: If deleting Kubernetes resource failed.
+ :raises RuntimeError: If retrieving or deleting Kubernetes resource
+ failed.
"""
ssh = SSH()
ssh.connect(node)
name = '{name}'.format(name=name) if name else '--all'
+ nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
- cmd = 'kubectl delete -n csit {rtype} {name}'\
- .format(rtype=rtype, name=name)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ cmd = 'kubectl delete {nspace} {rtype} {name}'\
+ .format(nspace=nspace, rtype=rtype, name=name)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Failed to delete Kubernetes resources in CSIT '
- 'namespace on {node}.'.format(node=node['host']))
+ raise RuntimeError('Failed to delete Kubernetes resources '
+ 'on {node}.'.format(node=node['host']))
- cmd = 'kubectl get -n csit pods --no-headers'
- for _ in range(24):
- (ret_code, stdout, _) = ssh.exec_command_sudo(cmd, timeout=120)
- if int(ret_code) == 0:
- ready = True
+ cmd = 'kubectl get {nspace} pods -a --no-headers'\
+ .format(nspace=nspace)
+ for _ in range(MAX_RETRY):
+ (ret_code, stdout, stderr) = ssh.exec_command_sudo(cmd)
+ if int(ret_code) != 0:
+ raise RuntimeError('Failed to retrieve Kubernetes resources on '
+ '{node}.'.format(node=node['host']))
+ if name == '--all':
+ ready = False
+ for line in stderr.splitlines():
+ if 'No resources found.' in line:
+ ready = True
+ if ready:
+ break
+ else:
+ ready = False
for line in stdout.splitlines():
- if 'No resources found.' not in line:
+ try:
+ state = line.split()[1].split('/')
+ ready = True if 'Running' in line and\
+ state == state[::-1] else False
+ if not ready:
+ break
+ except (ValueError, IndexError):
ready = False
if ready:
break
- time.sleep(5)
+ sleep(5)
else:
- raise RuntimeError('Failed to delete Kubernetes resources in CSIT '
- 'namespace on {node}.'.format(node=node['host']))
+ raise RuntimeError('Failed to delete Kubernetes resources on '
+ '{node}.'.format(node=node['host']))
@staticmethod
- def delete_kubernetes_resource_on_all_duts(nodes, rtype='po,cm', name=None):
+ def delete_kubernetes_resource_on_all_duts(nodes, nspace, name=None,
+ rtype='po,cm,deploy,rs,rc,svc'):
"""Delete all Kubernetes resource on all DUTs.
:param nodes: Topology nodes.
+ :param nspace: Kubernetes namespace.
:param rtype: Kubernetes resource type.
:param name: Name of resource.
:type nodes: dict
+ :type nspace: str
:type rtype: str
:type name: str
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
- KubernetesUtils.delete_kubernetes_resource_on_node(node, rtype,
- name)
+ KubernetesUtils.delete_kubernetes_resource_on_node(node, nspace,
+ name, rtype)
@staticmethod
- def describe_kubernetes_resource_on_node(node, rtype='po,cm'):
- """Describe Kubernetes resource on node.
+ def describe_kubernetes_resource_on_node(node, nspace):
+ """Describe all Kubernetes PODs in namespace on node.
:param node: DUT node.
- :param rtype: Kubernetes resource type.
+ :param nspace: Kubernetes namespace.
:type node: dict
- :type rtype: str
- :raises RuntimeError: If describing Kubernetes resource failed.
+ :type nspace: str
"""
ssh = SSH()
ssh.connect(node)
- cmd = 'kubectl describe -n csit {rtype}'.format(rtype=rtype)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
- if int(ret_code) != 0:
- raise RuntimeError('Failed to describe Kubernetes resource on '
- '{node}.'.format(node=node['host']))
+ nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
+
+ cmd = 'kubectl describe {nspace} all'.format(nspace=nspace)
+ ssh.exec_command_sudo(cmd)
@staticmethod
- def describe_kubernetes_resource_on_all_duts(nodes, rtype='po,cm'):
- """Describe Kubernetes resource on all DUTs.
+ def describe_kubernetes_resource_on_all_duts(nodes, nspace):
+ """Describe all Kubernetes PODs in namespace on all DUTs.
:param nodes: Topology nodes.
- :param rtype: Kubernetes resource type.
+ :param nspace: Kubernetes namespace.
:type nodes: dict
- :type rtype: str
+ :type nspace: str
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
KubernetesUtils.describe_kubernetes_resource_on_node(node,
- rtype)
+ nspace)
@staticmethod
- def get_kubernetes_logs_on_node(node, nspace='csit'):
- """Get Kubernetes logs on node.
+ def get_kubernetes_logs_on_node(node, nspace):
+ """Get Kubernetes logs from all PODs in namespace on node.
:param node: DUT node.
:param nspace: Kubernetes namespace.
@@ -258,14 +315,16 @@ class KubernetesUtils(object):
ssh = SSH()
ssh.connect(node)
- cmd = "for p in $(kubectl get pods -n {namespace} --no-headers"\
- " | cut -f 1 -d ' '); do echo $p; kubectl logs -n {namespace} $p; "\
- "done".format(namespace=nspace)
- ssh.exec_command(cmd, timeout=120)
+ nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
+
+ cmd = "for p in $(kubectl get pods {nspace} -o jsonpath="\
+ "'{{.items[*].metadata.name}}'); do echo $p; kubectl logs "\
+ "{nspace} $p; done".format(nspace=nspace)
+ ssh.exec_command_sudo(cmd)
@staticmethod
- def get_kubernetes_logs_on_all_duts(nodes, nspace='csit'):
- """Get Kubernetes logs on all DUTs.
+ def get_kubernetes_logs_on_all_duts(nodes, nspace):
+ """Get Kubernetes logs from all PODs in namespace on all DUTs.
:param nodes: Topology nodes.
:param nspace: Kubernetes namespace.
@@ -277,71 +336,46 @@ class KubernetesUtils(object):
KubernetesUtils.get_kubernetes_logs_on_node(node, nspace)
@staticmethod
- def reset_kubernetes_on_node(node):
- """Reset Kubernetes on node.
-
- :param node: DUT node.
- :type node: dict
- :raises RuntimeError: If resetting Kubernetes failed.
- """
- ssh = SSH()
- ssh.connect(node)
-
- cmd = 'kubeadm reset && rm -rf $HOME/.kube'
- (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
- if int(ret_code) != 0:
- raise RuntimeError('Failed to reset Kubernetes on {node}.'
- .format(node=node['host']))
-
- @staticmethod
- def reset_kubernetes_on_all_duts(nodes):
- """Reset Kubernetes on all DUTs.
-
- :param nodes: Topology nodes.
- :type nodes: dict
- """
- for node in nodes.values():
- if node['type'] == NodeType.DUT:
- KubernetesUtils.reset_kubernetes_on_node(node)
-
- @staticmethod
- def wait_for_kubernetes_pods_on_node(node, nspace='csit'):
- """Wait for Kubernetes PODs to become in 'Running' state on node.
+ def wait_for_kubernetes_pods_on_node(node, nspace):
+ """Wait for Kubernetes PODs to become ready on node.
:param node: DUT node.
:param nspace: Kubernetes namespace.
:type node: dict
:type nspace: str
- :raises RuntimeError: If Kubernetes PODs are not ready.
+ :raises RuntimeError: If Kubernetes PODs are not in Running state.
"""
ssh = SSH()
ssh.connect(node)
- cmd = 'kubectl get -n {namespace} pods --no-headers'\
- .format(namespace=nspace)
- for _ in range(48):
- (ret_code, stdout, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ nspace = '-n {nspace}'.format(nspace=nspace) if nspace \
+ else '--all-namespaces'
+
+ cmd = 'kubectl get {nspace} pods -a --no-headers' \
+ .format(nspace=nspace)
+ for _ in range(MAX_RETRY):
+ (ret_code, stdout, _) = ssh.exec_command_sudo(cmd)
if int(ret_code) == 0:
ready = False
for line in stdout.splitlines():
try:
state = line.split()[1].split('/')
- ready = True if 'Running' in line and\
+ ready = True if 'Running' in line and \
state == state[::-1] else False
if not ready:
break
- except ValueError, IndexError:
+ except (ValueError, IndexError):
ready = False
if ready:
break
- time.sleep(5)
+ sleep(5)
else:
- raise RuntimeError('Kubernetes PODs are not ready on {node}.'
+ raise RuntimeError('Kubernetes PODs are not running on {node}.'
.format(node=node['host']))
@staticmethod
- def wait_for_kubernetes_pods_on_all_duts(nodes, nspace='csit'):
- """Wait for Kubernetes PODs to become in Running state on all DUTs.
+ def wait_for_kubernetes_pods_on_all_duts(nodes, nspace):
+        """Wait for Kubernetes PODs to become ready on all DUTs.
:param nodes: Topology nodes.
:param nspace: Kubernetes namespace.
@@ -353,6 +387,32 @@ class KubernetesUtils(object):
KubernetesUtils.wait_for_kubernetes_pods_on_node(node, nspace)
@staticmethod
+ def set_kubernetes_pods_affinity_on_node(node):
+ """Set affinity for all Kubernetes PODs except VPP on node.
+
+ :param node: DUT node.
+ :type node: dict
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ cmd = '{dir}/{lib}/k8s_setup.sh affinity_non_vpp'\
+ .format(dir=Constants.REMOTE_FW_DIR,
+ lib=Constants.RESOURCES_LIB_SH)
+ ssh.exec_command(cmd)
+
+ @staticmethod
+ def set_kubernetes_pods_affinity_on_all_duts(nodes):
+ """Set affinity for all Kubernetes PODs except VPP on all DUTs.
+
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node['type'] == NodeType.DUT:
+ KubernetesUtils.set_kubernetes_pods_affinity_on_node(node)
+
+ @staticmethod
def create_kubernetes_vswitch_startup_config(**kwargs):
"""Create Kubernetes VSWITCH startup configuration.
@@ -394,21 +454,27 @@ class KubernetesUtils(object):
:param kwargs: Key-value pairs used to create configuration.
:param kwargs: dict
"""
- skip_cnt = kwargs['cpu_skip'] + (kwargs['i'] - 1) * kwargs['cpu_cnt']
+ skip_cnt = kwargs['cpu_skip'] + (kwargs['i'] - 1) * \
+ (kwargs['cpu_cnt'] - 1)
cpuset_cpus = \
CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
cpu_node=kwargs['cpu_node'],
skip_cnt=skip_cnt,
- cpu_cnt=kwargs['cpu_cnt'],
+ cpu_cnt=kwargs['cpu_cnt']-1,
+ smt_used=kwargs['smt_used'])
+ cpuset_main = \
+ CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
+ cpu_node=kwargs['cpu_node'],
+ skip_cnt=1,
+ cpu_cnt=1,
smt_used=kwargs['smt_used'])
-
# Create config instance
vpp_config = VppConfigGenerator()
vpp_config.set_node(kwargs['node'])
vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
vpp_config.add_unix_nodaemon()
# We will pop first core from list to be main core
- vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
+ vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
# if this is not only core in list, the rest will be used as workers.
if cpuset_cpus:
corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
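
For reference, the POD readiness and deletion polling added above amounts to repeatedly running kubectl over SSH until every POD in the namespace reports Running with all containers ready; an illustrative shell equivalent of a single such check (the library retries it up to MAX_RETRY times with a 5 s sleep):

    # Succeeds only when every POD in the csit namespace is Running and fully ready
    kubectl get -n csit pods -a --no-headers | \
        awk '{split($2, s, "/"); if ($3 != "Running" || s[1] != s[2]) exit 1}'
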
diff --git a/resources/libraries/robot/performance/performance_setup.robot b/resources/libraries/robot/performance/performance_setup.robot
index 68f84b664f..0453fad275 100644
--- a/resources/libraries/robot/performance/performance_setup.robot
+++ b/resources/libraries/robot/performance/performance_setup.robot
@@ -425,6 +425,22 @@
| | Reset VAT History On All DUTs | ${nodes}
| | Create base startup configuration of VPP on all DUTs
+| Set up performance test with Ligato Kubernetes
+| | [Documentation] | Common test setup for performance tests with Ligato \
+| | ... | Kubernetes.
+| | ...
+| | Apply Kubernetes resource on all duts | ${nodes} | namespaces/csit.yaml
+| | Apply Kubernetes resource on all duts | ${nodes} | pods/kafka.yaml
+| | Apply Kubernetes resource on all duts | ${nodes} | pods/etcdv3.yaml
+| | Apply Kubernetes resource on all duts | ${nodes}
+| | ... | configmaps/vswitch-agent-cfg.yaml
+| | Apply Kubernetes resource on all duts | ${nodes}
+| | ... | configmaps/vnf-agent-cfg.yaml
+| | Apply Kubernetes resource on all duts | ${nodes}
+| | ... | pods/contiv-sfc-controller.yaml
+| | Apply Kubernetes resource on all duts | ${nodes}
+| | ... | pods/contiv-vswitch.yaml
+
# Tests teardowns
| Tear down performance discovery test
@@ -613,3 +629,13 @@
| | ... | Vpp Log Macip Acl Settings | ${dut1}
| | Run Keyword And Ignore Error
| | ... | Vpp Log Macip Acl Interface Assignment | ${dut1}
+
+| Tear down performance test with Ligato Kubernetes
+| | [Documentation] | Common test teardown for ndrdisc and pdrdisc performance \
+| | ... | tests with Ligato Kubernetes.
+| | ...
+| | Run Keyword If Test Failed
+| | ... | Get Kubernetes logs on all DUTs | ${nodes} | csit
+| | Run Keyword If Test Failed
+| | ... | Describe Kubernetes resource on all DUTs | ${nodes} | csit
+| | Delete Kubernetes resource on all DUTs | ${nodes} | csit
\ No newline at end of file