aboutsummaryrefslogtreecommitdiffstats
path: root/resources
diff options
context:
space:
mode:
authorPeter Mikus <pmikus@cisco.com>2017-05-30 14:10:14 +0200
committerPeter Mikus <pmikus@cisco.com>2017-06-15 11:48:15 +0000
commit1ce01dad25e40fbf4144efc5dcc6771c9bf14d20 (patch)
tree4676a4051c91f6adc4cbd401d13edaeebe476194 /resources
parent01e8dbf3aa23c43c2ffd668c694d31d8af12abe7 (diff)
CSIT-649 Add library for creating lxc container
Add RF library for manipulating lxc container Add python library for manipulating lxc container Change-Id: I02140aa879c7ebd40360d588ab1438c58cf560a1 Signed-off-by: Peter Mikus <pmikus@cisco.com>
Diffstat (limited to 'resources')
-rw-r--r--resources/libraries/python/LXCUtils.py375
-rw-r--r--resources/libraries/python/VppConfigGenerator.py71
-rw-r--r--resources/libraries/python/ssh.py23
-rw-r--r--resources/libraries/robot/lxc.robot253
4 files changed, 720 insertions, 2 deletions
diff --git a/resources/libraries/python/LXCUtils.py b/resources/libraries/python/LXCUtils.py
new file mode 100644
index 0000000000..3f251a0db1
--- /dev/null
+++ b/resources/libraries/python/LXCUtils.py
@@ -0,0 +1,375 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Library to manipulate LXC."""
+
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.topology import NodeType
+
+__all__ = ["LXCUtils"]
+
+class LXCUtils(object):
+ """LXC utilities."""
+
+ def __init__(self, container_name='slave'):
+ # LXC container name
+ self._container_name = container_name
+ self._node = None
+ # Host dir that will be mounted inside LXC
+ self._host_dir = '/tmp/'
+ # Guest dir to mount host dir to
+ self._guest_dir = '/mnt/host'
+ # LXC container env variables
+ self._env_vars = ['LC_ALL="en_US.UTF-8"',
+ 'DEBIAN_FRONTEND=noninteractive']
+
+ def set_node(self, node):
+ """Set node for LXC execution.
+
+ :param node: Node to execute LXC on.
+ :type node: dict
+ :raises RuntimeError: If Node type is not DUT.
+ """
+ if node['type'] != NodeType.DUT:
+ raise RuntimeError('Node type is not DUT.')
+ self._node = node
+
+ def set_host_dir(self, node, host_dir):
+ """Set shared dir on parent node for LXC.
+
+ :param node: Node to control LXC on.
+ :type node: dict
+ :raises RuntimeError: If Node type is not DUT.
+ """
+ if node['type'] != NodeType.DUT:
+ raise RuntimeError('Node type is not DUT.')
+ self._host_dir = host_dir
+
+ def set_guest_dir(self, node, guest_dir):
+ """Set mount dir on LXC.
+
+ :param node: Node to control LXC on.
+ :param guest_dir: Guest dir for mount.
+ :type node: dict
+ :type guest_dir: str
+ :raises RuntimeError: If Node type is not DUT.
+ """
+ if node['type'] != NodeType.DUT:
+ raise RuntimeError('Node type is not DUT.')
+ self._guest_dir = guest_dir
+
+ def _lxc_checkconfig(self):
+ """Check the current kernel for LXC support.
+
+ :raises RuntimeError: If failed to check LXC support.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo('lxc-checkconfig')
+ if int(ret) != 0:
+ raise RuntimeError('Failed to check LXC support.')
+
+ def _lxc_create(self, distro='ubuntu', release='xenial', arch='amd64'):
+ """Creates a privileged system object where is stored the configuration
+ information and where can be stored user information.
+
+ :param distro: Linux distribution name.
+ :param release: Linux distribution release.
+ :param arch: Linux distribution architecture.
+ :type distro: str
+ :type release: str
+ :type arch: str
+ :raises RuntimeError: If failed to create a container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo(
+ 'lxc-create -t download --name {0} -- -d {1} -r {2} -a {3}'
+ .format(self._container_name, distro, release, arch), timeout=1800)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to create LXC container.')
+
+ def _lxc_info(self):
+ """Queries and shows information about a container.
+
+ :raises RuntimeError: If failed to get info about a container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo(
+ 'lxc-info --name {0}'.format(self._container_name))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to get info about LXC container {0}.'
+ .format(self._container_name))
+
+ def _lxc_start(self):
+ """Start an application inside a container.
+
+ :raises RuntimeError: If failed to start container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo(
+ 'lxc-start --name {0} --daemon'.format(self._container_name))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to start LXC container {0}.'
+ .format(self._container_name))
+
+ def _lxc_stop(self):
+ """Stop an application inside a container.
+
+ :raises RuntimeError: If failed to stop container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo(
+ 'lxc-stop --name {0}'.format(self._container_name))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to stop LXC container {}.'
+ .format(self._container_name))
+
+ def _lxc_destroy(self):
+ """Destroy a container.
+
+ :raises RuntimeError: If failed to destroy container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo(
+ 'lxc-destroy --force --name {0}'.format(self._container_name))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to destroy LXC container {}.'
+ .format(self._container_name))
+
+ def _lxc_wait(self, state):
+ """Wait for a specific container state.
+
+ :param state: Specify the container state(s) to wait for.
+ :type state: str
+ :raises RuntimeError: If failed to wait for state of a container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo(
+ 'lxc-wait --name {0} --state "{1}"'
+ .format(self._container_name, state))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to wait for "{0}" of LXC container {1}.'
+ .format(state, self._container_name))
+
+ def _lxc_cgroup(self, state_object, value=''):
+ """Manage the control group associated with a container.
+
+ :param state_object: Specify the state object name.
+        :param value: Specify the value to assign to the state object. If
+            empty, the action is GET; otherwise the action is SET.
+ :type state_object: str
+ :type value: str
+ :raises RuntimeError: If failed to get/set for state of a container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo(
+ 'lxc-cgroup --name {0} {1} {2}'
+ .format(self._container_name, state_object, value))
+ if int(ret) != 0:
+ if value:
+ raise RuntimeError('Failed to set {0} of LXC container {1}.'
+ .format(state_object, self._container_name))
+ else:
+ raise RuntimeError('Failed to get {0} of LXC container {1}.'
+ .format(state_object, self._container_name))
+
+ def lxc_attach(self, command):
+ """Start a process inside a running container. Runs the specified
+ command inside the container specified by name. The container has to
+ be running already.
+
+ :param command: Command to run inside container.
+ :type command: str
+ :raises RuntimeError: If container is not running.
+ :raises RuntimeError: If failed to run the command.
+ """
+ env_var = '--keep-env {0}'\
+ .format(' '.join('--set-var %s' % var for var in self._env_vars))
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ if not self.is_container_running():
+ raise RuntimeError('LXC {0} is not running.'
+ .format(self._container_name))
+
+ ret, _, _ = ssh.exec_command_lxc(lxc_cmd=command,
+ lxc_name=self._container_name,
+ lxc_params=env_var, timeout=180)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to run "{0}" on LXC container {1}.'
+ .format(command, self._container_name))
+
+ def is_container_present(self):
+ """Check if LXC container is existing on node."""
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo(
+ 'lxc-info --name {0}'.format(self._container_name))
+ return False if int(ret) else True
+
+ def create_container(self, force_create=True):
+ """Create and start a container.
+
+ :param force_create: Destroy a container if exists and create.
+ :type force_create: bool
+ """
+ if self.is_container_present():
+ if force_create:
+ self.container_destroy()
+ else:
+ return
+
+ self._lxc_checkconfig()
+ self._lxc_create(distro='ubuntu', release='xenial', arch='amd64')
+ self.start_container()
+
+ def start_container(self):
+ """Start a container and wait for running state."""
+
+ self._lxc_start()
+ self._lxc_wait('RUNNING')
+ self._lxc_info()
+
+ def is_container_running(self):
+ """Check if LXC container is running on node.
+
+ :raises RuntimeError: If failed to get info about a container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, stdout, _ = ssh.exec_command_sudo(
+ 'lxc-info --state --name {0}'.format(self._container_name))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to get info about LXC container {0}.'
+ .format(self._container_name))
+
+ return True if 'RUNNING' in stdout else False
+
+ def stop_container(self):
+ """Stop a container and wait for stopped state."""
+
+ self._lxc_stop()
+ self._lxc_wait('STOPPED|FROZEN')
+ self._lxc_info()
+
+ def restart_container(self):
+ """Restart container."""
+
+ self.stop_container()
+ self.start_container()
+
+ def container_destroy(self):
+ """Stop and destroy a container."""
+
+ self._lxc_destroy()
+
+ def container_cpuset_cpus(self, container_cpu):
+ """Set cpuset.cpus control group associated with a container.
+
+ :param container_cpu: Cpuset.cpus string.
+ :type container_cpu: str
+ :raises RuntimeError: If failed to set cgroup for a container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ ret, _, _ = ssh.exec_command_sudo('cgset --copy-from / lxc')
+ if int(ret) != 0:
+ raise RuntimeError('Failed to copy cgroup settings from root.')
+
+ self._lxc_cgroup(state_object='cpuset.cpus')
+ self._lxc_cgroup(state_object='cpuset.cpus', value=container_cpu)
+ self._lxc_cgroup(state_object='cpuset.cpus')
+
+ def mount_host_dir_in_container(self):
+ """Mount shared folder inside container.
+
+ :raises RuntimeError: If failed to mount host dir in a container.
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ self.lxc_attach('mkdir -p {0}'.format(self._guest_dir))
+
+ mnt_cfg = 'lxc.mount.entry = {0} /var/lib/lxc/{1}/rootfs{2} ' \
+ 'none bind 0 0'.format(self._host_dir, self._container_name,
+ self._guest_dir)
+ ret, _, _ = ssh.exec_command_sudo(
+ "sh -c 'echo \"{0}\" >> /var/lib/lxc/{1}/config'"
+ .format(mnt_cfg, self._container_name))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to mount {0} in lxc: {1}'
+ .format(self._host_dir, self._container_name))
+
+ self.restart_container()
+
+ def install_vpp_in_container(self, install_dkms=False):
+ """Install vpp inside a container.
+
+        :param install_dkms: Whether to install the dkms package. This will
+            impact install time. Dkms is required for installation of
+            vpp-dpdk-dkms. Default is False.
+ :type install_dkms: bool
+ """
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ self.lxc_attach('apt-get update')
+ if install_dkms:
+ self.lxc_attach('apt-get install -y dkms && '
+ 'dpkg -i --force-all {0}/install_dir/*.deb'
+ .format(self._guest_dir))
+ else:
+ self.lxc_attach('for i in $(ls -I \"*dkms*\" {0}/install_dir/); '
+ 'do dpkg -i --force-all {0}/install_dir/$i; done'
+ .format(self._guest_dir))
+ self.lxc_attach('apt-get -f install -y')
+
+ def restart_vpp_in_container(self):
+ """Restart vpp service inside a container."""
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ self.lxc_attach('service vpp restart')
diff --git a/resources/libraries/python/VppConfigGenerator.py b/resources/libraries/python/VppConfigGenerator.py
index 039f629309..31defd6704 100644
--- a/resources/libraries/python/VppConfigGenerator.py
+++ b/resources/libraries/python/VppConfigGenerator.py
@@ -123,7 +123,6 @@ class VppConfigGenerator(object):
"""
path = ['unix', 'cli-listen']
self.add_config_item(self._nodeconfig, value, path)
- self._nodeconfig = {'unix': {'nodaemon': ''}}
def add_unix_nodaemon(self):
"""Add UNIX nodaemon configuration."""
@@ -228,7 +227,7 @@ class VppConfigGenerator(object):
def add_api_trace(self):
"""Add API trace configuration."""
path = ['api-trace', 'on']
- self.add_config_item(self._nodeconfig, path, '')
+ self.add_config_item(self._nodeconfig, '', path)
def add_ip6_hash_buckets(self, value):
"""Add IP6 hash buckets configuration.
@@ -248,6 +247,16 @@ class VppConfigGenerator(object):
path = ['ip6', 'heap-size']
self.add_config_item(self._nodeconfig, value, path)
+ def add_plugin_disable(self, *plugins):
+ """Add plugin disable for specific plugin.
+
+ :param plugins: Plugin(s) to disable.
+ :type plugins: list
+ """
+ for plugin in plugins:
+ path = ['plugins', 'plugin {0}'.format(plugin), 'disable']
+ self.add_config_item(self._nodeconfig, ' ', path)
+
def add_dpdk_no_multi_seg(self):
"""Add DPDK no-multi-seg configuration."""
path = ['dpdk', 'no-multi-seg']
@@ -316,3 +325,61 @@ class VppConfigGenerator(object):
else:
raise RuntimeError('VPP failed to restart on node {}'.
format(self._hostname))
+
+ def apply_config_lxc(self, lxc_name, waittime=5, retries=12):
+ """Generate and apply VPP configuration for node in a container.
+
+ Use data from calls to this class to form a startup.conf file and
+ replace /etc/vpp/startup.conf with it on node inside a container.
+
+ :param lxc_name: LXC container name.
+ :param waittime: Time to wait for VPP to restart (default 5 seconds).
+ :param retries: Number of times (default 12) to re-try waiting.
+ :type lxc_name: str
+ :type waittime: int
+ :type retries: int
+ :raises RuntimeError: If writing config file failed, or restarting of
+ VPP failed.
+ """
+ self.dump_config(self._nodeconfig)
+
+ ssh = SSH()
+ ssh.connect(self._node)
+
+ # We're using this "| sudo tee" construct because redirecting
+ # a sudo's output ("sudo echo xxx > /path/to/file") does not
+ # work on most platforms...
+ (ret, _, _) = \
+ ssh.exec_command_lxc('echo "{0}" | sudo tee {1}'.
+ format(self._vpp_config,
+ self._vpp_config_filename), lxc_name)
+
+ if ret != 0:
+ raise RuntimeError('Writing config file failed in {0} to node {1}'.
+ format(lxc_name, self._hostname))
+
+ # Instead of restarting, we'll do separate start and stop
+ # actions. This way we don't care whether VPP was running
+ # to begin with.
+ ssh.exec_command_lxc('service {0} stop'.
+ format(self._vpp_service_name), lxc_name)
+ (ret, _, _) = \
+ ssh.exec_command_lxc('service {0} start'.
+ format(self._vpp_service_name), lxc_name)
+ if ret != 0:
+ raise RuntimeError('Restarting VPP failed in {0} on node {1}'.
+ format(lxc_name, self._hostname))
+
+ # Sleep <waittime> seconds, up to <retry> times,
+ # and verify if VPP is running.
+ for _ in range(retries):
+ time.sleep(waittime)
+ (ret, _, _) = \
+ ssh.exec_command_lxc('echo show hardware-interfaces | '
+ 'nc 0 5002 || echo "VPP not yet running"',
+ lxc_name)
+ if ret == 0:
+ break
+ else:
+ raise RuntimeError('VPP failed to restart in {0} on node {1}'.
+ format(lxc_name, self._hostname))
diff --git a/resources/libraries/python/ssh.py b/resources/libraries/python/ssh.py
index f59bd02e25..db39a0701c 100644
--- a/resources/libraries/python/ssh.py
+++ b/resources/libraries/python/ssh.py
@@ -207,6 +207,29 @@ class SSH(object):
command = 'sudo -S {c} <<< "{i}"'.format(c=cmd, i=cmd_input)
return self.exec_command(command, timeout)
+ def exec_command_lxc(self, lxc_cmd, lxc_name, lxc_params='', sudo=True,
+ timeout=30):
+ """Execute command in LXC on a new SSH channel on the connected Node.
+
+ :param lxc_cmd: Command to be executed.
+ :param lxc_name: LXC name.
+ :param lxc_params: Additional parameters for LXC attach.
+        :param sudo: Run in privileged LXC mode. Default: True (privileged).
+ :param timeout: Timeout.
+ :type lxc_cmd: str
+ :type lxc_name: str
+ :type lxc_params: str
+ :type sudo: bool
+ :type timeout: int
+ :return: return_code, stdout, stderr
+ """
+ command = "lxc-attach {p} --name {n} -- /bin/sh -c '{c}'"\
+ .format(p=lxc_params, n=lxc_name, c=lxc_cmd)
+
+ if sudo:
+ command = 'sudo -S {c}'.format(c=command)
+ return self.exec_command(command, timeout)
+
def interactive_terminal_open(self, time_out=30):
"""Open interactive terminal on a new channel on the connected Node.
diff --git a/resources/libraries/robot/lxc.robot b/resources/libraries/robot/lxc.robot
new file mode 100644
index 0000000000..b4dd1d117d
--- /dev/null
+++ b/resources/libraries/robot/lxc.robot
@@ -0,0 +1,253 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Documentation | Keywords related to linux container (LXC)
+| Library | resources.libraries.python.LXCUtils
+| Library | resources.libraries.python.CpuUtils
+| Library | resources.libraries.python.topology.Topology
+
+*** Keywords ***
+| Create LXC container on DUT node
+| | [Documentation] | Setup lxc container on DUT node.
+| | ...
+| | ... | *Arguments:*
+| | ...
+| | ... | - dut_node - DUT node. Type: dictionary
+| | ... | - lxc_name - Name of LXC container. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create LXC container on DUT node \| ${nodes['DUT1']} \
+| | ... | \| DUT1_slave_1 \|
+| | ...
+| | [Arguments] | ${dut_node} | ${lxc_name}
+| | ...
+| | Import Library | resources.libraries.python.LXCUtils
+| | ... | container_name=${lxc_name} | WITH NAME | ${lxc_name}
+| | Run keyword | ${lxc_name}.Set node | ${dut_node}
+| | Run keyword | ${lxc_name}.Container create | force_create=${TRUE}
+| | Run keyword | ${lxc_name}.Mount host dir in container
+
+| Create LXC container on DUT node with cpuset
+| | [Documentation] | Create LXC container on DUT node with cpuset.
+| | ...
+| | ... | *Arguments:*
+| | ...
+| | ... | - dut_node - DUT node. Type: dictionary
+| | ... | - lxc_name - Name of LXC container. Type: dictionary
+| | ... | - skip - number of cpus which will be skipped. Type: integer
+| | ... | - count - number of cpus which will be allocated for lxc. Type:
+| | ... | integer
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create LXC container on DUT node with cpuset \
+| | ... | \| ${nodes['DUT1']} \| DUT1_slave_1 \| 6 \| 1 \|
+| | ...
+| | [Arguments] | ${dut_node} | ${lxc_name} | ${skip}=${6} | ${count}=${1}
+| | ...
+| | Import Library | resources.libraries.python.LXCUtils
+| | ... | container_name=${lxc_name} | WITH NAME | ${lxc_name}
+| | ${dut_numa}= | Get interfaces numa node | ${dut_node}
+| | ... | ${dut1_if1} | ${dut1_if2}
+| | ${lxc_cpus}= | CPU list per node str | ${dut_node} | ${dut_numa}
+| | ... | skip_cnt=${skip} | cpu_cnt=${count} | smt_used=${False}
+| | Run keyword | ${lxc_name}.Set node | ${dut_node}
+| | Run keyword | ${lxc_name}.Container create | force_create=${TRUE}
+| | Run keyword | ${lxc_name}.Mount host dir in container
+| | Run keyword | ${lxc_name}.Container cpuset cpus | ${lxc_cpus}
+
+| Create '${nr}' LXC containers on '${dut}' node
+| | [Documentation] | Create and start multiple lxc containers on DUT node.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create 5 LXC containers on DUT1 node \|
+| | ...
+| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
+| | | Create LXC container on DUT node | ${nodes['${dut}']}
+| | | ... | ${dut}_${lxc_base_name}_${number}
+
+| Create '${nr}' LXC containers on all DUT nodes
+| | [Documentation] | Create and start multiple LXC containers on all DUT nodes.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create 5 LXC containers on all DUT nodes \|
+| | ...
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Create '${nr}' LXC containers on '${dut}' node
+
+| Create '${nr}' LXC containers on '${dut}' node with '${count}' cpus
+| | [Documentation] | Create and start multiple LXC containers on DUT node.
+| | ... | Set the cpuset.cpus cgroup profile for pin of cpus.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create 5 LXC containers on DUT1 node with 2 cpus \|
+| | ...
+| | ${skip_cpus}= | Evaluate | ${vpp_cpus}+${system_cpus}
+| | ${count_int}= | Convert To Integer | ${count}
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
+| | | ${skip}= | Evaluate | ${skip_cpus} + (${nr} - 1) * ${count}
+| | | Create LXC container on DUT node with cpuset | ${nodes['${dut}']}
+| | | ... | ${dut}_${lxc_base_name}_${number} | ${skip} | ${count_int}
+
+| Create '${nr}' LXC containers on all DUT nodes with '${count}' cpus
+| | [Documentation] | Create and start multiple LXC containers on all DUT nodes.
+| | ... | Set the cpuset.cpus cgroup profile for pin of cpus.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create 5 LXC containers on all DUT nodes with 2 cpus \|
+| | ...
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Create '${nr}' LXC containers on '${dut}' node with '${count}' cpus
+
+| Destroy LXC container on DUT node
+| | [Documentation] | Stop and destroy LXC container on DUT node.
+| | ...
+| | ... | *Arguments:*
+| | ...
+| | ... | - dut_node - DUT node. Type: dictionary
+| | ... | - lxc_name - Name of LXC container. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Destroy LXC container on DUT node \| ${nodes['DUT1']} \
+| | ... | \| DUT1_slave_1 \|
+| | ...
+| | [Arguments] | ${dut_node} | ${lxc_name}
+| | ...
+| | Import Library | resources.libraries.python.LXCUtils
+| | ... | container_name=${lxc_name} | WITH NAME | ${lxc_name}
+| | Run keyword | ${lxc_name}.Set node | ${dut_node}
+| | Run keyword | ${lxc_name}.Container destroy
+
+| Destroy '${nr}' LXC containers on '${dut}' node
+| | [Documentation] | Stop and destroy multiple LXC containers on DUT node.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Destroy 5 LXC containers on DUT1 node \|
+| | ...
+| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
+| | | Destroy LXC container on DUT node | ${nodes['${dut}']}
+| | | ... | ${dut}_${lxc_base_name}_${number}
+
+| Destroy '${nr}' LXC containers on all DUT nodes
+| | [Documentation] | Stop and destroy multiple LXC containers on all DUT nodes.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Destroy 5 LXC containers on all DUT nodes \|
+| | ...
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Destroy '${nr}' LXC containers on '${dut}' node
+
+| Install VPP on LXC container on DUT node
+| | [Documentation] | Install vpp on LXC container on DUT node.
+| | ...
+| | ... | *Arguments:*
+| | ...
+| | ... | - dut_node - DUT node. Type: dictionary
+| | ... | - lxc_name - Name of LXC container. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Install VPP on LXC container on DUT node \| ${nodes['DUT1']} \
+| | ... | \| DUT1_slave_1 \|
+| | ...
+| | [Arguments] | ${dut_node} | ${lxc_name}
+| | ...
+| | Import Library | resources.libraries.python.LXCUtils
+| | ... | container_name=${lxc_name} | WITH NAME | ${lxc_name}
+| | Run keyword | ${lxc_name}.Set node | ${dut_node}
+| | Run keyword | ${lxc_name}.Install VPP in container
+
+| Install VPP on '${nr}' LXC containers on '${dut}' node
+| | [Documentation] | Install VPP on multiple LXC containers on DUT node.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Install VPP on 5 LXC containers on DUT1 node \|
+| | ...
+| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
+| | | Install VPP on LXC container on DUT node | ${nodes['${dut}']}
+| | | ... | ${dut}_${lxc_base_name}_${number}
+
+| Install VPP on '${nr}' LXC containers on all DUT nodes
+| | [Documentation] | Install VPP on multiple LXC containers on all DUT nodes.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Install VPP on 5 LXC containers on all DUT nodes \|
+| | ...
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Install VPP on '${nr}' LXC containers on '${dut}' node
+
+| Create startup configuration of VPP on LXC container on DUT node
+| | [Documentation] | Create base startup configuration of VPP on LXC container
+| | ... | on DUT node.
+| | ...
+| | ... | *Arguments:*
+| | ...
+| | ... | - dut_node - DUT node. Type: dictionary
+| | ... | - lxc_name - Name of LXC container. Type: dictionary
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create startup configuration of VPP on LXC container on DUT node \
+| | ... | \| ${nodes['DUT1']} \| DUT1_slave_1 \|
+| | ...
+| | [Arguments] | ${dut_node} | ${lxc_name}
+| | ...
+| | Import Library | resources.libraries.python.VppConfigGenerator
+| | ... | WITH NAME | ${lxc_name}_conf
+| | Run keyword | ${lxc_name}_conf.Set node | ${dut_node}
+| | Run keyword | ${lxc_name}_conf.Add unix CLI listen
+| | Run keyword | ${lxc_name}_conf.Add unix nodaemon
+| | Run keyword | ${lxc_name}_conf.Add plugin disable | "dpdk_plugin.so"
+| | Run Keyword | ${lxc_name}_conf.Apply config LXC | ${lxc_name}
+
+| Create startup configuration of VPP on '${nr}' LXC containers on '${dut}' node
+| | [Documentation] | Create base startup configuration of VPP on multiple LXC
+| | ... | container on DUT node.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create startup configuration of VPP on 1 LXC containers on DUT1 \
+| | ... | node \|
+| | ...
+| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
+| | | Create startup configuration of VPP on LXC container on DUT node
+| | | ... | ${nodes['${dut}']} | ${dut}_${lxc_base_name}_${number}
+
+| Create startup configuration of VPP on '${nr}' LXC containers on all DUT nodes
+| | [Documentation] | Create base startup configuration of VPP on multiple LXC
+| | ... | container on all DUT nodes.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create startup configuration of VPP on 1 LXC containers on all \
+| | ... | DUT nodes \|
+| | ...
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Create startup configuration of VPP on '${nr}' LXC containers on '${dut}' node