Diffstat (limited to 'resources')
-rw-r--r--  resources/libraries/python/ContainerUtils.py | 754
-rw-r--r--  resources/libraries/python/VppConfigGenerator.py | 75
-rw-r--r--  resources/libraries/robot/performance/performance_setup.robot | 27
-rw-r--r--  resources/libraries/robot/performance/performance_utils.robot | 2
-rw-r--r--  resources/libraries/robot/shared/container.robot | 121
-rw-r--r--  resources/libraries/robot/shared/lxc.robot | 256
-rw-r--r--  resources/libraries/robot/shared/memif.robot | 26
-rw-r--r--  resources/templates/vat/memif_create_lxc.vat | 16
8 files changed, 922 insertions(+), 355 deletions(-)
diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py
new file mode 100644
index 0000000000..fb2695fe44
--- /dev/null
+++ b/resources/libraries/python/ContainerUtils.py
@@ -0,0 +1,754 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Workaround for a pylint bug with abstract classes.
+#pylint: disable=W0223
+
+"""Library to manipulate Containers."""
+
+from collections import OrderedDict, Counter
+
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
+
+
+__all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
+
+SUPERVISOR_CONF = '/etc/supervisord.conf'
+
+
+class ContainerManager(object):
+ """Container lifecycle management class."""
+
+ def __init__(self, engine):
+ """Initialize Container Manager class.
+
+ :param engine: Container technology used (LXC/Docker/...).
+ :type engine: str
+ :raises NotImplementedError: If container technology is not implemented.
+ """
+ try:
+ self.engine = globals()[engine]()
+ except KeyError:
+ raise NotImplementedError('{e} is not implemented.'
+ .format(e=engine))
+ self.containers = OrderedDict()
+
+ def get_container_by_name(self, name):
+ """Get container instance.
+
+ :param name: Container name.
+ :type name: str
+ :returns: Container instance.
+ :rtype: Container
+ :raises RuntimeError: If failed to get container with name.
+ """
+ try:
+ return self.containers[name]
+ except KeyError:
+ raise RuntimeError('Failed to get container with name: {n}'
+ .format(n=name))
+
+ def construct_container(self, **kwargs):
+ """Construct container object on node with specified parameters.
+
+ :param kwargs: Key-value pairs used to construct container.
+        :type kwargs: dict
+ """
+ # Create base class
+ self.engine.initialize()
+ # Set parameters
+ for key in kwargs:
+ setattr(self.engine.container, key, kwargs[key])
+
+        # Set additional environment variables
+ setattr(self.engine.container, 'env',
+ 'MICROSERVICE_LABEL={n}'.format(n=kwargs['name']))
+
+ # Set cpuset.cpus cgroup
+ skip_cnt = kwargs['cpu_skip']
+ if not kwargs['cpu_shared']:
+ skip_cnt += kwargs['i'] * kwargs['cpu_count']
+ self.engine.container.cpuset_cpus = \
+ CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
+ cpu_node=kwargs['cpuset_mems'],
+ skip_cnt=skip_cnt,
+ cpu_cnt=kwargs['cpu_count'],
+ smt_used=kwargs['smt_used'])
+
+ # Store container instance
+ self.containers[kwargs['name']] = self.engine.container
+
+ def construct_containers(self, **kwargs):
+ """Construct 1..N container(s) on node with specified name.
+        An ordinal number is automatically appended to the container name as
+        a suffix.
+
+        :param kwargs: Key-value pairs used to construct container(s); must
+            include 'name' and 'count'.
+        :type kwargs: dict
+ """
+ name = kwargs['name']
+ for i in range(kwargs['count']):
+ # Name will contain ordinal suffix
+ kwargs['name'] = ''.join([name, str(i+1)])
+ # Create container
+ self.construct_container(i=i, **kwargs)
+
+ def acquire_all_containers(self):
+ """Acquire all containers."""
+ for container in self.containers:
+ self.engine.container = self.containers[container]
+ self.engine.acquire()
+
+ def build_all_containers(self):
+ """Build all containers."""
+ for container in self.containers:
+ self.engine.container = self.containers[container]
+ self.engine.build()
+
+ def create_all_containers(self):
+ """Create all containers."""
+ for container in self.containers:
+ self.engine.container = self.containers[container]
+ self.engine.create()
+
+ def execute_on_container(self, name, command):
+ """Execute command on container with name.
+
+ :param name: Container name.
+ :param command: Command to execute.
+ :type name: str
+ :type command: str
+ """
+ self.engine.container = self.get_container_by_name(name)
+ self.engine.execute(command)
+
+ def execute_on_all_containers(self, command):
+ """Execute command on all containers.
+
+ :param command: Command to execute.
+ :type command: str
+ """
+ for container in self.containers:
+ self.engine.container = self.containers[container]
+ self.engine.execute(command)
+
+ def install_vpp_in_all_containers(self):
+ """Install VPP into all containers."""
+ for container in self.containers:
+ self.engine.container = self.containers[container]
+ # We need to install supervisor client/server system to control VPP
+ # as a service
+ self.engine.install_supervisor()
+ self.engine.install_vpp()
+ self.engine.restart_vpp()
+
+ def configure_vpp_in_all_containers(self, vat_template_file):
+ """Configure VPP in all containers.
+
+ :param vat_template_file: Template file name of a VAT script.
+ :type vat_template_file: str
+ """
+ # Count number of DUTs based on node's host information
+ dut_cnt = len(Counter([self.containers[container].node['host']
+ for container in self.containers]))
+ container_cnt = len(self.containers)
+ mod = dut_cnt/container_cnt
+
+ for i, container in enumerate(self.containers):
+ self.engine.container = self.containers[container]
+ self.engine.create_vpp_startup_config()
+ self.engine.create_vpp_exec_config(vat_template_file,
+ memif_id1=i % mod * 2 + 1,
+ memif_id2=i % mod * 2 + 2,
+ socket1='memif-{c.name}-1'
+ .format(c=self.engine.container),
+ socket2='memif-{c.name}-2'
+ .format(c=self.engine.container))
+
+ def stop_all_containers(self):
+ """Stop all containers."""
+ for container in self.containers:
+ self.engine.container = self.containers[container]
+ self.engine.stop()
+
+ def destroy_all_containers(self):
+ """Destroy all containers."""
+ for container in self.containers:
+ self.engine.container = self.containers[container]
+ self.engine.destroy()
+
+
+class ContainerEngine(object):
+ """Abstract class for container engine."""
+
+ def __init__(self):
+ """Init ContainerEngine object."""
+ self.container = None
+
+ def initialize(self):
+ """Initialize container object."""
+ self.container = Container()
+
+ def acquire(self, force):
+ """Acquire/download container.
+
+        :param force: Destroy the container if it exists, then create it.
+ :type force: bool
+ """
+ raise NotImplementedError
+
+ def build(self):
+ """Build container (compile)."""
+ raise NotImplementedError
+
+ def create(self):
+ """Create/deploy container."""
+ raise NotImplementedError
+
+ def execute(self, command):
+ """Execute process inside container.
+
+ :param command: Command to run inside container.
+ :type command: str
+ """
+ raise NotImplementedError
+
+ def stop(self):
+ """Stop container."""
+ raise NotImplementedError
+
+ def destroy(self):
+ """Destroy/remove container."""
+ raise NotImplementedError
+
+ def info(self):
+ """Info about container."""
+ raise NotImplementedError
+
+ def system_info(self):
+ """System info."""
+ raise NotImplementedError
+
+ def install_supervisor(self):
+ """Install supervisord inside a container."""
+ self.execute('sleep 3')
+ self.execute('apt-get update')
+ self.execute('apt-get install -y supervisor')
+ self.execute('echo "{0}" > {1}'
+ .format(
+ '[unix_http_server]\n'
+ 'file = /tmp/supervisor.sock\n\n'
+ '[rpcinterface:supervisor]\n'
+ 'supervisor.rpcinterface_factory = '
+ 'supervisor.rpcinterface:make_main_rpcinterface\n\n'
+ '[supervisorctl]\n'
+ 'serverurl = unix:///tmp/supervisor.sock\n\n'
+ '[supervisord]\n'
+ 'pidfile = /tmp/supervisord.pid\n'
+ 'identifier = supervisor\n'
+ 'directory = /tmp\n'
+ 'logfile=/tmp/supervisord.log\n'
+ 'loglevel=debug\n'
+ 'nodaemon=false\n\n',
+ SUPERVISOR_CONF))
+ self.execute('supervisord -c {0}'.format(SUPERVISOR_CONF))
+
+ def install_vpp(self, install_dkms=False):
+ """Install VPP inside a container.
+
+        :param install_dkms: Whether to install the dkms package. This
+            increases install time but is required for installing
+            vpp-dpdk-dkms. Default is False.
+ :type install_dkms: bool
+ """
+ self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
+ self.execute('apt-get update')
+ if install_dkms:
+ self.execute('apt-get install -y dkms && '
+ 'dpkg -i --force-all {0}/install_dir/*.deb'
+ .format(self.container.guest_dir))
+ else:
+ self.execute('for i in $(ls -I \"*dkms*\" {0}/install_dir/); '
+ 'do dpkg -i --force-all {0}/install_dir/$i; done'
+ .format(self.container.guest_dir))
+ self.execute('apt-get -f install -y')
+ self.execute('echo "{0}" >> {1}'
+ .format(
+ '[program:vpp]\n'
+ 'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
+ 'autorestart=false\n'
+ 'redirect_stderr=true\n'
+ 'priority=1',
+ SUPERVISOR_CONF))
+ self.execute('supervisorctl reload')
+
+ def restart_vpp(self):
+ """Restart VPP service inside a container."""
+ self.execute('supervisorctl restart vpp')
+
+ def create_vpp_startup_config(self,
+ config_filename='/etc/vpp/startup.conf'):
+ """Create base startup configuration of VPP on container.
+
+ :param config_filename: Startup configuration file name.
+ :type config_filename: str
+ """
+ cpuset_cpus = self.container.cpuset_cpus
+
+ # Create config instance
+ vpp_config = VppConfigGenerator()
+ vpp_config.set_node(self.container.node)
+ vpp_config.set_config_filename(config_filename)
+ vpp_config.add_unix_cli_listen()
+ vpp_config.add_unix_nodaemon()
+ vpp_config.add_unix_exec('/tmp/running.exec')
+ # We will pop first core from list to be main core
+ vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
+        # If this is not the only core in the list, the rest will be used
+        # as workers.
+ if cpuset_cpus:
+ corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+ vpp_config.add_cpu_corelist_workers(corelist_workers)
+ vpp_config.add_plugin_disable('dpdk_plugin.so')
+
+ self.execute('mkdir -p /etc/vpp/')
+ self.execute('echo "{c}" | tee {f}'
+ .format(c=vpp_config.get_config_str(),
+ f=vpp_config.get_config_filename()))
+
+ def create_vpp_exec_config(self, vat_template_file, **args):
+ """Create VPP exec configuration on container.
+
+ :param vat_template_file: File name of a VAT template script.
+ :param args: Parameters for VAT script.
+ :type vat_template_file: str
+ :type args: dict
+ """
+ vat_file_path = '{p}/{f}'.format(p=Constants.RESOURCES_TPL_VAT,
+ f=vat_template_file)
+
+ with open(vat_file_path, 'r') as template_file:
+ cmd_template = template_file.readlines()
+ for line_tmpl in cmd_template:
+ vat_cmd = line_tmpl.format(**args)
+ self.execute('echo "{c}" >> /tmp/running.exec'
+ .format(c=vat_cmd.replace('\n', '')))
+
+ def is_container_running(self):
+ """Check if container is running."""
+ raise NotImplementedError
+
+ def is_container_present(self):
+ """Check if container is present."""
+ raise NotImplementedError
+
+
+class LXC(ContainerEngine):
+ """LXC implementation."""
+
+ def __init__(self):
+ """Initialize LXC object."""
+ super(LXC, self).__init__()
+
+ def acquire(self, force=True):
+ """Acquire a privileged system object where configuration is stored and
+ where user information can be stored.
+
+ :param force: If a container exists, destroy it and create a new
+ container.
+ :type force: bool
+ :raises RuntimeError: If creating the container or writing the container
+ config fails.
+ """
+ if self.is_container_present():
+ if force:
+ self.destroy()
+ else:
+ return
+
+ image = self.container.image if self.container.image else\
+ "-d ubuntu -r xenial -a amd64"
+
+ cmd = 'lxc-create -t download --name {c.name} -- {image} '\
+ '--no-validate'.format(c=self.container, image=image)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to create container.')
+
+ if self.container.host_dir and self.container.guest_dir:
+ entry = 'lxc.mount.entry = '\
+ '{c.host_dir} /var/lib/lxc/{c.name}/rootfs{c.guest_dir} ' \
+ 'none bind,create=dir 0 0'.format(c=self.container)
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'"
+ .format(e=entry, c=self.container))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to write {c.name} config.'
+ .format(c=self.container))
+
+ def create(self):
+ """Create/deploy an application inside a container on system.
+
+ :raises RuntimeError: If creating the container fails.
+ """
+ cpuset_cpus = '{0}'.format(
+ ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
+ if self.container.cpuset_cpus else ''
+
+ cmd = 'lxc-start --name {c.name} --daemon'.format(c=self.container)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to start container {c.name}.'
+ .format(c=self.container))
+ self._lxc_wait('RUNNING')
+ self._lxc_cgroup(state_object='cpuset.cpus',
+ value=cpuset_cpus)
+
+ def execute(self, command):
+ """Start a process inside a running container. Runs the specified
+ command inside the container specified by name. The container has to
+ be running already.
+
+ :param command: Command to run inside container.
+ :type command: str
+ :raises RuntimeError: If running the command failed.
+ """
+ env = '--keep-env {0}'.format(
+ ' '.join('--set-var %s' % env for env in self.container.env))\
+ if self.container.env else ''
+
+ cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}'"\
+ .format(env=env, c=self.container, command=command)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to run command inside container '
+ '{c.name}.'.format(c=self.container))
+
+ def stop(self):
+ """Stop a container.
+
+ :raises RuntimeError: If stopping the container failed.
+ """
+ cmd = 'lxc-stop --name {c.name}'.format(c=self.container)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to stop container {c.name}.'
+ .format(c=self.container))
+ self._lxc_wait('STOPPED|FROZEN')
+
+ def destroy(self):
+ """Destroy a container.
+
+ :raises RuntimeError: If destroying container failed.
+ """
+ cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to destroy container {c.name}.'
+ .format(c=self.container))
+
+ def info(self):
+        """Query and show information about a container.
+
+ :raises RuntimeError: If getting info about a container failed.
+ """
+ cmd = 'lxc-info --name {c.name}'.format(c=self.container)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to get info about container {c.name}.'
+ .format(c=self.container))
+
+ def system_info(self):
+ """Check the current kernel for LXC support.
+
+ :raises RuntimeError: If checking LXC support failed.
+ """
+ cmd = 'lxc-checkconfig'
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to check LXC support.')
+
+ def is_container_running(self):
+ """Check if container is running on node.
+
+ :returns: True if container is running.
+ :rtype: bool
+ :raises RuntimeError: If getting info about a container failed.
+ """
+ cmd = 'lxc-info --no-humanize --state --name {c.name}'\
+ .format(c=self.container)
+
+ ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to get info about container {c.name}.'
+ .format(c=self.container))
+ return True if 'RUNNING' in stdout else False
+
+ def is_container_present(self):
+        """Check if container exists on node.
+
+ :returns: True if container is present.
+ :rtype: bool
+ :raises RuntimeError: If getting info about a container failed.
+ """
+ cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ return False if int(ret) else True
+
+ def _lxc_wait(self, state):
+ """Wait for a specific container state.
+
+ :param state: Specify the container state(s) to wait for.
+ :type state: str
+ :raises RuntimeError: If waiting for state of a container failed.
+ """
+ cmd = 'lxc-wait --name {c.name} --state "{s}"'\
+ .format(c=self.container, s=state)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to wait for state "{s}" of container '
+ '{c.name}.'.format(s=state, c=self.container))
+
+ def _lxc_cgroup(self, state_object, value=''):
+ """Manage the control group associated with a container.
+
+ :param state_object: Specify the state object name.
+        :param value: Specify the value to assign to the state object. If
+            empty, the action is GET; otherwise the action is SET.
+ :type state_object: str
+ :type value: str
+ :raises RuntimeError: If getting/setting state of a container failed.
+ """
+ cmd = 'lxc-cgroup --name {c.name} {s} {v}'\
+ .format(c=self.container, s=state_object, v=value)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'cgset --copy-from / lxc')
+ if int(ret) != 0:
+ raise RuntimeError('Failed to copy cgroup settings from root.')
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ if value:
+ raise RuntimeError('Failed to set {s} of container {c.name}.'
+ .format(s=state_object, c=self.container))
+ else:
+ raise RuntimeError('Failed to get {s} of container {c.name}.'
+ .format(s=state_object, c=self.container))
+
+
+class Docker(ContainerEngine):
+ """Docker implementation."""
+
+ def __init__(self):
+ """Initialize Docker object."""
+ super(Docker, self).__init__()
+
+ def acquire(self, force=True):
+ """Pull an image or a repository from a registry.
+
+        :param force: Destroy the container if it exists.
+ :type force: bool
+ :raises RuntimeError: If pulling a container failed.
+ """
+ if self.is_container_present():
+ if force:
+ self.destroy()
+ else:
+ return
+
+ cmd = 'docker pull {c.image}'.format(c=self.container)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to create container {c.name}.'
+ .format(c=self.container))
+
+ def create(self):
+ """Create/deploy container.
+
+ :raises RuntimeError: If creating a container failed.
+ """
+ cpuset_cpus = '--cpuset-cpus={0}'.format(
+ ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
+ if self.container.cpuset_cpus else ''
+
+ cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
+ if self.container.cpuset_mems is not None else ''
+
+ env = '{0}'.format(
+ ' '.join('--env %s' % env for env in self.container.env))\
+ if self.container.env else ''
+
+ command = '{0}'.format(self.container.command)\
+ if self.container.command else ''
+
+ publish = '{0}'.format(
+ ' '.join('--publish %s' % var for var in self.container.publish))\
+ if self.container.publish else ''
+
+ volume = '--volume {c.host_dir}:{c.guest_dir}'.format(c=self.container)\
+ if self.container.host_dir and self.container.guest_dir else ''
+
+ cmd = 'docker run '\
+ '--privileged --detach --interactive --tty --rm '\
+ '--cgroup-parent lxc {cpuset_cpus} {cpuset_mems} {publish} '\
+ '{env} {volume} --name {container.name} {container.image} '\
+ '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
+ container=self.container, command=command,
+ env=env, publish=publish, volume=volume)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to create container {c.name}'
+ .format(c=self.container))
+
+ self.info()
+
+ def execute(self, command):
+ """Start a process inside a running container. Runs the specified
+ command inside the container specified by name. The container has to
+ be running already.
+
+ :param command: Command to run inside container.
+ :type command: str
+        :raises RuntimeError: If running the command in a container failed.
+ """
+ cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}'"\
+ .format(c=self.container, command=command)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to execute command in container '
+ '{c.name}.'.format(c=self.container))
+
+ def stop(self):
+ """Stop running container.
+
+ :raises RuntimeError: If stopping a container failed.
+ """
+ cmd = 'docker stop {c.name}'.format(c=self.container)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to stop container {c.name}.'
+ .format(c=self.container))
+
+ def destroy(self):
+ """Remove a container.
+
+ :raises RuntimeError: If removing a container failed.
+ """
+ cmd = 'docker rm --force {c.name}'.format(c=self.container)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to destroy container {c.name}.'
+ .format(c=self.container))
+
+ def info(self):
+ """Return low-level information on Docker objects.
+
+ :raises RuntimeError: If getting info about a container failed.
+ """
+ cmd = 'docker inspect {c.name}'.format(c=self.container)
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to get info about container {c.name}.'
+ .format(c=self.container))
+
+ def system_info(self):
+        """Display Docker system-wide information.
+
+ :raises RuntimeError: If displaying system information failed.
+ """
+ cmd = 'docker system info'
+
+ ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to get system info.')
+
+ def is_container_present(self):
+ """Check if container is present on node.
+
+ :returns: True if container is present.
+ :rtype: bool
+ :raises RuntimeError: If getting info about a container failed.
+ """
+ cmd = 'docker ps --all --quiet --filter name={c.name}'\
+ .format(c=self.container)
+
+ ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to get info about container {c.name}.'
+ .format(c=self.container))
+ return True if stdout else False
+
+ def is_container_running(self):
+ """Check if container is running on node.
+
+ :returns: True if container is running.
+ :rtype: bool
+ :raises RuntimeError: If getting info about a container failed.
+ """
+ cmd = 'docker ps --quiet --filter name={c.name}'\
+ .format(c=self.container)
+
+ ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
+ if int(ret) != 0:
+ raise RuntimeError('Failed to get info about container {c.name}.'
+ .format(c=self.container))
+ return True if stdout else False
+
+
+class Container(object):
+ """Container class."""
+
+ def __init__(self):
+ """Initialize Container object."""
+ pass
+
+ def __getattr__(self, attr):
+ try:
+ return self.__dict__[attr]
+ except KeyError:
+ return None
+
+ def __setattr__(self, attr, value):
+ try:
+ # Check if attribute exists
+ self.__dict__[attr]
+ except KeyError:
+ # Creating new attribute
+ if attr == 'node':
+ self.__dict__['ssh'] = SSH()
+ self.__dict__['ssh'].connect(value)
+ self.__dict__[attr] = value
+ else:
+                # Update the attribute based on its type
+ if isinstance(self.__dict__[attr], list):
+ self.__dict__[attr].append(value)
+ else:
+ self.__dict__[attr] = value
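For orientation, a minimal Python sketch of how the new ContainerManager API could be driven, mirroring the keyword flow added in container.robot and performance_setup.robot further below. The topology node and every parameter value here are illustrative assumptions, not values taken from this change.

# Sketch only: construct two VNF containers on one DUT, then run the same
# lifecycle the suite setup keyword drives (acquire, create, configure,
# install). All parameter values are illustrative assumptions.
from resources.libraries.python.ContainerUtils import ContainerManager

node = topology_nodes['DUT1']                   # assumed topology node dict
manager = ContainerManager(engine='LXC')        # or engine='Docker'
manager.construct_containers(
    name='DUT1_VNF', count=2, node=node,
    host_dir='/tmp', guest_dir='/mnt/host',
    cpu_count=2, cpu_skip=6, smt_used=False,
    cpuset_mems=0, cpu_shared=False)
manager.acquire_all_containers()
manager.create_all_containers()
manager.configure_vpp_in_all_containers('memif_create_lxc.vat')
manager.install_vpp_in_all_containers()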
diff --git a/resources/libraries/python/VppConfigGenerator.py b/resources/libraries/python/VppConfigGenerator.py
index dac8bae6bb..7bd0175b3d 100644
--- a/resources/libraries/python/VppConfigGenerator.py
+++ b/resources/libraries/python/VppConfigGenerator.py
@@ -61,6 +61,23 @@ class VppConfigGenerator(object):
"""
self._vpp_config_filename = filename
+ def get_config_filename(self):
+ """Get startup configuration filename.
+
+ :returns: Startup configuration filename.
+ :rtype: str
+ """
+ return self._vpp_config_filename
+
+ def get_config_str(self):
+ """Get dumped startup configuration in VPP config format.
+
+ :returns: Startup configuration in VPP config format.
+ :rtype: str
+ """
+ self.dump_config(self._nodeconfig)
+ return self._vpp_config
+
def add_config_item(self, config, value, path):
"""Add startup configuration item.
@@ -349,61 +366,3 @@ class VppConfigGenerator(object):
else:
raise RuntimeError('VPP failed to restart on node {}'.
format(self._hostname))
-
- def apply_config_lxc(self, lxc_name, waittime=5, retries=12):
- """Generate and apply VPP configuration for node in a container.
-
- Use data from calls to this class to form a startup.conf file and
- replace /etc/vpp/startup.conf with it on node inside a container.
-
- :param lxc_name: LXC container name.
- :param waittime: Time to wait for VPP to restart (default 5 seconds).
- :param retries: Number of times (default 12) to re-try waiting.
- :type lxc_name: str
- :type waittime: int
- :type retries: int
- :raises RuntimeError: If writing config file failed, or restarting of
- VPP failed.
- """
- self.dump_config(self._nodeconfig)
-
- ssh = SSH()
- ssh.connect(self._node)
-
- # We're using this "| sudo tee" construct because redirecting
- # a sudo's output ("sudo echo xxx > /path/to/file") does not
- # work on most platforms...
- (ret, _, _) = \
- ssh.exec_command_lxc('echo "{0}" | sudo tee {1}'.
- format(self._vpp_config,
- self._vpp_config_filename), lxc_name)
-
- if ret != 0:
- raise RuntimeError('Writing config file failed in {0} to node {1}'.
- format(lxc_name, self._hostname))
-
- # Instead of restarting, we'll do separate start and stop
- # actions. This way we don't care whether VPP was running
- # to begin with.
- ssh.exec_command_lxc('service {0} stop'.
- format(self._vpp_service_name), lxc_name)
- (ret, _, _) = \
- ssh.exec_command_lxc('service {0} start'.
- format(self._vpp_service_name), lxc_name)
- if ret != 0:
- raise RuntimeError('Restarting VPP failed in {0} on node {1}'.
- format(lxc_name, self._hostname))
-
- # Sleep <waittime> seconds, up to <retry> times,
- # and verify if VPP is running.
- for _ in range(retries):
- time.sleep(waittime)
- (ret, _, _) = \
- ssh.exec_command_lxc('echo show hardware-interfaces | '
- 'nc 0 5002 || echo "VPP not yet running"',
- lxc_name)
- if ret == 0:
- break
- else:
- raise RuntimeError('VPP failed to restart in {0} on node {1}'.
- format(lxc_name, self._hostname))
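The two new getters replace the removed apply_config_lxc(): the caller now renders the startup configuration and decides how to write it, which is what ContainerEngine.create_vpp_startup_config() above does. A minimal sketch, again assuming a topology node dict is available:

# Sketch only: render a container startup config with the new getters.
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator

node = topology_nodes['DUT1']                      # assumed topology node dict
vpp_config = VppConfigGenerator()
vpp_config.set_node(node)
vpp_config.set_config_filename('/etc/vpp/startup.conf')
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
startup_text = vpp_config.get_config_str()         # dumped config text
target_file = vpp_config.get_config_filename()     # '/etc/vpp/startup.conf'
# The container engine then writes it inside the container, e.g.:
# engine.execute('echo "{c}" | tee {f}'.format(c=startup_text, f=target_file))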
diff --git a/resources/libraries/robot/performance/performance_setup.robot b/resources/libraries/robot/performance/performance_setup.robot
index 9f6d5aeac8..73643dac68 100644
--- a/resources/libraries/robot/performance/performance_setup.robot
+++ b/resources/libraries/robot/performance/performance_setup.robot
@@ -372,6 +372,17 @@
| | Configure crypto device on all DUTs | force_init=${True}
| | Configure kernel module on all DUTs | igb_uio | force_load=${True}
+| Set up performance topology with containers
+| | [Documentation]
+| | ... | Suite preparation phase that starts containers.
+| | ...
+| | Set Suite Variable | @{container_groups} | @{EMPTY}
+| | Construct VNF containers on all DUTs
+| | Acquire all 'VNF' containers
+| | Create all 'VNF' containers
+| | Configure VPP in all 'VNF' containers
+| | Install VPP in all 'VNF' containers
+
# Suite teardowns
| Tear down 3-node performance topology
@@ -380,12 +391,14 @@
| | ...
| | Teardown traffic generator | ${tg}
-| Tear down 3-node performance topology with LXC
+| Tear down 3-node performance topology with container
| | [Documentation]
-| | ... | Suite teardown phase with traffic generator teardown and LXC destroy.
+| | ... | Suite teardown phase with traffic generator teardown and container
+| | ... | destruction.
| | ...
| | Teardown traffic generator | ${tg}
-| | Destroy '${lxc_count}' LXC containers on all DUT nodes
+| | :FOR | ${group} | IN | @{container_groups}
+| | | Destroy all '${group}' containers
| Tear down 2-node performance topology
| | [Documentation]
@@ -393,12 +406,14 @@
| | ...
| | Teardown traffic generator | ${tg}
-| Tear down 2-node performance topology with LXC
+| Tear down 2-node performance topology with container
| | [Documentation]
-| | ... | Suite teardown phase with traffic generator teardown and LXC destroy.
+| | ... | Suite teardown phase with traffic generator teardown and container
+| | ... | destruction.
| | ...
| | Teardown traffic generator | ${tg}
-| | Destroy '${lxc_count}' LXC containers on all DUT nodes
+| | :FOR | ${group} | IN | @{container_groups}
+| | | Destroy all '${group}' containers
# Tests setups
diff --git a/resources/libraries/robot/performance/performance_utils.robot b/resources/libraries/robot/performance/performance_utils.robot
index e045075686..884bc4831e 100644
--- a/resources/libraries/robot/performance/performance_utils.robot
+++ b/resources/libraries/robot/performance/performance_utils.robot
@@ -23,7 +23,7 @@
| Resource | resources/libraries/robot/shared/default.robot
| Resource | resources/libraries/robot/shared/interfaces.robot
| Resource | resources/libraries/robot/shared/counters.robot
-| Resource | resources/libraries/robot/shared/lxc.robot
+| Resource | resources/libraries/robot/shared/container.robot
| Resource | resources/libraries/robot/shared/memif.robot
| Resource | resources/libraries/robot/l2/l2_bridge_domain.robot
| Resource | resources/libraries/robot/l2/l2_xconnect.robot
diff --git a/resources/libraries/robot/shared/container.robot b/resources/libraries/robot/shared/container.robot
new file mode 100644
index 0000000000..c8c940ba45
--- /dev/null
+++ b/resources/libraries/robot/shared/container.robot
@@ -0,0 +1,121 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Documentation | Keywords related to Linux containers.
+| Library | resources.libraries.python.CpuUtils
+| Library | resources.libraries.python.topology.Topology
+
+*** Keywords ***
+| Construct VNF containers on all DUTs
+| | [Documentation] | Construct 1..N VNF container(s) of a specific technology
+| | ... | on all DUT nodes.
+| | ...
+| | ${group}= | Set Variable | VNF
+| | ${guest_dir}= | Set Variable | /mnt/host
+| | ${host_dir}= | Set Variable | /tmp
+| | ${skip_cpus}= | Evaluate | ${vpp_cpus}+${system_cpus}
+| | Import Library | resources.libraries.python.ContainerUtils.ContainerManager
+| | ... | engine=${container_engine} | WITH NAME | ${group}
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | ${env}= | Create List | LC_ALL="en_US.UTF-8"
+| | | ... | DEBIAN_FRONTEND=noninteractive | ETCDV3_ENDPOINTS=172.17.0.1:2379
+| | | ${cpu_node}= | Get interfaces numa node | ${nodes['${dut}']}
+| | | ... | ${dut1_if1} | ${dut1_if2}
+| | | Run Keyword | ${group}.Construct containers
+| | | ... | name=${dut}_${group} | node=${nodes['${dut}']}
+| | | ... | host_dir=${host_dir} | guest_dir=${guest_dir}
+| | | ... | image=${container_image} | cpu_count=${container_cpus}
+| | | ... | cpu_skip=${skip_cpus} | smt_used=${False} | cpuset_mems=${cpu_node}
+| | | ... | cpu_shared=${False} | env=${env} | count=${container_count}
+| | Append To List | ${container_groups} | ${group}
+
+| Construct ETCD containers on all DUTs
+| | [Documentation] | Construct Docker ETCD container on all DUTs.
+| | ...
+| | ${group}= | Set Variable | ETCD
+| | ${command}= | Set Variable
+| | ... | /usr/local/bin/etcd -advertise-client-urls http://0.0.0.0:2379 -listen-client-urls http://0.0.0.0:2379
+| | ${host_dir}= | Set Variable | /tmp
+| | ${image}= | Set Variable | quay.io/coreos/etcd:v3.2.5
+| | ${publish}= | Create List | 2379:2379
+| | Import Library | resources.libraries.python.ContainerUtils.ContainerManager
+| | ... | engine=Docker | WITH NAME | ${group}
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | ${cpu_node}= | Get interfaces numa node | ${nodes['${dut}']}
+| | | ... | ${dut1_if1} | ${dut1_if2}
+| | | Run Keyword | ${group}.Construct container
+| | | ... | name=${dut}_${group} | node=${nodes['${dut}']}
+| | | ... | image=${image} | cpu_count=${1} | cpu_skip=${0}
+| | | ... | smt_used=${False} | cpuset_mems=${cpu_node} | cpu_shared=${True}
+| | | ... | publish=${publish} | command=${command}
+| | Append To List | ${container_groups} | ${group}
+
+| Construct Kafka containers on all DUTs
+| | [Documentation] | Construct Docker Kafka container on all DUTs.
+| | ...
+| | ${group}= | Set Variable | Kafka
+| | ${image}= | Set Variable | spotify/kafka
+| | ${publish}= | Create List | 2181:2181 | 9092:9092
+| | Import Library | resources.libraries.python.ContainerUtils.ContainerManager
+| | ... | engine=Docker | WITH NAME | ${group}
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | ${env}= | Create List | ADVERTISED_HOST=172.17.0.1 | ADVERTISED_PORT=9092
+| | | ${cpu_node}= | Get interfaces numa node | ${nodes['${dut}']}
+| | | ... | ${dut1_if1} | ${dut1_if2}
+| | | Run Keyword | ${group}.Construct container
+| | | ... | name=${dut}_${group} | node=${nodes['${dut}']} | image=${image}
+| | | ... | cpu_count=${1} | cpu_skip=${0} | cpuset_mems=${cpu_node}
+| | | ... | smt_used=${False} | cpu_shared=${True} | publish=${publish}
+| | | ... | env=${env}
+| | Append To List | ${container_groups} | ${group}
+
+| Acquire all '${group}' containers
+| | [Documentation] | Acquire all container(s) in a specific container group
+| | ... | on all DUT nodes.
+| | ...
+| | Run Keyword | ${group}.Acquire all containers
+
+| Create all '${group}' containers
+| | [Documentation] | Create/deploy all container(s) in a specific container
+| | ... | group on all DUT nodes.
+| | ...
+| | Run Keyword | ${group}.Create all containers
+
+| Install VPP in all '${group}' containers
+| | [Documentation] | Install VPP in all container(s) of a specific container
+| | ... | group on all DUT nodes.
+| | ...
+| | Run Keyword | ${group}.Install VPP In All Containers
+
+| Configure VPP in all '${group}' containers
+| | [Documentation] | Configure VPP in all container(s) of a specific
+| | ... | container group on all DUT nodes.
+| | ...
+| | Run Keyword | ${group}.Configure VPP In All Containers
+| | ... | memif_create_lxc.vat
+
+| Stop all '${group}' containers
+| | [Documentation] | Stop all container(s) in a specific container group on
+| | ... | all DUT nodes.
+| | ...
+| | Run Keyword | ${group}.Stop all containers
+
+| Destroy all '${group}' containers
+| | [Documentation] | Destroy all container(s) in a specific container group
+| | ... | on all DUT nodes.
+| | ...
+| | Run Keyword | ${group}.Destroy all containers
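Each group-scoped keyword above dispatches to a ContainerManager instance imported under the group name (WITH NAME ${group}) and recorded in ${container_groups}. Roughly the equivalent in Python, with the engine choices below being illustrative assumptions:

# Sketch only: one ContainerManager per group, iterated on suite teardown.
from collections import OrderedDict
from resources.libraries.python.ContainerUtils import ContainerManager

container_groups = OrderedDict()
container_groups['VNF'] = ContainerManager(engine='LXC')      # assumed engine
container_groups['ETCD'] = ContainerManager(engine='Docker')

# "Destroy all '${group}' containers", as looped by the teardown keywords:
for group_name, manager in container_groups.items():
    manager.destroy_all_containers()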
diff --git a/resources/libraries/robot/shared/lxc.robot b/resources/libraries/robot/shared/lxc.robot
deleted file mode 100644
index 52d81dd3e4..0000000000
--- a/resources/libraries/robot/shared/lxc.robot
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-*** Settings ***
-| Documentation | Keywords related to linux container (LXC)
-| Library | resources.libraries.python.LXCUtils
-| Library | resources.libraries.python.CpuUtils
-| Library | resources.libraries.python.topology.Topology
-
-*** Keywords ***
-| Create LXC container on DUT node
-| | [Documentation] | Setup lxc container on DUT node.
-| | ...
-| | ... | *Arguments:*
-| | ...
-| | ... | - dut_node - DUT node. Type: dictionary
-| | ... | - lxc_name - Name of LXC container. Type: dictionary
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create LXC container on DUT node \| ${nodes['DUT1']} \
-| | ... | \| DUT1_slave_1 \|
-| | ...
-| | [Arguments] | ${dut_node} | ${lxc_name}
-| | ...
-| | Import Library | resources.libraries.python.LXCUtils
-| | ... | container_name=${lxc_name} | WITH NAME | ${lxc_name}
-| | Run keyword | ${lxc_name}.Set node | ${dut_node}
-| | Run keyword | ${lxc_name}.Create container | force_create=${TRUE}
-| | Run keyword | ${lxc_name}.Mount host dir in container
-
-| Create LXC container on DUT node with cpuset
-| | [Documentation] | Create LXC container on DUT node with cpuset.
-| | ...
-| | ... | *Arguments:*
-| | ...
-| | ... | - dut_node - DUT node. Type: dictionary
-| | ... | - lxc_name - Name of LXC container. Type: dictionary
-| | ... | - skip - number of cpus which will be skipped. Type: integer
-| | ... | - count - number of cpus which will be allocated for lxc. Type:
-| | ... | integer
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create LXC container on DUT node with cpuset \
-| | ... | \| ${nodes['DUT1']} \| DUT1_slave_1 \| 6 \| 1 \|
-| | ...
-| | [Arguments] | ${dut_node} | ${lxc_name} | ${skip}=${6} | ${count}=${1}
-| | ...
-| | Import Library | resources.libraries.python.LXCUtils
-| | ... | container_name=${lxc_name} | WITH NAME | ${lxc_name}
-| | ${dut_numa}= | Get interfaces numa node | ${dut_node}
-| | ... | ${dut1_if1} | ${dut1_if2}
-| | ${lxc_cpus}= | CPU list per node str | ${dut_node} | ${dut_numa}
-| | ... | skip_cnt=${skip} | cpu_cnt=${count} | smt_used=${False}
-| | Set Suite Variable | ${lxc_cpus}
-| | Run keyword | ${lxc_name}.Set node | ${dut_node}
-| | Run keyword | ${lxc_name}.Create container | force_create=${TRUE}
-| | Run keyword | ${lxc_name}.Mount host dir in container
-| | Run keyword | ${lxc_name}.Container cpuset cpus | 0,${lxc_cpus}
-
-| Create '${nr}' LXC containers on '${dut}' node
-| | [Documentation] | Create and start multiple lxc containers on DUT node.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create 5 LXC containers on DUT1 node \|
-| | ...
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
-| | | Create LXC container on DUT node | ${nodes['${dut}']}
-| | | ... | ${dut}_${lxc_base_name}_${number}
-
-| Create '${nr}' LXC containers on all DUT nodes
-| | [Documentation] | Create and start multiple LXC containers on all DUT nodes.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create 5 LXC containers on all DUT nodes \|
-| | ...
-| | ${duts}= | Get Matches | ${nodes} | DUT*
-| | :FOR | ${dut} | IN | @{duts}
-| | | Create '${nr}' LXC containers on '${dut}' node
-
-| Create '${nr}' LXC containers on '${dut}' node with '${count}' cpus
-| | [Documentation] | Create and start multiple LXC containers on DUT node.
-| | ... | Set the cpuset.cpus cgroup profile for pin of cpus.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create 5 LXC containers on DUT1 node with 2 cpus \|
-| | ...
-| | ${skip_cpus}= | Evaluate | ${vpp_cpus}+${system_cpus}
-| | ${count_int}= | Convert To Integer | ${count}
-| | ${duts}= | Get Matches | ${nodes} | DUT*
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
-| | | ${skip}= | Evaluate | ${skip_cpus} + (${nr} - 1) * ${count}
-| | | Create LXC container on DUT node with cpuset | ${nodes['${dut}']}
-| | | ... | ${dut}_${lxc_base_name}_${number} | ${skip} | ${count_int}
-
-| Create '${nr}' LXC containers on all DUT nodes with '${count}' cpus
-| | [Documentation] | Create and start multiple LXC containers on all DUT nodes.
-| | ... | Set the cpuset.cpus cgroup profile for pin of cpus.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create 5 LXC containers on all DUT nodes with 2 cpus \|
-| | ...
-| | ${duts}= | Get Matches | ${nodes} | DUT*
-| | :FOR | ${dut} | IN | @{duts}
-| | | Create '${nr}' LXC containers on '${dut}' node with '${count}' cpus
-
-| Destroy LXC container on DUT node
-| | [Documentation] | Stop and destroy LXC container on DUT node.
-| | ...
-| | ... | *Arguments:*
-| | ...
-| | ... | - dut_node - DUT node. Type: dictionary
-| | ... | - lxc_name - Name of LXC container. Type: dictionary
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Destroy LXC container on DUT node \| ${nodes['DUT1']} \
-| | ... | \| DUT1_slave_1 \|
-| | ...
-| | [Arguments] | ${dut_node} | ${lxc_name}
-| | ...
-| | Import Library | resources.libraries.python.LXCUtils
-| | ... | container_name=${lxc_name} | WITH NAME | ${lxc_name}
-| | Run keyword | ${lxc_name}.Set node | ${dut_node}
-| | Run keyword | ${lxc_name}.Destroy container
-
-| Destroy '${nr}' LXC containers on '${dut}' node
-| | [Documentation] | Stop and destroy multiple LXC containers on DUT node.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Destroy 5 LXC containers on DUT1 node \|
-| | ...
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
-| | | Destroy LXC container on DUT node | ${nodes['${dut}']}
-| | | ... | ${dut}_${lxc_base_name}_${number}
-
-| Destroy '${nr}' LXC containers on all DUT nodes
-| | [Documentation] | Stop and destroy multiple LXC containers on all DUT nodes.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Destroy 5 LXC containers on all DUT nodes \|
-| | ...
-| | ${duts}= | Get Matches | ${nodes} | DUT*
-| | :FOR | ${dut} | IN | @{duts}
-| | | Destroy '${nr}' LXC containers on '${dut}' node
-
-| Install VPP on LXC container on DUT node
-| | [Documentation] | Install vpp on LXC container on DUT node.
-| | ...
-| | ... | *Arguments:*
-| | ...
-| | ... | - dut_node - DUT node. Type: dictionary
-| | ... | - lxc_name - Name of LXC container. Type: dictionary
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Install VPP on LXC container on DUT node \| ${nodes['DUT1']} \
-| | ... | \| DUT1_slave_1 \|
-| | ...
-| | [Arguments] | ${dut_node} | ${lxc_name}
-| | ...
-| | Import Library | resources.libraries.python.LXCUtils
-| | ... | container_name=${lxc_name} | WITH NAME | ${lxc_name}
-| | Run keyword | ${lxc_name}.Set node | ${dut_node}
-| | Run keyword | ${lxc_name}.Install VPP in container
-
-| Install VPP on '${nr}' LXC containers on '${dut}' node
-| | [Documentation] | Install VPP on multiple LXC containers on DUT node.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Install VPP on 5 LXC containers on DUT1 node \|
-| | ...
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
-| | | Install VPP on LXC container on DUT node | ${nodes['${dut}']}
-| | | ... | ${dut}_${lxc_base_name}_${number}
-
-| Install VPP on '${nr}' LXC containers on all DUT nodes
-| | [Documentation] | Install VPP on multiple LXC containers on all DUT nodes.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Install VPP on 5 LXC containers on all DUT nodes \|
-| | ...
-| | ${duts}= | Get Matches | ${nodes} | DUT*
-| | :FOR | ${dut} | IN | @{duts}
-| | | Install VPP on '${nr}' LXC containers on '${dut}' node
-
-| Create startup configuration of VPP on LXC container on DUT node
-| | [Documentation] | Create base startup configuration of VPP on LXC container
-| | ... | on DUT node.
-| | ...
-| | ... | *Arguments:*
-| | ...
-| | ... | - dut_node - DUT node. Type: dictionary
-| | ... | - lxc_name - Name of LXC container. Type: dictionary
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create startup configuration of VPP on LXC container on DUT node \
-| | ... | \| ${nodes['DUT1']} \| DUT1_slave_1 \|
-| | ...
-| | [Arguments] | ${dut_node} | ${lxc_name}
-| | ...
-| | Import Library | resources.libraries.python.VppConfigGenerator
-| | ... | WITH NAME | ${lxc_name}_conf
-| | Run keyword | ${lxc_name}_conf.Set node | ${dut_node}
-| | Run keyword | ${lxc_name}_conf.Add unix CLI listen
-| | Run keyword | ${lxc_name}_conf.Add unix nodaemon
-| | Run keyword | ${lxc_name}_conf.Add unix exec | "/tmp/running.exec"
-| | Run keyword | ${lxc_name}_conf.Add CPU main core | "0"
-| | Run keyword | ${lxc_name}_conf.Add CPU corelist workers | ${lxc_cpus}
-| | Run Keyword | ${lxc_name}_conf.Apply config LXC | ${lxc_name}
-
-| Create startup configuration of VPP on '${nr}' LXC containers on '${dut}' node
-| | [Documentation] | Create base startup configuration of VPP on multiple LXC
-| | ... | container on DUT node.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create startup configuration of VPP on 1 LXC containers on DUT1 \
-| | ... | node \|
-| | ...
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
-| | | Create startup configuration of VPP on LXC container on DUT node
-| | | ... | ${nodes['${dut}']} | ${dut}_${lxc_base_name}_${number}
-
-| Create startup configuration of VPP on '${nr}' LXC containers on all DUT nodes
-| | [Documentation] | Create base startup configuration of VPP on multiple LXC
-| | ... | container on all DUT nodes.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create startup configuration of VPP on 1 LXC containers on all \
-| | ... | DUT nodes \|
-| | ...
-| | ${duts}= | Get Matches | ${nodes} | DUT*
-| | :FOR | ${dut} | IN | @{duts}
-| | | Create startup configuration of VPP on '${nr}' LXC containers on '${dut}' node
diff --git a/resources/libraries/robot/shared/memif.robot b/resources/libraries/robot/shared/memif.robot
index 71909af490..a4857feccf 100644
--- a/resources/libraries/robot/shared/memif.robot
+++ b/resources/libraries/robot/shared/memif.robot
@@ -51,29 +51,3 @@
| | Set Interface State | ${dut_node} | ${memif_2} | up
| | Set Test Variable | ${${memif_if1}} | ${memif_1}
| | Set Test Variable | ${${memif_if2}} | ${memif_2}
-
-| Create memif VPP configuration on '${nr}' LXC containers on '${dut}' node
-| | [Documentation] | Create memif configuration of VPP on multiple LXC
-| | ... | container on DUT node.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create memif VPP configuration on 1 LXC containers on DUT1 node \|
-| | ...
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
-| | | Run Keyword | ${dut}_${lxc_base_name}_${number}.Create VPP cfg in container
-| | | ... | memif_create_lxc.vat | socket1=memif-${dut}_VNF${number}-1
-| | | ... | socket2=memif-${dut}_VNF${number}-2
-
-| Create memif VPP configuration on '${nr}' LXC containers on all DUT nodes
-| | [Documentation] | Create memif configuration of VPP on multiple LXC
-| | ... | container on all DUT nodes.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Create memif VPP configuration on 1 LXC containers on all \
-| | ... | DUT nodes \|
-| | ...
-| | ${duts}= | Get Matches | ${nodes} | DUT*
-| | :FOR | ${dut} | IN | @{duts}
-| | | Create memif VPP configuration on '${nr}' LXC containers on '${dut}' node
diff --git a/resources/templates/vat/memif_create_lxc.vat b/resources/templates/vat/memif_create_lxc.vat
index 5bc77d8ff3..92e456c2a4 100644
--- a/resources/templates/vat/memif_create_lxc.vat
+++ b/resources/templates/vat/memif_create_lxc.vat
@@ -1,8 +1,8 @@
-create memif id 1 socket /mnt/host/{socket1} slave
-set int state memif0/1 up
-
-create memif id 2 socket /mnt/host/{socket2} slave
-set int state memif1/2 up
-
-set interface l2 xconnect memif1/2 memif0/1
-set interface l2 xconnect memif0/1 memif1/2
+create memif id {memif_id1} socket /mnt/host/{socket1} slave
+set int state memif0/{memif_id1} up
+
+create memif id {memif_id2} socket /mnt/host/{socket2} slave
+set int state memif1/{memif_id2} up
+
+set interface l2 xconnect memif1/{memif_id2} memif0/{memif_id1}
+set interface l2 xconnect memif0/{memif_id1} memif1/{memif_id2}
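The template is now rendered line by line through str.format() in ContainerEngine.create_vpp_exec_config(); the id and socket values below are illustrative only:

# Sketch only: how one template line is substituted for the first container.
template_line = 'create memif id {memif_id1} socket /mnt/host/{socket1} slave'
print(template_line.format(memif_id1=1, socket1='memif-DUT1_VNF1-1'))
# -> create memif id 1 socket /mnt/host/memif-DUT1_VNF1-1 slave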