author     Peter Mikus <pmikus@cisco.com>    2018-08-07 08:17:12 +0000
committer  Peter Mikus <pmikus@cisco.com>    2018-08-20 13:59:33 +0000
commit     92d4e47bfbca31e10c44dee7f74da4c6fd9e6e4c (patch)
tree       0587db0aba8b73db4cbb944e02b7369fe784f0c1 /resources
parent     5353acda23c0884c74ef9ba3e2ec81dd13190e95 (diff)
Refactor VHOST code
CSIT-1164 Create VM vhost tests for 2-node topology
CSIT-1173 Implement VM vhost KWs for 2-node topology

- Cleanup QemuUtils.py Library (PEP8, some TODOs, readability, multi queue,
  ring size, qemu version detection)
- Cleanup VHOST KWs perf (Unify running QEMU on N-node topology)
- Add ability to configure Queues, Hyperthreading, automatically compute
  number of CPU for VM/Testpmd and RXQ for Testpmd workload.
- Partial cleanup of configuration KWs (l2xc, l2bd) for 2-node
- Create IPv4 2-node KW

Change-Id: I8aae1355bafa651de715cd438fe706c443ea1d88
Signed-off-by: Peter Mikus <pmikus@cisco.com>
Diffstat (limited to 'resources')
-rw-r--r--  resources/libraries/bash/config/config                                    |   4
-rw-r--r--  resources/libraries/bash/config/defaults                                  |   4
-rw-r--r--  resources/libraries/python/DUTSetup.py                                    | 166
-rw-r--r--  resources/libraries/python/DpdkUtil.py                                    |   6
-rw-r--r--  resources/libraries/python/QemuUtils.py                                   | 530
-rw-r--r--  resources/libraries/python/constants.py                                   |   4
-rw-r--r--  resources/libraries/robot/performance/performance_configuration.robot     | 945
-rw-r--r--  resources/libraries/robot/shared/default.robot                            |   2
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu_x86_64.yaml  |   8
9 files changed, 800 insertions, 869 deletions
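
The refactor replaces the scattered per-instance attributes (qemu_path, qmp_sock, pid_file, ...) with a single _qemu_opt dict and adds virtio queue and QEMU version helpers. A minimal sketch of how the reworked QemuUtils API is meant to be driven, assuming a topology node dict is at hand; the CPU counts, queue values and socket paths below are illustrative placeholders, not values taken from this patch:

    from resources.libraries.python.QemuUtils import QemuUtils

    qemu = QemuUtils(qemu_id=1)
    qemu.qemu_set_node(dut_node)       # topology node dict (placeholder)
    qemu.qemu_set_smp(3, 3, 1, 1)      # 3 vCPUs: 3 cores, 1 thread, 1 socket
    qemu.qemu_set_queue_count(2)       # virtio multi-queue (new in this patch)
    qemu.qemu_set_queue_size(1024)     # virtio RX/TX ring size (new in this patch)
    # MACs default to the 52:54:00:00:<qemu_id>:<vhost_id> template
    qemu.qemu_add_vhost_user_if('/tmp/sock-1-1')
    qemu.qemu_add_vhost_user_if('/tmp/sock-1-2')
    vm_node = qemu.qemu_start()        # boots the VM, waits for the guest agent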
diff --git a/resources/libraries/bash/config/config b/resources/libraries/bash/config/config
index 42f23bdaf8..8cd1fe4793 100644
--- a/resources/libraries/bash/config/config
+++ b/resources/libraries/bash/config/config
@@ -1,5 +1,5 @@
-QEMU_INSTALL_DIR=/opt/qemu-2.5.0
-QEMU_INSTALL_VERSION=qemu-2.5.0
+QEMU_INSTALL_DIR=/opt/qemu-2.11.2
+QEMU_INSTALL_VERSION=qemu-2.11.2
QEMU_PATCH=false
QEMU_FORCE_INSTALL=false
QEMU_TARGET_LIST=x86_64-softmmu
diff --git a/resources/libraries/bash/config/defaults b/resources/libraries/bash/config/defaults
index a70add12b1..547fece17d 100644
--- a/resources/libraries/bash/config/defaults
+++ b/resources/libraries/bash/config/defaults
@@ -2,8 +2,8 @@
typeset -A cfg
cfg=( # set default values in config array
- [QEMU_INSTALL_DIR]="/opt/qemu-2.5.0"
- [QEMU_INSTALL_VERSION]="qemu-2.5.0"
+ [QEMU_INSTALL_DIR]="/opt/qemu-2.11.2"
+ [QEMU_INSTALL_VERSION]="qemu-2.11.2"
[QEMU_PATCH]=false
[QEMU_FORCE_INSTALL]=false
[QEMU_TARGET_LIST]=x86_64-softmmu
diff --git a/resources/libraries/python/DUTSetup.py b/resources/libraries/python/DUTSetup.py
index 7c8ca1b828..632e9ea073 100644
--- a/resources/libraries/python/DUTSetup.py
+++ b/resources/libraries/python/DUTSetup.py
@@ -643,3 +643,169 @@ class DUTSetup(object):
DUTSetup.vpp_show_version_verbose(node)
DUTSetup.vpp_show_interfaces(node)
+
+ @staticmethod
+ def get_huge_page_size(node):
+ """Get default size of huge pages in system.
+
+ :param node: Node in the topology.
+ :type node: dict
+ :returns: Default size of free huge pages in system.
+ :rtype: int
+ :raises RuntimeError: If reading failed three times.
+ """
+ ssh = SSH()
+ ssh.connect(node)
+
+ for _ in range(3):
+ ret_code, stdout, _ = ssh.exec_command_sudo(
+ "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
+ if ret_code == 0:
+ try:
+ huge_size = int(stdout)
+ except ValueError:
+ logger.trace('Reading huge page size information failed')
+ else:
+ break
+ else:
+ raise RuntimeError('Getting huge page size information failed.')
+ return huge_size
+
+ @staticmethod
+ def get_huge_page_free(node, huge_size):
+ """Get number of free huge pages in system.
+
+ :param node: Node in the topology.
+ :param huge_size: Size of hugepages.
+ :type node: dict
+ :type huge_size: int
+ :returns: Number of free huge pages in system.
+ :rtype: int
+ :raises RuntimeError: If reading failed three times.
+ """
+ # TODO: add numa aware option
+ ssh = SSH()
+ ssh.connect(node)
+
+ for _ in range(3):
+ ret_code, stdout, _ = ssh.exec_command_sudo(
+ 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
+ format(huge_size))
+ if ret_code == 0:
+ try:
+ huge_free = int(stdout)
+ except ValueError:
+ logger.trace('Reading free huge pages information failed')
+ else:
+ break
+ else:
+ raise RuntimeError('Getting free huge pages information failed.')
+ return huge_free
+
+ @staticmethod
+ def get_huge_page_total(node, huge_size):
+ """Get total number of huge pages in system.
+
+ :param node: Node in the topology.
+ :param huge_size: Size of hugepages.
+ :type node: dict
+ :type huge_size: int
+
+ :returns: Total number of huge pages in system.
+ :rtype: int
+ :raises RuntimeError: If reading failed three times.
+ """
+ # TODO: add numa aware option
+ ssh = SSH()
+ ssh.connect(node)
+
+ for _ in range(3):
+ ret_code, stdout, _ = ssh.exec_command_sudo(
+ 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
+ format(huge_size))
+ if ret_code == 0:
+ try:
+ huge_total = int(stdout)
+ except ValueError:
+ logger.trace('Reading total huge pages information failed')
+ else:
+ break
+ else:
+ raise RuntimeError('Getting total huge pages information failed.')
+ return huge_total
+
+ @staticmethod
+ def check_huge_page(node, huge_mnt, mem_size, allocate=False):
+ """Check if there are enough HugePages in the system. If allocate is set to
+ true, try to allocate more HugePages.
+
+ :param node: Node in the topology.
+ :param huge_mnt: HugePage mount point.
+ :param mem_size: Requested memory in MB.
+ :param allocate: Whether to allocate more memory if not enough.
+ :type node: dict
+ :type huge_mnt: str
+ :type mem_size: str
+ :type allocate: bool
+
+ :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
+ or increasing map count failed.
+ """
+ # TODO: split function into smaller parts.
+ ssh = SSH()
+ ssh.connect(node)
+
+ # Get huge pages information
+ huge_size = DUTSetup.get_huge_page_size(node)
+ huge_free = DUTSetup.get_huge_page_free(node, huge_size)
+ huge_total = DUTSetup.get_huge_page_total(node, huge_size)
+
+ # Check if memory requested is available on host
+ if (mem_size * 1024) > (huge_free * huge_size):
+ # If we want to allocate hugepage dynamically
+ if allocate:
+ mem_needed = (mem_size * 1024) - (huge_free * huge_size)
+ huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
+ max_map_count = huge_to_allocate*4
+ # Increase maximum number of memory map areas a process may have
+ ret_code, _, _ = ssh.exec_command_sudo(
+ 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
+ format(max_map_count))
+ if int(ret_code) != 0:
+ raise RuntimeError('Increase map count failed on {host}'.
+ format(host=node['host']))
+ # Increase hugepage count
+ ret_code, _, _ = ssh.exec_command_sudo(
+ 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
+ format(huge_to_allocate))
+ if int(ret_code) != 0:
+ raise RuntimeError('Mount huge pages failed on {host}'.
+ format(host=node['host']))
+ # If we do not want to allocate dynamically, end with an error
+ else:
+ raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
+ format(huge_free, huge_free * huge_size))
+ # Check if huge pages mount point exists
+ has_huge_mnt = False
+ ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
+ if int(ret_code) == 0:
+ for line in stdout.splitlines():
+ # Try to find something like:
+ # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
+ mount = line.split()
+ if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
+ has_huge_mnt = True
+ break
+ # If huge page mount point does not exist, create one
+ if not has_huge_mnt:
+ ret_code, _, _ = ssh.exec_command_sudo(
+ 'mkdir -p {mnt}'.format(mnt=huge_mnt))
+ if int(ret_code) != 0:
+ raise RuntimeError('Create mount dir failed on {host}'.
+ format(host=node['host']))
+ ret_code, _, _ = ssh.exec_command_sudo(
+ 'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
+ format(mnt=huge_mnt))
+ if int(ret_code) != 0:
+ raise RuntimeError('Mount huge pages failed on {host}'.
+ format(host=node['host']))
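
Moving the huge-page helpers from QemuUtils into DUTSetup makes them reusable outside QEMU setup. A short sketch of the intended call pattern, with an illustrative mount point and memory size; note the allocation math above doubles the shortfall ((mem_needed / huge_size) * 2) and sizes max_map_count at four times the page count:

    from resources.libraries.python.DUTSetup import DUTSetup

    huge_size = DUTSetup.get_huge_page_size(node)             # kB, e.g. 2048
    huge_free = DUTSetup.get_huge_page_free(node, huge_size)  # pages
    huge_total = DUTSetup.get_huge_page_total(node, huge_size)
    # Ensure 2048 MB worth of pages is available and mounted at /mnt/huge,
    # allocating extra pages on demand; raises RuntimeError on failure.
    DUTSetup.check_huge_page(node, '/mnt/huge', 2048, allocate=True)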
diff --git a/resources/libraries/python/DpdkUtil.py b/resources/libraries/python/DpdkUtil.py
index 541ec9b151..60ee86763c 100644
--- a/resources/libraries/python/DpdkUtil.py
+++ b/resources/libraries/python/DpdkUtil.py
@@ -29,8 +29,8 @@ class DpdkUtil(object):
:rtype: str
"""
# Set the hexadecimal bitmask of the cores to run on.
- eal_coremask = '-c {} '.format(args['eal_coremask'])\
- if args.get('eal_coremask', '') else ''
+ eal_corelist = '-l {} '.format(args['eal_corelist'])\
+ if args.get('eal_corelist', '') else ''
# Set master core.
eal_master_core = '--master-lcore 0 '
# Set the number of memory channels to use.
@@ -42,7 +42,7 @@ class DpdkUtil(object):
# Load an external driver. Multiple -d options are allowed.
eal_driver = '-d /usr/lib/librte_pmd_virtio.so '
eal_options = '-v '\
- + eal_coremask\
+ + eal_corelist\
+ eal_master_core\
+ eal_mem_channels\
+ eal_socket_mem\
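
Replacing the -c hexadecimal coremask with the -l core list makes the EAL options human-readable: callers now pass core numbers instead of a bitmask. A worked example of the change for the same two cores, using the args-dict shape the builder above consumes:

    # Before: {'eal_coremask': '0x6'}  ->  '-c 0x6 '   (bits 1 and 2 set)
    # After:  {'eal_corelist': '1-2'}  ->  '-l 1-2 '   (cores 1 and 2)
    args = {'eal_corelist': '1-2'}
    eal_corelist = '-l {} '.format(args['eal_corelist']) \
        if args.get('eal_corelist', '') else ''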
diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index 5821455cc3..588002896c 100644
--- a/resources/libraries/python/QemuUtils.py
+++ b/resources/libraries/python/QemuUtils.py
@@ -15,11 +15,16 @@
from time import time, sleep
import json
+import re
+# Disable due to pylint bug
+# pylint: disable=no-name-in-module,import-error
+from distutils.version import StrictVersion
from robot.api import logger
from resources.libraries.python.ssh import SSH, SSHTimeout
from resources.libraries.python.constants import Constants
+from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.topology import NodeType, Topology
@@ -28,16 +33,20 @@ class QemuUtils(object):
def __init__(self, qemu_id=1):
self._qemu_id = qemu_id
+ self._vhost_id = 0
+ self._ssh = None
+ self._node = None
+ # Qemu Options
+ self._qemu_opt = {}
# Path to QEMU binary. Use x86_64 by default
- self._qemu_path = '/usr/bin/'
- self._qemu_bin = 'qemu-system-x86_64'
+ self._qemu_opt['qemu_path'] = '/usr/bin/'
+ self._qemu_opt['qemu_bin'] = 'qemu-system-x86_64'
# QEMU Machine Protocol socket
- self._qmp_sock = '/tmp/qmp{0}.sock'.format(self._qemu_id)
+ self._qemu_opt['qmp_sock'] = '/tmp/qmp{0}.sock'.format(self._qemu_id)
# QEMU Guest Agent socket
- self._qga_sock = '/tmp/qga{0}.sock'.format(self._qemu_id)
+ self._qemu_opt['qga_sock'] = '/tmp/qga{0}.sock'.format(self._qemu_id)
# QEMU PID file
- self._pid_file = '/tmp/qemu{0}.pid'.format(self._qemu_id)
- self._qemu_opt = {}
+ self._qemu_opt['pid_file'] = '/tmp/qemu{0}.pid'.format(self._qemu_id)
# Default 1 CPU.
self._qemu_opt['smp'] = '-smp 1,sockets=1,cores=1,threads=1'
# Daemonize the QEMU process after initialization. Default one
@@ -57,6 +66,10 @@ class QemuUtils(object):
self._qemu_opt['huge_allocate'] = False
# Default image for CSIT virl setup
self._qemu_opt['disk_image'] = '/var/lib/vm/vhost-nested.img'
+ # Virtio queue count
+ self._qemu_opt['queue_count'] = 1
+ # Virtio queue size
+ self._qemu_opt['queue_size'] = None
# VM node info dict
self._vm_info = {
'type': NodeType.VM,
@@ -65,12 +78,9 @@ class QemuUtils(object):
'password': 'cisco',
'interfaces': {},
}
- # Virtio queue count
- self._qemu_opt['queues'] = 1
- self._vhost_id = 0
- self._ssh = None
- self._node = None
- self._socks = [self._qmp_sock, self._qga_sock]
+ # Qemu Sockets
+ self._socks = [self._qemu_opt.get('qmp_sock'),
+ self._qemu_opt.get('qga_sock')]
def qemu_set_path(self, path):
"""Set binary path for QEMU.
@@ -78,22 +88,39 @@ class QemuUtils(object):
:param path: Absolute path in filesystem.
:type path: str
"""
- self._qemu_path = path
+ self._qemu_opt['qemu_path'] = path
+
+ def qemu_set_queue_count(self, count):
+ """Set number of virtio queues.
+
+ :param count: Number of virtio queues.
+ :type count: int
+ """
+ self._qemu_opt['queue_count'] = int(count)
- def qemu_set_smp(self, cpus, cores, threads, sockets):
+ def qemu_set_queue_size(self, size):
+ """Set RX/TX size of virtio queues.
+
+ :param size: Size of virtio queues.
+ :type size: int
+ """
+ self._qemu_opt['queue_size'] = int(size)
+
+ def qemu_set_smp(self, smp, cores, threads, sockets):
"""Set SMP option for QEMU.
- :param cpus: Number of CPUs.
+ :param smp: Number of CPUs.
:param cores: Number of CPU cores on one socket.
:param threads: Number of threads on one CPU core.
:param sockets: Number of discrete sockets in the system.
- :type cpus: int
+ :type smp: int
:type cores: int
:type threads: int
:type sockets: int
"""
- self._qemu_opt['smp'] = '-smp {},cores={},threads={},sockets={}'.format(
- cpus, cores, threads, sockets)
+ self._qemu_opt['smp'] = \
+ ('-smp {smp},cores={cores},threads={threads},sockets={sockets}'.
+ format(smp=smp, cores=cores, threads=threads, sockets=sockets))
def qemu_set_ssh_fwd_port(self, fwd_port):
"""Set host port for guest SSH forwarding.
@@ -150,17 +177,15 @@ class QemuUtils(object):
qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']
if len(qemu_cpus) != len(host_cpus):
- logger.debug('Host CPU count {0}, Qemu Thread count {1}'.format(
- len(host_cpus), len(qemu_cpus)))
raise ValueError('Host CPU count must match Qemu Thread count')
for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
- cmd = 'taskset -pc {0} {1}'.format(host_cpu, qemu_cpu['thread_id'])
- (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+ ret_code, _, _ = self._ssh.exec_command_sudo(
+ 'taskset -pc {host_cpu} {thread_id}'.
+ format(host_cpu=host_cpu, thread_id=qemu_cpu['thread_id']))
if int(ret_code) != 0:
- logger.debug('Set affinity failed {0}'.format(stderr))
- raise RuntimeError('Set affinity failed on {0}'.format(
- self._node['host']))
+ raise RuntimeError('Set affinity failed on {host}'.
+ format(host=self._node['host']))
def qemu_set_scheduler_policy(self):
"""Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
@@ -171,12 +196,12 @@ class QemuUtils(object):
qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']
for qemu_cpu in qemu_cpus:
- cmd = 'chrt -r -p 1 {0}'.format(qemu_cpu['thread_id'])
- (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+ ret_code, _, _ = self._ssh.exec_command_sudo(
+ 'chrt -r -p 1 {thread_id}'.
+ format(thread_id=qemu_cpu['thread_id']))
if int(ret_code) != 0:
- logger.debug('Set SCHED_RR failed {0}'.format(stderr))
- raise RuntimeError('Set SCHED_RR failed on {0}'.format(
- self._node['host']))
+ raise RuntimeError('Set SCHED_RR failed on {host}'.
+ format(host=self._node['host']))
def qemu_set_node(self, node):
"""Set node to run QEMU on.
@@ -190,7 +215,7 @@ class QemuUtils(object):
self._vm_info['host'] = node['host']
arch = Topology.get_node_arch(node)
- self._qemu_bin = 'qemu-system-{arch}'.format(arch=arch)
+ self._qemu_opt['qemu_bin'] = 'qemu-system-{arch}'.format(arch=arch)
def qemu_add_vhost_user_if(self, socket, server=True, mac=None,
jumbo_frames=False):
@@ -208,34 +233,39 @@ class QemuUtils(object):
"""
self._vhost_id += 1
# Create unix socket character device.
- chardev = ' -chardev socket,id=char{0},path={1}'.format(self._vhost_id,
- socket)
- if server is True:
- chardev += ',server'
+ chardev = (' -chardev socket,id=char{vhost_id},path={socket}{server}'.
+ format(vhost_id=self._vhost_id,
+ socket=socket,
+ server=',server' if server is True else ''))
self._qemu_opt['options'] += chardev
# Create Vhost-user network backend.
- netdev = (' -netdev vhost-user,id=vhost{0},chardev=char{0},queues={1}'
- .format(self._vhost_id, self._qemu_opt['queues']))
+ netdev = (' -netdev vhost-user,id=vhost{vhost_id},'
+ 'chardev=char{vhost_id},queues={queue_count}'.
+ format(vhost_id=self._vhost_id,
+ queue_count=self._qemu_opt.get('queue_count')))
self._qemu_opt['options'] += netdev
# If MAC is not specified use auto-generated MAC address based on
# template 52:54:00:00:<qemu_id>:<vhost_id>, e.g. vhost1 MAC of QEMU
# with ID 1 is 52:54:00:00:01:01
- if mac is None:
- mac = '52:54:00:00:{0:02x}:{1:02x}'.\
- format(self._qemu_id, self._vhost_id)
- extend_options = 'mq=on,csum=off,gso=off,guest_tso4=off,'\
- 'guest_tso6=off,guest_ecn=off'
- if jumbo_frames:
- extend_options += ",mrg_rxbuf=on"
- else:
- extend_options += ",mrg_rxbuf=off"
+ mac = ('52:54:00:00:{qemu_id:02x}:{vhost_id:02x}'.
+ format(qemu_id=self._qemu_id, vhost_id=self._vhost_id))\
+ if mac is None else mac
+
+ queue_size = (',rx_queue_size={queue_size},tx_queue_size={queue_size}'.
+ format(queue_size=self._qemu_opt.get('queue_size')))\
+ if self._qemu_opt.get('queue_size') else ''
+
# Create Virtio network device.
- device = ' -device virtio-net-pci,netdev=vhost{0},mac={1},{2}'.format(
- self._vhost_id, mac, extend_options)
+ device = (' -device virtio-net-pci,netdev=vhost{vhost_id},mac={mac},'
+ 'mq=on,csum=off,gso=off,guest_tso4=off,guest_tso6=off,'
+ 'guest_ecn=off,mrg_rxbuf={mbuf}{queue_size}'.
+ format(vhost_id=self._vhost_id, mac=mac,
+ mbuf='on' if jumbo_frames else 'off',
+ queue_size=queue_size))
self._qemu_opt['options'] += device
# Add interface MAC and socket to the node dict
if_data = {'mac_address': mac, 'socket': socket}
- if_name = 'vhost{}'.format(self._vhost_id)
+ if_name = 'vhost{vhost_id}'.format(vhost_id=self._vhost_id)
self._vm_info['interfaces'][if_name] = if_data
# Add socket to the socket list
self._socks.append(socket)
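
A worked example of the auto-generated MAC template from the comment above; both IDs are rendered as two hexadecimal digits, so IDs above 9 no longer match their decimal spelling:

    # qemu_id=1,  vhost_id=2 -> '52:54:00:00:01:02'
    # qemu_id=12, vhost_id=1 -> '52:54:00:00:0c:01'
    mac = ('52:54:00:00:{qemu_id:02x}:{vhost_id:02x}'.
           format(qemu_id=1, vhost_id=2))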
@@ -252,36 +282,30 @@ class QemuUtils(object):
response will contain the "error" keyword instead of "return".
"""
# To enter command mode, the qmp_capabilities command must be issued.
- qmp_cmd = 'echo "{ \\"execute\\": \\"qmp_capabilities\\" }' \
- '{ \\"execute\\": \\"' + cmd + \
- '\\" }" | sudo -S socat - UNIX-CONNECT:' + self._qmp_sock
-
- (ret_code, stdout, stderr) = self._ssh.exec_command(qmp_cmd)
+ ret_code, stdout, _ = self._ssh.exec_command(
+ 'echo "{{ \\"execute\\": \\"qmp_capabilities\\" }}'
+ '{{ \\"execute\\": \\"{cmd}\\" }}" | '
+ 'sudo -S socat - UNIX-CONNECT:{qmp_sock}'.
+ format(cmd=cmd, qmp_sock=self._qemu_opt.get('qmp_sock')))
if int(ret_code) != 0:
- logger.debug('QMP execute failed {0}'.format(stderr))
- raise RuntimeError('QMP execute "{0}"'
- ' failed on {1}'.format(cmd, self._node['host']))
- logger.trace(stdout)
+ raise RuntimeError('QMP execute "{cmd}" failed on {host}'.
+ format(cmd=cmd, host=self._node['host']))
# Skip capabilities negotiation messages.
out_list = stdout.splitlines()
if len(out_list) < 3:
- raise RuntimeError('Invalid QMP output on {0}'.format(
- self._node['host']))
+ raise RuntimeError('Invalid QMP output on {host}'.
+ format(host=self._node['host']))
return json.loads(out_list[2])
def _qemu_qga_flush(self):
- """Flush the QGA parser state
- """
- qga_cmd = '(printf "\xFF"; sleep 1) | sudo -S socat - UNIX-CONNECT:' + \
- self._qga_sock
- #TODO: probably need something else
- (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
+ """Flush the QGA parser state."""
+ ret_code, stdout, _ = self._ssh.exec_command(
+ '(printf "\xFF"; sleep 1) | '
+ 'sudo -S socat - UNIX-CONNECT:{qga_sock}'.
+ format(qga_sock=self._qemu_opt.get('qga_sock')))
if int(ret_code) != 0:
- logger.debug('QGA execute failed {0}'.format(stderr))
- raise RuntimeError('QGA execute "{0}" '
- 'failed on {1}'.format(qga_cmd,
- self._node['host']))
- logger.trace(stdout)
+ raise RuntimeError('QGA flush failed on {host}'.
+ format(host=self._node['host']))
if not stdout:
return {}
return json.loads(stdout.split('\n', 1)[0])
@@ -294,16 +318,13 @@ class QemuUtils(object):
:param cmd: QGA command to execute.
:type cmd: str
"""
- qga_cmd = '(echo "{ \\"execute\\": \\"' + \
- cmd + \
- '\\" }"; sleep 1) | sudo -S socat - UNIX-CONNECT:' + \
- self._qga_sock
- (ret_code, stdout, stderr) = self._ssh.exec_command(qga_cmd)
+ ret_code, stdout, _ = self._ssh.exec_command(
+ '(echo "{{ \\"execute\\": \\"{cmd}\\" }}"; sleep 1) | '
+ 'sudo -S socat - UNIX-CONNECT:{qga_sock}'.
+ format(cmd=cmd, qga_sock=self._qemu_opt.get('qga_sock')))
if int(ret_code) != 0:
- logger.debug('QGA execute failed {0}'.format(stderr))
- raise RuntimeError('QGA execute "{0}"'
- ' failed on {1}'.format(cmd, self._node['host']))
- logger.trace(stdout)
+ raise RuntimeError('QGA execute "{cmd}" failed on {host}'.
+ format(cmd=cmd, host=self._node['host']))
if not stdout:
return {}
return json.loads(stdout.split('\n', 1)[0])
@@ -320,13 +341,15 @@ class QemuUtils(object):
start = time()
while True:
if time() - start > timeout:
- raise RuntimeError('timeout, VM {0} not booted on {1}'.format(
- self._qemu_opt['disk_image'], self._node['host']))
+ raise RuntimeError('timeout, VM {disk} not booted on {host}'.
+ format(disk=self._qemu_opt['disk_image'],
+ host=self._node['host']))
out = None
try:
out = self._qemu_qga_flush()
except ValueError:
- logger.trace('QGA qga flush unexpected output {}'.format(out))
+ logger.trace('QGA qga flush unexpected output {out}'.
+ format(out=out))
# Empty output - VM not booted yet
if not out:
sleep(5)
@@ -334,13 +357,16 @@ class QemuUtils(object):
break
while True:
if time() - start > timeout:
- raise RuntimeError('timeout, VM {0} not booted on {1}'.format(
- self._qemu_opt['disk_image'], self._node['host']))
+ raise RuntimeError('timeout, VM with {disk} not booted '
+ 'on {host}'.
+ format(disk=self._qemu_opt['disk_image'],
+ host=self._node['host']))
out = None
try:
out = self._qemu_qga_exec('guest-ping')
except ValueError:
- logger.trace('QGA guest-ping unexpected output {}'.format(out))
+ logger.trace('QGA guest-ping unexpected output {out}'.
+ format(out=out))
# Empty output - VM not booted yet
if not out:
sleep(5)
@@ -353,10 +379,12 @@ class QemuUtils(object):
else:
# If there is an unexpected output from QGA guest-info, try
# again until timeout.
- logger.trace('QGA guest-ping unexpected output {}'.format(out))
+ logger.trace('QGA guest-ping unexpected output {out}'.
+ format(out=out))
- logger.trace('VM {0} booted on {1}'.format(self._qemu_opt['disk_image'],
- self._node['host']))
+ logger.trace('VM with {disk_image} booted on {host}'.
+ format(disk_image=self._qemu_opt['disk_image'],
+ host=self._node['host']))
def _update_vm_interfaces(self):
"""Update interface names in VM node dict."""
@@ -367,8 +395,10 @@ class QemuUtils(object):
interfaces = out.get('return')
mac_name = {}
if not interfaces:
- raise RuntimeError('Get VM {0} interface list failed on {1}'.format(
- self._qemu_opt['disk_image'], self._node['host']))
+ raise RuntimeError('Get VM {disk_image} interface list failed '
+ 'on {host}'.
+ format(disk_image=self._qemu_opt['disk_image'],
+ host=self._node['host']))
# Create MAC-name dict
for interface in interfaces:
if 'hardware-address' not in interface:
@@ -379,196 +409,77 @@ class QemuUtils(object):
mac = interface.get('mac_address')
if_name = mac_name.get(mac)
if if_name is None:
- logger.trace('Interface name for MAC {} not found'.format(mac))
+ logger.trace('Interface name for MAC {mac} not found'.
+ format(mac=mac))
else:
interface['name'] = if_name
- def _huge_page_check(self, allocate=False):
- """Huge page check."""
- huge_mnt = self._qemu_opt.get('huge_mnt')
- mem_size = self._qemu_opt.get('mem_size')
-
- # Get huge pages information
- huge_size = self._get_huge_page_size()
- huge_free = self._get_huge_page_free(huge_size)
- huge_total = self._get_huge_page_total(huge_size)
-
- # Check if memory reqested by qemu is available on host
- if (mem_size * 1024) > (huge_free * huge_size):
- # If we want to allocate hugepage dynamically
- if allocate:
- mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
- huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
- max_map_count = huge_to_allocate*4
- # Increase maximum number of memory map areas a process may have
- cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
- max_map_count)
- (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
- # Increase hugepage count
- cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
- huge_to_allocate)
- (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
- if int(ret_code) != 0:
- logger.debug('Mount huge pages failed {0}'.format(stderr))
- raise RuntimeError('Mount huge pages failed on {0}'.format(
- self._node['host']))
- # If we do not want to allocate dynamicaly end with error
- else:
- raise RuntimeError(
- 'Not enough free huge pages: {0}, '
- '{1} MB'.format(huge_free, huge_free * huge_size)
- )
- # Check if huge pages mount point exist
- has_huge_mnt = False
- (_, output, _) = self._ssh.exec_command('cat /proc/mounts')
- for line in output.splitlines():
- # Try to find something like:
- # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
- mount = line.split()
- if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
- has_huge_mnt = True
- break
- # If huge page mount point not exist create one
- if not has_huge_mnt:
- cmd = 'mkdir -p {0}'.format(huge_mnt)
- (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
- if int(ret_code) != 0:
- logger.debug('Create mount dir failed: {0}'.format(stderr))
- raise RuntimeError('Create mount dir failed on {0}'.format(
- self._node['host']))
- cmd = 'mount -t hugetlbfs -o pagesize=2048k none {0}'.format(
- huge_mnt)
- (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
- if int(ret_code) != 0:
- logger.debug('Mount huge pages failed {0}'.format(stderr))
- raise RuntimeError('Mount huge pages failed on {0}'.format(
- self._node['host']))
-
- def _get_huge_page_size(self):
- """Get default size of huge pages in system.
-
- :returns: Default size of free huge pages in system.
- :rtype: int
- :raises RuntimeError: If reading failed for three times.
- """
- # TODO: remove to dedicated library
- cmd_huge_size = "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'"
- for _ in range(3):
- (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_size)
- if ret == 0:
- try:
- huge_size = int(out)
- except ValueError:
- logger.trace('Reading huge page size information failed')
- else:
- break
- else:
- raise RuntimeError('Getting huge page size information failed.')
- return huge_size
-
- def _get_huge_page_free(self, huge_size):
- """Get total number of huge pages in system.
-
- :param huge_size: Size of hugepages.
- :type huge_size: int
- :returns: Number of free huge pages in system.
- :rtype: int
- :raises RuntimeError: If reading failed for three times.
- """
- # TODO: add numa aware option
- # TODO: remove to dedicated library
- cmd_huge_free = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\
- 'free_hugepages'.format(huge_size)
- for _ in range(3):
- (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_free)
- if ret == 0:
- try:
- huge_free = int(out)
- except ValueError:
- logger.trace('Reading free huge pages information failed')
- else:
- break
- else:
- raise RuntimeError('Getting free huge pages information failed.')
- return huge_free
-
- def _get_huge_page_total(self, huge_size):
- """Get total number of huge pages in system.
-
- :param huge_size: Size of hugepages.
- :type huge_size: int
- :returns: Total number of huge pages in system.
- :rtype: int
- :raises RuntimeError: If reading failed for three times.
- """
- # TODO: add numa aware option
- # TODO: remove to dedicated library
- cmd_huge_total = 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/'\
- 'nr_hugepages'.format(huge_size)
- for _ in range(3):
- (ret, out, _) = self._ssh.exec_command_sudo(cmd_huge_total)
- if ret == 0:
- try:
- huge_total = int(out)
- except ValueError:
- logger.trace('Reading total huge pages information failed')
- else:
- break
- else:
- raise RuntimeError('Getting total huge pages information failed.')
- return huge_total
-
def qemu_start(self):
"""Start QEMU and wait until VM boot.
.. note:: First set at least node to run QEMU on.
- .. warning:: Starts only one VM on the node.
:returns: VM node info.
:rtype: dict
"""
# Qemu binary path
- bin_path = '{0}{1}'.format(self._qemu_path, self._qemu_bin)
+ bin_path = ('{qemu_path}{qemu_bin}'.
+ format(qemu_path=self._qemu_opt.get('qemu_path'),
+ qemu_bin=self._qemu_opt.get('qemu_bin')))
- # SSH forwarding
- ssh_fwd = '-net user,hostfwd=tcp::{0}-:22'.format(
- self._qemu_opt.get('ssh_fwd_port'))
# Memory and huge pages
- mem = '-object memory-backend-file,id=mem,size={0}M,mem-path={1},' \
- 'share=on -m {0} -numa node,memdev=mem'.format(
- self._qemu_opt.get('mem_size'), self._qemu_opt.get('huge_mnt'))
+ mem = ('-object memory-backend-file,id=mem,size={mem_size}M,'
+ 'mem-path={path},share=on -m {mem_size} -numa node,memdev=mem'.
+ format(mem_size=self._qemu_opt.get('mem_size'),
+ path=self._qemu_opt.get('huge_mnt')))
+
+ # Drive option
+ drive = ('-drive file={disk_image},format=raw,cache=none,if=virtio'
+ '{locking}'.
+ format(disk_image=self._qemu_opt.get('disk_image'),
+ locking=',file.locking=off'\
+ if self._qemu_version_is_greater('2.10') else ''))
- # By default check only if hugepages are available.
- # If 'huge_allocate' is set to true try to allocate as well.
- self._huge_page_check(allocate=self._qemu_opt.get('huge_allocate'))
-
- # Disk option
- drive = '-drive file={0},format=raw,cache=none,if=virtio'.format(
- self._qemu_opt.get('disk_image'))
+ # SSH forwarding
+ ssh = ('-net user,hostfwd=tcp::{ssh_fwd_port}-:22'.
+ format(ssh_fwd_port=self._qemu_opt.get('ssh_fwd_port')))
# Setup QMP via unix socket
- qmp = '-qmp unix:{0},server,nowait'.format(self._qmp_sock)
- # Setup serial console
- serial = '-chardev socket,host=127.0.0.1,port={0},id=gnc0,server,' \
- 'nowait -device isa-serial,chardev=gnc0'.format(
- self._qemu_opt.get('serial_port'))
+ qmp = ('-qmp unix:{qmp_sock},server,nowait'.
+ format(qmp_sock=self._qemu_opt.get('qmp_sock')))
# Setup QGA via chardev (unix socket) and isa-serial channel
- qga = '-chardev socket,path={0},server,nowait,id=qga0 ' \
- '-device isa-serial,chardev=qga0'.format(self._qga_sock)
+ qga = ('-chardev socket,path={qga_sock},server,nowait,id=qga0 '
+ '-device isa-serial,chardev=qga0'.
+ format(qga_sock=self._qemu_opt.get('qga_sock')))
+ # Setup serial console
+ serial = ('-chardev socket,host=127.0.0.1,port={serial_port},id=gnc0,'
+ 'server,nowait -device isa-serial,chardev=gnc0'.
+ format(serial_port=self._qemu_opt.get('serial_port')))
+
# Graphic setup
graphic = '-monitor none -display none -vga none'
+
# PID file
- pid = '-pidfile {}'.format(self._pid_file)
+ pid = ('-pidfile {pid_file}'.
+ format(pid_file=self._qemu_opt.get('pid_file')))
+
+ # By default check only if hugepages are available.
+ # If 'huge_allocate' is set to true try to allocate as well.
+ DUTSetup.check_huge_page(self._node, self._qemu_opt.get('huge_mnt'),
+ self._qemu_opt.get('mem_size'),
+ allocate=self._qemu_opt.get('huge_allocate'))
# Run QEMU
- cmd = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'.format(
- bin_path, self._qemu_opt.get('smp'), mem, ssh_fwd,
- self._qemu_opt.get('options'), drive, qmp, serial, qga, graphic,
- pid)
+ cmd = ('{bin_path} {smp} {mem} {ssh} {options} {drive} {qmp} {serial} '
+ '{qga} {graphic} {pid}'.
+ format(bin_path=bin_path, smp=self._qemu_opt.get('smp'),
+ mem=mem, ssh=ssh, options=self._qemu_opt.get('options'),
+ drive=drive, qmp=qmp, serial=serial, qga=qga,
+ graphic=graphic, pid=pid))
try:
- (ret_code, _, _) = self._ssh.exec_command_sudo(cmd, timeout=300)
+ ret_code, _, _ = self._ssh.exec_command_sudo(cmd, timeout=300)
if int(ret_code) != 0:
- raise RuntimeError('QEMU start failed on {0}'.format(
- self._node['host']))
+ raise RuntimeError('QEMU start failed on {host}'.
+ format(host=self._node['host']))
# Wait until VM boot
self._wait_until_vm_boot()
except (RuntimeError, SSHTimeout):
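
The drive option above is the first user of the new version detection: QEMU 2.10 introduced image-file locking, so ',file.locking=off' is appended only when the installed QEMU is newer than 2.10. The two resulting fragments, using the default disk image from __init__:

    # QEMU <= 2.10:
    #   -drive file=/var/lib/vm/vhost-nested.img,format=raw,cache=none,if=virtio
    # QEMU > 2.10 (e.g. the 2.11.2 this patch installs):
    #   -drive file=/var/lib/vm/vhost-nested.img,format=raw,cache=none,if=virtio,file.locking=off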
@@ -586,8 +497,9 @@ class QemuUtils(object):
out = self._qemu_qmp_exec('quit')
err = out.get('error')
if err is not None:
- raise RuntimeError('QEMU quit failed on {0}, error: {1}'.format(
- self._node['host'], json.dumps(err)))
+ raise RuntimeError('QEMU quit failed on {host}: {error}'.
+ format(host=self._node['host'],
+ error=json.dumps(err)))
def qemu_system_powerdown(self):
"""Power down the system (if supported)."""
@@ -595,9 +507,8 @@ class QemuUtils(object):
err = out.get('error')
if err is not None:
raise RuntimeError(
- 'QEMU system powerdown failed on {0}, '
- 'error: {1}'.format(self._node['host'], json.dumps(err))
- )
+ 'QEMU system powerdown failed on {host}: {error}'.
+ format(host=self._node['host'], error=json.dumps(err)))
def qemu_system_reset(self):
"""Reset the system."""
@@ -605,19 +516,20 @@ class QemuUtils(object):
err = out.get('error')
if err is not None:
raise RuntimeError(
- 'QEMU system reset failed on {0}, '
- 'error: {1}'.format(self._node['host'], json.dumps(err)))
+ 'QEMU system reset failed on {host}: {error}'.
+ format(host=self._node['host'], error=json.dumps(err)))
def qemu_kill(self):
"""Kill qemu process."""
# Note: in QEMU start phase there are 3 QEMU processes because we
# daemonize QEMU
- self._ssh.exec_command_sudo('chmod +r {}'.format(self._pid_file))
- self._ssh.exec_command_sudo('kill -SIGKILL $(cat {})'
- .format(self._pid_file))
+ self._ssh.exec_command_sudo('chmod +r {pid}'.
+ format(pid=self._qemu_opt.get('pid_file')))
+ self._ssh.exec_command_sudo('kill -SIGKILL $(cat {pid})'.
+ format(pid=self._qemu_opt.get('pid_file')))
# Delete PID file
- cmd = 'rm -f {}'.format(self._pid_file)
- self._ssh.exec_command_sudo(cmd)
+ self._ssh.exec_command_sudo('rm -f {pid}'.
+ format(pid=self._qemu_opt.get('pid_file')))
def qemu_kill_all(self, node=None):
"""Kill all qemu processes on DUT node if specified.
@@ -632,12 +544,13 @@ class QemuUtils(object):
def qemu_clear_socks(self):
"""Remove all sockets created by QEMU."""
# If serial console port still open kill process
- cmd = 'fuser -k {}/tcp'.format(self._qemu_opt.get('serial_port'))
- self._ssh.exec_command_sudo(cmd)
+ self._ssh.exec_command_sudo('fuser -k {serial_port}/tcp'.
+ format(serial_port=\
+ self._qemu_opt.get('serial_port')))
# Delete all created sockets
- for sock in self._socks:
- cmd = 'rm -f {}'.format(sock)
- self._ssh.exec_command_sudo(cmd)
+ for socket in self._socks:
+ self._ssh.exec_command_sudo('rm -f {socket}'.
+ format(socket=socket))
def qemu_system_status(self):
"""Return current VM status.
@@ -669,9 +582,42 @@ class QemuUtils(object):
return ret.get('status')
else:
err = out.get('error')
- raise RuntimeError(
- 'QEMU query-status failed on {0}, '
- 'error: {1}'.format(self._node['host'], json.dumps(err)))
+ raise RuntimeError('QEMU query-status failed on {host}: {error}'.
+ format(host=self._node['host'],
+ error=json.dumps(err)))
+
+ def qemu_version(self):
+ """Return Qemu version.
+
+ :returns: Qemu version.
+ :rtype: str
+ """
+ # Qemu binary path
+ bin_path = ('{qemu_path}{qemu_bin}'.
+ format(qemu_path=self._qemu_opt.get('qemu_path'),
+ qemu_bin=self._qemu_opt.get('qemu_bin')))
+
+ try:
+ ret_code, stdout, _ = self._ssh.exec_command_sudo(
+ '{bin_path} --version'.
+ format(bin_path=bin_path))
+ if int(ret_code) != 0:
+ raise RuntimeError('Failed to get QEMU version on {host}'.
+ format(host=self._node['host']))
+
+ return re.match(r'QEMU emulator version ([\d.]*)', stdout).group(1)
+ except (RuntimeError, SSHTimeout):
+ self.qemu_kill_all()
+ self.qemu_clear_socks()
+ raise
+
+ def _qemu_version_is_greater(self, version):
+ """Compare Qemu versions.
+
+ :param version: Qemu version to compare installed version against.
+ :type version: str
+ :returns: True if installed Qemu version is greater.
+ :rtype: bool
+ """
+ return StrictVersion(self.qemu_version()) > StrictVersion(version)
@staticmethod
def build_qemu(node, force_install=False, apply_patch=False):
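
_qemu_version_is_greater() relies on distutils' StrictVersion for numeric rather than lexical comparison, which matters exactly at the 2.5.0 -> 2.11.2 bump this patch makes:

    from distutils.version import StrictVersion

    StrictVersion('2.11.2') > StrictVersion('2.10')  # True  -> file.locking=off is added
    StrictVersion('2.5.0') > StrictVersion('2.10')   # False (lexically '2.5.0' > '2.10'!)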
@@ -688,23 +634,25 @@ class QemuUtils(object):
ssh = SSH()
ssh.connect(node)
- directory = ' --directory={0}'.format(Constants.QEMU_INSTALL_DIR)
- if apply_patch:
- directory += '-patch'
- else:
- directory += '-base'
- version = ' --version={0}'.format(Constants.QEMU_INSTALL_VERSION)
+ directory = (' --directory={install_dir}{patch}'.
+ format(install_dir=Constants.QEMU_INSTALL_DIR,
+ patch='-patch' if apply_patch else '-base'))
+ version = (' --version={install_version}'.
+ format(install_version=Constants.QEMU_INSTALL_VERSION))
force = ' --force' if force_install else ''
patch = ' --patch' if apply_patch else ''
arch = Topology.get_node_arch(node)
- target_list = ' --target-list={0}-softmmu'.format(arch)
+ target_list = (' --target-list={arch}-softmmu'.
+ format(arch=arch))
- (ret_code, stdout, stderr) = \
- ssh.exec_command(
- "sudo -E sh -c '{0}/{1}/qemu_build.sh{2}{3}{4}{5}{6}'"\
- .format(Constants.REMOTE_FW_DIR, Constants.RESOURCES_LIB_SH,
- version, directory, force, patch, target_list), 1000)
+ ret_code, _, _ = ssh.exec_command(
+ "sudo -E sh -c '{fw_dir}/{lib_sh}/qemu_build.sh{version}{directory}"
+ "{force}{patch}{target_list}'".
+ format(fw_dir=Constants.REMOTE_FW_DIR,
+ lib_sh=Constants.RESOURCES_LIB_SH,
+ version=version, directory=directory, force=force,
+ patch=patch, target_list=target_list), 1000)
if int(ret_code) != 0:
- logger.debug('QEMU build failed {0}'.format(stdout + stderr))
- raise RuntimeError('QEMU build failed on {0}'.format(node['host']))
+ raise RuntimeError('QEMU build failed on {host}'.
+ format(host=node['host']))
diff --git a/resources/libraries/python/constants.py b/resources/libraries/python/constants.py
index 9507e66d85..a0a427af4e 100644
--- a/resources/libraries/python/constants.py
+++ b/resources/libraries/python/constants.py
@@ -33,10 +33,10 @@ class Constants(object):
VPP_UNIT = 'vpp'
# QEMU version to install
- QEMU_INSTALL_VERSION = 'qemu-2.5.0'
+ QEMU_INSTALL_VERSION = 'qemu-2.11.2'
# QEMU install directory
- QEMU_INSTALL_DIR = '/opt/qemu-2.5.0'
+ QEMU_INSTALL_DIR = '/opt/qemu-2.11.2'
# TRex install version
TREX_INSTALL_VERSION = '2.35'
diff --git a/resources/libraries/robot/performance/performance_configuration.robot b/resources/libraries/robot/performance/performance_configuration.robot
index 45147e7feb..a43556132d 100644
--- a/resources/libraries/robot/performance/performance_configuration.robot
+++ b/resources/libraries/robot/performance/performance_configuration.robot
@@ -51,6 +51,7 @@
| | :FOR | ${dut} | IN | @{duts}
| | | Set Interface State | ${nodes['${dut}']} | ${${dut}_if1} | up
| | | Set Interface State | ${nodes['${dut}']} | ${${dut}_if2} | up
+| | All VPP Interfaces Ready Wait | ${nodes}
| | ${duts}= | Get Matches | ${nodes} | DUT*
| | :FOR | ${dut} | IN | @{duts}
| | | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if1}
@@ -181,167 +182,137 @@
| | Vpp Route Add | ${dut2} | 20.0.0.0 | 32 | 3.3.3.1 | ${dut2_if2}
| | ... | count=${count}
-| Initialize IPv4 forwarding with vhost in 3-node circular topology
+| Initialize IPv4 forwarding with vhost in 2-node circular topology
| | [Documentation]
-| | ... | Create vhost-user interfaces in VPP. Set UP state of all VPP
-| | ... | interfaces in path on nodes in 3-node circular topology. Create 2
-| | ... | FIB tables on each DUT with multipath routing. Assign pair of
-| | ... | Physical and Virtual interfaces on both nodes to each FIB table.
-| | ... | Setup IPv4 addresses with /30 prefix on DUT-TG links and /30 prefix
-| | ... | on DUT1-DUT2 link. Set routing on all DUT nodes in all FIB tables
-| | ... | with prefix /24 and next hop of neighbour IPv4 address. Setup
-| | ... | ARP on all VPP interfaces.
+| | ... | Create pairs of Vhost-User interfaces for defined number of VMs on
+| | ... | VPP node. Set UP state of all VPP interfaces in path. Create
+| | ... | ${vm_count}+1 FIB tables on DUT with multipath routing. Assign each
+| | ... | Virtual interface to FIB table with Physical interface or Virtual
+| | ... | interface on both nodes. Setup IPv4 addresses with /30 prefix on
+| | ... | DUT-TG links. Set routing on DUT nodes in all FIB tables with prefix
+| | ... | /24 and next hop of neighbour IPv4 address. Setup ARP on all VPP
+| | ... | interfaces.
| | ...
| | ... | *Arguments:*
-| | ... | - sock1 - Sock path for first Vhost-User interface. Type: string
-| | ... | - sock2 - Sock path for second Vhost-User interface. Type: string
-| | ...
-| | ... | _NOTE:_ This KW uses following test case variables:
-| | ... | - ${dut1} - DUT1 node.
-| | ... | - ${dut2} - DUT2 node.
-| | ... | - ${dut1_if1} - DUT1 interface towards TG.
-| | ... | - ${dut1_if2} - DUT1 interface towards DUT2.
-| | ... | - ${dut2_if1} - DUT2 interface towards DUT1.
-| | ... | - ${dut2_if2} - DUT2 interface towards TG.
+| | ... | - vm_count - Number of guest VMs. Type: integer
| | ...
-| | ... | *Return:*
-| | ... | - No value returned
+| | ... | *Note:*
+| | ... | Socket paths for VM are defined in following format:
+| | ... | - /tmp/sock-${VM_ID}-1
+| | ... | - /tmp/sock-${VM_ID}-2
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| IPv4 forwarding with vhost initialized in a 3-node circular \
-| | ... | topology \| /tmp/sock1 \| /tmp/sock2 \|
+| | ... | \| IPv4 forwarding with Vhost-User initialized in a 2-node circular\
+| | ... | topology \| 1 \|
| | ...
-| | [Arguments] | ${sock1} | ${sock2}
+| | [Arguments] | ${vm_count}=${1}
| | ...
| | Set interfaces in path up
-| | Configure vhost interfaces for L2BD forwarding | ${dut1}
-| | ... | ${sock1} | ${sock2} | dut1_vhost_if1 | dut1_vhost_if2
-| | ${dut1_vif1}= | Set Variable | ${dut1_vhost_if1}
-| | ${dut1_vif2}= | Set Variable | ${dut1_vhost_if2}
-| | Set Interface State | ${dut1} | ${dut1_vif1} | up
-| | Set Interface State | ${dut1} | ${dut1_vif2} | up
-| | Configure vhost interfaces for L2BD forwarding | ${dut2}
-| | ... | ${sock1} | ${sock2} | dut2_vhost_if1 | dut2_vhost_if2
-| | ${dut2_vif1}= | Set Variable | ${dut2_vhost_if1}
-| | ${dut2_vif2}= | Set Variable | ${dut2_vhost_if2}
-| | Set Interface State | ${dut2} | ${dut2_vif1} | up
-| | Set Interface State | ${dut2} | ${dut2_vif2} | up
-| | Add Fib Table | ${dut1} | ${fib_table_1}
-| | And Vpp Route Add | ${dut1} | 20.20.20.0 | 24 | vrf=${fib_table_1}
-| | ... | gateway=4.4.4.2 | interface=${dut1_vif1} | multipath=${TRUE}
+| | ${fib_table_1}= | Set Variable | ${101}
+| | ${fib_table_2}= | Evaluate | ${fib_table_1}+${vm_count}
| | Add Fib Table | ${dut1} | ${fib_table_1}
| | And Vpp Route Add | ${dut1} | 10.10.10.0 | 24 | vrf=${fib_table_1}
| | ... | gateway=1.1.1.2 | interface=${dut1_if1} | multipath=${TRUE}
| | Add Fib Table | ${dut1} | ${fib_table_2}
| | And Vpp Route Add | ${dut1} | 20.20.20.0 | 24 | vrf=${fib_table_2}
| | ... | gateway=2.2.2.2 | interface=${dut1_if2} | multipath=${TRUE}
-| | Add Fib Table | ${dut1} | ${fib_table_2}
-| | And Vpp Route Add | ${dut1} | 10.10.10.0 | 24 | vrf=${fib_table_2}
-| | ... | gateway=5.5.5.2 | interface=${dut1_vif2} | multipath=${TRUE}
-| | Add Fib Table | ${dut2} | ${fib_table_1}
-| | And Vpp Route Add | ${dut2} | 10.10.10.0 | 24 | vrf=${fib_table_1}
-| | ... | gateway=2.2.2.1 | interface=${dut2_if1} | multipath=${TRUE}
-| | Add Fib Table | ${dut2} | ${fib_table_1}
-| | And Vpp Route Add | ${dut2} | 20.20.20.0 | 24 | vrf=${fib_table_1}
-| | ... | gateway=4.4.4.1 | interface=${dut2_vif1} | multipath=${TRUE}
-| | Add Fib Table | ${dut2} | ${fib_table_2}
-| | And Vpp Route Add | ${dut2} | 10.10.10.0 | 24 | vrf=${fib_table_2}
-| | ... | gateway=5.5.5.2 | interface=${dut2_vif2} | multipath=${TRUE}
-| | Add Fib Table | ${dut2} | ${fib_table_2}
-| | And Vpp Route Add | ${dut2} | 20.20.20.0 | 24 | vrf=${fib_table_2}
-| | ... | gateway=3.3.3.2 | interface=${dut2_if2} | multipath=${TRUE}
| | Assign Interface To Fib Table | ${dut1} | ${dut1_if1} | ${fib_table_1}
-| | Assign Interface To Fib Table | ${dut1} | ${dut1_vif1} | ${fib_table_1}
| | Assign Interface To Fib Table | ${dut1} | ${dut1_if2} | ${fib_table_2}
-| | Assign Interface To Fib Table | ${dut1} | ${dut1_vif2} | ${fib_table_2}
-| | Assign Interface To Fib Table | ${dut2} | ${dut2_if1} | ${fib_table_1}
-| | Assign Interface To Fib Table | ${dut2} | ${dut2_vif1} | ${fib_table_1}
-| | Assign Interface To Fib Table | ${dut2} | ${dut2_if2} | ${fib_table_2}
-| | Assign Interface To Fib Table | ${dut2} | ${dut2_vif2} | ${fib_table_2}
| | Configure IP addresses on interfaces | ${dut1} | ${dut1_if1} | 1.1.1.2 | 30
| | Configure IP addresses on interfaces | ${dut1} | ${dut1_if2} | 2.2.2.1 | 30
-| | Configure IP addresses on interfaces | ${dut1} | ${dut1_vif1} | 4.4.4.1 | 30
-| | Configure IP addresses on interfaces | ${dut1} | ${dut1_vif2} | 5.5.5.1 | 30
-| | Configure IP addresses on interfaces | ${dut2} | ${dut2_if1} | 2.2.2.2 | 30
-| | Configure IP addresses on interfaces | ${dut2} | ${dut2_if2} | 3.3.3.1 | 30
-| | Configure IP addresses on interfaces | ${dut2} | ${dut2_vif1} | 4.4.4.1 | 30
-| | Configure IP addresses on interfaces | ${dut2} | ${dut2_vif2} | 5.5.5.1 | 30
| | ${tg1_if1_mac}= | Get Interface MAC | ${tg} | ${tg_if1}
-| | ${dut1_if2_mac}= | Get Interface MAC | ${dut1} | ${dut1_if2}
| | ${tg1_if2_mac}= | Get Interface MAC | ${tg} | ${tg_if2}
-| | ${dut2_if1_mac}= | Get Interface MAC | ${dut2} | ${dut2_if1}
-| | ${dut1_vif1_idx}= | Get Interface SW Index | ${dut1} | ${dut1_vif1}
-| | ${dut1_vif2_idx}= | Get Interface SW Index | ${dut1} | ${dut1_vif2}
-| | ${dut2_vif1_idx}= | Get Interface SW Index | ${dut2} | ${dut2_vif1}
-| | ${dut2_vif2_idx}= | Get Interface SW Index | ${dut2} | ${dut2_vif2}
-| | ${dut1_vif1_mac}= | Get Vhost User Mac By Sw Index | ${dut1}
-| | ... | ${dut1_vif1_idx}
-| | ${dut1_vif2_mac}= | Get Vhost User Mac By Sw Index | ${dut1}
-| | ... | ${dut1_vif2_idx}
-| | ${dut2_vif1_mac}= | Get Vhost User Mac By Sw Index | ${dut2}
-| | ... | ${dut2_vif1_idx}
-| | ${dut2_vif2_mac}= | Get Vhost User Mac By Sw Index | ${dut2}
-| | ... | ${dut2_vif2_idx}
-| | Set Test Variable | ${dut1_vif1_mac}
-| | Set Test Variable | ${dut1_vif2_mac}
-| | Set Test Variable | ${dut2_vif1_mac}
-| | Set Test Variable | ${dut2_vif2_mac}
| | Add arp on dut | ${dut1} | ${dut1_if1} | 1.1.1.1 | ${tg1_if1_mac}
| | Add arp on dut | ${dut1} | ${dut1_if2} | 2.2.2.2 | ${dut2_if1_mac}
-| | Add arp on dut | ${dut1} | ${dut1_vif1} | 4.4.4.2 | 52:54:00:00:04:01
-| | Add arp on dut | ${dut1} | ${dut1_vif2} | 5.5.5.2 | 52:54:00:00:04:02
-| | Add arp on dut | ${dut2} | ${dut2_if1} | 2.2.2.1 | ${dut1_if2_mac}
-| | Add arp on dut | ${dut2} | ${dut2_if2} | 3.3.3.2 | ${tg1_if2_mac}
-| | Add arp on dut | ${dut2} | ${dut2_vif1} | 4.4.4.2 | 52:54:00:00:04:01
-| | Add arp on dut | ${dut2} | ${dut2_vif2} | 5.5.5.2 | 52:54:00:00:04:02
-| | Vpp Route Add | ${dut1} | 20.20.20.0 | 24 | 4.4.4.2 | ${dut1_vif1}
-| | ... | vrf=${fib_table_1}
| | Vpp Route Add | ${dut1} | 10.10.10.0 | 24 | 1.1.1.1 | ${dut1_if1}
| | ... | vrf=${fib_table_1}
| | Vpp Route Add | ${dut1} | 20.20.20.0 | 24 | 2.2.2.2 | ${dut1_if2}
| | ... | vrf=${fib_table_2}
-| | Vpp Route Add | ${dut1} | 10.10.10.0 | 24 | 5.5.5.2 | ${dut1_vif2}
-| | ... | vrf=${fib_table_2}
-| | Vpp Route Add | ${dut2} | 20.20.20.0 | 24 | 4.4.4.2 | ${dut2_vif1}
-| | ... | vrf=${fib_table_1}
-| | Vpp Route Add | ${dut2} | 10.10.10.0 | 24 | 2.2.2.1 | ${dut2_if1}
-| | ... | vrf=${fib_table_1}
-| | Vpp Route Add | ${dut2} | 20.20.20.0 | 24 | 3.3.3.2 | ${dut2_if2}
-| | ... | vrf=${fib_table_2}
-| | Vpp Route Add | ${dut2} | 10.10.10.0 | 24 | 5.5.5.2 | ${dut2_vif2}
-| | ... | vrf=${fib_table_2}
+| | ${ip_base_start}= | Set Variable | ${4}
+| | :FOR | ${number} | IN RANGE | 1 | ${vm_count}+1
+| | | ${sock1}= | Set Variable | /tmp/sock-${number}-1
+| | | ${sock2}= | Set Variable | /tmp/sock-${number}-2
+| | | ${fib_table_1}= | Evaluate | ${100}+${number}
+| | | ${fib_table_2}= | Evaluate | ${fib_table_1}+${1}
+| | | ${ip_base_vif1}= | Evaluate | ${ip_base_start}+(${number}-1)*2
+| | | ${ip_base_vif2}= | Evaluate | ${ip_base_vif1}+1
+| | | ${ip_net_vif1}= | Set Variable
+| | | ... | ${ip_base_vif1}.${ip_base_vif1}.${ip_base_vif1}
+| | | ${ip_net_vif2}= | Set Variable
+| | | ... | ${ip_base_vif2}.${ip_base_vif2}.${ip_base_vif2}
+| | | Configure vhost interfaces for L2BD forwarding | ${dut1}
+| | | ... | ${sock1} | ${sock2} | dut1-vhost-${number}-if1
+| | | ... | dut1-vhost-${number}-if2
+| | | Set Interface State | ${dut1} | ${dut1-vhost-${number}-if1} | up
+| | | Set Interface State | ${dut1} | ${dut1-vhost-${number}-if2} | up
+| | | Add Fib Table | ${dut1} | ${fib_table_1}
+| | | And Vpp Route Add | ${dut1} | 20.20.20.0 | 24 | vrf=${fib_table_1}
+| | | ... | gateway=${ip_net_vif1}.1 | interface=${dut1-vhost-${number}-if1}
+| | | ... | multipath=${TRUE}
+| | | Add Fib Table | ${dut1} | ${fib_table_2}
+| | | And Vpp Route Add | ${dut1} | 10.10.10.0 | 24 | vrf=${fib_table_2}
+| | | ... | gateway=${ip_net_vif2}.2 | interface=${dut1-vhost-${number}-if2}
+| | | ... | multipath=${TRUE}
+| | | Assign Interface To Fib Table | ${dut1} | ${dut1-vhost-${number}-if1}
+| | | ... | ${fib_table_1}
+| | | Assign Interface To Fib Table | ${dut1} | ${dut1-vhost-${number}-if2}
+| | | ... | ${fib_table_2}
+| | | Configure IP addresses on interfaces
+| | | ... | ${dut1} | ${dut1-vhost-${number}-if1} | ${ip_net_vif1}.1 | 30
+| | | ... | ${dut1} | ${dut1-vhost-${number}-if2} | ${ip_net_vif2}.1 | 30
+| | | ${dut1_vif1_idx}= | Get Interface SW Index | ${dut1}
+| | | ... | ${dut1-vhost-${number}-if1}
+| | | ${dut1_vif2_idx}= | Get Interface SW Index | ${dut1}
+| | | ... | ${dut1-vhost-${number}-if2}
+| | | ${dut1_vif1_mac}= | Get Vhost User Mac By Sw Index | ${dut1}
+| | | ... | ${dut1_vif1_idx}
+| | | ${dut1_vif2_mac}= | Get Vhost User Mac By Sw Index | ${dut1}
+| | | ... | ${dut1_vif2_idx}
+| | | Set Test Variable | ${dut1-vhost-${number}-if1_mac}
+| | | ... | ${dut1_vif1_mac}
+| | | Set Test Variable | ${dut1-vhost-${number}-if2_mac}
+| | | ... | ${dut1_vif2_mac}
+| | | ${qemu_id}= | Set Variable If | ${number} < 10 | 0${number}
+| | | ... | ${number}
+| | | Add arp on dut | ${dut1} | ${dut1-vhost-${number}-if1}
+| | | ... | ${ip_net_vif1}.2 | 52:54:00:00:${qemu_id}:01
+| | | Add arp on dut | ${dut1} | ${dut1-vhost-${number}-if2}
+| | | ... | ${ip_net_vif2}.2 | 52:54:00:00:${qemu_id}:02
+| | | Vpp Route Add | ${dut1} | 20.20.20.0 | 24 | ${ip_net_vif1}.2
+| | | ... | ${dut1-vhost-${number}-if1} | vrf=${fib_table_1}
+| | | Vpp Route Add | ${dut1} | 10.10.10.0 | 24 | ${ip_net_vif2}.2
+| | | ... | ${dut1-vhost-${number}-if2} | vrf=${fib_table_2}
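
The per-VM addressing in the keyword above derives two /30 networks per VM from the VM number. The same arithmetic as a Python sketch (vm_count mirrors the keyword argument; VM numbers are 1-based):

    vm_count = 2
    ip_base_start = 4
    for number in range(1, vm_count + 1):
        fib_table_1 = 100 + number            # 101, 102, ...
        fib_table_2 = fib_table_1 + 1
        ip_base_vif1 = ip_base_start + (number - 1) * 2
        ip_base_vif2 = ip_base_vif1 + 1
        # VM 1 -> 4.4.4.x and 5.5.5.x; VM 2 -> 6.6.6.x and 7.7.7.x
        ip_net_vif1 = '{0}.{0}.{0}'.format(ip_base_vif1)
        ip_net_vif2 = '{0}.{0}.{0}'.format(ip_base_vif2)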
-| Initialize IPv4 forwarding with vhost for '${nr}' VMs in 3-node circular topology
+| Initialize IPv4 forwarding with vhost in 3-node circular topology
| | [Documentation]
| | ... | Create pairs of Vhost-User interfaces for defined number of VMs on all
-| | ... | VPP nodes. Set UP state of all VPP interfaces in path. Create ${nr}+1
-| | ... | FIB tables on each DUT with multipath routing. Assign each Virtual
-| | ... | interface to FIB table with Physical interface or Virtual interface on
-| | ... | both nodes. Setup IPv4 addresses with /30 prefix on DUT-TG links and
-| | ... | /30 prefix on DUT1-DUT2 link. Set routing on all DUT nodes in all FIB
-| | ... | tables with prefix /24 and next hop of neighbour IPv4 address. Setup
-| | ... | ARP on all VPP interfaces.
+| | ... | VPP nodes. Set UP state of all VPP interfaces in path. Create
+| | ... | ${vm_count}+1 FIB tables on each DUT with multipath routing. Assign
+| | ... | each Virtual interface to FIB table with Physical interface or Virtual
+| | ... | interface on both nodes. Setup IPv4 addresses with /30 prefix on
+| | ... | DUT-TG links and /30 prefix on DUT1-DUT2 link. Set routing on all DUT
+| | ... | nodes in all FIB tables with prefix /24 and next hop of neighbour IPv4
+| | ... | address. Setup ARP on all VPP interfaces.
| | ...
| | ... | *Arguments:*
-| | ... | _None_
+| | ... | - vm_count - Number of guest VMs. Type: integer
| | ...
| | ... | *Note:*
| | ... | Socket paths for VM are defined in following format:
| | ... | - /tmp/sock-${VM_ID}-1
| | ... | - /tmp/sock-${VM_ID}-2
| | ...
-| | ... | *Return:*
-| | ... | - No value returned
-| | ...
| | ... | *Example:*
| | ...
-| | ... | \| IPv4 forwarding with Vhost-User for '2' VMs initialized in \
-| | ... | a 3-node circular topology \|
+| | ... | \| IPv4 forwarding with Vhost-User initialized in a 3-node circular\
+| | ... | topology \| 1 \|
+| | ...
+| | [Arguments] | ${vm_count}=${1}
| | ...
| | Set interfaces in path up
| | ${fib_table_1}= | Set Variable | ${101}
-| | ${fib_table_2}= | Evaluate | ${fib_table_1}+${nr}
+| | ${fib_table_2}= | Evaluate | ${fib_table_1}+${vm_count}
| | Add Fib Table | ${dut1} | ${fib_table_1}
| | And Vpp Route Add | ${dut1} | 10.10.10.0 | 24 | vrf=${fib_table_1}
| | ... | gateway=1.1.1.2 | interface=${dut1_if1} | multipath=${TRUE}
@@ -379,7 +350,7 @@
| | Vpp Route Add | ${dut2} | 20.20.20.0 | 24 | 3.3.3.2 | ${dut2_if2}
| | ... | vrf=${fib_table_2}
| | ${ip_base_start}= | Set Variable | ${4}
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
+| | :FOR | ${number} | IN RANGE | 1 | ${vm_count}+1
| | | ${sock1}= | Set Variable | /tmp/sock-${number}-1
| | | ${sock2}= | Set Variable | /tmp/sock-${number}-2
| | | ${fib_table_1}= | Evaluate | ${100}+${number}
@@ -849,80 +820,61 @@
| | ... | 172.16.0.2 | 172.16.0.1
| | Configure L2XC | ${dut2} | ${dut2_if2} | ${dut2s_vxlan}
-| Initialize L2 xconnect with Vhost-User in 3-node circular topology
+| Initialize L2 xconnect with Vhost-User on node
| | [Documentation]
-| | ... | Create two Vhost-User interfaces on all defined VPP nodes. Cross
-| | ... | connect each Vhost interface with one physical interface.
+| | ... | Create pairs of Vhost-User interfaces for defined number of VMs on
+| | ... | defined VPP node. Add each Vhost-User interface into L2 cross-connect
+| | ... | with physical interface or Vhost-User interface of another VM.
| | ...
| | ... | *Arguments:*
-| | ... | - sock1 - Socket path for first Vhost-User interface. Type: string
-| | ... | - sock2 - Socket path for second Vhost-User interface. Type: string
+| | ... | - dut - DUT node. Type: string
+| | ... | - vm_count - VM count. Type: integer
| | ...
-| | ... | _NOTE:_ This KW uses following test case variables:
-| | ... | - ${dut1} - DUT1 node.
-| | ... | - ${dut2} - DUT2 node.
-| | ... | - ${dut1_if1} - DUT1 interface towards TG.
-| | ... | - ${dut1_if2} - DUT1 interface towards DUT2.
-| | ... | - ${dut2_if1} - DUT2 interface towards DUT1.
-| | ... | - ${dut2_if2} - DUT2 interface towards TG.
+| | ... | *Note:*
+| | ... | Socket paths for VM are defined in following format:
+| | ... | - /tmp/sock-${VM_ID}-1
+| | ... | - /tmp/sock-${VM_ID}-2
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| L2 xconnect with Vhost-User initialized in a 3-node \
-| | ... | circular topology \| /tmp/sock1 \| /tmp/sock2 \|
+| | ... | \| Initialize L2 xconnect with Vhost-User on node \| DUT1 \| 1 \|
| | ...
-| | [Arguments] | ${sock1} | ${sock2}
+| | [Arguments] | ${dut} | ${vm_count}=${1}
| | ...
-| | Configure vhost interfaces for L2BD forwarding | ${dut1}
-| | ... | ${sock1} | ${sock2} | dut1_vhost_if1 | dut1_vhost_if2
-| | Configure L2XC | ${dut1} | ${dut1_if1} | ${dut1_vhost_if1}
-| | Configure L2XC | ${dut1} | ${dut1_if2} | ${dut1_vhost_if2}
-| | Configure vhost interfaces for L2BD forwarding | ${dut2}
-| | ... | ${sock1} | ${sock2} | dut2_vhost_if1 | dut2_vhost_if2
-| | Configure L2XC | ${dut2} | ${dut2_if1} | ${dut2_vhost_if1}
-| | Configure L2XC | ${dut2} | ${dut2_if2} | ${dut2_vhost_if2}
+| | :FOR | ${number} | IN RANGE | 1 | ${vm_count}+1
+| | | ${sock1}= | Set Variable | /tmp/sock-${number}-1
+| | | ${sock2}= | Set Variable | /tmp/sock-${number}-2
+| | | ${prev_index}= | Evaluate | ${number}-1
+| | | Configure vhost interfaces for L2BD forwarding | ${nodes['${dut}']}
+| | | ... | ${sock1} | ${sock2} | ${dut}-vhost-${number}-if1
+| | | ... | ${dut}-vhost-${number}-if2
+| | | ${dut_xconnect_if1}= | Set Variable If | ${number}==1 | ${${dut}_if1}
+| | | ... | ${${dut}-vhost-${prev_index}-if2}
+| | | Configure L2XC | ${nodes['${dut}']} | ${dut_xconnect_if1}
+| | | ... | ${${dut}-vhost-${number}-if1}
+| | | Run Keyword If | ${number}==${vm_count} | Configure L2XC
+| | | ... | ${nodes['${dut}']} | ${${dut}-vhost-${number}-if2} | ${${dut}_if2}
+| | All VPP Interfaces Ready Wait | ${nodes}
-| Initialize L2 xconnect with Vhost-User for '${nr}' in 3-node circular topology
+| Initialize L2 xconnect with Vhost-User
| | [Documentation]
-| | ... | Create pairs of Vhost-User interfaces on all defined VPP nodes. Cross
-| | ... | connect each Vhost interface with one physical interface or virtual
-| | ... | interface to create a chain accross DUT node.
+| | ... | Create pairs of Vhost-User interfaces for defined number of VMs on
+| | ... | all VPP nodes. Add each Vhost-User interface into L2 cross-connect
+| | ... | with physical interface or Vhost-User interface of another VM.
| | ...
| | ... | *Arguments:*
-| | ... | _None_
-| | ...
-| | ... | *Note:*
-| | ... | Socket paths for VM are defined in following format:
-| | ... | - /tmp/sock-${VM_ID}-1
-| | ... | - /tmp/sock-${VM_ID}-2
+| | ... | - vm_count - VM count. Type: integer
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| L2 xconnect with Vhost-User for '2' initialized in a 3-node \
-| | ... | circular topology \|
+| | ... | \| Initialize L2 xconnect with Vhost-User \| 1 \|
| | ...
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
-| | | ${sock1}= | Set Variable | /tmp/sock-${number}-1
-| | | ${sock2}= | Set Variable | /tmp/sock-${number}-2
-| | | ${prev_index}= | Evaluate | ${number}-1
-| | | Configure vhost interfaces for L2BD forwarding | ${dut1}
-| | | ... | ${sock1} | ${sock2} | dut1-vhost-${number}-if1
-| | | ... | dut1-vhost-${number}-if2
-| | | ${dut1_xconnect_if1}= | Set Variable If | ${number}==1 | ${dut1_if1}
-| | | ... | ${dut1-vhost-${prev_index}-if2}
-| | | Configure L2XC | ${dut1} | ${dut1_xconnect_if1}
-| | | ... | ${dut1-vhost-${number}-if1}
-| | | Configure vhost interfaces for L2BD forwarding | ${dut2}
-| | | ... | ${sock1} | ${sock2} | dut2-vhost-${number}-if1
-| | | ... | dut2-vhost-${number}-if2
-| | | ${dut2_xconnect_if1}= | Set Variable If | ${number}==1 | ${dut2_if1}
-| | | ... | ${dut2-vhost-${prev_index}-if2}
-| | | Configure L2XC | ${dut2} | ${dut2_xconnect_if1}
-| | | ... | ${dut2-vhost-${number}-if1}
-| | | Run Keyword If | ${number}==${nr} | Configure L2XC
-| | | ... | ${dut1} | ${dut1-vhost-${number}-if2} | ${dut1_if2}
-| | | Run Keyword If | ${number}==${nr} | Configure L2XC
-| | | ... | ${dut2} | ${dut2-vhost-${number}-if2} | ${dut2_if2}
+| | [Arguments] | ${vm_count}=${1}
+| | ...
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Initialize L2 xconnect with Vhost-User on node | ${dut}
+| | | ... | vm_count=${vm_count}
| Initialize L2 xconnect with Vhost-User and VLAN in 3-node circular topology
| | [Documentation]
@@ -1386,47 +1338,48 @@
| | Configure L2XC | ${dut2} | ${dut2_if1} | ${dut2_if2}
| | Configure MACIP ACLs | ${dut1} | ${dut1_if1} | ${dut1_if2}
-| Initialize L2 bridge domains with Vhost-User in 3-node circular topology
+| Initialize L2 bridge domains with Vhost-User on node
| | [Documentation]
-| | ... | Create two Vhost-User interfaces on all defined VPP nodes. Add each
-| | ... | Vhost-User interface into L2 bridge domains with learning enabled
-| | ... | with physical inteface.
+| | ... | Create pairs of Vhost-User interfaces for defined number of VMs on
+| | ... | defined VPP node. Add each Vhost-User interface into L2 bridge
+| | ... | domains with learning enabled with physical interface or Vhost-User
+| | ... | interface of another VM.
| | ...
| | ... | *Arguments:*
-| | ... | - bd_id1 - Bridge domain ID. Type: integer
-| | ... | - bd_id2 - Bridge domain ID. Type: integer
-| | ... | - sock1 - Sock path for the first Vhost-User interface. Type: string
-| | ... | - sock2 - Sock path for the second Vhost-User interface. Type: string
+| | ... | - dut - DUT node. Type: string
+| | ... | - vm_count - VM count. Type: integer
| | ...
-| | ... | _NOTE:_ This KW uses following test case variables:
-| | ... | - ${dut1} - DUT1 node.
-| | ... | - ${dut2} - DUT2 node.
-| | ... | - ${dut1_if1} - DUT1 interface towards TG.
-| | ... | - ${dut1_if2} - DUT1 interface towards DUT2.
-| | ... | - ${dut2_if1} - DUT2 interface towards DUT1.
-| | ... | - ${dut2_if2} - DUT2 interface towards TG.
+| | ... | *Note:*
+| | ... | Socket paths for VM are defined in following format:
+| | ... | - /tmp/sock-${VM_ID}-1
+| | ... | - /tmp/sock-${VM_ID}-2
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| L2 bridge domains with Vhost-User initialized in a 3-node \
-| | ... | circular topology \| 1 \| 2 \| /tmp/sock1 \| /tmp/sock2 \|
+| | ... | \| Initialize L2 bridge domains with Vhost-User on node \| DUT1 \
+| | ... | \| 1 \|
| | ...
-| | [Arguments] | ${bd_id1} | ${bd_id2} | ${sock1} | ${sock2}
+| | [Arguments] | ${dut} | ${vm_count}=${1}
| | ...
-| | Configure vhost interfaces for L2BD forwarding | ${dut1}
-| | ... | ${sock1} | ${sock2} | dut1_vhost_if1 | dut1_vhost_if2
-| | Add interface to bridge domain | ${dut1} | ${dut1_if1} | ${bd_id1}
-| | Add interface to bridge domain | ${dut1} | ${dut1_vhost_if1} | ${bd_id1}
-| | Add interface to bridge domain | ${dut1} | ${dut1_if2} | ${bd_id2}
-| | Add interface to bridge domain | ${dut1} | ${dut1_vhost_if2} | ${bd_id2}
-| | Configure vhost interfaces for L2BD forwarding | ${dut2}
-| | ... | ${sock1} | ${sock2} | dut2_vhost_if1 | dut2_vhost_if2
-| | Add interface to bridge domain | ${dut2} | ${dut2_if1} | ${bd_id1}
-| | Add interface to bridge domain | ${dut2} | ${dut2_vhost_if1} | ${bd_id1}
-| | Add interface to bridge domain | ${dut2} | ${dut2_if2} | ${bd_id2}
-| | Add interface to bridge domain | ${dut2} | ${dut2_vhost_if2} | ${bd_id2}
+| | ${bd_id2}= | Evaluate | ${vm_count}+1
+| | Add interface to bridge domain | ${nodes['${dut}']}
+| | ... | ${${dut}_if1} | ${1}
+| | Add interface to bridge domain | ${nodes['${dut}']}
+| | ... | ${${dut}_if2} | ${bd_id2}
+| | :FOR | ${number} | IN RANGE | 1 | ${vm_count}+1
+| | | ${sock1}= | Set Variable | /tmp/sock-${number}-1
+| | | ${sock2}= | Set Variable | /tmp/sock-${number}-2
+| | | Configure vhost interfaces for L2BD forwarding | ${nodes['${dut}']}
+| | | ... | ${sock1} | ${sock2} | ${dut}-vhost-${number}-if1
+| | | ... | ${dut}-vhost-${number}-if2
+| | | ${bd_id2}= | Evaluate | ${number}+1
+| | | Add interface to bridge domain | ${nodes['${dut}']}
+| | | ... | ${${dut}-vhost-${number}-if1} | ${number}
+| | | Add interface to bridge domain | ${nodes['${dut}']}
+| | | ... | ${${dut}-vhost-${number}-if2} | ${bd_id2}
+| | All VPP Interfaces Ready Wait | ${nodes}
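The bridge-domain numbering used above can likewise be sketched: physical if1 sits in BD 1, VM number N bridges BDs N and N+1, and physical if2 sits in BD vm_count+1. A minimal Python illustration (helper name is ours, not library code):

    # Sketch only: returns (interface, bd_id) memberships matching the
    # keyword above for one DUT node.
    def bridge_domain_members(dut, vm_count):
        members = [('{}_if1'.format(dut), 1),
                   ('{}_if2'.format(dut), vm_count + 1)]
        for number in range(1, vm_count + 1):
            members.append(('{}-vhost-{}-if1'.format(dut, number), number))
            members.append(('{}-vhost-{}-if2'.format(dut, number), number + 1))
        return members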
-| Initialize L2 bridge domains with Vhost-User for '${nr}' VMs in 3-node circular topology
+| Initialize L2 bridge domains with Vhost-User
| | [Documentation]
| | ... | Create pairs of Vhost-User interfaces for defined number of VMs on all
| | ... | defined VPP nodes. Add each Vhost-User interface into L2 bridge
@@ -1434,41 +1387,18 @@
| | ... | interface of another VM.
| | ...
| | ... | *Arguments:*
-| | ... | _None_
-| | ...
-| | ... | *Note:*
-| | ... | Socket paths for VM are defined in following format:
-| | ... | - /tmp/sock-${VM_ID}-1
-| | ... | - /tmp/sock-${VM_ID}-2
+| | ... | - vm_count - VM count. Type: integer
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| L2 bridge domains with Vhost-User for '2' VMs initialized in \
-| | ... | a 3-node circular topology \|
+| | ... | \| Initialize L2 bridge domains with Vhost-User \| 1 \|
| | ...
-| | ${bd_id2}= | Evaluate | ${nr}+1
-| | Add interface to bridge domain | ${dut1} | ${dut1_if1} | ${1}
-| | Add interface to bridge domain | ${dut1} | ${dut1_if2} | ${bd_id2}
-| | Add interface to bridge domain | ${dut2} | ${dut2_if1} | ${1}
-| | Add interface to bridge domain | ${dut2} | ${dut2_if2} | ${bd_id2}
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
-| | | ${sock1}= | Set Variable | /tmp/sock-${number}-1
-| | | ${sock2}= | Set Variable | /tmp/sock-${number}-2
-| | | Configure vhost interfaces for L2BD forwarding | ${dut1}
-| | | ... | ${sock1} | ${sock2} | dut1-vhost-${number}-if1
-| | | ... | dut1-vhost-${number}-if2
-| | | ${bd_id2}= | Evaluate | ${number}+1
-| | | Add interface to bridge domain | ${dut1}
-| | | ... | ${dut1-vhost-${number}-if1} | ${number}
-| | | Add interface to bridge domain | ${dut1}
-| | | ... | ${dut1-vhost-${number}-if2} | ${bd_id2}
-| | | Configure vhost interfaces for L2BD forwarding | ${dut2}
-| | | ... | ${sock1} | ${sock2} | dut2-vhost-${number}-if1
-| | | ... | dut2-vhost-${number}-if2
-| | | Add interface to bridge domain | ${dut2}
-| | | ... | ${dut2-vhost-${number}-if1} | ${number}
-| | | Add interface to bridge domain | ${dut2}
-| | | ... | ${dut2-vhost-${number}-if2} | ${bd_id2}
+| | [Arguments] | ${vm_count}=${1}
+| | ...
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Initialize L2 bridge domains with Vhost-User on node | ${dut}
+| | | ... | vm_count=${vm_count}
| Initialize L2 bridge domain with VXLANoIPv4 in 3-node circular topology
| | [Documentation]
@@ -1609,32 +1539,6 @@
| | Add interface to bridge domain | ${dut1} | ${vhost_if1} | ${dut1_bd_id1}
| | Add interface to bridge domain | ${dut1} | ${vhost_if2} | ${dut1_bd_id2}
-| Initialize L2 bridge domains with Vhost-User in 2-node circular topology
-| | [Documentation]
-| | ... | Create two Vhost-User interfaces on all defined VPP nodes. Add each
-| | ... | Vhost-User interface into L2 bridge domains with learning enabled
-| | ... | with physical inteface.
-| | ...
-| | ... | *Arguments:*
-| | ... | - bd_id1 - Bridge domain ID. Type: integer
-| | ... | - bd_id2 - Bridge domain ID. Type: integer
-| | ... | - sock1 - Sock path for first Vhost-User interface. Type: string
-| | ... | - sock2 - Sock path for second Vhost-User interface. Type: string
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| L2 bridge domains with Vhost-User initialized in a 2-node \
-| | ... | circular topology \| 1 \| 2 \| /tmp/sock1 \| /tmp/sock2 \|
-| | ...
-| | [Arguments] | ${bd_id1} | ${bd_id2} | ${sock1} | ${sock2}
-| | ...
-| | Configure vhost interfaces for L2BD forwarding | ${dut1}
-| | ... | ${sock1} | ${sock2}
-| | Add interface to bridge domain | ${dut1} | ${dut1_if1} | ${bd_id1}
-| | Add interface to bridge domain | ${dut1} | ${vhost_if1} | ${bd_id1}
-| | Add interface to bridge domain | ${dut1} | ${dut1_if2} | ${bd_id2}
-| | Add interface to bridge domain | ${dut1} | ${vhost_if2} | ${bd_id2}
-
| Initialize L2 bridge domains with VLAN dot1q sub-interfaces in a 3-node circular topology
| | [Documentation]
| | ... | Setup L2 bridge domain topology with learning enabled with VLAN
@@ -1877,445 +1781,348 @@
| Configure guest VM with dpdk-testpmd connected via vhost-user
| | [Documentation]
-| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting
-| | ... | DPDK testpmd. Qemu Guest uses by default 5 cores and 2048M. Testpmd
-| | ... | uses 5 cores (1 main core and 4 cores dedicated to io) mem-channel=4,
-| | ... | txq/rxq=256, burst=64, disable-hw-vlan, disable-rss,
-| | ... | driver usr/lib/librte_pmd_virtio.so and fwd mode is io.
+| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting\
+| | ... | DPDK testpmd.
| | ...
| | ... | *Arguments:*
-| | ... | - dut_node - DUT node to start guest VM on. Type: dictionary
+| | ... | - dut - DUT node to start guest VM on. Type: string
| | ... | - sock1 - Socket path for first Vhost-User interface. Type: string
-| | ... | - sock2 - Socket path for second Vhost-User interface. Type: string
+| | ... | - sock2 - Socket path for second Vhost-User interface.
+| | ... | Type: string
| | ... | - vm_name - QemuUtil instance name. Type: string
-| | ... | - skip - Number of cpus which will be skipped. Type: integer
-| | ... | - count - Number of cpus which will be allocated for qemu.
-| | ... | Type: integer
-| | ... | - qemu_id - Qemu Id when starting more then one guest VM on DUT node.
-| | ... | Type: integer
-| | ... | - jumbo_frames - Set True if jumbo frames are used in the test.
+| | ... | - qemu_id - Qemu Id when starting more than one guest VM on DUT
+| | ... | node. Type: integer
+| | ... | - jumbo - Set True if jumbo frames are used in the test.
| | ... | Type: bool
-| | ... | - use_tuned_cfs - FIXME.
+| | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
+| | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
+| | ... | Type: bool
+| | ...
+| | ... | *Note:*
+| | ... | KW uses test variables \${rxq_count_int}, \${thr_count_int} and
+| | ... | \${cpu_count_int} set by "Add worker threads and rxqueues to all DUTs"
| | ...
| | ... | *Example:*
| | ...
| | ... | \| Configure guest VM with dpdk-testpmd connected via vhost-user \
-| | ... | \| ${nodes['DUT1']} \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM \| ${6} \
-| | ... | \| ${5} \|
-| | ... | \| Configure guest VM with dpdk-testpmd connected via vhost-user \
| | ... | \| DUT1 \| /tmp/sock-2-1 \| /tmp/sock-2-2 \| DUT1_VM2 \
| | ... | \| qemu_id=${2} \|
| | ...
-| | [Arguments] | ${dut_node} | ${sock1} | ${sock2} | ${vm_name} | ${skip}=${6}
-| | ... | ${count}=${5} | ${qemu_id}=${1} | ${jumbo_frames}=${False}
+| | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name} | ${qemu_id}=${1}
+| | ... | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
+| | ... | ${use_tuned_cfs}=${False}
| | ...
| | Import Library | resources.libraries.python.QemuUtils | qemu_id=${qemu_id}
| | ... | WITH NAME | ${vm_name}
+| | Run keyword | ${vm_name}.Qemu Set Node | ${nodes['${dut}']}
| | ${serial_port}= | Evaluate | ${qemu_id} + ${4555}
| | Run keyword | ${vm_name}.Qemu Set Serial Port | ${serial_port}
| | ${ssh_fwd_port}= | Evaluate | ${qemu_id} + ${10021}
| | Run keyword | ${vm_name}.Qemu Set Ssh Fwd Port | ${ssh_fwd_port}
-| | ${dut_numa}= | Get interfaces numa node | ${dut_node}
-| | ... | ${dut1_if1} | ${dut1_if2}
-| | ${skip_cnt}= | Evaluate | ${skip} + (${qemu_id} - 1) * ${count}
-| | ${qemu_cpus}= | Cpu slice of list per node | ${dut_node} | ${dut_numa}
-| | ... | skip_cnt=${skip_cnt} | cpu_cnt=${count} | smt_used=${False}
+| | ${dut_numa}= | Get interfaces numa node | ${nodes['${dut}']}
+| | ... | ${${dut}_if1} | ${${dut}_if2}
+# Compute CPU placement for VM based on expected DUT placement.
+| | ${os_cpus}= | Set Variable | ${1}
+| | ${dut_main_cpus}= | Set Variable | ${1}
+| | ${dut_wk_cpus}= | Set Variable | ${cpu_count_int}
+| | ${vm_cpus}= | Evaluate | ${dut_wk_cpus} + ${dut_main_cpus}
+| | ${skip_dut}= | Evaluate | ${dut_wk_cpus} + ${dut_main_cpus} + ${os_cpus}
+| | ${skip_cpu}= | Evaluate | ${skip_dut} + (${qemu_id} - ${1}) * ${vm_cpus}
+| | ${qemu_cpus}= | Cpu slice of list per node | ${nodes['${dut}']}
+| | ... | ${dut_numa} | skip_cnt=${skip_cpu} | cpu_cnt=${vm_cpus}
+| | ... | smt_used=${smt_used}
+| | ${vm_thrs}= | Get Length | ${qemu_cpus}
+| | Run keyword | ${vm_name}.Qemu Set Queue Count | ${rxq_count_int}
+| | Run keyword | ${vm_name}.Qemu Set Queue Size | ${perf_qemu_qsz}
| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
-| | ... | jumbo_frames=${jumbo_frames}
+| | ... | jumbo_frames=${jumbo}
| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock2}
-| | ... | jumbo_frames=${jumbo_frames}
-| | ${apply_patch}= | Set Variable If | "${perf_qemu_qsz}" == "256" | ${False}
-| | ... | ${TRUE}
+| | ... | jumbo_frames=${jumbo}
+| | ${apply_patch}= | Set Variable | ${False}
| | ${perf_qemu_path}= | Set Variable If | ${apply_patch}
| | ... | ${perf_qemu_path}-patch/bin/
| | ... | ${perf_qemu_path}-base/bin/
-| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${dut_node}
-| | ... | apply_patch=${apply_patch}
+| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${nodes['${dut}']}
+| | ... | apply_patch=${False}
| | Run keyword | ${vm_name}.Qemu Set Path | ${perf_qemu_path}
-| | Run keyword | ${vm_name}.Qemu Set Node | ${dut_node}
-| | Run keyword | ${vm_name}.Qemu Set Smp | ${count} | ${count} | 1 | 1
+| | Run keyword | ${vm_name}.Qemu Set Smp | ${vm_thrs} | ${vm_thrs} | 1 | 1
| | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
| | Run keyword | ${vm_name}.Qemu Set Disk Image | ${perf_vm_image}
| | ${vm}= | Run keyword | ${vm_name}.Qemu Start
| | Run keyword | ${vm_name}.Qemu Set Affinity | @{qemu_cpus}
| | Run keyword If | ${use_tuned_cfs} | ${vm_name}.Qemu Set Scheduler Policy
-| | ${max_pkt_len}= | Set Variable If | ${jumbo_frames} | 9000 | ${EMPTY}
-| | Dpdk Testpmd Start | ${vm} | eal_coremask=0x1f | eal_mem_channels=4
-| | ... | pmd_fwd_mode=io | pmd_disable_hw_vlan=${TRUE}
-| | ... | pmd_txd=${perf_qemu_qsz} | pmd_rxd=${perf_qemu_qsz}
+| | ${max_pkt_len}= | Set Variable If | ${jumbo} | 9000 | ${EMPTY}
+| | ${testpmd_cpus}= | Evaluate | ${thr_count_int} + ${1}
+| | ${testpmd_cpus}= | Cpu list per node str | ${nodes['${dut}']} | ${0}
+| | ... | cpu_cnt=${testpmd_cpus}
+| | Dpdk Testpmd Start | ${vm} | eal_corelist=${testpmd_cpus}
+| | ... | eal_mem_channels=4 | pmd_fwd_mode=io | pmd_disable_hw_vlan=${TRUE}
+| | ... | pmd_rxd=${perf_qemu_qsz} | pmd_txd=${perf_qemu_qsz}
+| | ... | pmd_rxq=${rxq_count_int} | pmd_txq=${rxq_count_int}
| | ... | pmd_max_pkt_len=${max_pkt_len}
| | Return From Keyword | ${vm}
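The CPU and port bookkeeping above reduces to simple arithmetic; a minimal Python sketch, assuming one CPU reserved for the host OS and one for the VPP main thread exactly as the keyword hard-codes (function names are illustrative):

    # Sketch only: each VM gets cpu_count_int worker CPUs plus one main
    # CPU, placed after the OS core, the VPP cores and any earlier VMs.
    def vm_cpu_slice(cpu_count_int, qemu_id):
        os_cpus = 1
        dut_main_cpus = 1
        dut_wk_cpus = cpu_count_int
        vm_cpus = dut_wk_cpus + dut_main_cpus
        skip_dut = dut_wk_cpus + dut_main_cpus + os_cpus
        skip_cpu = skip_dut + (qemu_id - 1) * vm_cpus
        return skip_cpu, vm_cpus

    # Serial console and SSH forwarding ports are offset by the QEMU id.
    def vm_ports(qemu_id):
        return 4555 + qemu_id, 10021 + qemu_id

    # With 2 VPP workers, the second VM (qemu_id=2) skips 7 CPUs and
    # takes 3, listening on serial port 4557 and SSH port 10023.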
-| Configure '${nr}' guest VMs with dpdk-testpmd connected via vhost-user in 3-node circular topology
+| Configure guest VMs with dpdk-testpmd connected via vhost-user on node
| | [Documentation]
-| | ... | Start QEMU guests with two vhost-user interfaces and interconnecting
-| | ... | DPDK testpmd for defined number of VMs on all defined VPP nodes.
+| | ... | Start ${vm_count} QEMU guests with two vhost-user interfaces and\
+| | ... | interconnecting DPDK testpmd for defined number of VMs on defined\
+| | ... | VPP node.
| | ...
| | ... | *Arguments:*
-| | ... | _None_
-| | ...
-| | ... | _NOTE:_ This KW expects following test case variables to be set:
-| | ... | - ${system_cpus} - Number of CPUs allocated for OS itself.
-| | ... | - ${vpp_cpus} - Number of CPUs allocated for VPP.
-| | ... | - ${vm_cpus} - Number of CPUs to be allocated per QEMU instance.
-| | ... | - ${jumbo_frames} - Jumbo frames are used (True) or are not used
-| | ... | (False) in the test.
+| | ... | - dut - DUT node to start guest VM on. Type: string
+| | ... | - vm_count - Number of guest VMs. Type: int
+| | ... | - jumbo - Jumbo frames are used (True) or are not used (False)
+| | ... | in the test. Type: boolean
+| | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
+| | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
+| | ... | Type: bool
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| Configure '2' guest VMs with dpdk-testpmd connected via vhost-user\
-| | ... | in 3-node circular topology \|
+| | ... | \| Configure guest VMs with dpdk-testpmd connected via \
+| | ... | vhost-user on node \| DUT1 \| 1 \| False \| 256 \|
| | ...
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
+| | [Arguments] | ${dut} | ${vm_count}=${1} | ${jumbo}=${False}
+| | ... | ${perf_qemu_qsz}=${256} | ${use_tuned_cfs}=${False}
+| | ...
+| | :FOR | ${number} | IN RANGE | 1 | ${vm_count}+1
| | | ${sock1}= | Set Variable | /tmp/sock-${number}-1
| | | ${sock2}= | Set Variable | /tmp/sock-${number}-2
-| | | ${skip_cpus}= | Evaluate | ${vpp_cpus}+${system_cpus}
-| | | ${vm1}= | Configure guest VM with dpdk-testpmd connected via vhost-user
-| | | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM${number}
-| | | ... | skip=${skip_cpus} | count=${vm_cpus} | qemu_id=${number}
-| | | ... | jumbo_frames=${jumbo_frames}
-| | | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM${number} | ${vm1}
-| | | ${vm2}= | Configure guest VM with dpdk-testpmd connected via vhost-user
-| | | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM${number}
-| | | ... | skip=${skip_cpus} | count=${vm_cpus} | qemu_id=${number}
-| | | ... | jumbo_frames=${jumbo_frames}
-| | | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM${number} | ${vm2}
-
-| Configure guest VM with dpdk-testpmd using SMT connected via vhost-user
+| | | ${vm}=
+| | | ... | Configure guest VM with dpdk-testpmd connected via vhost-user
+| | | ... | ${dut} | ${sock1} | ${sock2} | ${dut}_VM${number}
+| | | ... | qemu_id=${number} | jumbo=${jumbo} | perf_qemu_qsz=${perf_qemu_qsz}
+| | | ... | use_tuned_cfs=${use_tuned_cfs}
+| | | Set To Dictionary | ${${dut}_vm_refs} | ${dut}_VM${number} | ${vm}
+
+| Configure guest VMs with dpdk-testpmd connected via vhost-user
| | [Documentation]
-| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting
-| | ... | DPDK testpmd. Qemu Guest uses by default 5 cores and 2048M. Testpmd
-| | ... | uses 5 cores (1 main core and 4 cores dedicated to io) mem-channel=4,
-| | ... | txq/rxq=256, burst=64, disable-hw-vlan, disable-rss,
-| | ... | driver usr/lib/librte_pmd_virtio.so and fwd mode is io.
+| | ... | Start ${vm_count} QEMU guests with two vhost-user interfaces and\
+| | ... | interconnecting DPDK testpmd for defined number of VMs on all\
+| | ... | defined VPP nodes.
| | ...
| | ... | *Arguments:*
-| | ... | - dut_node - DUT node to start guest VM on. Type: dictionary
-| | ... | - sock1 - Socket path for first Vhost-User interface. Type: string
-| | ... | - sock2 - Socket path for second Vhost-User interface. Type: string
-| | ... | - vm_name - QemuUtil instance name. Type: string
-| | ... | - skip - number of cpus which will be skipped. Type: int
-| | ... | - count - number of cpus which will be allocated for qemu. Type: int
-| | ... | - jumbo_frames - Set True if jumbo frames are used in the test.
+| | ... | - vm_count - Number of guest VMs. Type: int
+| | ... | - jumbo - Jumbo frames are used (True) or are not used (False)
+| | ... | in the test. Type: boolean
+| | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
+| | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
| | ... | Type: bool
-| | ... | - use_tuned_cfs - FIXME.
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| Configure guest VM with dpdk-testpmd using SMT connected via \
-| | ... | vhost-user \| ${nodes['DUT1']} \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM \
-| | ... | \| ${6} \| ${5} \|
+| | ... | \| Configure guest VMs with dpdk-testpmd connected via vhost-user\
+| | ... | \| 1 \| False \| 256 \|
| | ...
-| | [Arguments] | ${dut_node} | ${sock1} | ${sock2} | ${vm_name} | ${skip}=${6}
-| | ... | ${count}=${5} | ${jumbo_frames}=${False}
+| | [Arguments] | ${vm_count}=${1} | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
+| | ... | ${use_tuned_cfs}=${False}
| | ...
-| | Import Library | resources.libraries.python.QemuUtils
-| | ... | WITH NAME | ${vm_name}
-| | ${dut_numa}= | Get interfaces numa node | ${dut_node}
-| | ... | ${dut1_if1} | ${dut1_if2}
-| | ${qemu_cpus}= | Cpu slice of list per node | ${dut_node} | ${dut_numa}
-| | ... | skip_cnt=${skip} | cpu_cnt=${count} | smt_used=${TRUE}
-| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
-| | ... | jumbo_frames=${jumbo_frames}
-| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock2}
-| | ... | jumbo_frames=${jumbo_frames}
-| | ${apply_patch}= | Set Variable If | "${perf_qemu_qsz}" == "256" | ${False}
-| | ... | ${TRUE}
-| | ${perf_qemu_path}= | Set Variable If | ${apply_patch}
-| | ... | ${perf_qemu_path}-patch/bin/
-| | ... | ${perf_qemu_path}-base/bin/
-| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${dut_node}
-| | ... | apply_patch=${apply_patch}
-| | Run keyword | ${vm_name}.Qemu Set Path | ${perf_qemu_path}
-| | Run keyword | ${vm_name}.Qemu Set Node | ${dut_node}
-| | Run keyword | ${vm_name}.Qemu Set Smp | ${count} | ${count} | 1 | 1
-| | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
-| | Run keyword | ${vm_name}.Qemu Set Disk Image | ${perf_vm_image}
-| | ${vm}= | Run keyword | ${vm_name}.Qemu Start
-| | Run keyword | ${vm_name}.Qemu Set Affinity | @{qemu_cpus}
-| | Run keyword If | ${use_tuned_cfs} | ${vm_name}.Qemu Set Scheduler Policy
-| | ${max_pkt_len}= | Set Variable If | ${jumbo_frames} | 9000 | ${EMPTY}
-| | Dpdk Testpmd Start | ${vm} | eal_coremask=0x1f | eal_mem_channels=4
-| | ... | pmd_fwd_mode=io | pmd_disable_hw_vlan=${TRUE}
-| | ... | pmd_txd=${perf_qemu_qsz} | pmd_rxd=${perf_qemu_qsz}
-| | ... | pmd_max_pkt_len=${max_pkt_len}
-| | Return From Keyword | ${vm}
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Configure guest VMs with dpdk-testpmd connected via vhost-user on node
+| | | ... | ${dut} | vm_count=${vm_count} | jumbo=${jumbo}
+| | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${use_tuned_cfs}
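The wrapper above fans the per-node keyword out over every DUT in the topology; "Get Matches" performs glob matching on the node names, roughly equivalent to this sketch (assuming the ${nodes} dictionary keys are the node names):

    import fnmatch

    # Sketch only: select topology nodes whose name matches a glob
    # pattern, as "Get Matches | ${nodes} | DUT*" does above.
    def get_matches(nodes, pattern='DUT*'):
        return [name for name in nodes if fnmatch.fnmatchcase(name, pattern)]

    # get_matches({'TG': {}, 'DUT1': {}, 'DUT2': {}}) returns the DUT
    # names only, here ['DUT1', 'DUT2'].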
| Configure guest VM with dpdk-testpmd-mac connected via vhost-user
| | [Documentation]
-| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting
-| | ... | DPDK testpmd. Qemu Guest uses by default 5 cores and 2048M. Testpmd
-| | ... | uses 5 cores (1 main core and 4 cores dedicated to io) mem-channel=4,
-| | ... | txq/rxq=256, burst=64, disable-hw-vlan, disable-rss,
-| | ... | driver usr/lib/librte_pmd_virtio.so and fwd mode is mac rewrite.
+| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting\
+| | ... | DPDK testpmd.
| | ...
| | ... | *Arguments:*
-| | ... | - dut_node - DUT node to start guest VM on. Type: dictionary
-| | ... | - sock1 - Socket path for first Vhost-User interface. Type: string
-| | ... | - sock2 - Socket path for second Vhost-User interface. Type: string
+| | ... | - dut - DUT node to start guest VM on. Type: string
+| | ... | - sock1 - Socket path for first Vhost-User interface.
+| | ... | Type: string
+| | ... | - sock2 - Socket path for second Vhost-User interface.
+| | ... | Type: string
| | ... | - vm_name - QemuUtil instance name. Type: string
| | ... | - eth0_mac - MAC address of first Vhost interface. Type: string
| | ... | - eth1_mac - MAC address of second Vhost interface. Type: string
-| | ... | - skip - number of cpus which will be skipped. Type: integer
-| | ... | - count - number of cpus which will be allocated for qemu.
-| | ... | Type: integer
-| | ... | - qemu_id - Qemu Id when starting more then one guest VM on DUT node.
-| | ... | Type: integer
-| | ... | - jumbo_frames - Set True if jumbo frames are used in the test.
+| | ... | - qemu_id - Qemu Id when starting more than one guest VM on DUT
+| | ... | node. Type: integer
+| | ... | - jumbo - Set True if jumbo frames are used in the test.
| | ... | Type: bool
-| | ... | - use_tuned_cfs - FIXME.
+| | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
+| | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
+| | ... | Type: bool
+| | ...
+| | ... | *Note:*
+| | ... | KW uses test variables \${rxq_count_int}, \${thr_count_int} and
+| | ... | \${cpu_count_int} set by "Add worker threads and rxqueues to all DUTs"
| | ...
| | ... | *Example:*
| | ...
| | ... | \| Configure guest VM with dpdk-testpmd-mac connected via vhost-user \
| | ... | \| DUT1 \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM \
-| | ... | \| 00:00:00:00:00:01 \| 00:00:00:00:00:02 \| ${6} \| ${5} \|
-| | ... | \| Configure guest VM with dpdk-testpmd-mac connected via vhost-user \
-| | ... | \| ${nodes['DUT1']} \| /tmp/sock-2-1 \| /tmp/sock-2-2 \| DUT1_VM2 \
-| | ... | \| 00:00:00:00:02:01 \| 00:00:00:00:02:02 \| ${6} \| ${5} \
-| | ... | \| qemu_id=${2} \|
+| | ... | \| 00:00:00:00:00:01 \| 00:00:00:00:00:02 \|
| | ...
-| | [Arguments] | ${dut_node} | ${sock1} | ${sock2} | ${vm_name}
-| | ... | ${eth0_mac} | ${eth1_mac} | ${skip}=${6} | ${count}=${5}
-| | ... | ${qemu_id}=${1} | ${jumbo_frames}=${False}
+| | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name}
+| | ... | ${eth0_mac} | ${eth1_mac} | ${qemu_id}=${1} | ${jumbo}=${False}
+| | ... | ${perf_qemu_qsz}=${256} | ${use_tuned_cfs}=${False}
| | ...
| | Import Library | resources.libraries.python.QemuUtils | qemu_id=${qemu_id}
| | ... | WITH NAME | ${vm_name}
+| | Run keyword | ${vm_name}.Qemu Set Node | ${nodes['${dut}']}
| | ${serial_port}= | Evaluate | ${qemu_id} + ${4555}
| | Run keyword | ${vm_name}.Qemu Set Serial Port | ${serial_port}
| | ${ssh_fwd_port}= | Evaluate | ${qemu_id} + ${10021}
| | Run keyword | ${vm_name}.Qemu Set Ssh Fwd Port | ${ssh_fwd_port}
-| | ${dut_numa}= | Get interfaces numa node | ${dut_node}
-| | ... | ${dut1_if1} | ${dut1_if2}
-| | ${skip_cnt}= | Evaluate | ${skip} + (${qemu_id} - 1) * ${count}
-| | ${qemu_cpus}= | Cpu slice of list per node | ${dut_node} | ${dut_numa}
-| | ... | skip_cnt=${skip_cnt} | cpu_cnt=${count} | smt_used=${False}
+| | ${dut_numa}= | Get interfaces numa node | ${nodes['${dut}']}
+| | ... | ${${dut}_if1} | ${${dut}_if2}
+# Compute CPU placement for VM based on expected DUT placement.
+| | ${os_cpus}= | Set Variable | ${1}
+| | ${dut_main_cpus}= | Set Variable | ${1}
+| | ${dut_wk_cpus}= | Set Variable | ${cpu_count_int}
+| | ${vm_cpus}= | Evaluate | ${dut_wk_cpus} + ${dut_main_cpus}
+| | ${skip_dut}= | Evaluate | ${dut_wk_cpus} + ${dut_main_cpus} + ${os_cpus}
+| | ${skip_cpu}= | Evaluate | ${skip_dut} + (${qemu_id} - ${1}) * ${vm_cpus}
+| | ${qemu_cpus}= | Cpu slice of list per node | ${nodes['${dut}']}
+| | ... | ${dut_numa} | skip_cnt=${skip_cpu} | cpu_cnt=${vm_cpus}
+| | ... | smt_used=${smt_used}
+| | ${vm_thrs}= | Get Length | ${qemu_cpus}
+| | Run keyword | ${vm_name}.Qemu Set Queue Count | ${rxq_count_int}
+| | Run keyword | ${vm_name}.Qemu Set Queue Size | ${perf_qemu_qsz}
| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
-| | ... | jumbo_frames=${jumbo_frames}
+| | ... | jumbo_frames=${jumbo}
| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock2}
-| | ... | jumbo_frames=${jumbo_frames}
-| | ${apply_patch}= | Set Variable If | "${perf_qemu_qsz}" == "256" | ${False}
-| | ... | ${TRUE}
+| | ... | jumbo_frames=${jumbo}
+| | ${apply_patch}= | Set Variable | ${False}
| | ${perf_qemu_path}= | Set Variable If | ${apply_patch}
| | ... | ${perf_qemu_path}-patch/bin/
| | ... | ${perf_qemu_path}-base/bin/
-| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${dut_node}
-| | ... | apply_patch=${apply_patch}
+| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${nodes['${dut}']}
+| | ... | apply_patch=${False}
| | Run keyword | ${vm_name}.Qemu Set Path | ${perf_qemu_path}
-| | Run keyword | ${vm_name}.Qemu Set Node | ${dut_node}
-| | Run keyword | ${vm_name}.Qemu Set Smp | ${count} | ${count} | 1 | 1
+| | Run keyword | ${vm_name}.Qemu Set Smp | ${vm_thrs} | ${vm_thrs} | 1 | 1
| | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
| | Run keyword | ${vm_name}.Qemu Set Disk Image | ${perf_vm_image}
| | ${vm}= | Run keyword | ${vm_name}.Qemu Start
| | Run keyword | ${vm_name}.Qemu Set Affinity | @{qemu_cpus}
| | Run keyword If | ${use_tuned_cfs} | ${vm_name}.Qemu Set Scheduler Policy
-| | ${max_pkt_len}= | Set Variable If | ${jumbo_frames} | 9000 | ${EMPTY}
-| | Dpdk Testpmd Start | ${vm} | eal_coremask=0x1f
+| | ${max_pkt_len}= | Set Variable If | ${jumbo} | 9000 | ${EMPTY}
+| | ${testpmd_cpus}= | Evaluate | ${thr_count_int} + ${1}
+| | ${testpmd_cpus}= | Cpu list per node str | ${nodes['${dut}']} | ${0}
+| | ... | cpu_cnt=${testpmd_cpus}
+| | Dpdk Testpmd Start | ${vm} | eal_corelist=${testpmd_cpus}
| | ... | eal_mem_channels=4 | pmd_fwd_mode=mac | pmd_eth_peer_0=0,${eth0_mac}
| | ... | pmd_eth_peer_1=1,${eth1_mac} | pmd_disable_hw_vlan=${TRUE}
-| | ... | pmd_txd=${perf_qemu_qsz} | pmd_rxd=${perf_qemu_qsz}
+| | ... | pmd_rxd=${perf_qemu_qsz} | pmd_txd=${perf_qemu_qsz}
+| | ... | pmd_rxq=${rxq_count_int} | pmd_txq=${rxq_count_int}
| | ... | pmd_max_pkt_len=${max_pkt_len}
| | Return From Keyword | ${vm}
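Inside the guest, testpmd is sized from the same DUT thread and queue counts; a sketch of the parameter derivation, with the core list simplified to guest-local CPU ids starting at 0 (the keyword resolves concrete ids via "Cpu list per node str"):

    # Sketch only: one main core plus thr_count_int forwarding cores,
    # queue counts mirroring the VPP rxq count, descriptor rings sized
    # by perf_qemu_qsz, and 9000 B max packet length when jumbo is used.
    def testpmd_params(thr_count_int, rxq_count_int, perf_qemu_qsz, jumbo):
        return {
            'eal_corelist': ','.join(str(c) for c in range(thr_count_int + 1)),
            'pmd_rxq': rxq_count_int,
            'pmd_txq': rxq_count_int,
            'pmd_rxd': perf_qemu_qsz,
            'pmd_txd': perf_qemu_qsz,
            'pmd_max_pkt_len': 9000 if jumbo else None,
        }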
-| Configure '${nr}' guest VMs with dpdk-testpmd-mac connected via vhost-user in 3-node circular topology
+| Configure guest VMs with dpdk-testpmd-mac connected via vhost-user on node
| | [Documentation]
-| | ... | Start QEMU guests with two vhost-user interfaces and interconnecting
-| | ... | DPDK testpmd with fwd mode set to mac rewrite for defined number of
-| | ... | VMs on all defined VPP nodes.
+| | ... | Start ${vm_count} QEMU guests with two vhost-user interfaces and\
+| | ... | interconnecting DPDK testpmd with fwd mode set to mac rewrite for\
+| | ... | defined number of VMs on defined VPP node.
| | ...
| | ... | *Arguments:*
-| | ... | _None_
+| | ... | - dut - DUT node to start guest VM on. Type: string
+| | ... | - vm_count - Number of guest VMs. Type: int
+| | ... | - jumbo - Jumbo frames are used (True) or are not used (False)
+| | ... | in the test. Type: boolean
+| | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
+| | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
+| | ... | Type: bool
| | ...
| | ... | _NOTE:_ This KW expects following test case variables to be set:
-| | ... | - ${system_cpus} - Number of CPUs allocated for OS itself.
-| | ... | - ${vpp_cpus} - Number of CPUs allocated for VPP.
-| | ... | - ${vm_cpus} - Number of CPUs to be allocated per QEMU instance.
-| | ... | - ${jumbo_frames} - Jumbo frames are used (True) or are not used
-| | ... | (False) in the test.
+| | ... | - cpu_count_int - Number of Physical CPUs allocated for DUT.
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| Configure '2' guest VMs with dpdk-testpmd-mac connected via vhost-user\
-| | ... | in 3-node circular topology \|
+| | ... | \| Configure guest VMs with dpdk-testpmd-mac connected via \
+| | ... | vhost-user on node \| DUT1 \| 1 \| False \| 256 \|
| | ...
-| | :FOR | ${number} | IN RANGE | 1 | ${nr}+1
+| | [Arguments] | ${dut} | ${vm_count}=${1} | ${jumbo}=${False}
+| | ... | ${perf_qemu_qsz}=${256} | ${use_tuned_cfs}=${False}
+| | ...
+| | :FOR | ${number} | IN RANGE | 1 | ${vm_count}+1
| | | ${sock1}= | Set Variable | /tmp/sock-${number}-1
| | | ${sock2}= | Set Variable | /tmp/sock-${number}-2
-| | | ${skip_cpus}= | Evaluate | ${vpp_cpus}+${system_cpus}
-| | | ${vm1}=
-| | | ... | Configure guest VM with dpdk-testpmd-mac connected via vhost-user
-| | | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM${number}
-| | | ... | ${dut1-vhost-${number}-if1_mac}
-| | | ... | ${dut1-vhost-${number}-if2_mac} | skip=${skip_cpus}
-| | | ... | count=${vm_cpus} | qemu_id=${number}
-| | | ... | jumbo_frames=${jumbo_frames}
-| | | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM${number} | ${vm1}
-| | | ${vm2}=
+| | | ${vm}=
| | | ... | Configure guest VM with dpdk-testpmd-mac connected via vhost-user
-| | | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM${number}
-| | | ... | ${dut2-vhost-${number}-if1_mac}
-| | | ... | ${dut2-vhost-${number}-if2_mac} | skip=${skip_cpus}
-| | | ... | count=${vm_cpus} | qemu_id=${number}
-| | | ... | jumbo_frames=${jumbo_frames}
-| | | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM${number} | ${vm2}
-
-| Configure guest VM with dpdk-testpmd-mac using SMT connected via vhost-user
+| | | ... | ${dut} | ${sock1} | ${sock2} | ${dut}_VM${number}
+| | | ... | ${${dut}-vhost-${number}-if1_mac}
+| | | ... | ${${dut}-vhost-${number}-if2_mac} | qemu_id=${number}
+| | | ... | jumbo=${jumbo} | perf_qemu_qsz=${perf_qemu_qsz}
+| | | ... | use_tuned_cfs=${use_tuned_cfs}
+| | | Set To Dictionary | ${${dut}_vm_refs} | ${dut}_VM${number} | ${vm}
+
+| Configure guest VMs with dpdk-testpmd-mac connected via vhost-user
| | [Documentation]
-| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting
-| | ... | DPDK testpmd. Qemu Guest uses by default 5 cores and 2048M. Testpmd
-| | ... | uses 5 cores (1 main core and 4 cores dedicated to io) mem-channel=4,
-| | ... | txq/rxq=256, burst=64, disable-hw-vlan, disable-rss,
-| | ... | driver usr/lib/librte_pmd_virtio.so and fwd mode is mac rewrite.
+| | ... | Start ${vm_count} QEMU guests with two vhost-user interfaces and\
+| | ... | interconnecting DPDK testpmd with fwd mode set to mac rewrite for\
+| | ... | defined number of VMs on all defined VPP nodes.
| | ...
| | ... | *Arguments:*
-| | ... | - dut_node - DUT node to start guest VM on. Type: dictionary
-| | ... | - sock1 - Socket path for first Vhost-User interface. Type: string
-| | ... | - sock2 - Socket path for second Vhost-User interface. Type: string
-| | ... | - vm_name - QemuUtil instance name. Type: string
-| | ... | - eth0_mac - MAC address of first Vhost interface. Type: string
-| | ... | - eth1_mac - MAC address of second Vhost interface. Type: string
-| | ... | - skip - number of cpus which will be skipped. Type: int
-| | ... | - count - number of cpus which will be allocated for qemu. Type: int
-| | ... | - jumbo_frames - Set True if jumbo frames are used in the test.
+| | ... | - vm_count - Number of guest VMs. Type: int
+| | ... | - jumbo - Jumbo frames are used (True) or are not used (False)
+| | ... | in the test. Type: boolean
+| | ... | - perf_qemu_qsz - Virtio Queue Size. Type: int
+| | ... | - use_tuned_cfs - Set True if CFS RR should be used for Qemu SMP.
| | ... | Type: bool
-| | ... | - use_tuned_cfs - FIXME.
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| Configure guest VM with dpdk-testpmd-mac using SMT connected via \
-| | ... | vhost-user \| ${nodes['DUT1']} \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM\
-| | ... | \| 00:00:00:00:00:01 \| 00:00:00:00:00:02 \| ${6} \| ${5} \|
+| | ... | \| Configure guest VMs with dpdk-testpmd-mac connected via vhost-user\
+| | ... | \| 1 \| False \| 256 \|
| | ...
-| | [Arguments] | ${dut_node} | ${sock1} | ${sock2} | ${vm_name}
-| | ... | ${eth0_mac} | ${eth1_mac} | ${skip}=${6} | ${count}=${5}
-| | ... | ${jumbo_frames}=${False}
+| | [Arguments] | ${vm_count}=${1} | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
+| | ... | ${use_tuned_cfs}=${False}
| | ...
-| | Import Library | resources.libraries.python.QemuUtils
-| | ... | WITH NAME | ${vm_name}
-| | ${dut_numa}= | Get interfaces numa node | ${dut_node}
-| | ... | ${dut1_if1} | ${dut1_if2}
-| | ${qemu_cpus}= | Cpu slice of list per node | ${dut_node} | ${dut_numa}
-| | ... | skip_cnt=${skip} | cpu_cnt=${count} | smt_used=${TRUE}
-| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
-| | ... | jumbo_frames=${jumbo_frames}
-| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock2}
-| | ... | jumbo_frames=${jumbo_frames}
-| | ${apply_patch}= | Set Variable If | "${perf_qemu_qsz}" == "256" | ${False}
-| | ... | ${TRUE}
-| | ${perf_qemu_path}= | Set Variable If | ${apply_patch}
-| | ... | ${perf_qemu_path}-patch/bin/
-| | ... | ${perf_qemu_path}-base/bin/
-| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${dut_node}
-| | ... | apply_patch=${apply_patch}
-| | Run keyword | ${vm_name}.Qemu Set Path | ${perf_qemu_path}
-| | Run keyword | ${vm_name}.Qemu Set Node | ${dut_node}
-| | Run keyword | ${vm_name}.Qemu Set Smp | ${count} | ${count} | 1 | 1
-| | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
-| | Run keyword | ${vm_name}.Qemu Set Disk Image | ${perf_vm_image}
-| | ${vm}= | Run keyword | ${vm_name}.Qemu Start
-| | Run keyword | ${vm_name}.Qemu Set Affinity | @{qemu_cpus}
-| | Run keyword If | ${use_tuned_cfs} | ${vm_name}.Qemu Set Scheduler Policy
-| | ${max_pkt_len}= | Set Variable If | ${jumbo_frames} | 9000 | ${EMPTY}
-| | Dpdk Testpmd Start | ${vm} | eal_coremask=0x1f
-| | ... | eal_mem_channels=4 | pmd_fwd_mode=mac | pmd_eth_peer_0=0,${eth0_mac}
-| | ... | pmd_eth_peer_1=1,${eth1_mac} | pmd_disable_hw_vlan=${TRUE}
-| | ... | pmd_txd=${perf_qemu_qsz} | pmd_rxd=${perf_qemu_qsz}
-| | ... | pmd_max_pkt_len=${max_pkt_len}
-| | Return From Keyword | ${vm}
+| | ${duts}= | Get Matches | ${nodes} | DUT*
+| | :FOR | ${dut} | IN | @{duts}
+| | | Configure guest VMs with dpdk-testpmd-mac connected via vhost-user on node
+| | | ... | ${dut} | vm_count=${vm_count} | jumbo=${jumbo}
+| | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${use_tuned_cfs}
| Configure guest VM with linux bridge connected via vhost-user
| | [Documentation]
-| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting
-| | ... | linux bridge. Qemu Guest uses 2048M.
+| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting\
+| | ... | linux bridge.
| | ...
| | ... | *Arguments:*
-| | ... | - dut_node - DUT node to start guest VM on. Type: dictionary
-| | ... | - sock1 - Socket path for first Vhost-User interface. Type: string
-| | ... | - sock2 - Socket path for second Vhost-User interface. Type: string
+| | ... | - dut - DUT node to start guest VM on. Type: dictionary
+| | ... | - sock1 - Socket path for first Vhost-User interface.
+| | ... | Type: string
+| | ... | - sock2 - Socket path for second Vhost-User interface.
+| | ... | Type: string
| | ... | - vm_name - QemuUtil instance name. Type: string
-| | ... | - skip - number of cpus which will be skipped. Type: int
-| | ... | - count - number of cpus which will be allocated for qemu. Type: int
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Configure guest VM with linux bridge connected via vhost-user \
-| | ... | \| ${nodes['DUT1']} \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM \| ${6} \
-| | ... | \| ${5} \|
-| | ...
-| | [Arguments] | ${dut_node} | ${sock1} | ${sock2} | ${vm_name} | ${skip}=${6}
-| | ... | ${count}=${5}
+| | ... | - skip_cnt - Number of CPUs which will be skipped. Type: int
+| | ... | - count - Number of CPUs which will be allocated for QEMU.
+| | ... | Type: int
| | ...
-| | Import Library | resources.libraries.python.QemuUtils
-| | ... | WITH NAME | ${vm_name}
-| | ${dut_numa}= | Get interfaces numa node | ${dut_node}
-| | ... | ${dut1_if1} | ${dut1_if2}
-| | ${qemu_cpus}= | Cpu slice of list per node | ${dut_node} | ${dut_numa}
-| | ... | skip_cnt=${skip} | cpu_cnt=${count} | smt_used=${False}
-| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
-| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock2}
-| | ${apply_patch}= | Set Variable If | "${perf_qemu_qsz}" == "256" | ${False}
-| | ... | ${TRUE}
-| | ${perf_qemu_path}= | Set Variable If | ${apply_patch}
-| | ... | ${perf_qemu_path}-patch/bin/
-| | ... | ${perf_qemu_path}-base/bin/
-| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${dut_node}
-| | ... | apply_patch=${apply_patch}
-| | Run keyword | ${vm_name}.Qemu Set Path | ${perf_qemu_path}
-| | Run keyword | ${vm_name}.Qemu Set Node | ${dut_node}
-| | Run keyword | ${vm_name}.Qemu Set Smp | ${count} | ${count} | 1 | 1
-| | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
-| | Run keyword | ${vm_name}.Qemu Set Disk Image | ${perf_vm_image}
-| | ${vm}= | Run keyword | ${vm_name}.Qemu Start
-| | Run keyword | ${vm_name}.Qemu Set Affinity | @{qemu_cpus}
-| | ${br}= | Set Variable | br0
-| | ${vhost1}= | Get Vhost User If Name By Sock | ${vm} | ${sock1}
-| | ${vhost2}= | Get Vhost User If Name By Sock | ${vm} | ${sock2}
-| | Linux Add Bridge | ${vm} | ${br} | ${vhost1} | ${vhost2}
-| | Set Interface State | ${vm} | ${vhost1} | up | if_type=name
-| | Set Interface State | ${vm} | ${vhost2} | up | if_type=name
-| | Set Interface State | ${vm} | ${br} | up | if_type=name
-| | Return From Keyword | ${vm}
-
-| Configure guest VM with linux bridge using SMT connected via vhost-user
-| | [Documentation]
-| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting
-| | ... | linux bridge. Qemu Guest uses 2048M.
-| | ...
-| | ... | *Arguments:*
-| | ... | - dut_node - DUT node to start guest VM on. Type: dictionary
-| | ... | - sock1 - Socket path for first Vhost-User interface. Type: string
-| | ... | - sock2 - Socket path for second Vhost-User interface. Type: string
-| | ... | - vm_name - QemuUtil instance name. Type: string
-| | ... | - skip - number of cpus which will be skipped. Type: int
-| | ... | - count - number of cpus which will be allocated for qemu. Type: int
+| | ... | _NOTE:_ This KW expects following test case variables to be set:
+| | ... | - cpu_count_int - Number of Physical CPUs allocated for DUT.
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| Guest VM with Linux Bridge using SMT connected via vhost-user is \
-| | ... | setup \| ${nodes['DUT1']} \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM \
-| | ... | \| ${6}\| ${5} \|
+| | ... | \| Configure guest VM with linux bridge connected via vhost-user \
+| | ... | \| ${nodes['DUT1']} \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM \| ${6} \|
| | ...
-| | [Arguments] | ${dut_node} | ${sock1} | ${sock2} | ${vm_name} | ${skip}=${6}
+| | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name} | ${skip_cnt}=${6}
| | ... | ${count}=${5}
| | ...
| | Import Library | resources.libraries.python.QemuUtils
| | ... | WITH NAME | ${vm_name}
-| | ${dut_numa}= | Get interfaces numa node | ${dut_node}
+| | Run keyword | ${vm_name}.Qemu Set Node | ${dut}
+| | ${dut_numa}= | Get interfaces numa node | ${dut}
| | ... | ${dut1_if1} | ${dut1_if2}
-| | ${qemu_cpus}= | Cpu slice of list per node | ${dut_node} | ${dut_numa}
-| | ... | skip_cnt=${skip} | cpu_cnt=${count} | smt_used=${TRUE}
+| | ${vm_phy_cpus}= | Evaluate | ${cpu_count_int} + ${1}
+| | ${qemu_cpus}= | Cpu slice of list per node | ${dut} | ${dut_numa}
+| | ... | skip_cnt=${skip_cnt} | cpu_cnt=${vm_phy_cpus} | smt_used=${smt_used}
+| | ${vm_thr_cpus}= | Get Length | ${qemu_cpus}
+| | Run keyword | ${vm_name}.Qemu Set Queue Size | ${perf_qemu_qsz}
| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock2}
-| | ${apply_patch}= | Set Variable If | "${perf_qemu_qsz}" == "256" | ${False}
-| | ... | ${TRUE}
+| | ${apply_patch}= | Set Variable | ${False}
| | ${perf_qemu_path}= | Set Variable If | ${apply_patch}
| | ... | ${perf_qemu_path}-patch/bin/
| | ... | ${perf_qemu_path}-base/bin/
-| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${dut_node}
+| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${dut}
| | ... | apply_patch=${apply_patch}
| | Run keyword | ${vm_name}.Qemu Set Path | ${perf_qemu_path}
-| | Run keyword | ${vm_name}.Qemu Set Node | ${dut_node}
-| | Run keyword | ${vm_name}.Qemu Set Smp | ${count} | ${count} | 1 | 1
+| | Run keyword | ${vm_name}.Qemu Set Smp | ${vm_thr_cpus} | ${vm_thr_cpus}
+| | ... | 1 | 1
| | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
| | Run keyword | ${vm_name}.Qemu Set Disk Image | ${perf_vm_image}
| | ${vm}= | Run keyword | ${vm_name}.Qemu Start
diff --git a/resources/libraries/robot/shared/default.robot b/resources/libraries/robot/shared/default.robot
index 97b2ed62f9..137e2e829a 100644
--- a/resources/libraries/robot/shared/default.robot
+++ b/resources/libraries/robot/shared/default.robot
@@ -182,6 +182,7 @@
| | | Set Tags | ${thr_count_int}T${cpu_count_int}C
| | Set Test Variable | ${smt_used}
| | Set Test Variable | ${thr_count_int}
+| | Set Test Variable | ${cpu_count_int}
| | Set Test Variable | ${rxq_count_int}
| Create Kubernetes VSWITCH startup config on all DUTs
@@ -230,6 +231,7 @@
| | | Set Tags | ${thr_count_int}T${cpu_count_int}C
| | Set Test Variable | ${smt_used}
| | Set Test Variable | ${thr_count_int}
+| | Set Test Variable | ${cpu_count_int}
| | Set Test Variable | ${rxq_count_int}
| Create Kubernetes VNF'${i}' startup config on all DUTs
diff --git a/resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu_x86_64.yaml b/resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu_x86_64.yaml
index 6c9a839730..dd0176b72b 100644
--- a/resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu_x86_64.yaml
+++ b/resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu_x86_64.yaml
@@ -88,3 +88,11 @@
update_cache: True
become: yes
tags: install-java
+
+- name: Install Pixman (Qemu-dep)
+ apt:
+ name: 'libpixman-1-dev'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-pixman