author    Peter Mikus <pmikus@cisco.com>  2019-04-19 06:40:44 +0000
committer Peter Mikus <pmikus@cisco.com>  2019-04-26 11:03:30 +0000
commit    780b65d82863effd53ccb0e0150c29d22522c61d (patch)
tree      0c5a5a48d067e337a754e34a6182470ef5aff9da /resources/libraries/python
parent    f6e9d58ecd95789e0b3292983fafff223ec75a2c (diff)
NF density tests with dtc=0.5 and dtcr=2
Change-Id: Icff556142280ad0b6261e0a2bfb71672ee6b3807
Signed-off-by: Peter Mikus <pmikus@cisco.com>
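In this patch, nf_dtc is the number of physical dataplane cores per NF (0.5 meaning two NFs share one physical core via its SMT sibling threads) and nf_dtcr is the NF dataplane thread-per-core ratio. A minimal sketch of the ceiling-division core budgeting the patch introduces in CpuUtils.cpu_slice_of_list_for_nf (the standalone helper below is illustrative, not part of the patch; the patch itself relies on Py2.7 integer division, here written as // so it behaves identically on Py3):

# Illustrative sketch only: mirrors the mt_req/dt_req arithmetic
# added in CpuUtils.py.
def core_budget(nf_chains, nf_nodes, nf_mtcr=2, nf_dtcr=2):
    """Ceiling-divide the total NF count by the thread-per-core ratios."""
    nfs = nf_chains * nf_nodes
    mt_req = (nfs + nf_mtcr - 1) // nf_mtcr  # cores for NF main threads
    dt_req = (nfs + nf_dtcr - 1) // nf_dtcr  # cores for NF dataplane threads
    return mt_req, dt_req

print(core_budget(nf_chains=4, nf_nodes=4))  # (8, 8) for 16 NFs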
Diffstat (limited to 'resources/libraries/python')
-rw-r--r--  resources/libraries/python/Constants.py             18
-rw-r--r--  resources/libraries/python/CpuUtils.py             169
-rw-r--r--  resources/libraries/python/QemuManager.py           62
-rw-r--r--  resources/libraries/python/autogen/Regenerator.py    2
4 files changed, 140 insertions(+), 111 deletions(-)
diff --git a/resources/libraries/python/Constants.py b/resources/libraries/python/Constants.py
index 71673aff86..0f0003a763 100644
--- a/resources/libraries/python/Constants.py
+++ b/resources/libraries/python/Constants.py
@@ -34,12 +34,24 @@ class Constants(object):
# vat templates location
RESOURCES_TPL_VAT = 'resources/templates/vat'
+ # Kubernetes templates location
+ RESOURCES_TPL_K8S = 'resources/templates/kubernetes'
+
+ # KernelVM templates location
+ RESOURCES_TPL_VM = 'resources/templates/vm'
+
# OpenVPP VAT binary name
VAT_BIN_NAME = 'vpp_api_test'
# VPP service unit name
VPP_UNIT = 'vpp'
+ # Number of system CPU cores.
+ CPU_CNT_SYSTEM = 1
+
+ # Number of vswitch main thread CPU cores.
+ CPU_CNT_MAIN = 1
+
# QEMU binary path
QEMU_BIN_PATH = '/usr/bin'
@@ -58,12 +70,6 @@ class Constants(object):
# TRex install directory
TREX_INSTALL_DIR = '/opt/trex-core-2.35'
- # Kubernetes templates location
- RESOURCES_TPL_K8S = 'resources/templates/kubernetes'
-
- # KernelVM templates location
- RESOURCES_TPL_VM = 'resources/templates/vm'
-
# Honeycomb directory location at topology nodes:
REMOTE_HC_DIR = '/opt/honeycomb'
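The two new constants feed the skip count that reserves cores ahead of any NF placement. A minimal sketch of how they combine with the vswitch dataplane core count (the vs_dtc value below is an assumption for illustration):

# Sketch of the skip count computed later in CpuUtils.get_affinity_nf().
CPU_CNT_SYSTEM = 1  # core reserved for the host system
CPU_CNT_MAIN = 1    # core reserved for the vswitch main thread

vs_dtc = 1  # assumed vswitch dataplane core count
skip_cnt = CPU_CNT_SYSTEM + CPU_CNT_MAIN + vs_dtc
print(skip_cnt)  # 3; NF cores are allocated after this offset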
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index 57bf7fdb71..67bf312f5d 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -13,7 +13,11 @@
"""CPU utilities library."""
-from resources.libraries.python.ssh import SSH
+from robot.libraries.BuiltIn import BuiltIn
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.topology import Topology
__all__ = ["CpuUtils"]
@@ -66,19 +70,8 @@ class CpuUtils(object):
:type nodes: dict
:raises RuntimeError: If the ssh command "lscpu -p" fails.
"""
- ssh = SSH()
for node in nodes.values():
- ssh.connect(node)
- cmd = "lscpu -p"
- ret, stdout, stderr = ssh.exec_command(cmd)
-# parsing of "lscpu -p" output:
-# # CPU,Core,Socket,Node,,L1d,L1i,L2,L3
-# 0,0,0,0,,0,0,0,0
-# 1,1,0,0,,1,1,1,0
- if ret != 0:
- raise RuntimeError(
- "Failed to execute ssh command, ret: {} err: {}".format(
- ret, stderr))
+ stdout, _ = exec_cmd_no_error(node, 'lscpu -p')
node['cpuinfo'] = list()
for line in stdout.split("\n"):
if line and line[0] != "#":
@@ -243,58 +236,134 @@ class CpuUtils(object):
return cpu_range
@staticmethod
- def cpu_slice_of_list_for_nf(**kwargs):
- """Return list of node related list of CPU numbers.
+ def cpu_slice_of_list_for_nf(node, cpu_node, nf_chains=1, nf_nodes=1,
+ nf_chain=1, nf_node=1, nf_dtc=1, nf_mtcr=2,
+ nf_dtcr=1, skip_cnt=0):
+ """Return list of DUT node related list of CPU numbers. The main
+ computing unit is physical core count.
- :param kwargs: Key-value pairs used to compute placement.
- :type kwargs: dict
- :returns: Cpu numbers related to numa from argument.
+ :param node: DUT node.
+ :param cpu_node: Numa node number.
+ :param nf_chains: Number of NF chains.
+ :param nf_nodes: Number of NF nodes in chain.
+ :param nf_chain: Chain number indexed from 1.
+ :param nf_node: Node number indexed from 1.
+ :param nf_dtc: Amount of physical cores for NF dataplane.
+ :param nf_mtcr: NF main thread per core ratio.
+ :param nf_dtcr: NF dataplane thread per core ratio.
+ :param skip_cnt: Skip first "skip_cnt" CPUs.
+ :type node: dict
+ :type cpu_node: int
+ :type nf_chains: int
+ :type nf_nodes: int
+ :type nf_chain: int
+ :type nf_node: int
+ :type nf_dtc: int or float
+ :type nf_mtcr: int
+ :type nf_dtcr: int
+ :type skip_cnt: int
+ :returns: List of CPUs allocated to NF.
:rtype: list
:raises RuntimeError: If we require more cpus than available or if
placement is not possible due to wrong parameters.
"""
- if kwargs['chain_id'] - 1 >= kwargs['chains']:
+ if nf_chain - 1 >= nf_chains:
raise RuntimeError("ChainID is higher than total number of chains!")
- if kwargs['node_id'] - 1 >= kwargs['nodeness']:
- raise RuntimeError("NodeID is higher than chain nodeness!")
+ if nf_node - 1 >= nf_nodes:
+ raise RuntimeError("NodeID is higher than chain nodes!")
- smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
- cpu_list = CpuUtils.cpu_list_per_node(kwargs['node'],
- kwargs['cpu_node'], smt_used)
- cpu_list_len = len(cpu_list)
-
- mt_req = ((kwargs['chains'] * kwargs['nodeness']) + kwargs['mtcr'] - 1)\
- / kwargs['mtcr']
- dt_req = ((kwargs['chains'] * kwargs['nodeness']) + kwargs['dtcr'] - 1)\
- / kwargs['dtcr']
-
- cpu_req = kwargs['skip_cnt'] + mt_req + dt_req
- if smt_used and cpu_req > cpu_list_len / CpuUtils.NR_OF_THREADS:
+ smt_used = CpuUtils.is_smt_enabled(node['cpuinfo'])
+ cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+ # CPU thread sibling offset.
+ sib = len(cpu_list) / CpuUtils.NR_OF_THREADS
+
+ if not smt_used and not isinstance(nf_dtc, int):
+ raise RuntimeError("Cannot allocate if SMT is not enabled!")
+ # TODO: Workaround: as we are using the physical core as the main unit,
+ # we must adjust the number of physical dataplane cores when given a
+ # float, for further array referencing. As the rounding method differs
+ # between Py2.7 and Py3.x, we are using a static mapping. This can be
+ # rewritten using flat arrays and different logic (from physical core
+ # units to logical core units).
+ dtc = 1 if not isinstance(nf_dtc, int) else nf_dtc
+
+ mt_req = ((nf_chains * nf_nodes) + nf_mtcr - 1) / nf_mtcr
+ dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) / nf_dtcr
+ cpu_req = skip_cnt + mt_req + dt_req
+
+ if smt_used and cpu_req > len(cpu_list) / CpuUtils.NR_OF_THREADS:
raise RuntimeError("Not enough CPU cores available for placement!")
- elif not smt_used and cpu_req > cpu_list_len:
+ elif not smt_used and cpu_req > len(cpu_list):
raise RuntimeError("Not enough CPU cores available for placement!")
- offset = (kwargs['node_id'] - 1) + (kwargs['chain_id'] - 1)\
- * kwargs['nodeness']
- dtc = kwargs['dtc']
+ offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
try:
mt_odd = (offset / mt_req) & 1
- mt_skip = kwargs['skip_cnt'] + (offset % mt_req)
- dt_skip = kwargs['skip_cnt'] + mt_req + (offset % dt_req) * dtc
+ mt_skip = skip_cnt + (offset % mt_req)
+ dt_odd = (offset / dt_req) & 1
+ dt_skip = skip_cnt + mt_req + (offset % dt_req) * dtc
except ZeroDivisionError:
raise RuntimeError("Invalid placement combination!")
-
if smt_used:
- cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS]
- cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:]
+ mt_list = [cpu for cpu in cpu_list[mt_skip+sib:mt_skip+sib + 1]] \
+ if mt_odd else [cpu for cpu in cpu_list[mt_skip:mt_skip + 1]]
+ dt_list = [cpu for cpu in cpu_list[dt_skip+sib:dt_skip+sib + dtc]] \
+ if dt_odd else [cpu for cpu in cpu_list[dt_skip:dt_skip + dtc]]
+ if isinstance(nf_dtc, int):
+ dt_list = \
+ [cpu for cpu in cpu_list[dt_skip:dt_skip + dtc]]
+ dt_list += \
+ [cpu for cpu in cpu_list[dt_skip+sib:dt_skip+sib + dtc]]
+ else:
+ mt_list = [cpu for cpu in cpu_list[mt_skip:mt_skip + 1]]
+ dt_list = [cpu for cpu in cpu_list[dt_skip:dt_skip + dtc]]
- mt_cpu_list = [cpu for cpu in cpu_list_1[mt_skip:mt_skip + 1]] \
- if mt_odd else [cpu for cpu in cpu_list_0[mt_skip:mt_skip + 1]]
+ return mt_list + dt_list
- dt_cpu_list = [cpu for cpu in cpu_list_0[dt_skip:dt_skip + dtc]]
- dt_cpu_list += [cpu for cpu in cpu_list_1[dt_skip:dt_skip + dtc]]
- else:
- mt_cpu_list = [cpu for cpu in cpu_list[mt_skip:mt_skip + 1]]
- dt_cpu_list = [cpu for cpu in cpu_list[dt_skip:dt_skip + dtc]]
+ @staticmethod
+ def get_affinity_nf(nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1,
+ nf_node=1, vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
+ """Get affinity of NF (network function). Result will be used to compute
+ the amount of CPUs and also affinity.
+
+ :param nodes: Physical topology nodes.
+ :param node: SUT node.
+ :param nf_chains: Number of NF chains.
+ :param nf_nodes: Number of NF nodes in chain.
+ :param nf_chain: Chain number indexed from 1.
+ :param nf_node: Node number indexed from 1.
+ :param vs_dtc: Amount of physical cores for vswitch dataplane.
+ :param nf_dtc: Amount of physical cores for NF dataplane.
+ :param nf_mtcr: NF main thread per core ratio.
+ :param nf_dtcr: NF dataplane thread per core ratio.
+ :type nodes: dict
+ :type node: dict
+ :type nf_chains: int
+ :type nf_nodes: int
+ :type nf_chain: int
+ :type nf_node: int
+ :type vs_dtc: int
+ :type nf_dtc: int or float
+ :type nf_mtcr: int
+ :type nf_dtcr: int
+ :returns: List of CPUs allocated to NF.
+ :rtype: list
+ """
+ skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc
+
+ interface_list = []
+ interface_list.append(
+ BuiltIn().get_variable_value('${{{node}_if1}}'.format(node=node)))
+ interface_list.append(
+ BuiltIn().get_variable_value('${{{node}_if2}}'.format(node=node)))
+
+ cpu_node = Topology.get_interfaces_numa_node(
+ nodes[node], *interface_list)
+
+ return CpuUtils.cpu_slice_of_list_for_nf(
+ node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
+ nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
+ nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt)
- return mt_cpu_list + dt_cpu_list
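A hypothetical call into the reworked keyword interface (the node dict, NUMA id, and counts below are assumptions for illustration, not values from the patch):

# Hypothetical invocation; 'nodes' is a parsed topology dict.
nf_cpus = CpuUtils.cpu_slice_of_list_for_nf(
    node=nodes['DUT1'], cpu_node=0,
    nf_chains=2, nf_nodes=2,   # 4 NFs in total
    nf_chain=1, nf_node=2,     # place the second NF of the first chain
    nf_dtc=0.5,                # two NFs share one physical core via SMT
    nf_mtcr=2, nf_dtcr=2,
    skip_cnt=3)                # system + main + vswitch cores
# Result: [main_thread_cpu, dataplane_cpu] for this NF.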
diff --git a/resources/libraries/python/QemuManager.py b/resources/libraries/python/QemuManager.py
index 0ea6164cb7..6f5db6ecbb 100644
--- a/resources/libraries/python/QemuManager.py
+++ b/resources/libraries/python/QemuManager.py
@@ -15,61 +15,14 @@
from collections import OrderedDict
-from robot.libraries.BuiltIn import BuiltIn
-
from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.QemuUtils import QemuUtils
-from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.topology import NodeType
__all__ = ["QemuManager"]
-def get_affinity_vm(nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
- cpu_count_int=1, vnf_count_int=1):
- """Get affinity of VM. Result will be used to compute the amount of
- CPUs and also affinity.
-
- :param node: SUT nodes.
- :param node: DUT node.
- :param nf_chains: Number of NF chains.
- :param nf_nodes: Number of NF nodes in chain.
- :param nf_chain: Chain ID.
- :param nf_node: Node ID.
- :param cpu_count_int: Amount of Dataplane threads of vswitch.
- :param vnf_count_int: Amount of Dataplane threads of vnf.
- :type nodes: dict
- :type node: dict
- :type nf_chains: int
- :type nf_nodes: int
- :type nf_chain: int
- :type nf_node: int
- :type cpu_count_int: int
- :type vnf_count_int: int
- :returns: List of CPUs allocated to VM.
- :rtype: list
- """
- sut_sc = 1
- dut_mc = 1
- dut_dc = cpu_count_int
- skip_cnt = sut_sc + dut_mc + dut_dc
- dtc = vnf_count_int
-
- interface_list = []
- interface_list.append(
- BuiltIn().get_variable_value('${{{node}_if1}}'.format(node=node)))
- interface_list.append(
- BuiltIn().get_variable_value('${{{node}_if2}}'.format(node=node)))
-
- cpu_node = Topology.get_interfaces_numa_node(nodes[node], *interface_list)
-
- nf_cpus = CpuUtils.cpu_slice_of_list_for_nf(
- node=nodes[node], cpu_node=cpu_node, chains=nf_chains,
- nodeness=nf_nodes, chain_id=nf_chain, node_id=nf_node, mtcr=2, dtcr=1,
- dtc=dtc, skip_cnt=skip_cnt)
-
- return nf_cpus
-
class QemuManager(object):
"""QEMU lifecycle management class"""
@@ -97,8 +50,9 @@ class QemuManager(object):
nf_chains = int(kwargs['nf_chains'])
nf_nodes = int(kwargs['nf_nodes'])
queues = kwargs['rxq_count_int'] if kwargs['auto_scale'] else 1
- cpu_count_int = kwargs['cpu_count_int']
- vnf_count_int = kwargs['cpu_count_int'] if kwargs['auto_scale'] else 1
+ vs_dtc = kwargs['vs_dtc']
+ nf_dtc = kwargs['vs_dtc'] if kwargs['auto_scale'] else kwargs['nf_dtc']
+ nf_dtcr = kwargs['nf_dtcr'] if isinstance(kwargs['nf_dtcr'], int) else 2
img = Constants.QEMU_VM_KERNEL
@@ -113,10 +67,10 @@ class QemuManager(object):
vif2_mac = kwargs['tg_if2_mac'] if nf_node == nf_nodes \
else '52:54:00:00:{id:02x}:01'.format(id=qemu_id + 1)
- self.machines_affinity[name] = get_affinity_vm(
+ self.machines_affinity[name] = CpuUtils.get_affinity_nf(
nodes=self.nodes, node=node, nf_chains=nf_chains,
nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
- cpu_count_int=cpu_count_int, vnf_count_int=vnf_count_int)
+ vs_dtc=vs_dtc, nf_dtc=nf_dtc, nf_dtcr=nf_dtcr)
self.machines[name] = QemuUtils(
node=self.nodes[node], qemu_id=qemu_id,
@@ -131,10 +85,10 @@ class QemuManager(object):
jumbo_frames=kwargs['jumbo'])
self.machines[name].qemu_add_vhost_user_if(
sock1, jumbo_frames=kwargs['jumbo'], queues=queues,
- queue_size=1024)
+ queue_size=kwargs['perf_qemu_qsz'])
self.machines[name].qemu_add_vhost_user_if(
sock2, jumbo_frames=kwargs['jumbo'], queues=queues,
- queue_size=1024)
+ queue_size=kwargs['perf_qemu_qsz'])
def construct_vms_on_all_nodes(self, **kwargs):
"""Construct 1..Mx1..N VMs(s) with specified name on all nodes.
diff --git a/resources/libraries/python/autogen/Regenerator.py b/resources/libraries/python/autogen/Regenerator.py
index f47c88b022..85625c4c35 100644
--- a/resources/libraries/python/autogen/Regenerator.py
+++ b/resources/libraries/python/autogen/Regenerator.py
@@ -119,7 +119,7 @@ class Regenerator(object):
# Not supported by AVF driver.
# https://git.fd.io/vpp/tree/src/plugins/avf/README.md
emit = False
- if "-16vm-" in suite_id or "-16dcr-" in suite_id:
+ if "-16vm2t-" in suite_id or "-16dcr2t-" in suite_id:
if kwargs["phy_cores"] > 3:
# CSIT lab only has 28 (physical) core processors,
# so these tests would fail when attempting to assign cores.
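The renamed suite ids encode the NF count and thread count, so the guard now matches only the two-thread 16-NF density suites. A loose sketch of the intent (the suite id below is made up for illustration):

# Sketch of the AVF density-suite guard; the suite id is illustrative.
def avf_suite_emitted(suite_id, phy_cores):
    """Return False for dense NF suites that exceed the CSIT lab cores."""
    if "-16vm2t-" in suite_id or "-16dcr2t-" in suite_id:
        if phy_cores > 3:
            # 28-core processors cannot fit this core assignment.
            return False
    return True

print(avf_suite_emitted("10ge2p1x710-eth-l2bd-16vm2t-base", phy_cores=4))  # False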