author    pmikus <pmikus@cisco.com>  2021-06-11 13:06:34 +0000
committer Peter Mikus <pmikus@cisco.com>  2021-06-17 13:26:40 +0000
commit    a275fa0062158d712152f542b7bc9ec40b5c5f31 (patch)
tree      7d55c04747fd49f0ac08c9452976088f2df0791f /resources/libraries/python
parent    70766243f6c00c77fedc00d68114c108528950d6 (diff)
Core: Rework CPU allocation
Signed-off-by: pmikus <pmikus@cisco.com>
Change-Id: I6826add7b3032041632c3952c45a3c64409400b0
Diffstat (limited to 'resources/libraries/python')
-rw-r--r--  resources/libraries/python/CpuUtils.py       121
-rw-r--r--  resources/libraries/python/IPsecUtil.py       57
-rw-r--r--  resources/libraries/python/InterfaceUtil.py   52
-rw-r--r--  resources/libraries/python/NATUtil.py          2
4 files changed, 169 insertions(+), 63 deletions(-)
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index e23404b1dd..f556c51814 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -85,29 +85,6 @@ class CpuUtils:
)
@staticmethod
- def worker_count_from_cores_and_smt(phy_cores, smt_used):
- """Simple conversion utility, needs smt from caller.
-
- The implementation assumes we pack 1 or 2 workers per core,
- depending on hyperthreading.
-
- Some keywords use None to indicate no core/worker limit,
- so this converts None to None.
-
- :param phy_cores: How many physical cores to use for workers.
- :param smt_used: Whether symmetric multithreading is used.
- :type phy_cores: Optional[int]
- :type smt_used: bool
- :returns: How many VPP workers fit into the given number of cores.
- :rtype: Optional[int]
- """
- if phy_cores is None:
- return None
- workers_per_core = CpuUtils.NR_OF_THREADS if smt_used else 1
- workers = phy_cores * workers_per_core
- return workers
-
- @staticmethod
def cpu_node_count(node):
"""Return count of numa nodes.
@@ -502,8 +479,8 @@ class CpuUtils:
@staticmethod
def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
- """
- Get idle CPU List
+ """Get idle CPU List.
+
:param node: Node dictionary with cpuinfo.
:param cpu_node: Numa node number.
:param smt_used: True - we want to use SMT, otherwise false.
@@ -521,3 +498,97 @@ class CpuUtils:
cpu_idle_list = [i for i in cpu_list
if str(i) not in cpu_alloc_str.split(sep)]
return cpu_idle_list
+
+ @staticmethod
+ def get_affinity_vpp_vswitch(
+ nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
+ """Get affinity or VPP switch.
+
+ :param nodes: Topology nodes.
+ :param node: Topology node string.
+ :param phy_cores: Number of physical cores to allocate.
+ :param rx_queues: Number of RX queues. (Optional, Default: None)
+ :param rxd: Number of RX descriptors. (Optional, Default: None)
+ :param txd: Number of TX descriptors. (Optional, Default: None)
+ :type nodes: dict
+ :type node: str
+ :type phy_cores: int
+ :type rx_queues: int
+ :type rxd: int
+ :type txd: int
+ :returns: Compute resource information dictionary.
+ :rtype: dict
+ """
+ # Number of Data Plane physical cores.
+ dp_cores_count = BuiltIn().get_variable_value(
+ f"${{dp_cores_count}}", phy_cores
+ )
+ # Number of Feature Plane physical cores.
+ fp_cores_count = BuiltIn().get_variable_value(
+ f"${{fp_cores_count}}", phy_cores - dp_cores_count
+ )
+ # Ratio between RX queues and data plane threads.
+ rxq_ratio = BuiltIn().get_variable_value(
+ f"${{rxq_ratio}}", 1
+ )
+
+ dut_pf_keys = BuiltIn().get_variable_value(
+ f"${{{node}_pf_keys}}"
+ )
+ # SMT override in case of non-standard test cases.
+ smt_used = BuiltIn().get_variable_value(
+ f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
+ )
+
+ cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
+ skip_cnt = Constants.CPU_CNT_SYSTEM
+ cpu_main = CpuUtils.cpu_list_per_node_str(
+ nodes[node], cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=Constants.CPU_CNT_MAIN,
+ smt_used=False
+ )
+ skip_cnt += Constants.CPU_CNT_MAIN
+ cpu_dp = CpuUtils.cpu_list_per_node_str(
+ nodes[node], cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(dp_cores_count),
+ smt_used=smt_used
+ ) if int(dp_cores_count) else u""
+ skip_cnt = skip_cnt + int(dp_cores_count)
+ cpu_fp = CpuUtils.cpu_list_per_node_str(
+ nodes[node], cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(fp_cores_count),
+ smt_used=smt_used
+ ) if int(fp_cores_count) else u""
+
+ fp_count_int = \
+ int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(fp_cores_count)
+ dp_count_int = \
+ int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(dp_cores_count)
+
+ rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
+ rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+ compute_resource_info = dict()
+ compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
+ compute_resource_info[u"smt_used"] = smt_used
+ compute_resource_info[u"cpu_main"] = cpu_main
+ compute_resource_info[u"cpu_dp"] = cpu_dp
+ compute_resource_info[u"cpu_fp"] = cpu_fp
+ compute_resource_info[u"cpu_wt"] = \
+ u",".join(filter(None, [cpu_dp, cpu_fp]))
+ compute_resource_info[u"cpu_alloc_str"] = \
+ u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+ compute_resource_info[u"cpu_count_int"] = \
+ int(dp_cores_count) + int(fp_cores_count)
+ compute_resource_info[u"rxd_count_int"] = rxd
+ compute_resource_info[u"txd_count_int"] = txd
+ compute_resource_info[u"rxq_count_int"] = rxq_count_int
+ compute_resource_info[u"fp_count_int"] = fp_count_int
+ compute_resource_info[u"dp_count_int"] = dp_count_int
+
+ return compute_resource_info
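
The allocation above carves the main, data-plane, and feature-plane threads out of one NUMA-local CPU list by advancing a skip_cnt offset past each group. A minimal standalone sketch of that pattern follows; the CPU list and the reserved-core counts are illustrative assumptions, not CSIT's actual Constants or topology data.

# Sketch of the skip_cnt-based carving used in get_affinity_vpp_vswitch;
# the CPU list and reserved-core counts below are illustrative assumptions.
CPU_CNT_SYSTEM = 1  # cores skipped for the OS
CPU_CNT_MAIN = 1    # cores reserved for the VPP main thread

def carve_cpus(cpu_list, dp_cores, fp_cores):
    """Split a NUMA-local CPU list into main/dp/fp groups."""
    skip_cnt = CPU_CNT_SYSTEM
    cpu_main = cpu_list[skip_cnt:skip_cnt + CPU_CNT_MAIN]
    skip_cnt += CPU_CNT_MAIN
    cpu_dp = cpu_list[skip_cnt:skip_cnt + dp_cores]
    skip_cnt += dp_cores
    cpu_fp = cpu_list[skip_cnt:skip_cnt + fp_cores]
    return cpu_main, cpu_dp, cpu_fp

# 8 cores on the NUMA node, 2 data-plane cores, 1 feature-plane core:
print(carve_cpus(list(range(8)), 2, 1))  # ([1], [2, 3], [4])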
diff --git a/resources/libraries/python/IPsecUtil.py b/resources/libraries/python/IPsecUtil.py
index e066bc9424..520cf7b932 100644
--- a/resources/libraries/python/IPsecUtil.py
+++ b/resources/libraries/python/IPsecUtil.py
@@ -29,8 +29,9 @@ from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.IPUtil import IPUtil, IpDscp, MPLS_LABEL_INVALID
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import scp_node
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
from resources.libraries.python.VatExecutor import VatExecutor
+from resources.libraries.python.VPPUtil import VPPUtil
IPSEC_UDP_PORT_NONE = 0xffff
@@ -324,27 +325,57 @@ class IPsecUtil:
@staticmethod
def vpp_ipsec_crypto_sw_scheduler_set_worker(
- node, worker_index, crypto_enable=False):
+ node, workers, crypto_enable=False):
"""Enable or disable crypto on specific vpp worker threads.
:param node: VPP node to enable or disable crypto for worker threads.
- :param worker_index: VPP worker thread index.
+ :param workers: List of VPP thread numbers.
:param crypto_enable: Disable or enable crypto work.
:type node: dict
- :type worker_index: int
+ :type workers: Iterable[int]
:type crypto_enable: bool
:raises RuntimeError: If failed to enable or disable crypto for worker
thread or if no API reply received.
"""
- cmd = u"crypto_sw_scheduler_set_worker"
- err_msg = f"Failed to disable/enable crypto for worker thread " \
- f"on host {node[u'host']}"
- args = dict(
- worker_index=worker_index,
- crypto_enable=crypto_enable
- )
- with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_reply(err_msg)
+ for worker in workers:
+ cmd = u"crypto_sw_scheduler_set_worker"
+ err_msg = f"Failed to disable/enable crypto for worker thread " \
+ f"on host {node[u'host']}"
+ args = dict(
+ worker_index=worker - 1,
+ crypto_enable=crypto_enable
+ )
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def vpp_ipsec_crypto_sw_scheduler_set_worker_on_all_duts(
+ nodes, workers, crypto_enable=False):
+ """Enable or disable crypto on specific vpp worker threads.
+
+ :param nodes: Topology nodes with DUTs to set crypto workers on.
+ :param workers: Comma separated list of CPU ids of the worker threads.
+ :param crypto_enable: Disable or enable crypto work.
+ :type nodes: dict
+ :type workers: str
+ :type crypto_enable: bool
+ :raises RuntimeError: If failed to enable or disable crypto for worker
+ thread or if no API reply received.
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ thread_data = VPPUtil.vpp_show_threads(node)
+ worker_cnt = len(thread_data) - 1
+ if not worker_cnt:
+ continue
+ worker_ids = list()
+ for item in thread_data:
+ if str(item.cpu_id) in workers.split(u","):
+ worker_ids.append(item.id)
+
+ IPsecUtil.vpp_ipsec_crypto_sw_scheduler_set_worker(
+ node, workers=worker_ids, crypto_enable=crypto_enable
+ )
@staticmethod
def vpp_ipsec_add_sad_entry(
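
The new keyword resolves the comma separated cpu_ids into VPP's internal thread ids via show threads, then passes id - 1 to the PAPI call as worker_index (thread id 0 is the main thread; the API indexes workers from 0). A small sketch of that mapping; ThreadRow and the rows below are made-up stand-ins for VPPUtil.vpp_show_threads output.

# Sketch of the cpu_id -> worker_index mapping; ThreadRow and the rows
# below are hypothetical stand-ins for VPPUtil.vpp_show_threads output.
from collections import namedtuple

ThreadRow = namedtuple("ThreadRow", "id name cpu_id")

thread_data = [
    ThreadRow(0, "vpp_main", 1),   # thread id 0 is the main thread
    ThreadRow(1, "vpp_wk_0", 2),
    ThreadRow(2, "vpp_wk_1", 3),
]

workers = "2,3"  # comma separated cpu_ids, as passed from the test level
worker_ids = [
    item.id for item in thread_data if str(item.cpu_id) in workers.split(",")
]
# The PAPI call indexes workers from 0, hence worker_index = id - 1.
print([worker - 1 for worker in worker_ids])  # [0, 1]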
diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 94c78a1bef..fed2beed3b 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -20,7 +20,6 @@ from ipaddress import ip_address
from robot.api import logger
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.L2Util import L2Util
@@ -1930,64 +1929,69 @@ class InterfaceUtil:
@staticmethod
def vpp_round_robin_rx_placement(
- node, prefix, dp_worker_limit=None):
+ node, prefix, workers=None):
"""Set Round Robin interface RX placement on all worker threads
on node.
- If specified, dp_core_limit limits the number of physical cores used
+ If specified, workers selects which worker threads are used
for data plane I/O work. Other cores are presumed to do something else,
e.g. asynchronous crypto processing.
None means all workers are used for data plane work.
- Note this keyword specifies workers, not cores.
:param node: Topology nodes.
:param prefix: Interface name prefix.
- :param dp_worker_limit: How many cores for data plane work.
+ :param workers: Comma separated list of CPU ids of threads intended for
+ data plane work.
:type node: dict
:type prefix: str
- :type dp_worker_limit: Optional[int]
+ :type workers: str
"""
- worker_id = 0
- worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
- if dp_worker_limit is not None:
- worker_cnt = min(worker_cnt, dp_worker_limit)
+ thread_data = VPPUtil.vpp_show_threads(node)
+ worker_cnt = len(thread_data) - 1
if not worker_cnt:
- return
+ return None
+ worker_ids = list()
+ if workers:
+ for item in thread_data:
+ if str(item.cpu_id) in workers.split(u","):
+ worker_ids.append(item.id)
+ else:
+ for item in thread_data:
+ if u"vpp_main" not in item.name:
+ worker_ids.append(item.id)
+
+ worker_idx = 0
for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
for interface in node[u"interfaces"].values():
if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
and prefix in interface[u"name"]:
InterfaceUtil.vpp_sw_interface_set_rx_placement(
node, placement[u"sw_if_index"], placement[u"queue_id"],
- worker_id % worker_cnt
+ worker_ids[worker_idx % len(worker_ids)] - 1
)
- worker_id += 1
+ worker_idx += 1
@staticmethod
def vpp_round_robin_rx_placement_on_all_duts(
- nodes, prefix, dp_core_limit=None):
- """Set Round Robin interface RX placement on all worker threads
+ nodes, prefix, workers=None):
+ """Set Round Robin interface RX placement on worker threads
on all DUTs.
- If specified, dp_core_limit limits the number of physical cores used
+ If specified, workers selects which worker threads are used
for data plane I/O work. Other cores are presumed to do something else,
e.g. asynchronous crypto processing.
None means all workers are used for data plane work.
- Note this keyword specifies cores, not workers.
:param nodes: Topology nodes.
:param prefix: Interface name prefix.
- :param dp_worker_limit: How many cores for data plane work.
+ :param workers: Comma separated list of CPU ids of threads intended for
+ data plane work.
:type nodes: dict
:type prefix: str
- :type dp_worker_limit: Optional[int]
+ :type workers: str
"""
for node in nodes.values():
if node[u"type"] == NodeType.DUT:
- dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
- phy_cores=dp_core_limit,
- smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
- )
InterfaceUtil.vpp_round_robin_rx_placement(
- node, prefix, dp_worker_limit
+ node, prefix, workers
)
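
The placement loop distributes RX queues round-robin over the selected worker ids, again translating VPP thread ids to the zero-based index the placement API expects. A toy illustration of the modulo stepping, with hypothetical queue names:

# Toy illustration of the modulo round-robin above; queue names are
# hypothetical, worker_ids are VPP thread ids (main thread is id 0).
worker_ids = [1, 2]  # e.g. vpp_wk_0 and vpp_wk_1
queues = ["queue0", "queue1", "queue2", "queue3"]

for worker_idx, queue in enumerate(queues):
    worker_index = worker_ids[worker_idx % len(worker_ids)] - 1
    print(f"{queue} -> worker_index {worker_index}")
# queue0 -> 0, queue1 -> 1, queue2 -> 0, queue3 -> 1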
diff --git a/resources/libraries/python/NATUtil.py b/resources/libraries/python/NATUtil.py
index 60e0e6d1a3..8a5d8c1404 100644
--- a/resources/libraries/python/NATUtil.py
+++ b/resources/libraries/python/NATUtil.py
@@ -288,7 +288,7 @@ class NATUtil:
:rtype: int
"""
# vpp-device tests have no dedicated physical core, so
- # ${thr_count_int} == 0 but we need to use one thread
+ # ${dp_count_int} == 0 but we need to use one thread
threads = 1 if not int(threads) else int(threads)
rest, mult = modf(log2(sessions/(10*threads)))
return 2 ** (int(mult) + (1 if rest else 0)) * 10
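
The renamed variable feeds the same sizing formula, which rounds sessions/(10*threads) up to the next power of two and scales it back by 10. A worked check of that arithmetic; nat_table_size is a hypothetical name mirroring the method body above.

# Worked check of the rounding above; nat_table_size mirrors the method
# body and is a hypothetical standalone name.
from math import log2, modf

def nat_table_size(sessions, threads):
    # vpp-device runs can pass 0 threads; fall back to one, as above.
    threads = 1 if not int(threads) else int(threads)
    rest, mult = modf(log2(sessions / (10 * threads)))
    return 2 ** (int(mult) + (1 if rest else 0)) * 10

print(nat_table_size(64512, 1))  # 81920: log2(6451.2) ~ 12.65 -> 2**13 * 10
print(nat_table_size(40960, 4))  # 10240: 1024 is exactly 2**10 -> 2**10 * 10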