path: root/resources/libraries/python/CpuUtils.py
Diffstat (limited to 'resources/libraries/python/CpuUtils.py')
-rw-r--r--  resources/libraries/python/CpuUtils.py | 182
1 file changed, 167 insertions(+), 15 deletions(-)
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index f261f9421e..c77d0f83b1 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,11 +13,13 @@
"""CPU utilities library."""
+from random import choice
+
from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
__all__ = [u"CpuUtils"]
@@ -232,7 +234,7 @@ class CpuUtils:
cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
- f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
+ f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
else:
cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"
@@ -268,7 +270,7 @@ class CpuUtils:
:returns: List of CPUs allocated to NF.
:rtype: list
:raises RuntimeError: If we require more cpus than available or if
- placement is not possible due to wrong parameters.
+ placement is not possible due to wrong parameters.
"""
if not 1 <= nf_chain <= nf_chains:
raise RuntimeError(u"ChainID is out of range!")
@@ -311,6 +313,36 @@ class CpuUtils:
return result
@staticmethod
+ def get_affinity_af_xdp(
+ node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+ """Get affinity for AF_XDP interface. Result will be used to pin IRQs.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface.
+ :param cpu_skip_cnt: Amount of CPU cores to skip.
+ :param cpu_cnt: CPU threads count.
+ :type node: dict
+ :type pf_key: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+ :returns: List of CPUs allocated to AF_XDP interface.
+ :rtype: list
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+ if smt_used:
+ cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+ return CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=smt_used
+ )
+
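A minimal usage sketch for the new get_affinity_af_xdp helper, assuming a topology node dict with a "cpuinfo" entry and an interface key named "port1" (both names are placeholders, not part of this change):

from resources.libraries.python.CpuUtils import CpuUtils

# node and u"port1" stand in for a parsed topology node and interface key.
irq_cpus = CpuUtils.get_affinity_af_xdp(
    node, u"port1", cpu_skip_cnt=2, cpu_cnt=4
)
# irq_cpus is a list of CPU ids on the interface's NUMA node, suitable
# for pinning the AF_XDP interface IRQs.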
+ @staticmethod
def get_affinity_nf(
nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
@@ -358,25 +390,25 @@ class CpuUtils:
@staticmethod
def get_affinity_trex(
- node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+ node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
"""Get affinity for T-Rex. Result will be used to pin T-Rex threads.
:param node: TG node.
- :param if1_pci: TG first interface.
- :param if2_pci: TG second interface.
+ :param if_key: TG interface key.
:param tg_mtc: TG main thread count.
:param tg_dtc: TG dataplane thread count.
:param tg_ltc: TG latency thread count.
+ :param tg_dtc_offset: TG dataplane thread offset.
:type node: dict
- :type if1_pci: str
- :type if2_pci: str
+ :type if_key: str
:type tg_mtc: int
:type tg_dtc: int
:type tg_ltc: int
+ :type tg_dtc_offset: int
:returns: List of CPUs allocated to T-Rex including numa node.
:rtype: int, int, int, list
"""
- interface_list = [if1_pci, if2_pci]
+ interface_list = [if_key]
cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
@@ -384,12 +416,11 @@ class CpuUtils:
smt_used=False)
threads = CpuUtils.cpu_slice_of_list_per_node(
- node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
- smt_used=False)
+ node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
+ cpu_cnt=tg_dtc, smt_used=False)
latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
- node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
- smt_used=False)
+ node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
return master_thread_id[0], latency_thread_id[0], cpu_node, threads
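A hedged sketch of the reworked call, assuming a TG node dict named tg_node and an interface key u"port1" (both placeholders): dataplane workers are now taken after the master and latency threads, shifted further by tg_dtc_offset.

# Placeholders: tg_node and u"port1" are not defined in this diff.
master_id, latency_id, numa_id, worker_cpus = CpuUtils.get_affinity_trex(
    tg_node, u"port1", tg_mtc=1, tg_dtc=4, tg_ltc=1, tg_dtc_offset=0
)
# worker_cpus start at skip_cnt = tg_mtc + tg_ltc + tg_dtc_offset, so the
# latency thread now sits directly after the master thread on the same
# NUMA node.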
@@ -445,4 +476,125 @@ class CpuUtils:
return CpuUtils.cpu_slice_of_list_per_node(
node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
- smt_used=smt_used)
+ smt_used=False)
+
+ @staticmethod
+ def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
+ """Get idle CPU List.
+
+ :param node: Node dictionary with cpuinfo.
+ :param cpu_node: Numa node number.
+ :param smt_used: True - we want to use SMT, otherwise false.
+ :param cpu_alloc_str: Comma-separated list of cores already allocated to VPP.
+ :param sep: Separator, default: ",".
+ :type node: dict
+ :type cpu_node: int
+ :type smt_used: bool
+ :type cpu_alloc_str: str
+ :type sep: str
+ :returns: List of idle CPUs on the given NUMA node.
+ :rtype: list
+ """
+ cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+ cpu_idle_list = [i for i in cpu_list
+ if str(i) not in cpu_alloc_str.split(sep)]
+ return cpu_idle_list
+
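For completeness, a minimal sketch of how get_cpu_idle_list could be used, assuming SMT is disabled and VPP already owns cores "2,3,4" on NUMA node 0 (assumed values):

# node is a placeholder for a topology node dict with a "cpuinfo" entry.
idle_cpus = CpuUtils.get_cpu_idle_list(
    node, cpu_node=0, smt_used=False, cpu_alloc_str=u"2,3,4"
)
# idle_cpus holds every CPU id on NUMA node 0 that is not listed in
# cpu_alloc_str, e.g. candidates for pinning auxiliary processes.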
+ @staticmethod
+ def get_affinity_vswitch(
+ nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+ """Get affinity for vswitch on all DUTs.
+
+ :param nodes: Topology nodes.
+ :param phy_cores: Number of physical cores to allocate.
+ :param rx_queues: Number of RX queues. (Optional, Default: None)
+ :param rxd: Number of RX descriptors. (Optional, Default: None)
+ :param txd: Number of TX descriptors. (Optional, Default: None)
+ :type nodes: dict
+ :type phy_cores: int
+ :type rx_queues: int
+ :type rxd: int
+ :type txd: int
+ :returns: Compute resource information dictionary.
+ :rtype: dict
+ """
+ compute_resource_info = dict()
+ for node_name, node in nodes.items():
+ if node["type"] != NodeType.DUT:
+ continue
+ # Number of Data Plane physical cores.
+ dp_cores_count = BuiltIn().get_variable_value(
+ "${dp_cores_count}", phy_cores
+ )
+ # Number of Feature Plane physical cores.
+ fp_cores_count = BuiltIn().get_variable_value(
+ "${fp_cores_count}", phy_cores - dp_cores_count
+ )
+ # Ratio between RX queues and data plane threads.
+ rxq_ratio = BuiltIn().get_variable_value(
+ "${rxq_ratio}", 1
+ )
+
+ dut_pf_keys = BuiltIn().get_variable_value(
+ f"${{{node_name}_pf_keys}}"
+ )
+ # SMT override in case of non-standard test cases.
+ smt_used = BuiltIn().get_variable_value(
+ "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+ )
+
+ cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+ skip_cnt = Constants.CPU_CNT_SYSTEM
+ cpu_main = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
+ smt_used=False
+ )
+ cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
+ skip_cnt += Constants.CPU_CNT_MAIN
+ cpu_dp = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(dp_cores_count),
+ smt_used=smt_used
+ ) if int(dp_cores_count) else ""
+ skip_cnt = skip_cnt + int(dp_cores_count)
+ cpu_fp = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(fp_cores_count),
+ smt_used=smt_used
+ ) if int(fp_cores_count) else ""
+
+ fp_count_int = \
+ int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(fp_cores_count)
+ dp_count_int = \
+ int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(dp_cores_count)
+
+ rxq_count_int = \
+ int(rx_queues) if rx_queues \
+ else int(dp_count_int/rxq_ratio)
+ rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+ compute_resource_info["buffers_numa"] = \
+ 215040 if smt_used else 107520
+ compute_resource_info["smt_used"] = smt_used
+ compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+ compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+ compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+ compute_resource_info[f"{node_name}_cpu_wt"] = \
+ ",".join(filter(None, [cpu_dp, cpu_fp]))
+ compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+ ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+ compute_resource_info["cpu_count_int"] = \
+ int(dp_cores_count) + int(fp_cores_count)
+ compute_resource_info["rxd_count_int"] = rxd
+ compute_resource_info["txd_count_int"] = txd
+ compute_resource_info["rxq_count_int"] = rxq_count_int
+ compute_resource_info["fp_count_int"] = fp_count_int
+ compute_resource_info["dp_count_int"] = dp_count_int
+
+ return compute_resource_info
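The returned dictionary is keyed per DUT plus a few global entries; a sketch of how a caller might expose it as Robot Framework test variables (the iteration below is an assumption, not shown in this diff):

from robot.libraries.BuiltIn import BuiltIn

# nodes is a placeholder for the parsed topology dictionary.
compute_resource_info = CpuUtils.get_affinity_vswitch(nodes, phy_cores=2)
for key, value in compute_resource_info.items():
    # e.g. ${DUT1_cpu_dp}, ${DUT1_cpu_alloc_str}, ${rxq_count_int}, ...
    BuiltIn().set_test_variable(f"${{{key}}}", value)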