author    pmikus <peter.mikus@protonmail.ch>  2023-07-25 10:47:12 +0000
committer Peter Mikus <peter.mikus@protonmail.ch>  2023-07-27 08:25:02 +0000
commit    9c926fdd75cc1d65faa1ee50ce9133e754fdd498 (patch)
tree      2d14680dc9a7dc43b935559bb176de2f1503613f /resources/libraries/python
parent    f58649004a975b8e02dd3935669fd5e15c525817 (diff)
feat(core): Core allocation
Signed-off-by: pmikus <peter.mikus@protonmail.ch>
Change-Id: I782b87190dbee6e0a12c97f616b80539cd6614bd
Diffstat (limited to 'resources/libraries/python')
-rw-r--r--  resources/libraries/python/CpuUtils.py         | 154
-rw-r--r--  resources/libraries/python/DPDK/L3fwdTest.py   |  23
-rw-r--r--  resources/libraries/python/DPDK/TestpmdTest.py |  21
-rw-r--r--  resources/libraries/python/IPsecUtil.py        |  13
-rw-r--r--  resources/libraries/python/InterfaceUtil.py    |  17
5 files changed, 119 insertions(+), 109 deletions(-)
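
At a glance, the commit moves per-DUT iteration out of the individual test
libraries and into CpuUtils.get_affinity_vswitch itself: the keyword now loops
over every DUT in the topology and returns one dictionary whose worker-core
entries are keyed by node name. A minimal before/after sketch of a call site;
the node name "DUT1" and the queue count are illustrative assumptions:

# Before this commit: callers iterated DUTs and called once per node.
# for node in nodes:
#     if u"DUT" in node:
#         info = CpuUtils.get_affinity_vswitch(nodes, node, phy_cores)
#         cpu_dp = info[u"cpu_dp"]

# After this commit: one call covers all DUTs; results are keyed per node.
info = CpuUtils.get_affinity_vswitch(nodes, phy_cores, rx_queues=4)
cpu_dp = info["DUT1_cpu_dp"]  # data-plane core list string for node "DUT1"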
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index 1e306f025e..5f43e211a2 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -17,7 +17,7 @@ from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
__all__ = [u"CpuUtils"]
@@ -499,17 +499,15 @@ class CpuUtils:
@staticmethod
def get_affinity_vswitch(
- nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
- """Get affinity for vswitch.
+ nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+ """Get affinity for vswitch on all DUTs.
:param nodes: Topology nodes.
- :param node: Topology node string.
:param phy_cores: Number of physical cores to allocate.
:param rx_queues: Number of RX queues. (Optional, Default: None)
:param rxd: Number of RX descriptors. (Optional, Default: None)
:param txd: Number of TX descriptors. (Optional, Default: None)
:type nodes: dict
- :type node: str
:type phy_cores: int
:type rx_queues: int
:type rxd: int
@@ -517,76 +515,82 @@ class CpuUtils:
:returns: Compute resource information dictionary.
:rtype: dict
"""
- # Number of Data Plane physical cores.
- dp_cores_count = BuiltIn().get_variable_value(
- f"${{dp_cores_count}}", phy_cores
- )
- # Number of Feature Plane physical cores.
- fp_cores_count = BuiltIn().get_variable_value(
- f"${{fp_cores_count}}", phy_cores - dp_cores_count
- )
- # Ratio between RX queues and data plane threads.
- rxq_ratio = BuiltIn().get_variable_value(
- f"${{rxq_ratio}}", 1
- )
-
- dut_pf_keys = BuiltIn().get_variable_value(
- f"${{{node}_pf_keys}}"
- )
- # SMT override in case of non standard test cases.
- smt_used = BuiltIn().get_variable_value(
- f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
- )
-
- cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
- skip_cnt = Constants.CPU_CNT_SYSTEM
- cpu_main = CpuUtils.cpu_list_per_node_str(
- nodes[node], cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=Constants.CPU_CNT_MAIN,
- smt_used=False
- )
- skip_cnt += Constants.CPU_CNT_MAIN
- cpu_dp = CpuUtils.cpu_list_per_node_str(
- nodes[node], cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=int(dp_cores_count),
- smt_used=smt_used
- ) if int(dp_cores_count) else u""
- skip_cnt = skip_cnt + int(dp_cores_count)
- cpu_fp = CpuUtils.cpu_list_per_node_str(
- nodes[node], cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=int(fp_cores_count),
- smt_used=smt_used
- ) if int(fp_cores_count) else u""
-
- fp_count_int = \
- int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
- else int(fp_cores_count)
- dp_count_int = \
- int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
- else int(dp_cores_count)
-
- rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
- rxq_count_int = 1 if not rxq_count_int else rxq_count_int
-
compute_resource_info = dict()
- compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
- compute_resource_info[u"smt_used"] = smt_used
- compute_resource_info[u"cpu_main"] = cpu_main
- compute_resource_info[u"cpu_dp"] = cpu_dp
- compute_resource_info[u"cpu_fp"] = cpu_fp
- compute_resource_info[u"cpu_wt"] = \
- u",".join(filter(None, [cpu_dp, cpu_fp]))
- compute_resource_info[u"cpu_alloc_str"] = \
- u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
- compute_resource_info[u"cpu_count_int"] = \
- int(dp_cores_count) + int(fp_cores_count)
- compute_resource_info[u"rxd_count_int"] = rxd
- compute_resource_info[u"txd_count_int"] = txd
- compute_resource_info[u"rxq_count_int"] = rxq_count_int
- compute_resource_info[u"fp_count_int"] = fp_count_int
- compute_resource_info[u"dp_count_int"] = dp_count_int
+ for node_name, node in nodes.items():
+ if node["type"] != NodeType.DUT:
+ continue
+ # Number of Data Plane physical cores.
+ dp_cores_count = BuiltIn().get_variable_value(
+ f"${{dp_cores_count}}", phy_cores
+ )
+ # Number of Feature Plane physical cores.
+ fp_cores_count = BuiltIn().get_variable_value(
+ f"${{fp_cores_count}}", phy_cores - dp_cores_count
+ )
+ # Ratio between RX queues and data plane threads.
+ rxq_ratio = BuiltIn().get_variable_value(
+ f"${{rxq_ratio}}", 1
+ )
+
+ dut_pf_keys = BuiltIn().get_variable_value(
+ f"${{{node_name}_pf_keys}}"
+ )
+ # SMT override in case of non standard test cases.
+ smt_used = BuiltIn().get_variable_value(
+ f"${{smt_used}}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+ )
+
+ cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+ skip_cnt = Constants.CPU_CNT_SYSTEM
+ cpu_main = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=Constants.CPU_CNT_MAIN,
+ smt_used=False
+ )
+ skip_cnt += Constants.CPU_CNT_MAIN
+ cpu_dp = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(dp_cores_count),
+ smt_used=smt_used
+ ) if int(dp_cores_count) else ""
+ skip_cnt = skip_cnt + int(dp_cores_count)
+ cpu_fp = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(fp_cores_count),
+ smt_used=smt_used
+ ) if int(fp_cores_count) else ""
+
+ fp_count_int = \
+ int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(fp_cores_count)
+ dp_count_int = \
+ int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(dp_cores_count)
+
+ rxq_count_int = \
+ int(rx_queues) if rx_queues \
+ else int(dp_count_int/rxq_ratio)
+ rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+ compute_resource_info["buffers_numa"] = \
+ 215040 if smt_used else 107520
+ compute_resource_info["smt_used"] = smt_used
+ compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+ compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+ compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+ compute_resource_info[f"{node_name}_cpu_wt"] = \
+ ",".join(filter(None, [cpu_dp, cpu_fp]))
+ compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+ ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+ compute_resource_info["cpu_count_int"] = \
+ int(dp_cores_count) + int(fp_cores_count)
+ compute_resource_info["rxd_count_int"] = rxd
+ compute_resource_info["txd_count_int"] = txd
+ compute_resource_info["rxq_count_int"] = rxq_count_int
+ compute_resource_info["fp_count_int"] = fp_count_int
+ compute_resource_info["dp_count_int"] = dp_count_int
return compute_resource_info
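
For reference, the dictionary built above now mixes per-node keys with global
ones; the un-prefixed keys (buffers_numa, smt_used, the *_count_int entries)
are overwritten on each loop pass, so the last DUT's values win. A sketch of
the expected shape for a two-DUT, 2-core, SMT topology; every value here is an
illustrative assumption, not output from a real run:

# Illustrative only; core IDs and counts are made-up example values.
compute_resource_info = {
    "buffers_numa": 215040,            # global, last DUT wins
    "smt_used": True,                  # global, last DUT wins
    "DUT1_cpu_main": "1",
    "DUT1_cpu_dp": "2,3,66,67",        # 2 physical cores + SMT siblings
    "DUT1_cpu_fp": "",
    "DUT1_cpu_wt": "2,3,66,67",
    "DUT1_cpu_alloc_str": "1,2,3,66,67",
    "DUT2_cpu_main": "1",
    "DUT2_cpu_dp": "2,3,66,67",
    "DUT2_cpu_fp": "",
    "DUT2_cpu_wt": "2,3,66,67",
    "DUT2_cpu_alloc_str": "1,2,3,66,67",
    "cpu_count_int": 2,                # global, last DUT wins
    "rxd_count_int": None,
    "txd_count_int": None,
    "rxq_count_int": 4,
    "fp_count_int": 0,
    "dp_count_int": 4,
}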
diff --git a/resources/libraries/python/DPDK/L3fwdTest.py b/resources/libraries/python/DPDK/L3fwdTest.py
index 265806c7e9..178c747da5 100644
--- a/resources/libraries/python/DPDK/L3fwdTest.py
+++ b/resources/libraries/python/DPDK/L3fwdTest.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -56,12 +56,11 @@ class L3fwdTest:
cpu_count_int = dp_count_int = int(phy_cores)
dp_cores = cpu_count_int+1
tg_flip = topology_info[f"tg_if1_pci"] > topology_info[f"tg_if2_pci"]
- for node in nodes:
- if u"DUT" in node:
- compute_resource_info = CpuUtils.get_affinity_vswitch(
- nodes, node, phy_cores, rx_queues=rx_queues,
- rxd=rxd, txd=txd
- )
+ compute_resource_info = CpuUtils.get_affinity_vswitch(
+ nodes, phy_cores, rx_queues=rx_queues, rxd=rxd, txd=txd
+ )
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
if dp_count_int > 1:
BuiltIn().set_tags('MTHREAD')
else:
@@ -70,12 +69,12 @@ class L3fwdTest:
f"{dp_count_int}T{cpu_count_int}C"
)
- cpu_dp = compute_resource_info[u"cpu_dp"]
- rxq_count_int = compute_resource_info[u"rxq_count_int"]
- if1 = topology_info[f"{node}_pf1"][0]
- if2 = topology_info[f"{node}_pf2"][0]
+ cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]
+ rxq_count_int = compute_resource_info["rxq_count_int"]
+ if1 = topology_info[f"{node_name}_pf1"][0]
+ if2 = topology_info[f"{node_name}_pf2"][0]
L3fwdTest.start_l3fwd(
- nodes, nodes[node], if1=if1, if2=if2, lcores_list=cpu_dp,
+ nodes, node, if1=if1, if2=if2, lcores_list=cpu_dp,
nb_cores=dp_count_int, queue_nums=rxq_count_int,
jumbo_frames=jumbo_frames, tg_flip=tg_flip
)
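
Alongside the hoisted affinity call, the substring check (u"DUT" in node)
gives way to a type check against the topology's NodeType enum. A minimal
sketch of the new filtering idiom; the NodeType import into this module is
an assumption, as no import hunk for it appears above:

from resources.libraries.python.topology import NodeType  # assumed import

for node_name, node in nodes.items():
    if node["type"] != NodeType.DUT:
        continue  # skip TG and other non-DUT nodes
    # each DUT looks up its own allocation by name
    cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]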
diff --git a/resources/libraries/python/DPDK/TestpmdTest.py b/resources/libraries/python/DPDK/TestpmdTest.py
index ca98da2f86..3baba30715 100644
--- a/resources/libraries/python/DPDK/TestpmdTest.py
+++ b/resources/libraries/python/DPDK/TestpmdTest.py
@@ -60,12 +60,11 @@ class TestpmdTest:
cpu_count_int = dp_count_int = int(phy_cores)
dp_cores = cpu_count_int+1
- for node in nodes:
- if u"DUT" in node:
- compute_resource_info = CpuUtils.get_affinity_vswitch(
- nodes, node, phy_cores, rx_queues=rx_queues,
- rxd=rxd, txd=txd
- )
+ compute_resource_info = CpuUtils.get_affinity_vswitch(
+ nodes, phy_cores, rx_queues=rx_queues, rxd=rxd, txd=txd
+ )
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
if dp_count_int > 1:
BuiltIn().set_tags('MTHREAD')
else:
@@ -74,12 +73,12 @@ class TestpmdTest:
f"{dp_count_int}T{cpu_count_int}C"
)
- cpu_dp = compute_resource_info[u"cpu_dp"]
- rxq_count_int = compute_resource_info[u"rxq_count_int"]
- if1 = topology_info[f"{node}_pf1"][0]
- if2 = topology_info[f"{node}_pf2"][0]
+ cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]
+ rxq_count_int = compute_resource_info["rxq_count_int"]
+ if1 = topology_info[f"{node_name}_pf1"][0]
+ if2 = topology_info[f"{node_name}_pf2"][0]
TestpmdTest.start_testpmd(
- nodes[node], if1=if1, if2=if2, lcores_list=cpu_dp,
+ node, if1=if1, if2=if2, lcores_list=cpu_dp,
nb_cores=dp_count_int, queue_nums=rxq_count_int,
jumbo_frames=jumbo_frames, rxq_size=nic_rxq_size,
txq_size=nic_txq_size
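
The same hoisting applies here, with one further simplification: iterating
nodes.items() yields the node dictionary directly, so start_testpmd receives
node rather than re-indexing nodes[node]. A tiny sketch of the idiom, with
the remaining arguments elided:

# Before: iterate keys, index back into the topology dict.
# for node in nodes:                   # node is a name string, e.g. "DUT1"
#     TestpmdTest.start_testpmd(nodes[node], ...)

# After: iterate items, pass the node dictionary straight through.
for node_name, node in nodes.items():  # node is the topology node dict
    if node["type"] == NodeType.DUT:
        TestpmdTest.start_testpmd(node, ...)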
diff --git a/resources/libraries/python/IPsecUtil.py b/resources/libraries/python/IPsecUtil.py
index 363558dcaf..39c6a4ce2f 100644
--- a/resources/libraries/python/IPsecUtil.py
+++ b/resources/libraries/python/IPsecUtil.py
@@ -22,6 +22,8 @@ from ipaddress import ip_network, ip_address
from random import choice
from string import ascii_letters
+from robot.libraries.BuiltIn import BuiltIn
+
from resources.libraries.python.Constants import Constants
from resources.libraries.python.IncrementUtil import ObjIncrement
from resources.libraries.python.InterfaceUtil import InterfaceUtil, \
@@ -358,25 +360,26 @@ class IPsecUtil:
@staticmethod
def vpp_ipsec_crypto_sw_scheduler_set_worker_on_all_duts(
- nodes, workers, crypto_enable=False):
+ nodes, crypto_enable=False):
"""Enable or disable crypto on specific vpp worker threads.
:param node: VPP node to enable or disable crypto for worker threads.
- :param workers: List of VPP thread numbers.
:param crypto_enable: Disable or enable crypto work.
:type node: dict
- :type workers: Iterable[int]
:type crypto_enable: bool
:raises RuntimeError: If failed to enable or disable crypto for worker
thread or if no API reply received.
"""
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
thread_data = VPPUtil.vpp_show_threads(node)
worker_cnt = len(thread_data) - 1
if not worker_cnt:
return None
worker_ids = list()
+ workers = BuiltIn().get_variable_value(
+ f"${{{node_name}_cpu_dp}}"
+ )
for item in thread_data:
if str(item.cpu_id) in workers.split(u","):
worker_ids.append(item.id)
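
With the workers parameter gone, the keyword resolves each DUT's data-plane
core list from the Robot variable ${<node>_cpu_dp} at run time. A hedged
sketch of the expected flow; the step that publishes the affinity keys as
Robot variables is an assumption and is not shown in this diff:

from robot.libraries.BuiltIn import BuiltIn

# Assumed setup: export each per-node core list as a Robot test variable.
info = CpuUtils.get_affinity_vswitch(nodes, phy_cores=2)
for key, value in info.items():
    BuiltIn().set_test_variable(f"${{{key}}}", value)

# The keyword then resolves e.g. "${DUT1_cpu_dp}" -> "2,3,66,67" and matches
# each VPP worker thread's cpu_id against that comma-separated list.
workers = BuiltIn().get_variable_value("${DUT1_cpu_dp}")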
diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 42474b496a..7d9164d8f3 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -18,6 +18,7 @@ from enum import IntEnum
from ipaddress import ip_address
from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
@@ -2013,7 +2014,7 @@ class InterfaceUtil:
@staticmethod
def vpp_round_robin_rx_placement_on_all_duts(
- nodes, prefix, workers=None):
+ nodes, prefix, use_dp_cores=False):
"""Set Round Robin interface RX placement on worker threads
on all DUTs.
@@ -2024,14 +2025,18 @@ class InterfaceUtil:
:param nodes: Topology nodes.
:param prefix: Interface name prefix.
- :param workers: Comma separated worker index numbers intended for
- dataplane work.
+ :param use_dp_cores: Limit to dataplane cores.
:type nodes: dict
:type prefix: str
- :type workers: str
+ :type use_dp_cores: bool
"""
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
+ workers = None
+ if use_dp_cores:
+ workers = BuiltIn().get_variable_value(
+ f"${{{node_name}_cpu_dp}}"
+ )
InterfaceUtil.vpp_round_robin_rx_placement(
node, prefix, workers
)
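
Call sites accordingly switch from passing an explicit worker string to the
boolean flag. A short usage sketch; the prefix value is illustrative:

# Place RX queues round-robin across all worker threads (previous default):
InterfaceUtil.vpp_round_robin_rx_placement_on_all_duts(nodes, prefix="avf")

# Restrict placement to each DUT's data-plane cores, read per node from
# the ${<node>_cpu_dp} Robot variable:
InterfaceUtil.vpp_round_robin_rx_placement_on_all_duts(
    nodes, prefix="avf", use_dp_cores=True
)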