author     Yulong Pei <yulong.pei@intel.com>        2020-11-19 13:56:18 -0700
committer  Vratko Polak <vrpolak@cisco.com>         2021-02-19 14:56:13 +0000
commit     f0e964d35af36f0923c6ae0421e74d94022cadba (patch)
tree       30356991ac278dcf52d2ba1f77d3314047293666 /resources/libraries/python
parent     3a2c37ffa4755d89247684935fd27d8868fbfe4b (diff)
Add test suites for crypto sw scheduler engine
This patch adds test suites for the VPP plugin crypto_sw_scheduler.
In IPsec synchronous mode, crypto and packet forwarding work is done
on the same worker cores; crypto_sw_scheduler can schedule crypto work
to other async crypto cores to improve overall crypto processing
capability.
The test suites configure a fixed 1 RX queue per port, then measure
IPsec performance with 1, 2 and 3 crypto cores.
The patch set includes test cases with 1, 2, 4 and 8 IPsec tunnels.
Vratko helped to change the test cases to count total physical cores
instead of counting only crypto cores, as before.
Change-Id: I0e67182e3d13273890a23703d838101900e25126
Signed-off-by: Yulong Pei <yulong.pei@intel.com>
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
Signed-off-by: pmikus <pmikus@cisco.com>
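
For context on the core accounting described above, here is a minimal,
hypothetical sketch (not part of this patch) of how a total physical core
budget splits into data plane and crypto workers when one core is kept for
data plane I/O. The split_cores helper and the 2-threads-per-core SMT value
are illustrative assumptions, roughly analogous to CpuUtils.NR_OF_THREADS.

    # Hypothetical sketch, not part of this patch: split a physical core
    # budget into data plane and crypto workers, keeping one core for
    # data plane I/O (the fixed 1 RX queue per port layout).
    THREADS_PER_CORE_WITH_SMT = 2  # assumption for this sketch

    def split_cores(phy_cores, smt_used, dp_cores=1):
        """Return (data plane workers, crypto workers) for a core budget."""
        workers_per_core = THREADS_PER_CORE_WITH_SMT if smt_used else 1
        dp_workers = dp_cores * workers_per_core
        crypto_workers = (phy_cores - dp_cores) * workers_per_core
        return dp_workers, crypto_workers

    # Without SMT, 2/3/4 total cores leave 1/2/3 cores for crypto work,
    # matching the 1, 2, 3 crypto core measurements in these suites.
    for cores in (2, 3, 4):
        print(cores, split_cores(cores, smt_used=False))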
Diffstat (limited to 'resources/libraries/python')
-rw-r--r--  resources/libraries/python/CpuUtils.py             | 23
-rw-r--r--  resources/libraries/python/IPsecUtil.py            | 24
-rw-r--r--  resources/libraries/python/InterfaceUtil.py        | 35
-rw-r--r--  resources/libraries/python/autogen/Regenerator.py  | 25
4 files changed, 103 insertions(+), 4 deletions(-)
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index 70177f5d9a..170cbe6b2e 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -85,6 +85,29 @@ class CpuUtils:
         )
 
     @staticmethod
+    def worker_count_from_cores_and_smt(phy_cores, smt_used):
+        """Simple conversion utility, needs smt from caller.
+
+        The implementation assumes we pack 1 or 2 workers per core,
+        depending on hyperthreading.
+
+        Some keywords use None to indicate no core/worker limit,
+        so this converts None to None.
+
+        :param phy_cores: How many physical cores to use for workers.
+        :param smt_used: Whether symmetric multithreading is used.
+        :type phy_cores: Optional[int]
+        :type smt_used: bool
+        :returns: How many VPP workers fit into the given number of cores.
+        :rtype: Optional[int]
+        """
+        if phy_cores is None:
+            return None
+        workers_per_core = CpuUtils.NR_OF_THREADS if smt_used else 1
+        workers = phy_cores * workers_per_core
+        return workers
+
+    @staticmethod
     def cpu_node_count(node):
         """Return count of numa nodes.
 
diff --git a/resources/libraries/python/IPsecUtil.py b/resources/libraries/python/IPsecUtil.py
index 99a470d934..2bc10d3ac9 100644
--- a/resources/libraries/python/IPsecUtil.py
+++ b/resources/libraries/python/IPsecUtil.py
@@ -321,6 +321,30 @@ class IPsecUtil:
             papi_exec.add(cmd, **args).get_reply(err_msg)
 
     @staticmethod
+    def vpp_ipsec_crypto_sw_scheduler_set_worker(
+            node, worker_index, crypto_enable=False):
+        """Enable or disable crypto on specific vpp worker threads.
+
+        :param node: VPP node to enable or disable crypto for worker threads.
+        :param worker_index: VPP worker thread index.
+        :param crypto_enable: Disable or enable crypto work.
+        :type node: dict
+        :type worker_index: int
+        :type crypto_enable: bool
+        :raises RuntimeError: If failed to enable or disable crypto for worker
+            thread or if no API reply received.
+        """
+        cmd = u"crypto_sw_scheduler_set_worker"
+        err_msg = f"Failed to disable/enable crypto for worker thread " \
+            f"on host {node[u'host']}"
+        args = dict(
+            worker_index=worker_index,
+            crypto_enable=crypto_enable
+        )
+        with PapiSocketExecutor(node) as papi_exec:
+            papi_exec.add(cmd, **args).get_reply(err_msg)
+
+    @staticmethod
     def vpp_ipsec_add_sad_entry(
             node, sad_id, spi, crypto_alg, crypto_key, integ_alg=None,
             integ_key=u"", tunnel_src=None, tunnel_dst=None):
diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 04fdff7cac..00a1933196 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -20,6 +20,7 @@ from ipaddress import ip_address
 from robot.api import logger
 
 from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
 from resources.libraries.python.DUTSetup import DUTSetup
 from resources.libraries.python.IPAddress import IPAddress
 from resources.libraries.python.L2Util import L2Util
@@ -1711,17 +1712,29 @@ class InterfaceUtil:
             papi_exec.add(cmd, **args).get_reply(err_msg)
 
     @staticmethod
-    def vpp_round_robin_rx_placement(node, prefix):
+    def vpp_round_robin_rx_placement(
+            node, prefix, dp_worker_limit=None
+    ):
         """Set Round Robin interface RX placement on all worker threads
         on node.
 
+        If specified, dp_worker_limit limits the number of VPP workers used
+        for data plane I/O work. Other workers are presumed to do something
+        else, e.g. asynchronous crypto processing.
+        None means all workers are used for data plane work.
+        Note this keyword specifies workers, not cores.
+
         :param node: Topology nodes.
         :param prefix: Interface name prefix.
+        :param dp_worker_limit: How many workers for data plane work.
         :type node: dict
         :type prefix: str
+        :type dp_worker_limit: Optional[int]
         """
         worker_id = 0
         worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
+        if dp_worker_limit is not None:
+            worker_cnt = min(worker_cnt, dp_worker_limit)
         if not worker_cnt:
             return
         for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
@@ -1735,15 +1748,31 @@ class InterfaceUtil:
             worker_id += 1
 
     @staticmethod
-    def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
+    def vpp_round_robin_rx_placement_on_all_duts(
+            nodes, prefix, dp_core_limit=None
+    ):
         """Set Round Robin interface RX placement on all worker threads
         on all DUTs.
 
+        If specified, dp_core_limit limits the number of physical cores used
+        for data plane I/O work. Other cores are presumed to do something
+        else, e.g. asynchronous crypto processing.
+        None means all cores are used for data plane work.
+        Note this keyword specifies cores, not workers.
+
         :param nodes: Topology nodes.
         :param prefix: Interface name prefix.
+        :param dp_core_limit: How many cores for data plane work.
         :type nodes: dict
         :type prefix: str
+        :type dp_core_limit: Optional[int]
         """
         for node in nodes.values():
             if node[u"type"] == NodeType.DUT:
-                InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)
+                dp_worker_limit = CpuUtils.worker_count_from_cores_and_smt(
+                    phy_cores=dp_core_limit,
+                    smt_used=CpuUtils.is_smt_enabled(node[u"cpuinfo"]),
+                )
+                InterfaceUtil.vpp_round_robin_rx_placement(
+                    node, prefix, dp_worker_limit
+                )
diff --git a/resources/libraries/python/autogen/Regenerator.py b/resources/libraries/python/autogen/Regenerator.py
index 92860b02e9..e670b692de 100644
--- a/resources/libraries/python/autogen/Regenerator.py
+++ b/resources/libraries/python/autogen/Regenerator.py
@@ -536,6 +536,22 @@ class Regenerator:
             {u"frame_size": 128000, u"phy_cores": 2},
             {u"frame_size": 128000, u"phy_cores": 4}
         ]
+        # List for tests with one dataplane core
+        # (and variable number of other cores).
+        dp1_kwargs_list = [
+            {u"frame_size": min_frame_size, u"phy_cores": 2},
+            {u"frame_size": min_frame_size, u"phy_cores": 3},
+            {u"frame_size": min_frame_size, u"phy_cores": 4},
+            {u"frame_size": 1518, u"phy_cores": 2},
+            {u"frame_size": 1518, u"phy_cores": 3},
+            {u"frame_size": 1518, u"phy_cores": 4},
+            {u"frame_size": 9000, u"phy_cores": 2},
+            {u"frame_size": 9000, u"phy_cores": 3},
+            {u"frame_size": 9000, u"phy_cores": 4},
+            {u"frame_size": u"IMIX_v4_1", u"phy_cores": 2},
+            {u"frame_size": u"IMIX_v4_1", u"phy_cores": 3},
+            {u"frame_size": u"IMIX_v4_1", u"phy_cores": 4}
+        ]
 
         for in_filename in glob(pattern):
             if not self.quiet:
@@ -557,7 +573,14 @@ class Regenerator:
                     file_in.read().partition(u"*** Test Cases ***")[:-1]
                 )
             if in_filename.endswith(u"-ndrpdr.robot"):
-                write_default_files(in_filename, in_prolog, default_kwargs_list)
+                if u"scheduler" in in_filename:
+                    write_default_files(
+                        in_filename, in_prolog, dp1_kwargs_list
+                    )
+                else:
+                    write_default_files(
+                        in_filename, in_prolog, default_kwargs_list
+                    )
             elif in_filename.endswith(u"-reconf.robot"):
                 write_reconf_files(in_filename, in_prolog, default_kwargs_list)
             elif in_filename.endswith(u"-bps.robot"):
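
As a usage illustration only, the following hypothetical sketch (not part of
this commit) combines the helpers from the diff above for a single DUT: the
data plane worker limit is derived from a core count, RX queues are pinned
round robin onto those workers, and crypto is enabled only on the remaining
workers. The setup_sw_scheduler name, the worker-index ordering, and the
exact enable/disable policy are assumptions; the real Robot test suites that
drive these keywords are outside this diff.

    # Hypothetical driver sketch, not part of this commit; `node` is a CSIT
    # topology dict and `total_workers` the number of VPP worker threads.
    from resources.libraries.python.CpuUtils import CpuUtils
    from resources.libraries.python.InterfaceUtil import InterfaceUtil
    from resources.libraries.python.IPsecUtil import IPsecUtil


    def setup_sw_scheduler(node, prefix, dp_cores, total_workers):
        """Pin RX to data plane workers, enable crypto on the remaining ones."""
        smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
        dp_workers = CpuUtils.worker_count_from_cores_and_smt(dp_cores, smt_used)
        # Round robin RX placement restricted to the first dp_workers workers.
        InterfaceUtil.vpp_round_robin_rx_placement(node, prefix, dp_workers)
        # Data plane workers forward packets only; the rest do async crypto.
        for worker_index in range(total_workers):
            IPsecUtil.vpp_ipsec_crypto_sw_scheduler_set_worker(
                node, worker_index, crypto_enable=(worker_index >= dp_workers)
            )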