Diffstat (limited to 'resources/libraries/python')
-rw-r--r--  resources/libraries/python/Classify.py | 30
-rw-r--r--  resources/libraries/python/Constants.py | 493
-rw-r--r--  resources/libraries/python/ContainerUtils.py | 89
-rw-r--r--  resources/libraries/python/CoreDumpUtil.py | 12
-rw-r--r--  resources/libraries/python/CpuUtils.py | 186
-rw-r--r--  resources/libraries/python/DMAUtil.py | 213
-rw-r--r--  resources/libraries/python/DPDK/DPDKTools.py | 30
-rw-r--r--  resources/libraries/python/DPDK/L3fwdTest.py | 129
-rw-r--r--  resources/libraries/python/DPDK/TestpmdTest.py | 111
-rw-r--r--  resources/libraries/python/DUTSetup.py | 308
-rw-r--r--  resources/libraries/python/Dhcp.py | 2
-rw-r--r--  resources/libraries/python/DpdkUtil.py | 14
-rw-r--r--  resources/libraries/python/DropRateSearch.py | 8
-rw-r--r--  resources/libraries/python/FilteredLogger.py | 2
-rw-r--r--  resources/libraries/python/FlowUtil.py | 580
-rw-r--r--  resources/libraries/python/GeneveUtil.py | 72
-rw-r--r--  resources/libraries/python/HoststackUtil.py | 184
-rw-r--r--  resources/libraries/python/IPAddress.py | 2
-rw-r--r--  resources/libraries/python/IPTopology.py | 177
-rw-r--r--  resources/libraries/python/IPUtil.py | 176
-rw-r--r--  resources/libraries/python/IPsecUtil.py | 2485
-rw-r--r--  resources/libraries/python/IPv6Util.py | 2
-rw-r--r--  resources/libraries/python/IncrementUtil.py | 74
-rw-r--r--  resources/libraries/python/InterfaceUtil.py | 465
-rw-r--r--  resources/libraries/python/Iperf3.py | 25
-rw-r--r--  resources/libraries/python/IrqUtil.py | 99
-rw-r--r--  resources/libraries/python/KubernetesUtils.py | 4
-rw-r--r--  resources/libraries/python/L2Util.py | 20
-rw-r--r--  resources/libraries/python/LimitUtil.py | 2
-rw-r--r--  resources/libraries/python/LispSetup.py | 2
-rw-r--r--  resources/libraries/python/LoadBalancerUtil.py | 14
-rw-r--r--  resources/libraries/python/LocalExecution.py | 2
-rw-r--r--  resources/libraries/python/MLRsearch/AbstractMeasurer.py | 32
-rw-r--r--  resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py | 51
-rw-r--r--  resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py | 645
-rw-r--r--  resources/libraries/python/MLRsearch/NdrPdrResult.py | 65
-rw-r--r--  resources/libraries/python/MLRsearch/ReceiveRateInterval.py | 88
-rw-r--r--  resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py | 110
-rw-r--r--  resources/libraries/python/MLRsearch/__init__.py | 16
-rw-r--r--  resources/libraries/python/MLRsearch/candidate.py | 153
-rw-r--r--  resources/libraries/python/MLRsearch/config.py | 179
-rw-r--r--  resources/libraries/python/MLRsearch/dataclass/__init__.py | 19
-rw-r--r--  resources/libraries/python/MLRsearch/dataclass/dc_property.py | 173
-rw-r--r--  resources/libraries/python/MLRsearch/dataclass/field.py | 44
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_interval.py | 140
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_load.py | 316
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_result.py | 76
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_width.py | 197
-rw-r--r--  resources/libraries/python/MLRsearch/expander.py | 102
-rw-r--r--  resources/libraries/python/MLRsearch/global_width.py | 70
-rw-r--r--  resources/libraries/python/MLRsearch/goal_result.py | 72
-rw-r--r--  resources/libraries/python/MLRsearch/limit_handler.py | 198
-rw-r--r--  resources/libraries/python/MLRsearch/load_rounding.py | 205
-rw-r--r--  resources/libraries/python/MLRsearch/load_stats.py | 112
-rw-r--r--  resources/libraries/python/MLRsearch/measurement_database.py | 126
-rw-r--r--  resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py | 325
-rw-r--r--  resources/libraries/python/MLRsearch/pep3140/__init__.py | 24
-rw-r--r--  resources/libraries/python/MLRsearch/pep3140/classes.py | 34
-rw-r--r--  resources/libraries/python/MLRsearch/relevant_bounds.py | 56
-rw-r--r--  resources/libraries/python/MLRsearch/search_goal.py | 119
-rw-r--r--  resources/libraries/python/MLRsearch/search_goal_tuple.py | 60
-rw-r--r--  resources/libraries/python/MLRsearch/selector.py | 183
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/__init__.py | 35
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/base.py | 132
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/bisect.py | 193
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/extend_hi.py | 76
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/extend_lo.py | 76
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/halve.py | 83
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/refine_hi.py | 55
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/refine_lo.py | 53
-rw-r--r--  resources/libraries/python/MLRsearch/target_scaling.py | 103
-rw-r--r--  resources/libraries/python/MLRsearch/target_spec.py | 95
-rw-r--r--  resources/libraries/python/MLRsearch/target_stat.py | 153
-rw-r--r--  resources/libraries/python/MLRsearch/trial_measurement/__init__.py | 19
-rw-r--r--  resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py | 55
-rw-r--r--  resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py | 161
-rw-r--r--  resources/libraries/python/MLRsearch/trimmed_stat.py | 52
-rw-r--r--  resources/libraries/python/Memif.py | 20
-rw-r--r--  resources/libraries/python/NATUtil.py | 111
-rw-r--r--  resources/libraries/python/NGINX/NGINXTools.py | 139
-rw-r--r--  resources/libraries/python/NGINX/__init__.py (renamed from resources/libraries/python/parsers/__init__.py) | 4
-rw-r--r--  resources/libraries/python/Namespaces.py | 2
-rw-r--r--  resources/libraries/python/NginxConfigGenerator.py | 244
-rw-r--r--  resources/libraries/python/NginxUtil.py | 124
-rw-r--r--  resources/libraries/python/NodePath.py | 62
-rw-r--r--  resources/libraries/python/NsimUtil.py | 13
-rw-r--r--  resources/libraries/python/OptionString.py | 2
-rw-r--r--  resources/libraries/python/PLRsearch/Integrator.py | 59
-rw-r--r--  resources/libraries/python/PLRsearch/PLRsearch.py | 221
-rw-r--r--  resources/libraries/python/PLRsearch/__init__.py | 2
-rw-r--r--  resources/libraries/python/PLRsearch/log_plus.py | 8
-rw-r--r--  resources/libraries/python/PLRsearch/stat_trackers.py | 58
-rw-r--r--  resources/libraries/python/PapiExecutor.py | 757
-rw-r--r--  resources/libraries/python/PapiHistory.py | 31
-rw-r--r--  resources/libraries/python/PerfUtil.py | 82
-rw-r--r--  resources/libraries/python/Policer.py | 16
-rw-r--r--  resources/libraries/python/QATUtil.py | 92
-rw-r--r--  resources/libraries/python/QemuManager.py | 3
-rw-r--r--  resources/libraries/python/QemuUtils.py | 44
-rw-r--r--  resources/libraries/python/SRv6.py | 11
-rw-r--r--  resources/libraries/python/SchedUtils.py | 2
-rw-r--r--  resources/libraries/python/SetupFramework.py | 82
-rw-r--r--  resources/libraries/python/SysctlUtil.py | 2
-rw-r--r--  resources/libraries/python/TGSetup.py | 2
-rw-r--r--  resources/libraries/python/TRexConfigGenerator.py | 301
-rw-r--r--  resources/libraries/python/Tap.py | 6
-rw-r--r--  resources/libraries/python/TelemetryUtil.py | 99
-rw-r--r--  resources/libraries/python/TestConfig.py | 144
-rw-r--r--  resources/libraries/python/Trace.py | 17
-rw-r--r--  resources/libraries/python/TrafficGenerator.py | 855
-rw-r--r--  resources/libraries/python/TrafficScriptExecutor.py | 2
-rw-r--r--  resources/libraries/python/VPPUtil.py | 115
-rw-r--r--  resources/libraries/python/VatExecutor.py | 397
-rw-r--r--  resources/libraries/python/VatJsonUtil.py | 218
-rw-r--r--  resources/libraries/python/VhostUser.py | 4
-rw-r--r--  resources/libraries/python/VppApiCrc.py | 99
-rw-r--r--  resources/libraries/python/VppConfigGenerator.py | 418
-rw-r--r--  resources/libraries/python/VppCounters.py | 2
-rw-r--r--  resources/libraries/python/WireGuardUtil.py | 298
-rw-r--r--  resources/libraries/python/autogen/Regenerator.py | 473
-rw-r--r--  resources/libraries/python/autogen/Testcase.py | 36
-rw-r--r--  resources/libraries/python/autogen/__init__.py | 2
-rwxr-xr-x  resources/libraries/python/autogen/add_suite_tag.py | 2
-rw-r--r--  resources/libraries/python/enum_util.py | 67
-rw-r--r--  resources/libraries/python/jumpavg/__init__.py | 10
-rw-r--r--  resources/libraries/python/jumpavg/avg_stdev_stats.py (renamed from resources/libraries/python/jumpavg/AvgStdevStats.py) | 58
-rw-r--r--  resources/libraries/python/jumpavg/bit_counting_group.py (renamed from resources/libraries/python/jumpavg/BitCountingGroup.py) | 146
-rw-r--r--  resources/libraries/python/jumpavg/bit_counting_group_list.py (renamed from resources/libraries/python/jumpavg/BitCountingGroupList.py) | 140
-rw-r--r--  resources/libraries/python/jumpavg/bit_counting_stats.py (renamed from resources/libraries/python/jumpavg/BitCountingStats.py) | 131
-rw-r--r--  resources/libraries/python/jumpavg/classify.py | 78
-rw-r--r--  resources/libraries/python/model/ExportJson.py | 395
-rw-r--r--  resources/libraries/python/model/ExportResult.py | 316
-rw-r--r--  resources/libraries/python/model/MemDump.py | 194
-rw-r--r--  resources/libraries/python/model/__init__.py | 16
-rw-r--r--  resources/libraries/python/model/parse.py | 112
-rw-r--r--  resources/libraries/python/model/util.py | 69
-rw-r--r--  resources/libraries/python/model/validate.py | 62
-rw-r--r--  resources/libraries/python/parsers/JsonParser.py | 54
-rw-r--r--  resources/libraries/python/ssh.py | 60
-rw-r--r--  resources/libraries/python/topology.py | 79
140 files changed, 13841 insertions, 5170 deletions
diff --git a/resources/libraries/python/Classify.py b/resources/libraries/python/Classify.py
index 13472617f2..dd3c78f449 100644
--- a/resources/libraries/python/Classify.py
+++ b/resources/libraries/python/Classify.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -258,15 +258,15 @@ class Classify:
:param advance: For add, advance value for session. (Default value = 0)
:param action: 0: No action (by default) metadata is not used.
1: Classified IP packets will be looked up from the specified ipv4
- fib table (configured by metadata as VRF id).
- Only valid for L3 input ACL node
+ fib table (configured by metadata as VRF id).
+ Only valid for L3 input ACL node
2: Classified IP packets will be looked up from the specified ipv6
- fib table (configured by metadata as VRF id).
- Only valid for L3 input ACL node
+ fib table (configured by metadata as VRF id).
+ Only valid for L3 input ACL node
3: Classified packet will be steered to source routing policy of
- given index (in metadata).
- This is only valid for IPv6 packets redirected to a source
- routing node.
+ given index (in metadata).
+ This is only valid for IPv6 packets redirected to a source
+ routing node.
:param metadata: Valid only if action != 0. VRF id if action is 1 or 2.
SR policy index if action is 3. (Default value = 0)
:type node: dict
@@ -453,15 +453,15 @@ class Classify:
:param opaque_index: opaque_index of new session. (Default value = ~0)
:param action: 0: No action (by default) metadata is not used.
1: Classified IP packets will be looked up from the specified ipv4
- fib table (configured by metadata as VRF id).
- Only valid for L3 input ACL node
+ fib table (configured by metadata as VRF id).
+ Only valid for L3 input ACL node
2: Classified IP packets will be looked up from the specified ipv6
- fib table (configured by metadata as VRF id).
- Only valid for L3 input ACL node
+ fib table (configured by metadata as VRF id).
+ Only valid for L3 input ACL node
3: Classified packet will be steered to source routing policy of
- given index (in metadata).
- This is only valid for IPv6 packets redirected to a source
- routing node.
+ given index (in metadata).
+ This is only valid for IPv6 packets redirected to a source
+ routing node.
:param metadata: Valid only if action != 0. VRF id if action is 1 or 2.
SR policy index if action is 3. (Default value = 0)
:type node: dict
diff --git a/resources/libraries/python/Constants.py b/resources/libraries/python/Constants.py
index 1a548f45d5..70452e6ff4 100644
--- a/resources/libraries/python/Constants.py
+++ b/resources/libraries/python/Constants.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -39,7 +39,7 @@ def get_str_from_env(env_var_names, default_value):
:returns: The value read, or default value.
:rtype: str
"""
- prefixes = (u"FDIO_CSIT_", u"CSIT_", u"")
+ prefixes = ("FDIO_CSIT_", "CSIT_", "")
if not isinstance(env_var_names, (list, tuple)):
env_var_names = [env_var_names]
for name in env_var_names:
@@ -62,7 +62,7 @@ def get_int_from_env(env_var_names, default_value):
:returns: The value read, or default value.
:rtype: int
"""
- env_str = get_str_from_env(env_var_names, u"")
+ env_str = get_str_from_env(env_var_names, "")
try:
return int(env_str)
except ValueError:
@@ -81,7 +81,7 @@ def get_float_from_env(env_var_names, default_value):
:returns: The value read, or default value.
:rtype: float
"""
- env_str = get_str_from_env(env_var_names, u"")
+ env_str = get_str_from_env(env_var_names, "")
try:
return float(env_str)
except ValueError:
@@ -98,8 +98,8 @@ def get_pessimistic_bool_from_env(env_var_names):
:returns: The value read, or False.
:rtype: bool
"""
- env_str = get_str_from_env(env_var_names, u"").lower()
- return bool(env_str in (u"true", u"yes", u"y", u"1"))
+ env_str = get_str_from_env(env_var_names, "").lower()
+ return bool(env_str in ("true", "yes", "y", "1"))
def get_optimistic_bool_from_env(env_var_names):
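
The get_*_from_env helpers patched in these hunks are the override mechanism for every constant defined below: a requested name is looked up with the FDIO_CSIT_ prefix, then the CSIT_ prefix, then bare, and the two boolean flavours differ only in what they answer for unrecognized input. A minimal sketch of that lookup order, assuming the first value found wins (the override variable is only an illustration, not part of the patch):

    import os

    def lookup_with_prefixes(name, default):
        # Try FDIO_CSIT_<name>, CSIT_<name>, then <name>; first hit wins.
        for prefix in ("FDIO_CSIT_", "CSIT_", ""):
            value = os.environ.get(prefix + name)
            if value is not None:
                return value
        return default

    os.environ["CSIT_PERF_TRIAL_MULTIPLICITY"] = "5"  # hypothetical override
    print(lookup_with_prefixes("PERF_TRIAL_MULTIPLICITY", "10"))  # -> "5"

    # Pessimistic bool: only an explicit "true"/"yes"/"y"/"1" gives True.
    # Optimistic bool: anything except "false"/"no"/"n"/"0" gives True.
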
@@ -112,50 +112,54 @@ def get_optimistic_bool_from_env(env_var_names):
:returns: The value read, or True.
:rtype: bool
"""
- env_str = get_str_from_env(env_var_names, u"").lower()
- return bool(env_str not in (u"false", u"no", u"n", u"0"))
+ env_str = get_str_from_env(env_var_names, "").lower()
+ return bool(env_str not in ("false", "no", "n", "0"))
class Constants:
- """Constants used in CSIT.
+ """Constants used in CSIT."""
- TODO: Yaml files are easier for humans to edit.
- Figure out how to set the attributes by parsing a file
- that works regardless of current working directory.
- """
+ # Version for CSIT data model. See docs/model/.
+ MODEL_VERSION = "1.5.1"
+
+ # Global off-switch in case JSON export is large or slow.
+ EXPORT_JSON = get_optimistic_bool_from_env("EXPORT_JSON")
# OpenVPP testing directory location at topology nodes
- REMOTE_FW_DIR = u"/tmp/openvpp-testing"
+ REMOTE_FW_DIR = "/tmp/openvpp-testing"
# shell scripts location
- RESOURCES_LIB_SH = u"resources/libraries/bash"
+ RESOURCES_LIB_SH = "resources/libraries/bash"
- # Python API provider location
- RESOURCES_PAPI_PROVIDER = u"resources/tools/papi/vpp_papi_provider.py"
+ # python scripts location
+ RESOURCES_LIB_PY = "resources/libraries/python"
- # vat templates location
- RESOURCES_TPL_VAT = u"resources/templates/vat"
+ # shell scripts location
+ RESOURCES_TOOLS = "resources/tools"
- # Kubernetes templates location
- RESOURCES_TPL_K8S = u"resources/templates/kubernetes"
+ # Python API provider location
+ RESOURCES_PAPI_PROVIDER = "resources/tools/papi/vpp_papi_provider.py"
# Templates location
- RESOURCES_TPL = u"resources/templates"
+ RESOURCES_TPL = "resources/templates"
+
+ # Kubernetes templates location
+ RESOURCES_TPL_K8S = "resources/templates/kubernetes"
# Container templates location
- RESOURCES_TPL_CONTAINER = u"resources/templates/container"
+ RESOURCES_TPL_CONTAINER = "resources/templates/container"
# VPP Communications Library templates location
- RESOURCES_TPL_VCL = u"resources/templates/vcl"
+ RESOURCES_TPL_VCL = "resources/templates/vcl"
- # VPP Communications Library LD_PRELOAD library
- VCL_LDPRELOAD_LIBRARY = u"/usr/lib/x86_64-linux-gnu/libvcl_ldpreload.so"
+ # VPP Communications Library templates location
+ RESOURCES_TPL_TELEMETRY = "resources/templates/telemetry"
- # OpenVPP VAT binary name
- VAT_BIN_NAME = u"vpp_api_test"
+ # VPP Communications Library LD_PRELOAD library
+ VCL_LDPRELOAD_LIBRARY = "/usr/lib/x86_64-linux-gnu/libvcl_ldpreload.so"
# VPP service unit name
- VPP_UNIT = u"vpp"
+ VPP_UNIT = "vpp"
# Number of system CPU cores.
CPU_CNT_SYSTEM = 1
@@ -164,221 +168,371 @@ class Constants:
CPU_CNT_MAIN = 1
# QEMU binary path
- QEMU_BIN_PATH = u"/usr/bin"
+ QEMU_BIN_PATH = "/usr/bin"
# QEMU VM kernel image path
- QEMU_VM_KERNEL = u"/opt/boot/vmlinuz"
+ QEMU_VM_KERNEL = "/opt/boot/vmlinuz"
# QEMU VM kernel initrd path
- QEMU_VM_KERNEL_INITRD = u"/opt/boot/initrd.img"
+ QEMU_VM_KERNEL_INITRD = "/opt/boot/initrd.img"
# QEMU VM nested image path
- QEMU_VM_IMAGE = u"/var/lib/vm/image.iso"
+ QEMU_VM_IMAGE = "/var/lib/vm/image.iso"
# QEMU VM DPDK path
- QEMU_VM_DPDK = u"/opt/dpdk-20.02"
+ QEMU_VM_DPDK = "/opt/dpdk-23.11"
# Docker container SUT image
- DOCKER_SUT_IMAGE_UBUNTU = u"csit_sut-ubuntu1804:local"
+ DOCKER_SUT_IMAGE_UBUNTU = "csit_sut-ubuntu2204:local"
# Docker container arm SUT image
- DOCKER_SUT_IMAGE_UBUNTU_ARM = u"csit_sut-ubuntu1804:local"
+ DOCKER_SUT_IMAGE_UBUNTU_ARM = "csit_sut-ubuntu2204:local"
- # TRex install directory
- TREX_INSTALL_DIR = u"/opt/trex-core-2.86"
+ # TRex install directory.
+ TREX_INSTALL_DIR = "/opt/trex-core-3.03"
- # TODO: Find the right way how to use it in trex profiles
- # TRex pcap files directory
+ # TRex pcap files directory.
TREX_PCAP_DIR = f"{TREX_INSTALL_DIR}/scripts/avl"
# TRex limit memory.
- TREX_LIMIT_MEMORY = get_int_from_env(u"TREX_LIMIT_MEMORY", 8192)
+ TREX_LIMIT_MEMORY = get_int_from_env("TREX_LIMIT_MEMORY", 8192)
+
+ # TRex limit memory in case multiple dual interfaces configurations.
+ TREX_LIMIT_MEMORY_MULTI = get_int_from_env("TREX_LIMIT_MEMORY_MULTI", 16384)
+
+ # TRex number of cores.
+ TREX_CORE_COUNT = get_int_from_env("TREX_CORE_COUNT", 16)
+
+ # TRex number of cores in case multiple dual interface configurations.
+ TREX_CORE_COUNT_MULTI = get_int_from_env("TREX_CORE_COUNT_MULTI", 8)
+
+ # TRex set number of RX/TX descriptors.
+ # Set to 0 to use default values.
+ TREX_TX_DESCRIPTORS_COUNT = get_int_from_env(
+ "TREX_TX_DESCRIPTORS_COUNT", 0
+ )
- # TRex number of cores
- TREX_CORE_COUNT = get_int_from_env(u"TREX_CORE_COUNT", 8)
+ TREX_RX_DESCRIPTORS_COUNT = get_int_from_env(
+ "TREX_RX_DESCRIPTORS_COUNT", 0
+ )
- # Trex force start regardless ports state
- TREX_SEND_FORCE = get_pessimistic_bool_from_env(u"TREX_SEND_FORCE")
+ # Trex force start regardless ports state.
+ TREX_SEND_FORCE = get_pessimistic_bool_from_env("TREX_SEND_FORCE")
- # TRex extra commandline arguments
+ # TRex extra commandline arguments.
TREX_EXTRA_CMDLINE = get_str_from_env(
- u"TREX_EXTRA_CMDLINE", u"--mbuf-factor 32")
+ "TREX_EXTRA_CMDLINE", "--mbuf-factor 32"
+ )
+
+ # TRex port driver default vfio-pci or set to igb_uio.
+ TREX_PORT_DRIVER = get_str_from_env(
+ "TREX_PORT_DRIVER", "vfio-pci"
+ )
- # graph node variant value
- GRAPH_NODE_VARIANT = get_str_from_env(
- u"GRAPH_NODE_VARIANT", u"")
+ # Graph node variant value
+ GRAPH_NODE_VARIANT = get_str_from_env("GRAPH_NODE_VARIANT", "")
+
+ # Default memory page size in case multiple configured in system
+ DEFAULT_HUGEPAGE_SIZE = get_str_from_env("DEFAULT_HUGEPAGE_SIZE", "2M")
# Sysctl kernel.core_pattern
- KERNEL_CORE_PATTERN = u"/tmp/%p-%u-%g-%s-%t-%h-%e.core"
+ KERNEL_CORE_PATTERN = "/tmp/%p-%u-%g-%s-%t-%h-%e.core"
# Core dump directory
- CORE_DUMP_DIR = u"/tmp"
+ CORE_DUMP_DIR = "/tmp"
# Perf stat events (comma separated).
PERF_STAT_EVENTS = get_str_from_env(
- u"PERF_STAT_EVENTS",
- u"cpu-clock,context-switches,cpu-migrations,page-faults,"
- u"cycles,instructions,branches,branch-misses,L1-icache-load-misses")
+ "PERF_STAT_EVENTS",
+ "cpu-clock,context-switches,cpu-migrations,page-faults,"
+ "cycles,instructions,branches,branch-misses,L1-icache-load-misses")
# Equivalent to ~0 used in vpp code
BITWISE_NON_ZERO = 0xffffffff
# Default path to VPP API socket.
- SOCKSVR_PATH = u"/run/vpp/api.sock"
+ SOCKSVR_PATH = "/run/vpp/api.sock"
+
+ # Default path to VPP CLI socket.
+ SOCKCLI_PATH = "/run/vpp/cli.sock"
+
+ # Default path to VPP API Stats socket.
+ SOCKSTAT_PATH = "/run/vpp/stats.sock"
+
+ # This MTU value is used to force VPP to fragment 1518B packet into two.
+ MTU_FOR_FRAGMENTATION = 1043
# Number of trials to execute in MRR test.
- PERF_TRIAL_MULTIPLICITY = get_int_from_env(u"PERF_TRIAL_MULTIPLICITY", 10)
+ PERF_TRIAL_MULTIPLICITY = get_int_from_env("PERF_TRIAL_MULTIPLICITY", 10)
- # Duration of one trial in MRR test.
- PERF_TRIAL_DURATION = get_float_from_env(u"PERF_TRIAL_DURATION", 1.0)
+ # Duration [s] of one trial in MRR test.
+ PERF_TRIAL_DURATION = get_float_from_env("PERF_TRIAL_DURATION", 1.0)
# Whether to use latency streams in main search trials.
- PERF_USE_LATENCY = get_pessimistic_bool_from_env(u"PERF_USE_LATENCY")
+ PERF_USE_LATENCY = get_pessimistic_bool_from_env("PERF_USE_LATENCY")
# Duration of one latency-specific trial in NDRPDR test.
PERF_TRIAL_LATENCY_DURATION = get_float_from_env(
- u"PERF_TRIAL_LATENCY_DURATION", 5.0)
+ "PERF_TRIAL_LATENCY_DURATION", 5.0)
+
+ # For some testbeds TG takes longer than usual to start sending traffic.
+ # This constant [s] allows longer wait, without affecting
+ # the approximate duration. For example, use 0.098 for AWS.
+ PERF_TRIAL_STL_DELAY = get_float_from_env("PERF_TRIAL_STL_DELAY", 0.0)
+
+ # ASTF usually needs a different value for the delay.
+ PERF_TRIAL_ASTF_DELAY = get_float_from_env(
+ "PERF_TRIAL_ASTF_DELAY", 0.112
+ )
+
+ # Number of data frames in TPUT transaction, used both by TCP and UDP.
+ # The value should be 33 to keep historic continuity for UDP TPUT tests,
+ # but we are limited by TRex window of 48 KiB, so for 9000B tests
+ # it means we can send only 5 full data frames in a burst.
+ # https://github.com/cisco-system-traffic-generator/
+ # trex-core/blob/v2.88/src/44bsd/tcp_var.h#L896-L903
+ ASTF_N_DATA_FRAMES = get_int_from_env("ASTF_N_DATA_FRAMES", 5)
# Extended debug (incl. vpp packet trace, linux perf stat, ...).
# Full list is available as suite variable (__init__.robot) or is
# override by test.
- EXTENDED_DEBUG = get_pessimistic_bool_from_env(u"EXTENDED_DEBUG")
+ EXTENDED_DEBUG = get_pessimistic_bool_from_env("EXTENDED_DEBUG")
# UUID string of DUT1 /tmp volume created outside of the
# DUT1 docker in case of vpp-device test. ${EMPTY} value means that
# /tmp directory is inside the DUT1 docker.
- DUT1_UUID = get_str_from_env(u"DUT1_UUID", u"")
-
- # Default path to VPP API Stats socket.
- SOCKSTAT_PATH = u"/run/vpp/stats.sock"
+ DUT1_UUID = get_str_from_env("DUT1_UUID", "")
# Global "kill switch" for CRC checking during runtime.
FAIL_ON_CRC_MISMATCH = get_pessimistic_bool_from_env(
- u"FAIL_ON_CRC_MISMATCH"
+ "FAIL_ON_CRC_MISMATCH"
)
# Default IP4 prefix length (if not defined in topology file)
- DEFAULT_IP4_PREFIX_LENGTH = u"24"
+ DEFAULT_IP4_PREFIX_LENGTH = "24"
# Maximum number of interfaces in a data path
DATAPATH_INTERFACES_MAX = 100
# Mapping from NIC name to its bps limit.
NIC_NAME_TO_BPS_LIMIT = {
- u"Cisco-VIC-1227": 10000000000,
- u"Cisco-VIC-1385": 24500000000,
- u"Intel-X520-DA2": 10000000000,
- u"Intel-X553": 10000000000,
- u"Intel-X710": 10000000000,
- u"Intel-XL710": 24500000000,
- u"Intel-XXV710": 24500000000,
- u"Intel-E810CQ": 100000000000,
- u"Mellanox-CX556A": 100000000000,
- u"Amazon-Nitro-50G": 10000000000,
- u"virtual": 100000000,
+ "Intel-X520-DA2": 10000000000,
+ "Intel-X710": 10000000000,
+ "Intel-XL710": 24500000000,
+ "Intel-XXV710": 24500000000,
+ "Intel-E810XXV": 24500000000,
+ "Intel-E822CQ": 24500000000,
+ "Intel-E823C": 24500000000,
+ "Intel-E810CQ": 100000000000,
+ "Mellanox-CX556A": 100000000000,
+ "Mellanox-CX6DX": 100000000000,
+ "Mellanox-CX7VEAT": 200000000000,
+ "Amazon-Nitro-50G": 10000000000,
+ "Amazon-Nitro-100G": 10000000000,
+ "Amazon-Nitro-200G": 16000000000,
+ "virtual": 100000000,
}
# Mapping from NIC name to its pps limit.
NIC_NAME_TO_PPS_LIMIT = {
- u"Cisco-VIC-1227": 14880952,
- u"Cisco-VIC-1385": 18750000,
- u"Intel-X520-DA2": 14880952,
- u"Intel-X553": 14880952,
- u"Intel-X710": 14880952,
- u"Intel-XL710": 18750000,
- u"Intel-XXV710": 18750000,
- u"Intel-E810CQ": 58500000,
- # 2n-clx testbeds show duration stretching on high rates,
- # depending on encapsulation TRex has to generate.
- # 40 Mpps is still too much for dot1q (~8% stretching).
- # 36 Mpps is around the maximal VPP throughput (l2patch 4c8t).
- # Vxlan traffic will still show stretching at 36 Mpps (>12%),
- # but we do not care about those tests that much.
- u"Mellanox-CX556A": 36000000, # 148809523,
- u"Amazon-Nitro-50G": 1500000,
- u"virtual": 14880952,
+ "Intel-X520-DA2": 14880952,
+ "Intel-X710": 14880952,
+ "Intel-XL710": 18750000,
+ "Intel-XXV710": 18750000,
+ "Intel-E810XXV": 29000000,
+ "Intel-E822CQ": 29000000,
+ "Intel-E823C": 29000000,
+ "Intel-E810CQ": 58500000,
+ "Mellanox-CX556A": 148809523,
+ "Mellanox-CX6DX": 148809523,
+ "Mellanox-CX7VEAT": 297619046,
+ "Amazon-Nitro-50G": 1500000,
+ "Amazon-Nitro-100G": 3000000,
+ "Amazon-Nitro-200G": 6000000,
+ "virtual": 14880952,
}
# Suite file names use codes for NICs.
NIC_NAME_TO_CODE = {
- u"Cisco-VIC-1227": u"10ge2p1vic1227",
- u"Cisco-VIC-1385": u"40ge2p1vic1385",
- u"Intel-X520-DA2": u"10ge2p1x520",
- u"Intel-X553": u"10ge2p1x553",
- u"Intel-X710": u"10ge2p1x710",
- u"Intel-XL710": u"40ge2p1xl710",
- u"Intel-XXV710": u"25ge2p1xxv710",
- u"Intel-E810CQ": u"100ge2p1e810cq",
- u"Amazon-Nitro-50G": u"50ge1p1ENA",
- u"Mellanox-CX556A": u"100ge2p1cx556a",
+ "Intel-X520-DA2": "10ge2p1x520",
+ "Intel-X710": "10ge2p1x710",
+ "Intel-XL710": "40ge2p1xl710",
+ "Intel-XXV710": "25ge2p1xxv710",
+ "Intel-E810XXV": "25ge2p1e810xxv",
+ "Intel-E822CQ": "25ge2p1e822cq",
+ "Intel-E823C": "25ge2p1e823c",
+ "Intel-E810CQ": "100ge2p1e810cq",
+ "Amazon-Nitro-50G": "50ge1p1ena",
+ "Amazon-Nitro-100G": "100ge1p1ena",
+ "Amazon-Nitro-200G": "200ge1p1ena",
+ "Mellanox-CX556A": "100ge2p1cx556a",
+ "Mellanox-CX6DX": "100ge2p1cx6dx",
+ "Mellanox-CX7VEAT": "200ge2p1cx7veat",
+ "Mellanox-CX7VEAT": "200ge6p3cx7veat",
+ "virtual": "1ge1p82540em",
+ }
+ NIC_CODE_TO_NAME = {
+ "10ge2p1x520": "Intel-X520-DA2",
+ "10ge2p1x710": "Intel-X710",
+ "40ge2p1xl710": "Intel-XL710",
+ "25ge2p1xxv710": "Intel-XXV710",
+ "25ge2p1e810xxv": "Intel-E810XXV",
+ "25ge2p1e822cq": "Intel-E822CQ",
+ "25ge2p1e823c": "Intel-E823C",
+ "100ge2p1e810cq": "Intel-E810CQ",
+ "50ge1p1ena": "Amazon-Nitro-50G",
+ "100ge1p1ena": "Amazon-Nitro-100G",
+ "200ge1p1ena": "Amazon-Nitro-200G",
+ "100ge2p1cx556a": "Mellanox-CX556A",
+ "100ge2p1cx6dx": "Mellanox-CX6DX",
+ "200ge2p1cx7veat": "Mellanox-CX7VEAT",
+ "200ge6p3cx7veat": "Mellanox-CX7VEAT",
+ "1ge1p82540em": "virtual",
+ }
+
+ # Shortened lowercase NIC model name, useful for presentation.
+ NIC_CODE_TO_SHORT_NAME = {
+ "10ge2p1x520": "x520",
+ "10ge2p1x710": "x710",
+ "40ge2p1xl710": "xl710",
+ "25ge2p1xxv710": "xxv710",
+ "25ge2p1e810xxv": "e810xxv",
+ "25ge2p1e822cq": "e822cq",
+ "25ge2p1e823c": "e823c",
+ "100ge2p1e810cq": "e810cq",
+ "50ge1p1ena": "ena",
+ "100ge1p1ena": "ena100",
+ "200ge1p1ena": "ena200",
+ "100ge2p1cx556a": "cx556a",
+ "100ge2p1cx6dx": "cx6dx",
+ "200ge2p1cx7veat": "cx7veat",
+ "200ge6p3cx7veat": "cx7veat",
+ "1ge1p82540em": "82540em",
}
# Not each driver is supported by each NIC.
NIC_NAME_TO_DRIVER = {
- u"Cisco-VIC-1227": [u"vfio-pci"],
- u"Cisco-VIC-1385": [u"vfio-pci"],
- u"Intel-X520-DA2": [u"vfio-pci"],
- u"Intel-X553": [u"vfio-pci"],
- u"Intel-X710": [u"vfio-pci", u"avf"],
- u"Intel-XL710": [u"vfio-pci", u"avf"],
- u"Intel-XXV710": [u"vfio-pci", u"avf"],
- u"Intel-E810CQ": [u"vfio-pci", u"avf"],
- u"Amazon-Nitro-50G": [u"vfio-pci"],
- u"Mellanox-CX556A": [u"rdma-core"],
+ "Intel-X520-DA2": ["vfio-pci", "af_xdp"],
+ "Intel-X710": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-XL710": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-XXV710": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-E810XXV": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-E822CQ": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-E823C": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-E810CQ": ["vfio-pci", "avf", "af_xdp"],
+ "Amazon-Nitro-50G": ["vfio-pci"],
+ "Amazon-Nitro-100G": ["vfio-pci"],
+ "Amazon-Nitro-200G": ["vfio-pci"],
+ "Mellanox-CX556A": ["rdma-core", "mlx5_core", "af_xdp"],
+ "Mellanox-CX6DX": ["rdma-core", "mlx5_core", "af_xdp"],
+ "Mellanox-CX7VEAT": ["rdma-core", "mlx5_core", "af_xdp"],
+ "virtual": ["vfio-pci"],
}
- # Each driver needs different prugin to work.
+ # Each driver needs different plugin to work.
NIC_DRIVER_TO_PLUGINS = {
- u"vfio-pci": u"dpdk_plugin.so",
- u"avf": u"avf_plugin.so",
- u"rdma-core": u"rdma_plugin.so",
+ "vfio-pci": "dpdk_plugin.so",
+ "avf": "avf_plugin.so",
+ "rdma-core": "rdma_plugin.so",
+ "mlx5_core": "dpdk_plugin.so",
+ "af_xdp": "af_xdp_plugin.so",
}
# Tags to differentiate tests for different NIC driver.
NIC_DRIVER_TO_TAG = {
- u"vfio-pci": u"DRV_VFIO_PCI",
- u"avf": u"DRV_AVF",
- u"rdma-core": u"DRV_RDMA_CORE",
+ "vfio-pci": "DRV_VFIO_PCI",
+ "avf": "DRV_AVF",
+ "rdma-core": "DRV_RDMA_CORE",
+ "mlx5_core": "DRV_MLX5_CORE",
+ "af_xdp": "DRV_AF_XDP",
}
# Suite names have to be different, add prefix.
NIC_DRIVER_TO_SUITE_PREFIX = {
- u"vfio-pci": u"",
- u"avf": u"avf-",
- u"rdma-core": u"rdma-",
+ "vfio-pci": "",
+ "avf": "avf-",
+ "rdma-core": "rdma-",
+ "mlx5_core": "mlx5-",
+ "af_xdp": "af-xdp-",
}
# Number of virtual functions of physical nic.
NIC_DRIVER_TO_VFS = {
- u"vfio-pci": u"nic_vfs}= | 0",
- u"avf": u"nic_vfs}= | 1",
- u"rdma-core": u"nic_vfs}= | 0",
+ "vfio-pci": "nic_vfs}= | 0",
+ "avf": "nic_vfs}= | 1",
+ "rdma-core": "nic_vfs}= | 0",
+ "mlx5_core": "nic_vfs}= | 0",
+ "af_xdp": "nic_vfs}= | 0",
+ }
+
+ # Number of physical interfaces of physical nic.
+ NIC_CODE_TO_PFS = {
+ "10ge2p1x520": "nic_pfs}= | 2",
+ "10ge2p1x710": "nic_pfs}= | 2",
+ "40ge2p1xl710": "nic_pfs}= | 2",
+ "25ge2p1xxv710": "nic_pfs}= | 2",
+ "25ge2p1e810xxv": "nic_pfs}= | 2",
+ "25ge2p1e822cq": "nic_pfs}= | 2",
+ "25ge2p1e823c": "nic_pfs}= | 2",
+ "100ge2p1e810cq": "nic_pfs}= | 2",
+ "50ge1p1ena": "nic_pfs}= | 2",
+ "100ge1p1ena": "nic_pfs}= | 2",
+ "200ge1p1ena": "nic_pfs}= | 2",
+ "100ge2p1cx556a": "nic_pfs}= | 2",
+ "100ge2p1cx6dx": "nic_pfs}= | 2",
+ "200ge2p1cx7veat": "nic_pfs}= | 2",
+ "200ge6p3cx7veat": "nic_pfs}= | 6",
+ "1ge1p82540em": "nic_pfs}= | 2",
+ }
+
+ NIC_CODE_TO_CORESCALE = {
+ "10ge2p1x520": 1,
+ "10ge2p1x710": 1,
+ "40ge2p1xl710": 1,
+ "25ge2p1xxv710": 1,
+ "25ge2p1e810xxv": 1,
+ "25ge2p1e822cq": 1,
+ "25ge2p1e823c": 1,
+ "100ge2p1e810cq": 1,
+ "50ge1p1ena": 1,
+ "100ge1p1ena": 1,
+ "200ge1p1ena": 1,
+ "100ge2p1cx556a": 1,
+ "100ge2p1cx6dx": 1,
+ "200ge2p1cx7veat": 1,
+ "200ge6p3cx7veat": 3,
+ "1ge1p82540em": 1,
}
# Not each driver is supported by each NIC.
DPDK_NIC_NAME_TO_DRIVER = {
- u"Cisco-VIC-1227": [u"vfio-pci"],
- u"Cisco-VIC-1385": [u"vfio-pci"],
- u"Intel-X520-DA2": [u"vfio-pci"],
- u"Intel-X553": [u"vfio-pci"],
- u"Intel-X710": [u"vfio-pci"],
- u"Intel-XL710": [u"vfio-pci"],
- u"Intel-XXV710": [u"vfio-pci"],
- u"Intel-E810CQ": [u"vfio-pci"],
- u"Amazon-Nitro-50G": [u"vfio-pci"],
- u"Mellanox-CX556A": [u"mlx5_core"],
+ "Intel-X520-DA2": ["vfio-pci"],
+ "Intel-X710": ["vfio-pci"],
+ "Intel-XL710": ["vfio-pci"],
+ "Intel-XXV710": ["vfio-pci"],
+ "Intel-E810XXV": ["vfio-pci"],
+ "Intel-E822CQ": ["vfio-pci"],
+ "Intel-E823C": ["vfio-pci"],
+ "Intel-E810CQ": ["vfio-pci"],
+ "Amazon-Nitro-50G": ["vfio-pci"],
+ "Amazon-Nitro-100G": ["vfio-pci"],
+ "Amazon-Nitro-200G": ["vfio-pci"],
+ "Mellanox-CX556A": ["mlx5_core"],
+ "Mellanox-CX6DX": ["mlx5_core"],
+ "Mellanox-CX7VEAT": ["mlx5_core"],
+ "virtual": ["vfio-pci"],
}
# Tags to differentiate tests for different NIC driver.
DPDK_NIC_DRIVER_TO_TAG = {
- u"vfio-pci": u"DRV_VFIO_PCI",
- u"mlx5_core": u"DRV_MLX5_CORE",
+ "vfio-pci": "DRV_VFIO_PCI",
+ "mlx5_core": "DRV_MLX5_CORE",
}
# Suite names have to be different, add prefix.
DPDK_NIC_DRIVER_TO_SUITE_PREFIX = {
- u"vfio-pci": u"",
- u"mlx5_core": u"mlx5-",
+ "vfio-pci": "",
+ "mlx5_core": "mlx5-",
}
# Some identifiers constructed from suite names
@@ -394,37 +548,42 @@ class Constants:
# TODO CSIT-1481: Crypto HW should be read from topology file instead.
NIC_NAME_TO_CRYPTO_HW = {
- u"Intel-X553": u"HW_C3xxx",
- u"Intel-X710": u"HW_DH895xcc",
- u"Intel-XL710": u"HW_DH895xcc",
+ "Intel-E810CQ": "HW_4xxx",
+ "Intel-E822CQ": "HW_C4xxx",
+ "Intel-E823C": "HW_C4xxx",
+ "Intel-X710": "HW_DH895xcc",
+ "Intel-XL710": "HW_DH895xcc",
+ }
+
+ DEVICE_TYPE_TO_KEYWORD = {
+ "scapy": None
}
PERF_TYPE_TO_KEYWORD = {
- u"mrr": u"Traffic should pass with maximum rate",
- u"ndrpdr": u"Find NDR and PDR intervals using optimized search",
- u"soak": u"Find critical load using PLRsearch",
+ "mrr": "Traffic should pass with maximum rate",
+ "ndrpdr": "Find NDR and PDR intervals using optimized search",
+ "soak": "Find critical load using PLRsearch",
}
PERF_TYPE_TO_SUITE_DOC_VER = {
- u"mrr": u'''fication:* In MaxReceivedRate tests TG sends traffic\\
-| ... | at line rate and reports total received packets over trial period.\\''',
- # TODO: Figure out how to include the full "*[Ver] TG verification:*"
- # while keeping this readable and without breaking line length limit.
- u"ndrpdr": u'''ication:* TG finds and reports throughput NDR (Non Drop\\
-| ... | Rate) with zero packet loss tolerance and throughput PDR (Partial Drop\\
-| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage\\
-| ... | of packets transmitted. NDR and PDR are discovered for different\\
-| ... | Ethernet L2 frame sizes using MLRsearch library.\\''',
- u"soak": u'''fication:* TG sends traffic at dynamically computed\\
-| ... | rate as PLRsearch algorithm gathers data and improves its estimate\\
-| ... | of a rate at which a prescribed small fraction of packets\\
-| ... | would be lost. After set time, the serarch stops\\
-| ... | and the algorithm reports its current estimate.\\''',
+ "mrr": u'''fication:** In MaxReceivedRate tests TG sends traffic at \\
+| ... | line rate and reports total received packets over trial period. \\''',
+ "ndrpdr": u'''rification:** TG finds and reports throughput NDR (Non \\
+| ... | Drop Rate) with zero packet loss tolerance and throughput PDR \\
+| ... | (Partial Drop Rate) with non-zero packet loss tolerance (LT) \\
+| ... | expressed in percentage of packets transmitted. NDR and PDR are \\
+| ... | discovered for different Ethernet L2 frame sizes using MLRsearch \\
+| ... | library.''',
+ "soak": u'''rification:** TG sends traffic at dynamically computed \\
+| ... | rate as PLRsearch algorithm gathers data and improves its estimate \\
+| ... | of a rate at which a prescribed small fraction of packets \\
+| ... | would be lost. After set time, the serarch stops \\
+| ... | and the algorithm reports its current estimate. \\''',
}
PERF_TYPE_TO_TEMPLATE_DOC_VER = {
- u"mrr": u'''Measure MaxReceivedRate for ${frame_size}B frames\\
-| | ... | using burst trials throughput test.\\''',
- u"ndrpdr": u"Measure NDR and PDR values using MLRsearch algorithm.\\",
- u"soak": u"Estimate critical rate using PLRsearch algorithm.\\",
+ "mrr": u'''Measure MaxReceivedRate for ${frame_size}B frames \\
+| | ... | using burst trials throughput test. \\''',
+ "ndrpdr": "Measure NDR and PDR values using MLRsearch algorithm.",
+ "soak": "Estimate critical rate using PLRsearch algorithm. \\",
}
diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py
index 3d70684695..fc32248f6b 100644
--- a/resources/libraries/python/ContainerUtils.py
+++ b/resources/libraries/python/ContainerUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -205,8 +205,8 @@ class ContainerManager:
dut_cnt = len(
Counter(
[
- self.containers[container].node[u"host"]
- for container in self.containers
+ f"{container.node['host']}{container.node['port']}"
+ for container in self.containers.values()
]
)
)
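
The hunk above changes how distinct DUTs are counted: the key is now host plus port rather than host alone, so two SUT containers that share a host but are reached over different ports count as separate DUTs. A small stand-alone illustration of the difference, using hypothetical stand-ins rather than the real Container class:

    from collections import Counter

    class FakeContainer:
        # Hypothetical stand-in exposing only the .node dict used above.
        def __init__(self, host, port):
            self.node = {"host": host, "port": port}

    containers = {
        "DUT1_1": FakeContainer("10.0.0.1", 6001),
        "DUT1_2": FakeContainer("10.0.0.1", 6002),  # same host, new port
    }
    # Old keying (host only) collapses both into one DUT:
    print(len(Counter(c.node["host"] for c in containers.values())))  # 1
    # New keying (host + port) keeps them apart:
    print(len(Counter(f"{c.node['host']}{c.node['port']}"
                      for c in containers.values())))  # 2
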
@@ -256,6 +256,11 @@ class ContainerManager:
self._configure_vpp_chain_ipsec(
mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
guest_dir=guest_dir, nf_instance=idx, **kwargs)
+ elif chain_topology == u"chain_dma":
+ self._configure_vpp_chain_dma(
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir, **kwargs
+ )
else:
raise RuntimeError(
f"Container topology {chain_topology} not implemented"
@@ -278,6 +283,25 @@ class ContainerManager:
f"{self.engine.container.name}-{kwargs[u'sid2']}"
)
+ def _configure_vpp_chain_dma(self, **kwargs):
+ """Configure VPP in chain topology with l2xc (dma).
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ dma_wqs = kwargs[f"dma_wqs"]
+ self.engine.create_vpp_startup_config_dma(dma_wqs)
+
+ self.engine.create_vpp_exec_config(
+ u"memif_create_chain_dma.exec",
+ mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
+ sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
+ socket1=f"{kwargs[u'guest_dir']}/memif-"
+ f"{self.engine.container.name}-{kwargs[u'sid1']}",
+ socket2=f"{kwargs[u'guest_dir']}/memif-"
+ f"{self.engine.container.name}-{kwargs[u'sid2']}"
+ )
+
def _configure_vpp_cross_horiz(self, **kwargs):
"""Configure VPP in cross horizontal topology (single memif).
@@ -581,14 +605,19 @@ class ContainerEngine:
def start_vpp(self, verify=True):
"""Start VPP inside a container."""
self.execute(
- u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
- u">/tmp/vppd.log 2>&1 < /dev/null &")
+ u"/usr/bin/vpp -c /etc/vpp/startup.conf")
topo_instance = BuiltIn().get_library_instance(
u"resources.libraries.python.topology.Topology"
)
topo_instance.add_new_socket(
self.container.node,
+ SocketType.CLI,
+ self.container.name,
+ self.container.cli_socket,
+ )
+ topo_instance.add_new_socket(
+ self.container.node,
SocketType.PAPI,
self.container.name,
self.container.api_socket,
@@ -630,7 +659,7 @@ class ContainerEngine:
# Execute puts the command into single quotes,
# so inner arguments are enclosed in qouble quotes here.
self.execute(
- u'vppctl show pci 2>&1 | '
+ u'/usr/bin/vppctl show pci 2>&1 | '
u'fgrep -v "Connection refused" | '
u'fgrep -v "No such file or directory"'
)
@@ -688,7 +717,6 @@ class ContainerEngine:
vpp_config = VppConfigGenerator()
vpp_config.set_node(self.container.node)
vpp_config.add_unix_cli_listen()
- vpp_config.add_unix_nodaemon()
vpp_config.add_unix_exec(u"/tmp/running.exec")
vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
if cpuset_cpus:
@@ -700,10 +728,12 @@ class ContainerEngine:
vpp_config.add_buffers_per_numa(215040)
vpp_config.add_plugin(u"disable", u"default")
vpp_config.add_plugin(u"enable", u"memif_plugin.so")
+ vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
vpp_config.add_main_heap_size(u"2G")
- vpp_config.add_main_heap_page_size(u"2M")
+ vpp_config.add_main_heap_page_size(self.container.page_size)
+ vpp_config.add_default_hugepage_size(self.container.page_size)
vpp_config.add_statseg_size(u"2G")
- vpp_config.add_statseg_page_size(u"2M")
+ vpp_config.add_statseg_page_size(self.container.page_size)
vpp_config.add_statseg_per_node_counters(u"on")
return vpp_config
@@ -736,6 +766,7 @@ class ContainerEngine:
vpp_config.add_dpdk_no_tx_checksum_offload()
vpp_config.add_dpdk_dev_default_rxq(rxq)
vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
+ vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
# Apply configuration
self.execute(u"mkdir -p /etc/vpp/")
@@ -753,6 +784,23 @@ class ContainerEngine:
vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
+ vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
+
+ # Apply configuration
+ self.execute(u"mkdir -p /etc/vpp/")
+ self.execute(
+ f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
+ )
+
+ def create_vpp_startup_config_dma(self, dma_devices):
+ """Create startup configuration of VPP DMA.
+
+ :param dma_devices: DMA devices list.
+ :type dma_devices: list
+ """
+ vpp_config = self.create_base_vpp_startup_config()
+ vpp_config.add_plugin(u"enable", u"dma_intel_plugin.so")
+ vpp_config.add_dma_dev(dma_devices)
# Apply configuration
self.execute(u"mkdir -p /etc/vpp/")
@@ -798,31 +846,19 @@ class ContainerEngine:
:raises RuntimeError: If applying cgroup settings via cgset failed.
"""
ret, _, _ = self.container.ssh.exec_command_sudo(
- u"cgset -r cpuset.cpu_exclusive=0 /"
- )
- if int(ret) != 0:
- raise RuntimeError(u"Failed to apply cgroup settings.")
-
- ret, _, _ = self.container.ssh.exec_command_sudo(
- u"cgset -r cpuset.mem_exclusive=0 /"
- )
- if int(ret) != 0:
- raise RuntimeError(u"Failed to apply cgroup settings.")
-
- ret, _, _ = self.container.ssh.exec_command_sudo(
f"cgcreate -g cpuset:/{name}"
)
if int(ret) != 0:
raise RuntimeError(u"Failed to copy cgroup settings from root.")
ret, _, _ = self.container.ssh.exec_command_sudo(
- f"cgset -r cpuset.cpu_exclusive=0 /{name}"
+ f"cgset -r cpuset.cpus=0 /{name}"
)
if int(ret) != 0:
raise RuntimeError(u"Failed to apply cgroup settings.")
ret, _, _ = self.container.ssh.exec_command_sudo(
- f"cgset -r cpuset.mem_exclusive=0 /{name}"
+ f"cgset -r cpuset.mems=0 /{name}"
)
if int(ret) != 0:
raise RuntimeError(u"Failed to apply cgroup settings.")
@@ -853,7 +889,7 @@ class LXC(ContainerEngine):
else u"amd64"
image = self.container.image if self.container.image \
- else f"-d ubuntu -r bionic -a {target_arch}"
+ else f"-d ubuntu -r jammy -a {target_arch}"
cmd = f"lxc-create -t download --name {self.container.name} " \
f"-- {image} --no-validate"
@@ -1118,8 +1154,8 @@ class Docker(ContainerEngine):
if self.container.mnt else u""
cmd = f"docker run --privileged --detach --interactive --tty --rm " \
- f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
- f"{env} {volume} --name {self.container.name} " \
+ f"--cgroup-parent docker.slice {cpuset_cpus} {cpuset_mems} " \
+ f"{publish} {env} {volume} --name {self.container.name} " \
f"{self.container.image} {command}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
@@ -1273,6 +1309,7 @@ class Container:
path = f"/tmp/vpp_sockets/{value}"
self.__dict__[u"socket_dir"] = path
self.__dict__[u"api_socket"] = f"{path}/api.sock"
+ self.__dict__[u"cli_socket"] = f"{path}/cli.sock"
self.__dict__[u"stats_socket"] = f"{path}/stats.sock"
self.__dict__[attr] = value
else:
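
With the CLI socket added above, the Container name setter derives all per-container VPP socket paths from a single directory. For a hypothetical container name the derived values are:

    name = "DUT1_1"  # hypothetical container name
    path = f"/tmp/vpp_sockets/{name}"  # socket_dir
    api_socket = f"{path}/api.sock"
    cli_socket = f"{path}/cli.sock"
    stats_socket = f"{path}/stats.sock"
    print(api_socket, cli_socket, stats_socket)
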
diff --git a/resources/libraries/python/CoreDumpUtil.py b/resources/libraries/python/CoreDumpUtil.py
index e1c7b65765..b70afa858e 100644
--- a/resources/libraries/python/CoreDumpUtil.py
+++ b/resources/libraries/python/CoreDumpUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -139,11 +139,11 @@ class CoreDumpUtil:
for node in nodes.values():
if node[u"type"] == NodeType.DUT:
command = (
- f"for f in {Constants.CORE_DUMP_DIR}/*.core; do "
- f"sudo gdb /usr/bin/vpp ${{f}} "
- f"-ex 'source -v {Constants.REMOTE_FW_DIR}"
- f"/resources/tools/scripts/gdb-commands' -ex quit; "
- f"sudo rm -f ${{f}}; done"
+ f"for f in {Constants.CORE_DUMP_DIR}/*.core; do"
+ f" sleep 10; sudo gdb /usr/bin/vpp ${{f}}"
+ f" -ex 'source -v {Constants.REMOTE_FW_DIR}"
+ f"/resources/tools/scripts/gdb-commands' -ex quit;"
+ f" sudo rm -f ${{f}}; done"
)
try:
exec_cmd_no_error(node, command, timeout=3600)
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index f261f9421e..518469bd31 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,11 +13,13 @@
"""CPU utilities library."""
+from random import choice
+
from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
__all__ = [u"CpuUtils"]
@@ -232,7 +234,7 @@ class CpuUtils:
cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
- f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
+ f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
else:
cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"
@@ -245,6 +247,9 @@ class CpuUtils:
"""Return list of DUT node related list of CPU numbers. The main
computing unit is physical core count.
+ On SMT enabled DUTs, both sibling logical cores are used,
+ unless Robot variable \${smt_used} is set to False.
+
:param node: DUT node.
:param cpu_node: Numa node number.
:param nf_chains: Number of NF chains.
@@ -268,7 +273,7 @@ class CpuUtils:
:returns: List of CPUs allocated to NF.
:rtype: list
:raises RuntimeError: If we require more cpus than available or if
- placement is not possible due to wrong parameters.
+ placement is not possible due to wrong parameters.
"""
if not 1 <= nf_chain <= nf_chains:
raise RuntimeError(u"ChainID is out of range!")
@@ -276,6 +281,7 @@ class CpuUtils:
raise RuntimeError(u"NodeID is out of range!")
smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+ smt_used = BuiltIn().get_variable_value("\${smt_used}", smt_used)
cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
# CPU thread sibling offset.
sib = len(cpu_list) // CpuUtils.NR_OF_THREADS
@@ -311,6 +317,36 @@ class CpuUtils:
return result
@staticmethod
+ def get_affinity_af_xdp(
+ node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+ """Get affinity for AF_XDP interface. Result will be used to pin IRQs.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface.
+ :param cpu_skip_cnt: Amount of CPU cores to skip.
+ :param cpu_cnt: CPU threads count.
+ :type node: dict
+ :type pf_key: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+ :returns: List of CPUs allocated to AF_XDP interface.
+ :rtype: list
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+ if smt_used:
+ cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+ return CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=smt_used
+ )
+
+ @staticmethod
def get_affinity_nf(
nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
@@ -358,25 +394,25 @@ class CpuUtils:
@staticmethod
def get_affinity_trex(
- node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+ node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
"""Get affinity for T-Rex. Result will be used to pin T-Rex threads.
:param node: TG node.
- :param if1_pci: TG first interface.
- :param if2_pci: TG second interface.
+ :param if_key: TG first interface.
:param tg_mtc: TG main thread count.
:param tg_dtc: TG dataplane thread count.
:param tg_ltc: TG latency thread count.
+ :param tg_dtc_offset: TG dataplane thread offset.
:type node: dict
- :type if1_pci: str
- :type if2_pci: str
+ :type if_key: str
:type tg_mtc: int
:type tg_dtc: int
:type tg_ltc: int
+ :type tg_dtc_offset: int
:returns: List of CPUs allocated to T-Rex including numa node.
:rtype: int, int, int, list
"""
- interface_list = [if1_pci, if2_pci]
+ interface_list = [if_key]
cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
@@ -384,12 +420,11 @@ class CpuUtils:
smt_used=False)
threads = CpuUtils.cpu_slice_of_list_per_node(
- node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
- smt_used=False)
+ node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
+ cpu_cnt=tg_dtc, smt_used=False)
latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
- node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
- smt_used=False)
+ node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
return master_thread_id[0], latency_thread_id[0], cpu_node, threads
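
After the reorder above, the CPU list of the TG NUMA node is consumed in the order: main thread, latency thread, optional dataplane offset, then dataplane threads. A worked sketch of that slicing with made-up counts (plain list indexing, ignoring SMT and the Topology lookups):

    cpu_list = list(range(16))  # hypothetical CPUs on the TG NUMA node
    tg_mtc, tg_ltc, tg_dtc, tg_dtc_offset = 1, 1, 4, 2

    master = cpu_list[:tg_mtc]                        # [0]
    latency = cpu_list[tg_mtc:tg_mtc + tg_ltc]        # [1]
    start = tg_mtc + tg_ltc + tg_dtc_offset
    dataplane = cpu_list[start:start + tg_dtc]        # [4, 5, 6, 7]
    print(master[0], latency[0], dataplane)
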
@@ -445,4 +480,125 @@ class CpuUtils:
return CpuUtils.cpu_slice_of_list_per_node(
node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
- smt_used=smt_used)
+ smt_used=False)
+
+ @staticmethod
+ def get_cpu_idle_list(node, cpu_node, smt_used, cpu_alloc_str, sep=u","):
+ """Get idle CPU List.
+
+ :param node: Node dictionary with cpuinfo.
+ :param cpu_node: Numa node number.
+ :param smt_used: True - we want to use SMT, otherwise false.
+ :param cpu_alloc_str: vpp used cores.
+ :param sep: Separator, default: ",".
+ :type node: dict
+ :type cpu_node: int
+ :type smt_used: bool
+ :type cpu_alloc_str: str
+ :type smt_used: bool
+ :type sep: str
+ :rtype: list
+ """
+ cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
+ cpu_idle_list = [i for i in cpu_list
+ if str(i) not in cpu_alloc_str.split(sep)]
+ return cpu_idle_list
+
+ @staticmethod
+ def get_affinity_vswitch(
+ nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+ """Get affinity for vswitch on all DUTs.
+
+ :param nodes: Topology nodes.
+ :param phy_cores: Number of physical cores to allocate.
+ :param rx_queues: Number of RX queues. (Optional, Default: None)
+ :param rxd: Number of RX descriptors. (Optional, Default: None)
+ :param txd: Number of TX descriptors. (Optional, Default: None)
+ :type nodes: dict
+ :type phy_cores: int
+ :type rx_queues: int
+ :type rxd: int
+ :type txd: int
+ :returns: Compute resource information dictionary.
+ :rtype: dict
+ """
+ compute_resource_info = dict()
+ for node_name, node in nodes.items():
+ if node["type"] != NodeType.DUT:
+ continue
+ # Number of Data Plane physical cores.
+ dp_cores_count = BuiltIn().get_variable_value(
+ "${dp_cores_count}", phy_cores
+ )
+ # Number of Feature Plane physical cores.
+ fp_cores_count = BuiltIn().get_variable_value(
+ "${fp_cores_count}", phy_cores - dp_cores_count
+ )
+ # Ratio between RX queues and data plane threads.
+ rxq_ratio = BuiltIn().get_variable_value(
+ "${rxq_ratio}", 1
+ )
+
+ dut_pf_keys = BuiltIn().get_variable_value(
+ f"${{{node_name}_pf_keys}}"
+ )
+ # SMT override in case of non standard test cases.
+ smt_used = BuiltIn().get_variable_value(
+ "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+ )
+
+ cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+ skip_cnt = Constants.CPU_CNT_SYSTEM
+ cpu_main = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
+ smt_used=False
+ )
+ cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
+ skip_cnt += Constants.CPU_CNT_MAIN
+ cpu_dp = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(dp_cores_count),
+ smt_used=smt_used
+ ) if int(dp_cores_count) else ""
+ skip_cnt = skip_cnt + int(dp_cores_count)
+ cpu_fp = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(fp_cores_count),
+ smt_used=smt_used
+ ) if int(fp_cores_count) else ""
+
+ fp_count_int = \
+ int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(fp_cores_count)
+ dp_count_int = \
+ int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(dp_cores_count)
+
+ rxq_count_int = \
+ int(rx_queues) if rx_queues \
+ else int(dp_count_int/rxq_ratio)
+ rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+ compute_resource_info["buffers_numa"] = \
+ 215040 if smt_used else 107520
+ compute_resource_info["smt_used"] = smt_used
+ compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+ compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+ compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+ compute_resource_info[f"{node_name}_cpu_wt"] = \
+ ",".join(filter(None, [cpu_dp, cpu_fp]))
+ compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+ ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+ compute_resource_info["cpu_count_int"] = \
+ int(dp_cores_count) + int(fp_cores_count)
+ compute_resource_info["rxd_count_int"] = rxd
+ compute_resource_info["txd_count_int"] = txd
+ compute_resource_info["rxq_count_int"] = rxq_count_int
+ compute_resource_info["fp_count_int"] = fp_count_int
+ compute_resource_info["dp_count_int"] = dp_count_int
+
+ return compute_resource_info
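
The queue arithmetic in get_affinity_vswitch above derives the RX queue count from the dataplane thread count when no explicit rx_queues is given: with SMT each physical core contributes NR_OF_THREADS threads, the result is divided by the rxq_ratio Robot variable (default 1), and the count never drops below one. A small worked check of that logic with illustrative values (NR_OF_THREADS assumed to be 2, the usual SMT sibling count):

    NR_OF_THREADS = 2  # assumed SMT sibling count

    def rxq_count(dp_cores, smt_used, rx_queues=None, rxq_ratio=1):
        dp_count_int = dp_cores * NR_OF_THREADS if smt_used else dp_cores
        count = int(rx_queues) if rx_queues else int(dp_count_int / rxq_ratio)
        return 1 if not count else count

    print(rxq_count(2, smt_used=True))                # 4
    print(rxq_count(1, smt_used=False))               # 1
    print(rxq_count(4, smt_used=True, rxq_ratio=2))   # 4
    print(rxq_count(2, smt_used=False, rx_queues=8))  # 8
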
diff --git a/resources/libraries/python/DMAUtil.py b/resources/libraries/python/DMAUtil.py
new file mode 100644
index 0000000000..f904ea4e3d
--- /dev/null
+++ b/resources/libraries/python/DMAUtil.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2024 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""DMA util library."""
+
+from re import search
+from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
+
+
+class DMAUtil:
+ """Common DMA utilities"""
+
+ @staticmethod
+ def get_dma_resource(node, dma_device):
+ """Get DMA resource from DMA device.
+
+ :param node: Topology node.
+ :param dma_device: DMA device.
+ :type node: dict
+ :type dma_device: str
+ :returns: DMA resource.
+ :rtype: dict
+ """
+
+ cmd = f"grep -H . /sys/bus/pci/devices/{dma_device}/dsa*/*"
+ _, stdout, stderr = exec_cmd(node, cmd, sudo=True)
+
+ dma_info = dict()
+ dma_info["dma_device"] = dma_device
+ dma_info["engine"] = list()
+ dma_info["wq"] = list()
+ dma_info["group"] = list()
+
+ for line in stdout.split():
+ g1 = search(r"/(dsa\d+)/(.+):(.+)", line)
+ if g1 is not None:
+ dma_info["dma_name"] = g1.group(1)
+ dma_info[f"{g1.group(2)}"] = g1.group(3)
+
+ for line in stderr.split():
+ g2 = search(r"/(dsa\d+)/((engine|group|wq)\d+\.\d+)", line)
+ if g2 is not None:
+ dev_type = g2.group(3)
+ dev = g2.group(2)
+ dma_info[dev_type].append(dev)
+
+ return dma_info
+
+ @staticmethod
+ def disable_dma_device(node, dma_name):
+ """Disable DMA device.
+
+ :param node: Topology node.
+ :param dma_name: DMA name.
+ :type node: dict
+ :type dma_name: str
+ """
+ cmd = f"cat /sys/bus/dsa/devices/{dma_name}/state"
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to get dma state.")
+ if stdout.strip() == "disabled":
+ return
+
+ cmd = f"accel-config disable-device -f {dma_name}"
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to disable DMA on DUT.")
+
+ @staticmethod
+ def enable_dma_device(node, dma_name, groups, engines, wqs, wq_size,
+ max_batch_size, max_transfer_size):
+ """Enable DMA device.
+
+ :param node: Topology node.
+ :param dma_name: DMA name.
+ :param groups: DMA groups.
+ :param engines: DMA engines.
+ :param wqs: DMA work queues.
+ :param wq_size: DMA work queue size.
+ :param max_batch_size: Wq max batch size.
+ :param max_transfer_size: Wq max transfer size.
+ :type node: dict
+ :type dma_name: str
+ :type groups: list
+ :type engines: list
+ :type wqs: list
+ :type wq_size: int
+ :type max_batch_size: int
+ :type max_transfer_size: int
+ """
+
+ # Configure Device
+ cmd = f"accel-config config-device {dma_name}"
+
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to configure DMA device on DUT.")
+
+ # Configure DMA group
+ for i, group in enumerate(groups):
+ cmd = f"accel-config config-group " \
+ f"{dma_name}/{group} --read-buffers-reserved=0"
+
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to configure DMA group on DUT.")
+
+ # Configure DMA engine
+ for i, engine in enumerate(engines):
+ cmd = f"accel-config config-engine " \
+ f"{dma_name}/{engine} --group-id={i}"
+
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to configure DMA engine on DUT.")
+
+ # Configure DMA work queue
+ for i, wq in enumerate(wqs):
+ cmd = f"accel-config config-wq {dma_name}/{wq} " \
+ f" --group-id={i%len(engines)} --type=user " \
+ f" --priority=10 --block-on-fault=1 " \
+ f" --wq-size={wq_size} --mode=dedicated " \
+ f" --name={dma_name}_{i} " \
+ f" --max-batch-size={max_batch_size} " \
+ f" --max-transfer-size={max_transfer_size} "
+
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to configure DMA work queue on DUT.")
+
+ # Enable DMA and work queues
+ cmd = f"accel-config enable-device {dma_name}"
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to enable DMA device on DUT.")
+
+ dma_wqs = [f"{dma_name}/{wq}" for wq in wqs]
+ cmd = f"accel-config enable-wq {' '.join(dma_wqs)}"
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to enable DMA work queue on DUT.")
+
+ @staticmethod
+ def enable_dmas_and_wqs_on_dut(node, wq_num):
+ """Enable DMAs and work queues on DUT.
+
+ :param node: Topology node.
+ :param wq_num: Number of work queues.
+ :type node: dict
+ :type wq_num: int
+ :returns: DMA work queues enabled.
+ :rtype: list
+ """
+ if node["type"] == NodeType.DUT:
+ dma_devs = Topology.get_bus(node)
+
+ enabled_wqs = list()
+
+ for dev in dma_devs.values():
+ if "Intel-DSA" not in dev["model"]:
+ continue
+
+ dev_pci = dev["pci_address"]
+ dma_info = DMAUtil.get_dma_resource(node, dev_pci)
+
+ dma_name = dma_info["dma_name"]
+ groups = dma_info["group"]
+ engines = dma_info["engine"]
+ wqs = dma_info["wq"]
+ wq_num_per_dma = wq_num//len(dma_devs) if wq_num > 1 else 1
+ max_transfer_size = \
+ int(dma_info["max_transfer_size"])//wq_num_per_dma
+ wq_size = int(dma_info["max_work_queues_size"])//wq_num_per_dma
+ max_batch_size = int(dma_info["max_batch_size"])
+
+ DMAUtil.disable_dma_device(node, dma_name)
+
+ DMAUtil.enable_dma_device(node,
+ dma_name,
+ groups[:wq_num_per_dma],
+ engines[:wq_num_per_dma],
+ wqs[:wq_num_per_dma],
+ wq_size,
+ max_batch_size,
+ max_transfer_size
+ )
+ enabled_wqs += wqs[:wq_num_per_dma]
+
+ cmd = f"lspci -vvv -s {dev_pci}"
+ exec_cmd_no_error(
+ node, cmd, sudo=True, message="Failed")
+
+ cmd = "accel-config list"
+ exec_cmd_no_error(
+ node, cmd, sudo=True, message="Failed")
+
+ cmd = "cat /proc/cmdline"
+ exec_cmd_no_error(
+ node, cmd, sudo=True, message="Failed")
+
+ return enabled_wqs
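For illustration, a minimal usage sketch of the DMAUtil keyword above; the node argument is a CSIT topology DUT dictionary and the wq_num value is an assumption, so treat this as a sketch rather than part of the patch:

    from resources.libraries.python.DMAUtil import DMAUtil

    def setup_dsa_wqs(node, wq_num=2):
        """Enable wq_num user work queues on each Intel DSA device of a DUT."""
        # Reconfigures every Intel-DSA device found in the topology and
        # returns the list of enabled work queues, e.g. ["wq0.0", "wq0.1"].
        return DMAUtil.enable_dmas_and_wqs_on_dut(node, wq_num)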
diff --git a/resources/libraries/python/DPDK/DPDKTools.py b/resources/libraries/python/DPDK/DPDKTools.py
index 9bb89968d2..83ddae8b4a 100644
--- a/resources/libraries/python/DPDK/DPDKTools.py
+++ b/resources/libraries/python/DPDK/DPDKTools.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -80,6 +80,25 @@ class DPDKTools:
exec_cmd_no_error(node, command, timeout=1200, message=message)
@staticmethod
+ def get_dpdk_version(node):
+ """Log and return the installed DPDK version.
+
+ The logged string ends with newline, the returned one is stripped.
+
+ :param node: Node from topology file.
+ :type node: dict
+ :returns: Stripped DPDK version string.
+ :rtype: str
+ :raises RuntimeError: If command returns nonzero return code.
+ """
+ command = f"cat {Constants.REMOTE_FW_DIR}/dpdk*/VERSION"
+ message = u"Get DPDK version failed!"
+ stdout, _ = exec_cmd_no_error(node, command, message=message)
+ # TODO: PAL should already tolerate stripped value in the log.
+ logger.info(f"DPDK Version: {stdout}")
+ return stdout.strip()
+
+ @staticmethod
def install_dpdk_framework(node):
"""
Prepare the DPDK framework on the DUT node.
@@ -91,13 +110,8 @@ class DPDKTools:
command = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}" \
f"/entry/install_dpdk.sh"
message = u"Install the DPDK failed!"
- exec_cmd_no_error(node, command, timeout=600, message=message)
-
- command = f"cat {Constants.REMOTE_FW_DIR}/dpdk*/VERSION"
- message = u"Get DPDK version failed!"
- stdout, _ = exec_cmd_no_error(node, command, message=message)
-
- logger.info(f"DPDK Version: {stdout}")
+ exec_cmd_no_error(node, command, timeout=3600, message=message)
+ DPDKTools.get_dpdk_version(node)
@staticmethod
def install_dpdk_framework_on_all_duts(nodes):
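A short sketch of calling the new helper on its own once the framework is deployed; the node value is an assumed topology DUT dictionary, not something defined by this patch:

    from resources.libraries.python.DPDK.DPDKTools import DPDKTools

    def log_dpdk_version(node):
        # Reads REMOTE_FW_DIR/dpdk*/VERSION on the DUT, logs it, and returns
        # the stripped version string, e.g. "21.02.0".
        return DPDKTools.get_dpdk_version(node)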
diff --git a/resources/libraries/python/DPDK/L3fwdTest.py b/resources/libraries/python/DPDK/L3fwdTest.py
index c33810348d..178c747da5 100644
--- a/resources/libraries/python/DPDK/L3fwdTest.py
+++ b/resources/libraries/python/DPDK/L3fwdTest.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -12,25 +12,100 @@
# limitations under the License.
"""
-This module exists to provide the l3fwd test for DPDK on topology nodes.
+This module exists to start l3fwd on topology nodes.
"""
+from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DpdkUtil import DpdkUtil
from resources.libraries.python.ssh import exec_cmd_no_error, exec_cmd
from resources.libraries.python.topology import NodeType, Topology
+NB_PORTS = 2
+
class L3fwdTest:
- """Test the DPDK l3fwd performance."""
+ """This class start l3fwd on topology nodes and check if properly started.
+ """
+
+ @staticmethod
+ def start_l3fwd_on_all_duts(
+ nodes, topology_info, phy_cores, rx_queues=None, jumbo_frames=False,
+ rxd=None, txd=None):
+ """
+ Execute l3fwd on all DUT nodes.
+
+ :param nodes: All the nodes info from the topology file.
+ :param topology_info: All the info from the topology file.
+ :param phy_cores: Number of physical cores to use.
+ :param rx_queues: Number of RX queues.
+ :param jumbo_frames: Jumbo frames on/off.
+ :param rxd: Number of RX descriptors.
+ :param txd: Number of TX descriptors.
+
+ :type nodes: dict
+ :type topology_info: dict
+ :type phy_cores: int
+ :type rx_queues: int
+ :type jumbo_frames: bool
+ :type rxd: int
+ :type txd: int
+ :raises RuntimeError: If bash return code is not 0.
+ """
+ cpu_count_int = dp_count_int = int(phy_cores)
+ dp_cores = cpu_count_int+1
+ tg_flip = topology_info[f"tg_if1_pci"] > topology_info[f"tg_if2_pci"]
+ compute_resource_info = CpuUtils.get_affinity_vswitch(
+ nodes, phy_cores, rx_queues=rx_queues, rxd=rxd, txd=txd
+ )
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
+ if dp_count_int > 1:
+ BuiltIn().set_tags('MTHREAD')
+ else:
+ BuiltIn().set_tags('STHREAD')
+ BuiltIn().set_tags(
+ f"{dp_count_int}T{cpu_count_int}C"
+ )
+
+ cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]
+ rxq_count_int = compute_resource_info["rxq_count_int"]
+ if1 = topology_info[f"{node_name}_pf1"][0]
+ if2 = topology_info[f"{node_name}_pf2"][0]
+ L3fwdTest.start_l3fwd(
+ nodes, node, if1=if1, if2=if2, lcores_list=cpu_dp,
+ nb_cores=dp_count_int, queue_nums=rxq_count_int,
+ jumbo_frames=jumbo_frames, tg_flip=tg_flip
+ )
+ for node in nodes:
+ if u"DUT" in node:
+ for i in range(3):
+ try:
+ L3fwdTest.check_l3fwd(nodes[node])
+ break
+ except RuntimeError:
+ L3fwdTest.start_l3fwd(
+ nodes, nodes[node], if1=if1, if2=if2,
+ lcores_list=cpu_dp, nb_cores=dp_count_int,
+ queue_nums=rxq_count_int, jumbo_frames=jumbo_frames,
+ tg_flip=tg_flip
+ )
+ else:
+ message = f"Failed to start l3fwd at node {node}"
+ raise RuntimeError(message)
@staticmethod
def start_l3fwd(
nodes, node, if1, if2, lcores_list, nb_cores, queue_nums,
- jumbo_frames):
+ jumbo_frames, tg_flip):
"""
Execute the l3fwd on the dut_node.
+ L3fwd uses the default IP forwarding table, but sorts ports by PCI address.
+ When that does not match the traffic profile (which depends on topology),
+ the only way to fix it is to patch and recompile the l3fwd app.
+
:param nodes: All the nodes info in the topology file.
:param node: DUT node.
:param if1: The test link interface 1.
@@ -40,6 +115,7 @@ class L3fwdTest:
:param queue_nums: The queues number for the NIC
:param jumbo_frames: Indication if the jumbo frames are used (True) or
not (False).
+ :param tg_flip: Whether TG ports are reordered.
:type nodes: dict
:type node: dict
:type if1: str
@@ -48,22 +124,24 @@ class L3fwdTest:
:type nb_cores: str
:type queue_nums: str
:type jumbo_frames: bool
+ :type tg_flip: bool
"""
if node[u"type"] == NodeType.DUT:
adj_mac0, adj_mac1, if_pci0, if_pci1 = L3fwdTest.get_adj_mac(
- nodes, node, if1, if2
+ nodes, node, if1, if2, tg_flip
)
- list_cores = [int(item) for item in lcores_list.split(u",")]
+ lcores = [int(item) for item in lcores_list.split(u",")]
# prepare the port config param
nb_cores = int(nb_cores)
index = 0
port_config = ''
- for port in range(0, 2):
+ for port in range(0, NB_PORTS):
for queue in range(0, int(queue_nums)):
index = 0 if nb_cores == 1 else index
- port_config += f"({port}, {queue}, {list_cores[index]}),"
+ port_config += \
+ f"({port}, {queue}, {lcores[index % NB_PORTS]}),"
index += 1
if jumbo_frames:
@@ -77,7 +155,6 @@ class L3fwdTest:
pmd_eth_dest_0=f"\\\"0,{adj_mac0}\\\"",
pmd_eth_dest_1=f"\\\"1,{adj_mac1}\\\"",
pmd_parse_ptype=True,
- pmd_enable_jumbo=jumbo_frames,
pmd_max_pkt_len=jumbo_frames
)
else:
@@ -98,20 +175,40 @@ class L3fwdTest:
message = f"Failed to execute l3fwd test at node {node['host']}"
exec_cmd_no_error(node, command, timeout=1800, message=message)
+ @staticmethod
+ def check_l3fwd(node):
+ """
+ Execute the l3fwd check on the DUT node.
+
+ :param node: DUT node.
+ :type node: dict
+ :raises RuntimeError: If the script "check_l3fwd.sh" fails.
+ """
+ if node[u"type"] == NodeType.DUT:
+ command = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}"\
+ f"/entry/check_l3fwd.sh"
+ message = "L3fwd not started properly"
+ exec_cmd_no_error(node, command, timeout=1800, message=message)
@staticmethod
- def get_adj_mac(nodes, node, if1, if2):
+ def get_adj_mac(nodes, node, if1, if2, tg_flip):
"""
Get adjacency MAC addresses of the DUT node.
+ Interfaces are re-ordered according to PCI address,
+ but the need to patch and recompile also depends on TG port order.
+ "tg_flip" signals whether TG ports are reordered.
+
:param nodes: All the nodes info in the topology file.
:param node: DUT node.
:param if1: The test link interface 1.
:param if2: The test link interface 2.
+ :param tg_flip: Whether tg ports are reordered.
:type nodes: dict
:type node: dict
:type if1: str
:type if2: str
+ :type tg_flip: bool
:returns: Returns MAC addresses of adjacency DUT nodes and PCI
addresses.
:rtype: str
@@ -121,9 +218,19 @@ class L3fwdTest:
if_pci0 = Topology.get_interface_pci_addr(node, if_key0)
if_pci1 = Topology.get_interface_pci_addr(node, if_key1)
+ # Flipping routes logic:
+ # If TG and DUT ports are reordered -> flip
+ # If TG reordered and DUT not reordered -> don't flip
+ # If DUT reordered and TG not reordered -> don't flip
+ # If DUT and TG not reordered -> flip
+
# Detect which is the port 0.
- if min(if_pci0, if_pci1) != if_pci0:
+ dut_flip = if_pci0 > if_pci1
+ if dut_flip:
if_key0, if_key1 = if_key1, if_key0
+ if tg_flip:
+ L3fwdTest.patch_l3fwd(node, u"patch_l3fwd_flip_routes")
+ elif not tg_flip:
L3fwdTest.patch_l3fwd(node, u"patch_l3fwd_flip_routes")
adj_node0, adj_if_key0 = Topology.get_adjacent_node_and_interface(
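The flip-routes decision above reduces to an XNOR of the two port orderings; a standalone sketch under that reading (names are illustrative, not part of the patch):

    def l3fwd_needs_flip_patch(dut_if1_pci, dut_if2_pci, tg_if1_pci, tg_if2_pci):
        """Return True when the l3fwd flip-routes patch should be applied."""
        dut_flip = dut_if1_pci > dut_if2_pci
        tg_flip = tg_if1_pci > tg_if2_pci
        # Patch when both sides are reordered, or when neither side is.
        return dut_flip == tg_flip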
diff --git a/resources/libraries/python/DPDK/TestpmdTest.py b/resources/libraries/python/DPDK/TestpmdTest.py
index dd30376fd1..3baba30715 100644
--- a/resources/libraries/python/DPDK/TestpmdTest.py
+++ b/resources/libraries/python/DPDK/TestpmdTest.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -11,18 +11,100 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""This module implements functionality which sets L2 forwarding for DPDK on
-DUT nodes.
+"""
+This module exists to start testpmd on topology nodes.
"""
+from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DpdkUtil import DpdkUtil
from resources.libraries.python.ssh import exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
class TestpmdTest:
- """Setup the DPDK for testpmd performance test."""
+ """
+ This class starts testpmd on topology nodes and checks it started properly.
+ """
+
+ @staticmethod
+ def start_testpmd_on_all_duts(
+ nodes, topology_info, phy_cores, rx_queues=None, jumbo_frames=False,
+ rxd=None, txd=None, nic_rxq_size=None, nic_txq_size=None):
+ """
+ Start testpmd with M worker threads, N RX queues and jumbo frame
+ support on/off on all DUTs.
+
+ :param nodes: All the nodes info from the topology file.
+ :param topology_info: All the info from the topology file.
+ :param phy_cores: Number of physical cores to use.
+ :param rx_queues: Number of RX queues.
+ :param jumbo_frames: Jumbo frames on/off.
+ :param rxd: Number of RX descriptors.
+ :param txd: Number of TX descriptors.
+ :param nic_rxq_size: RX queue size.
+ :param nic_txq_size: TX queue size.
+
+ :type nodes: dict
+ :type topology_info: dict
+ :type phy_cores: int
+ :type rx_queues: int
+ :type jumbo_frames: bool
+ :type rxd: int
+ :type txd: int
+ :type nic_rxq_size: int
+ :type nic_txq_size: int
+ :raises RuntimeError: If bash return code is not 0.
+ """
+
+ cpu_count_int = dp_count_int = int(phy_cores)
+ dp_cores = cpu_count_int+1
+ compute_resource_info = CpuUtils.get_affinity_vswitch(
+ nodes, phy_cores, rx_queues=rx_queues, rxd=rxd, txd=txd
+ )
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
+ if dp_count_int > 1:
+ BuiltIn().set_tags('MTHREAD')
+ else:
+ BuiltIn().set_tags('STHREAD')
+ BuiltIn().set_tags(
+ f"{dp_count_int}T{cpu_count_int}C"
+ )
+
+ cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]
+ rxq_count_int = compute_resource_info["rxq_count_int"]
+ if1 = topology_info[f"{node_name}_pf1"][0]
+ if2 = topology_info[f"{node_name}_pf2"][0]
+ TestpmdTest.start_testpmd(
+ node, if1=if1, if2=if2, lcores_list=cpu_dp,
+ nb_cores=dp_count_int, queue_nums=rxq_count_int,
+ jumbo_frames=jumbo_frames, rxq_size=nic_rxq_size,
+ txq_size=nic_txq_size
+ )
+ for node in nodes:
+ if u"DUT" in node:
+ for i in range(3):
+ try:
+ nic_model = nodes[node]["interfaces"][if1]["model"]
+ if "Mellanox-CX7VEAT" in nic_model:
+ break
+ if "Mellanox-CX6DX" in nic_model:
+ break
+ TestpmdTest.check_testpmd(nodes[node])
+ break
+ except RuntimeError:
+ TestpmdTest.start_testpmd(
+ nodes[node], if1=if1, if2=if2,
+ lcores_list=cpu_dp, nb_cores=dp_count_int,
+ queue_nums=rxq_count_int,
+ jumbo_frames=jumbo_frames,
+ rxq_size=nic_rxq_size, txq_size=nic_txq_size
+ )
+ else:
+ message = f"Failed to start testpmd at node {node}"
+ raise RuntimeError(message)
@staticmethod
def start_testpmd(
@@ -45,7 +127,7 @@ class TestpmdTest:
:type if1: str
:type if2: str
:type lcores_list: str
- :type nb_cores: str
+ :type nb_cores: int
:type queue_nums: str
:type jumbo_frames: bool
:type rxq_size: int
@@ -63,7 +145,7 @@ class TestpmdTest:
eal_pci_whitelist0=if_pci0,
eal_pci_whitelist1=if_pci1,
eal_in_memory=True,
- pmd_num_mbufs=16384,
+ pmd_num_mbufs=32768,
pmd_fwd_mode=u"io",
pmd_nb_ports=u"2",
pmd_portmask=u"0x3",
@@ -74,7 +156,7 @@ class TestpmdTest:
pmd_rxq=queue_nums,
pmd_txq=queue_nums,
pmd_nb_cores=nb_cores,
- pmd_disable_link_check=True,
+ pmd_disable_link_check=False,
pmd_auto_start=True,
pmd_numa=True
)
@@ -83,3 +165,18 @@ class TestpmdTest:
f"/entry/run_testpmd.sh \"{testpmd_args}\""
message = f"Failed to execute testpmd at node {node['host']}"
exec_cmd_no_error(node, command, timeout=1800, message=message)
+
+ @staticmethod
+ def check_testpmd(node):
+ """
+ Execute the testpmd check on the DUT node.
+
+ :param node: DUT node.
+ :type node: dict
+ :raises RuntimeError: If the script "check_testpmd.sh" fails.
+ """
+ if node[u"type"] == NodeType.DUT:
+ command = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}"\
+ f"/entry/check_testpmd.sh"
+ message = "Testpmd not started properly"
+ exec_cmd_no_error(node, command, timeout=1800, message=message)
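A sketch of driving the new entry keyword from a suite; the topology_info content and the queue sizes are assumptions for illustration:

    from resources.libraries.python.DPDK.TestpmdTest import TestpmdTest

    def start_io_forwarding(nodes, topology_info):
        # Starts dpdk-testpmd in io forward mode on every DUT with one
        # data-plane core and one RX queue per port, retrying via
        # check_testpmd.sh if the first start does not come up.
        TestpmdTest.start_testpmd_on_all_duts(
            nodes, topology_info, phy_cores=1, rx_queues=1,
            jumbo_frames=False, nic_rxq_size=1024, nic_txq_size=1024,
        )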
diff --git a/resources/libraries/python/DUTSetup.py b/resources/libraries/python/DUTSetup.py
index 396029a04f..f9758c5f9f 100644
--- a/resources/libraries/python/DUTSetup.py
+++ b/resources/libraries/python/DUTSetup.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -16,8 +16,7 @@
from time import sleep
from robot.api import logger
-from resources.libraries.python.Constants import Constants
-from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
@@ -33,11 +32,12 @@ class DUTSetup:
:type node: dict
:type service: str
"""
- command = u"cat /tmp/*supervisor*.log"\
- if DUTSetup.running_in_container(node) \
- else f"journalctl --no-pager _SYSTEMD_INVOCATION_ID=$(systemctl " \
+ if DUTSetup.running_in_container(node):
+ return
+ command = (
+ f"journalctl --no-pager _SYSTEMD_INVOCATION_ID=$(systemctl "
f"show -p InvocationID --value {service})"
-
+ )
message = f"Node {node[u'host']} failed to get logs from unit {service}"
exec_cmd_no_error(
@@ -66,9 +66,10 @@ class DUTSetup:
:type node: dict
:type service: str
"""
- command = f"supervisorctl restart {service}" \
- if DUTSetup.running_in_container(node) \
- else f"service {service} restart"
+ if DUTSetup.running_in_container(node):
+ command = f"supervisorctl restart {service}"
+ else:
+ command = f"systemctl restart {service}"
message = f"Node {node[u'host']} failed to restart service {service}"
exec_cmd_no_error(
@@ -99,10 +100,10 @@ class DUTSetup:
:type node: dict
:type service: str
"""
- # TODO: change command to start once all parent function updated.
- command = f"supervisorctl restart {service}" \
- if DUTSetup.running_in_container(node) \
- else f"service {service} restart"
+ if DUTSetup.running_in_container(node):
+ command = f"supervisorctl restart {service}"
+ else:
+ command = f"systemctl restart {service}"
message = f"Node {node[u'host']} failed to start service {service}"
exec_cmd_no_error(
@@ -135,9 +136,10 @@ class DUTSetup:
"""
DUTSetup.get_service_logs(node, service)
- command = f"supervisorctl stop {service}" \
- if DUTSetup.running_in_container(node) \
- else f"service {service} stop"
+ if DUTSetup.running_in_container(node):
+ command = f"supervisorctl stop {service}"
+ else:
+ command = f"systemctl stop {service}"
message = f"Node {node[u'host']} failed to stop service {service}"
exec_cmd_no_error(
@@ -207,42 +209,25 @@ class DUTSetup:
exec_cmd_no_error(node, cmd, message=f"{program} is not installed")
@staticmethod
- def get_pid(node, process):
+ def get_pid(node, process, retries=3):
"""Get PID of running process.
:param node: DUT node.
:param process: process name.
+ :param retries: How many times to retry on failure.
:type node: dict
:type process: str
+ :type retries: int
:returns: PID
:rtype: int
:raises RuntimeError: If it is not possible to get the PID.
"""
- ssh = SSH()
- ssh.connect(node)
-
- retval = None
- for i in range(3):
- logger.trace(f"Try {i}: Get {process} PID")
- ret_code, stdout, stderr = ssh.exec_command(f"pidof {process}")
-
- if int(ret_code):
- raise RuntimeError(
- f"Not possible to get PID of {process} process on node: "
- f"{node[u'host']}\n {stdout + stderr}"
- )
-
- pid_list = stdout.split()
- if len(pid_list) == 1:
- return [int(stdout)]
- if not pid_list:
- logger.debug(f"No {process} PID found on node {node[u'host']}")
- continue
- logger.debug(f"More than one {process} PID found " \
- f"on node {node[u'host']}")
- retval = [int(pid) for pid in pid_list]
-
- return retval
+ cmd = f"pidof {process}"
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, retries=retries,
+ message=f"No {process} PID found on node {node[u'host']}")
+ pid_list = stdout.split()
+ return [int(pid) for pid in pid_list]
@staticmethod
def get_vpp_pids(nodes):
@@ -260,81 +245,6 @@ class DUTSetup:
return pids
@staticmethod
- def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
- """Verify if Crypto QAT device virtual functions are initialized on all
- DUTs. If parameter force initialization is set to True, then try to
- initialize or remove VFs on QAT.
-
- :param node: DUT node.
- :crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
- :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
- :param force_init: If True then try to initialize to specific value.
- :type node: dict
- :type crypto_type: string
- :type numvfs: int
- :type force_init: bool
- :returns: nothing
- :raises RuntimeError: If QAT VFs are not created and force init is set
- to False.
- """
- pci_addr = Topology.get_cryptodev(node)
- sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)
-
- if sriov_numvfs != numvfs:
- if force_init:
- # QAT is not initialized and we want to initialize with numvfs
- DUTSetup.crypto_device_init(node, crypto_type, numvfs)
- else:
- raise RuntimeError(
- f"QAT device failed to create VFs on {node[u'host']}"
- )
-
- @staticmethod
- def crypto_device_init(node, crypto_type, numvfs):
- """Init Crypto QAT device virtual functions on DUT.
-
- :param node: DUT node.
- :crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
- :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
- :type node: dict
- :type crypto_type: string
- :type numvfs: int
- :returns: nothing
- :raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
- """
- if crypto_type == u"HW_DH895xcc":
- kernel_mod = u"qat_dh895xcc"
- kernel_drv = u"dh895xcc"
- elif crypto_type == u"HW_C3xxx":
- kernel_mod = u"qat_c3xxx"
- kernel_drv = u"c3xxx"
- else:
- raise RuntimeError(
- f"Unsupported crypto device type on {node[u'host']}"
- )
-
- pci_addr = Topology.get_cryptodev(node)
-
- # QAT device must be re-bound to kernel driver before initialization.
- DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)
-
- # Stop VPP to prevent deadlock.
- DUTSetup.stop_service(node, Constants.VPP_UNIT)
-
- current_driver = DUTSetup.get_pci_dev_driver(
- node, pci_addr.replace(u":", r"\:")
- )
- if current_driver is not None:
- DUTSetup.pci_driver_unbind(node, pci_addr)
-
- # Bind to kernel driver.
- DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)
-
- # Initialize QAT VFs.
- if numvfs > 0:
- DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)
-
- @staticmethod
def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
"""Get PCI address of Virtual Function.
@@ -388,21 +298,37 @@ class DUTSetup:
return sriov_numvfs
@staticmethod
- def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
+ def set_sriov_numvfs(node, pf_pci_addr, path="devices", numvfs=0):
"""Init or reset SR-IOV virtual functions by setting its number on PCI
device on DUT. Setting to zero removes all VFs.
:param node: DUT node.
:param pf_pci_addr: Physical Function PCI device address.
+ :param path: Either device or driver.
:param numvfs: Number of VFs to initialize, 0 - removes the VFs.
:type node: dict
:type pf_pci_addr: str
+ :type path: str
:type numvfs: int
:raises RuntimeError: Failed to create VFs on PCI.
"""
+ cmd = f"test -f /sys/bus/pci/{path}/{pf_pci_addr}/sriov_numvfs"
+ sriov_unsupported, _, _ = exec_cmd(node, cmd)
+ # if sriov_numvfs doesn't exist, then sriov_unsupported != 0
+ if int(sriov_unsupported):
+ if numvfs == 0:
+ # sriov is not supported and we want 0 VFs
+ # no need to do anything
+ return
+
+ raise RuntimeError(
+ f"Can't configure {numvfs} VFs on {pf_pci_addr} device "
+ f"on {node[u'host']} since it doesn't support SR-IOV."
+ )
+
pci = pf_pci_addr.replace(u":", r"\:")
command = f"sh -c \"echo {numvfs} | " \
- f"tee /sys/bus/pci/devices/{pci}/sriov_numvfs\""
+ f"tee /sys/bus/pci/{path}/{pci}/sriov_numvfs\""
message = f"Failed to create {numvfs} VFs on {pf_pci_addr} device " \
f"on {node[u'host']}"
@@ -430,16 +356,23 @@ class DUTSetup:
)
@staticmethod
- def pci_driver_unbind_list(node, *pci_addrs):
- """Unbind PCI devices from current driver on node.
+ def unbind_pci_devices_from_other_driver(node, driver, *pci_addrs):
+ """Unbind PCI devices from driver other than input driver on node.
:param node: DUT node.
+ :param driver: Driver to not unbind from. If None or empty string,
+ will attempt to unbind from the current driver.
:param pci_addrs: PCI device addresses.
:type node: dict
+ :type driver: str
:type pci_addrs: list
"""
for pci_addr in pci_addrs:
- DUTSetup.pci_driver_unbind(node, pci_addr)
+ cur_driver = DUTSetup.get_pci_dev_driver(node, pci_addr)
+ if not cur_driver:
+ return
+ if not driver or cur_driver != driver:
+ DUTSetup.pci_driver_unbind(node, pci_addr)
@staticmethod
def pci_driver_bind(node, pci_addr, driver):
@@ -543,61 +476,24 @@ class DUTSetup:
def get_pci_dev_driver(node, pci_addr):
"""Get current PCI device driver on node.
- .. note::
- # lspci -vmmks 0000:00:05.0
- Slot: 00:05.0
- Class: Ethernet controller
- Vendor: Red Hat, Inc
- Device: Virtio network device
- SVendor: Red Hat, Inc
- SDevice: Device 0001
- PhySlot: 5
- Driver: virtio-pci
-
:param node: DUT node.
:param pci_addr: PCI device address.
:type node: dict
:type pci_addr: str
:returns: Driver or None
- :raises RuntimeError: If PCI rescan or lspci command execution failed.
:raises RuntimeError: If it is not possible to get the interface driver
information from the node.
"""
- ssh = SSH()
- ssh.connect(node)
-
- for i in range(3):
- logger.trace(f"Try number {i}: Get PCI device driver")
-
- cmd = f"lspci -vmmks {pci_addr}"
- ret_code, stdout, _ = ssh.exec_command(cmd)
- if int(ret_code):
- raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
-
- for line in stdout.splitlines():
- if not line:
- continue
- name = None
- value = None
- try:
- name, value = line.split(u"\t", 1)
- except ValueError:
- if name == u"Driver:":
- return None
- if name == u"Driver:":
- return value
-
- if i < 2:
- logger.trace(
- f"Driver for PCI device {pci_addr} not found, "
- f"executing pci rescan and retrying"
- )
- cmd = u"sh -c \"echo 1 > /sys/bus/pci/rescan\""
- ret_code, _, _ = ssh.exec_command_sudo(cmd)
- if int(ret_code) != 0:
- raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
-
- return None
+ driver_path = f"/sys/bus/pci/devices/{pci_addr}/driver"
+ cmd = f"test -d {driver_path}"
+ ret_code, ret_val, _ = exec_cmd(node, cmd)
+ if int(ret_code):
+ # the directory doesn't exist which means the device is not bound
+ # to any driver
+ return None
+ cmd = f"basename $(readlink -f {driver_path})"
+ ret_val, _ = exec_cmd_no_error(node, cmd)
+ return ret_val.strip()
@staticmethod
def verify_kernel_module(node, module, force_load=False):
@@ -673,60 +569,6 @@ class DUTSetup:
exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
@staticmethod
- def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
- """Install VPP on all DUT nodes. Start the VPP service in case of
- systemd is not available or does not support autostart.
-
- :param nodes: Nodes in the topology.
- :param vpp_pkg_dir: Path to directory where VPP packages are stored.
- :type nodes: dict
- :type vpp_pkg_dir: str
- :raises RuntimeError: If failed to remove or install VPP.
- """
- for node in nodes.values():
- message = f"Failed to install VPP on host {node[u'host']}!"
- if node[u"type"] == NodeType.DUT:
- command = u"ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true"
- exec_cmd_no_error(node, command, sudo=True)
-
- command = u". /etc/lsb-release; echo \"${DISTRIB_ID}\""
- stdout, _ = exec_cmd_no_error(node, command)
-
- if stdout.strip() == u"Ubuntu":
- exec_cmd_no_error(
- node, u"apt-get purge -y '*vpp*' || true",
- timeout=120, sudo=True
- )
- # workaround to avoid installation of vpp-api-python
- exec_cmd_no_error(
- node, u"rm -f {vpp_pkg_dir}vpp-api-python.deb",
- timeout=120, sudo=True
- )
- exec_cmd_no_error(
- node, f"dpkg -i --force-all {vpp_pkg_dir}*.deb",
- timeout=120, sudo=True, message=message
- )
- exec_cmd_no_error(node, u"dpkg -l | grep vpp", sudo=True)
- if DUTSetup.running_in_container(node):
- DUTSetup.restart_service(node, Constants.VPP_UNIT)
- else:
- exec_cmd_no_error(
- node, u"yum -y remove '*vpp*' || true",
- timeout=120, sudo=True
- )
- # workaround to avoid installation of vpp-api-python
- exec_cmd_no_error(
- node, u"rm -f {vpp_pkg_dir}vpp-api-python.rpm",
- timeout=120, sudo=True
- )
- exec_cmd_no_error(
- node, f"rpm -ivh {vpp_pkg_dir}*.rpm",
- timeout=120, sudo=True, message=message
- )
- exec_cmd_no_error(node, u"rpm -qai '*vpp*'", sudo=True)
- DUTSetup.restart_service(node, Constants.VPP_UNIT)
-
- @staticmethod
def running_in_container(node):
"""This method tests if topology node is running inside container.
@@ -736,18 +578,15 @@ class DUTSetup:
to detect.
:rtype: bool
"""
- command = u"fgrep docker /proc/1/cgroup"
- message = u"Failed to get cgroup settings."
+ command = "cat /.dockerenv"
try:
- exec_cmd_no_error(
- node, command, timeout=30, sudo=False, message=message
- )
+ exec_cmd_no_error(node, command, timeout=30)
except RuntimeError:
return False
return True
@staticmethod
- def get_docker_mergeddir(node, uuid):
+ def get_docker_mergeddir(node, uuid=None):
"""Get Docker overlay for MergedDir diff.
:param node: DUT node.
@@ -758,8 +597,15 @@ class DUTSetup:
:rtype: str
:raises RuntimeError: If getting output failed.
"""
- command = f"docker inspect " \
+ if not uuid:
+ command = 'fgrep "hostname" /proc/self/mountinfo | cut -f 4 -d" "'
+ message = "Failed to get UUID!"
+ stdout, _ = exec_cmd_no_error(node, command, message=message)
+ uuid = stdout.split(sep="/")[-2]
+ command = (
+ f"docker inspect "
f"--format='{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}"
+ )
message = f"Failed to get directory of {uuid} on host {node[u'host']}"
stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
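The removed lspci parsing boils down to a sysfs lookup; a standalone sketch of the same idea (plain Python, independent of the CSIT ssh layer):

    import os

    def pci_dev_driver(pci_addr):
        """Return the kernel driver bound to a PCI device, or None."""
        driver_path = f"/sys/bus/pci/devices/{pci_addr}/driver"
        if not os.path.isdir(driver_path):
            # No driver symlink means the device is currently unbound.
            return None
        # The symlink target ends with the driver name, e.g. ".../vfio-pci".
        return os.path.basename(os.path.realpath(driver_path))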
diff --git a/resources/libraries/python/Dhcp.py b/resources/libraries/python/Dhcp.py
index ec2c895bc3..2b4d03ec9e 100644
--- a/resources/libraries/python/Dhcp.py
+++ b/resources/libraries/python/Dhcp.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/DpdkUtil.py b/resources/libraries/python/DpdkUtil.py
index dcca73db1d..8ee0dde850 100644
--- a/resources/libraries/python/DpdkUtil.py
+++ b/resources/libraries/python/DpdkUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -37,10 +37,10 @@ class DpdkUtil:
)
# Add a PCI device in white list.
options.add_with_value_from_dict(
- u"w", u"eal_pci_whitelist0", kwargs
+ u"a", u"eal_pci_whitelist0", kwargs
)
options.add_with_value_from_dict(
- u"w", u"eal_pci_whitelist1", kwargs
+ u"a", u"eal_pci_whitelist1", kwargs
)
# Load an external driver. Multiple -d options are allowed.
options.add_with_value_if_from_dict(
@@ -126,7 +126,7 @@ class DpdkUtil:
options.add_equals_from_dict(
u"max-pkt-len", u"pmd_max_pkt_len", kwargs
)
- # Set the max packet length.
+ # Set the mbuf size.
options.add_equals_from_dict(
u"mbuf-size", u"pmd_mbuf_size", kwargs
)
@@ -161,7 +161,7 @@ class DpdkUtil:
:rtype: OptionString
"""
options = OptionString()
- options.add(u"testpmd")
+ options.add(u"dpdk-testpmd")
options.extend(DpdkUtil.get_eal_options(**kwargs))
options.add(u"--")
options.extend(DpdkUtil.get_testpmd_pmd_options(**kwargs))
@@ -219,10 +219,6 @@ class DpdkUtil:
options.add_equals_from_dict(
u"config", u"pmd_config", kwargs
)
- # Enables jumbo frames.
- options.add_if_from_dict(
- u"enable-jumbo", u"pmd_enable_jumbo", kwargs, False
- )
# Set the max packet length.
options.add_with_value_if_from_dict(
u"max-pkt-len", u"9200", u"pmd_max_pkt_len", kwargs, False
diff --git a/resources/libraries/python/DropRateSearch.py b/resources/libraries/python/DropRateSearch.py
index 49e64d9219..2417df8c41 100644
--- a/resources/libraries/python/DropRateSearch.py
+++ b/resources/libraries/python/DropRateSearch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -426,10 +426,8 @@ class DropRateSearch(metaclass=ABCMeta):
self._search_result = SearchResults.SUCCESS
self._search_result_rate = rate
return
- else:
- raise RuntimeError(u"Unknown search result")
- else:
- raise Exception(u"Unknown search direction")
+ raise RuntimeError(u"Unknown search result")
+ raise Exception(u"Unknown search direction")
def verify_search_result(self):
"""Fail if search was not successful.
diff --git a/resources/libraries/python/FilteredLogger.py b/resources/libraries/python/FilteredLogger.py
index 3df5714837..42068ef58c 100644
--- a/resources/libraries/python/FilteredLogger.py
+++ b/resources/libraries/python/FilteredLogger.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/FlowUtil.py b/resources/libraries/python/FlowUtil.py
new file mode 100644
index 0000000000..054356b9a2
--- /dev/null
+++ b/resources/libraries/python/FlowUtil.py
@@ -0,0 +1,580 @@
+# Copyright (c) 2023 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Flow Utilities Library."""
+
+from enum import IntEnum
+from ipaddress import ip_address
+
+from resources.libraries.python.topology import Topology
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
+
+class FlowType(IntEnum):
+ """Flow types."""
+ FLOW_TYPE_ETHERNET = 1
+ FLOW_TYPE_IP4 = 2
+ FLOW_TYPE_IP6 = 3
+ FLOW_TYPE_IP4_L2TPV3OIP = 4
+ FLOW_TYPE_IP4_IPSEC_ESP = 5
+ FLOW_TYPE_IP4_IPSEC_AH = 6
+ FLOW_TYPE_IP4_N_TUPLE = 7
+ FLOW_TYPE_IP6_N_TUPLE = 8
+ FLOW_TYPE_IP4_VXLAN = 11
+ FLOW_TYPE_IP6_VXLAN = 12
+ FLOW_TYPE_IP4_GTPU = 14
+
+class FlowProto(IntEnum):
+ """Flow protocols."""
+ IP_API_PROTO_TCP = 6
+ IP_API_PROTO_UDP = 17
+ IP_API_PROTO_ESP = 50
+ IP_API_PROTO_AH = 51
+ IP_API_PROTO_L2TP = 115
+
+class FlowAction(IntEnum):
+ """Flow actions."""
+ FLOW_ACTION_MARK = 2
+ FLOW_ACTION_REDIRECT_TO_QUEUE = 16
+ FLOW_ACTION_DROP = 64
+
+class FlowUtil:
+ """Utilities for flow configuration."""
+
+ @staticmethod
+ def vpp_create_ip4_n_tuple_flow(
+ node, src_ip, dst_ip, src_port, dst_port,
+ proto, action, value=0):
+ """Create IP4_N_TUPLE flow.
+
+ :param node: DUT node.
+ :param src_ip: Source IP4 address.
+ :param dst_ip: Destination IP4 address.
+ :param src_port: Source port.
+ :param dst_port: Destination port.
+ :param proto: TCP or UDP.
+ :param action: Mark, drop or redirect-to-queue.
+ :param value: Action value.
+
+ :type node: dict
+ :type src_ip: str
+ :type dst_ip: str
+ :type src_port: int
+ :type dst_port: int
+ :type proto: str
+ :type action: str
+ :type value: int
+ :returns: flow_index.
+ :rtype: int
+ """
+ flow = u"ip4_n_tuple"
+ flow_type = FlowType.FLOW_TYPE_IP4_N_TUPLE
+
+ if proto == u"TCP":
+ flow_proto = FlowProto.IP_API_PROTO_TCP
+ elif proto == u"UDP":
+ flow_proto = FlowProto.IP_API_PROTO_UDP
+ else:
+ raise ValueError(f"proto error: {proto}")
+
+ pattern = {
+ u'src_addr': {u'addr': src_ip, u'mask': u"255.255.255.255"},
+ u'dst_addr': {u'addr': dst_ip, u'mask': u"255.255.255.255"},
+ u'src_port': {u'port': src_port, u'mask': 0xFFFF},
+ u'dst_port': {u'port': dst_port, u'mask': 0xFFFF},
+ u'protocol': {u'prot': flow_proto}
+ }
+
+ flow_index = FlowUtil.vpp_flow_add(
+ node, flow, flow_type, pattern, action, value)
+
+ return flow_index
+
+ @staticmethod
+ def vpp_create_ip6_n_tuple_flow(
+ node, src_ip, dst_ip, src_port, dst_port,
+ proto, action, value=0):
+ """Create IP6_N_TUPLE flow.
+
+ :param node: DUT node.
+ :param src_ip: Source IP6 address.
+ :param dst_ip: Destination IP6 address.
+ :param src_port: Source port.
+ :param dst_port: Destination port.
+ :param proto: TCP or UDP.
+ :param action: Mark, drop or redirect-to-queue.
+ :param value: Action value.
+
+ :type node: dict
+ :type src_ip: str
+ :type dst_ip: str
+ :type src_port: int
+ :type dst_port: int
+ :type proto: str
+ :type action: str
+ :type value: int
+ :returns: flow_index.
+ :rtype: int
+ """
+ flow = u"ip6_n_tuple"
+ flow_type = FlowType.FLOW_TYPE_IP6_N_TUPLE
+
+ if proto == u"TCP":
+ flow_proto = FlowProto.IP_API_PROTO_TCP
+ elif proto == u"UDP":
+ flow_proto = FlowProto.IP_API_PROTO_UDP
+ else:
+ raise ValueError(f"proto error: {proto}")
+
+ pattern = {
+ u'src_addr': {u'addr': src_ip, \
+ u'mask': u"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF"},
+ u'dst_addr': {u'addr': dst_ip, \
+ u'mask': u"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF"},
+ u'src_port': {u'port': src_port, u'mask': 0xFFFF},
+ u'dst_port': {u'port': dst_port, u'mask': 0xFFFF},
+ u'protocol': {u'prot': flow_proto}
+ }
+
+ flow_index = FlowUtil.vpp_flow_add(
+ node, flow, flow_type, pattern, action, value)
+
+ return flow_index
+
+ @staticmethod
+ def vpp_create_ip4_flow(
+ node, src_ip, dst_ip, proto, action, value=0):
+ """Create IP4 flow.
+
+ :param node: DUT node.
+ :param src_ip: Source IP4 address.
+ :param dst_ip: Destination IP4 address.
+ :param proto: TCP or UDP.
+ :param action: Mark, drop or redirect-to-queue.
+ :param value: Action value.
+
+ :type node: dict
+ :type src_ip: str
+ :type dst_ip: str
+ :type proto: str
+ :type action: str
+ :type value: int
+ :returns: flow_index.
+ :rtype: int
+ """
+ flow = u"ip4"
+ flow_type = FlowType.FLOW_TYPE_IP4
+
+ if proto == u"TCP":
+ flow_proto = FlowProto.IP_API_PROTO_TCP
+ elif proto == u"UDP":
+ flow_proto = FlowProto.IP_API_PROTO_UDP
+ else:
+ raise ValueError(f"proto error: {proto}")
+
+ pattern = {
+ u'src_addr': {u'addr': src_ip, u'mask': u"255.255.255.255"},
+ u'dst_addr': {u'addr': dst_ip, u'mask': u"255.255.255.255"},
+ u'protocol': {u'prot': flow_proto}
+ }
+
+ flow_index = FlowUtil.vpp_flow_add(
+ node, flow, flow_type, pattern, action, value)
+
+ return flow_index
+
+ @staticmethod
+ def vpp_create_ip6_flow(
+ node, src_ip, dst_ip, proto, action, value=0):
+ """Create IP6 flow.
+
+ :param node: DUT node.
+ :param src_ip: Source IP6 address.
+ :param dst_ip: Destination IP6 address.
+ :param proto: TCP or UDP.
+ :param action: Mark, drop or redirect-to-queue.
+ :param value: Action value.
+
+ :type node: dict
+ :type src_ip: str
+ :type dst_ip: str
+ :type proto: str
+ :type action: str
+ :type value: int
+ :returns: flow_index.
+ :rtype: int
+ """
+ flow = u"ip6"
+ flow_type = FlowType.FLOW_TYPE_IP6
+
+ if proto == u"TCP":
+ flow_proto = FlowProto.IP_API_PROTO_TCP
+ elif proto == u"UDP":
+ flow_proto = FlowProto.IP_API_PROTO_UDP
+ else:
+ raise ValueError(f"proto error: {proto}")
+
+ pattern = {
+ u'src_addr': {u'addr': src_ip, \
+ u'mask': u"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF"},
+ u'dst_addr': {'addr': dst_ip, \
+ u'mask': u"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF"},
+ u'protocol': {u'prot': flow_proto}
+ }
+
+ flow_index = FlowUtil.vpp_flow_add(
+ node, flow, flow_type, pattern, action, value)
+
+ return flow_index
+
+ @staticmethod
+ def vpp_create_ip4_gtpu_flow(
+ node, src_ip, dst_ip, teid, action, value=0):
+ """Create IP4_GTPU flow.
+
+ :param node: DUT node.
+ :param src_ip: Source IP4 address.
+ :param dst_ip: Destination IP4 address.
+ :param teid: Tunnel endpoint identifier.
+ :param action: Mark, drop or redirect-to-queue.
+ :param value: Action value.
+
+ :type node: dict
+ :type src_ip: str
+ :type dst_ip: str
+ :type teid: int
+ :type action: str
+ :type value: int
+ :returns: flow_index.
+ :rtype: int
+ """
+ flow = u"ip4_gtpu"
+ flow_type = FlowType.FLOW_TYPE_IP4_GTPU
+ flow_proto = FlowProto.IP_API_PROTO_UDP
+
+ pattern = {
+ u'src_addr': {u'addr': src_ip, u'mask': u"255.255.255.255"},
+ u'dst_addr': {u'addr': dst_ip, u'mask': u"255.255.255.255"},
+ u'protocol': {u'prot': flow_proto},
+ u'teid': teid
+ }
+
+ flow_index = FlowUtil.vpp_flow_add(
+ node, flow, flow_type, pattern, action, value)
+
+ return flow_index
+
+ @staticmethod
+ def vpp_create_ip4_ipsec_flow(node, proto, spi, action, value=0):
+ """Create IP4_IPSEC flow.
+
+ :param node: DUT node.
+ :param proto: TCP or UDP.
+ :param spi: Security Parameters Index.
+ :param action: Mark, drop or redirect-to-queue.
+ :param value: Action value.
+
+ :type node: dict
+ :type proto: str
+ :type spi: int
+ :type action: str
+ :type value: int
+ :returns: flow_index.
+ :rtype: int
+ """
+ if proto == u"ESP":
+ flow = u"ip4_ipsec_esp"
+ flow_proto = FlowProto.IP_API_PROTO_ESP
+ flow_type = FlowType.FLOW_TYPE_IP4_IPSEC_ESP
+ elif proto == u"AH":
+ flow = u"ip4_ipsec_ah"
+ flow_proto = FlowProto.IP_API_PROTO_AH
+ flow_type = FlowType.FLOW_TYPE_IP4_IPSEC_AH
+ else:
+ raise ValueError(f"proto error: {proto}")
+
+ pattern = {
+ u'protocol': {u'prot': flow_proto},
+ u'spi': spi
+ }
+
+ flow_index = FlowUtil.vpp_flow_add(
+ node, flow, flow_type, pattern, action, value)
+
+ return flow_index
+
+ @staticmethod
+ def vpp_create_ip4_l2tp_flow(node, session_id, action, value=0):
+ """Create IP4_L2TPV3OIP flow.
+
+ :param node: DUT node.
+ :param session_id: L2TPv3 session ID.
+ :param action: Mark, drop or redirect-to-queue.
+ :param value: Action value.
+
+ :type node: dict
+ :type session_id: int
+ :type action: str
+ :type value: int
+ :returns: flow_index.
+ :rtype: int
+ """
+ flow = u"ip4_l2tpv3oip"
+ flow_proto = FlowProto.IP_API_PROTO_L2TP
+ flow_type = FlowType.FLOW_TYPE_IP4_L2TPV3OIP
+
+ pattern = {
+ u'protocol': {u'prot': flow_proto},
+ u'session_id': session_id
+ }
+
+ flow_index = FlowUtil.vpp_flow_add(
+ node, flow, flow_type, pattern, action, value)
+
+ return flow_index
+
+ @staticmethod
+ def vpp_create_ip4_vxlan_flow(node, src_ip, dst_ip, vni, action, value=0):
+ """Create IP4_VXLAN flow.
+
+ :param node: DUT node.
+ :param src_ip: Source IP4 address.
+ :param dst_ip: Destination IP4 address.
+ :param vni: VXLAN network identifier (VNI).
+ :param action: Mark, drop or redirect-to-queue.
+ :param value: Action value.
+
+ :type node: dict
+ :type src_ip: str
+ :type dst_ip: str
+ :type vni: int
+ :type action: str
+ :type value: int
+ :returns: flow_index.
+ :rtype: int
+ """
+ flow = u"ip4_vxlan"
+ flow_type = FlowType.FLOW_TYPE_IP4_VXLAN
+ flow_proto = FlowProto.IP_API_PROTO_UDP
+
+ pattern = {
+ u'src_addr': {u'addr': src_ip, u'mask': u"255.255.255.255"},
+ u'dst_addr': {u'addr': dst_ip, u'mask': u"255.255.255.255"},
+ u'dst_port': {u'port': 4789, 'mask': 0xFFFF},
+ u'protocol': {u'prot': flow_proto},
+ u'vni': vni
+ }
+
+ flow_index = FlowUtil.vpp_flow_add(
+ node, flow, flow_type, pattern, action, value)
+
+ return flow_index
+
+ @staticmethod
+ def vpp_flow_add(node, flow, flow_type, pattern, action, value=0):
+ """Flow add.
+
+ :param node: DUT node.
+ :param flow: Name of flow.
+ :param flow_type: Type of flow.
+ :param pattern: Pattern of flow.
+ :param action: Mark, drop or redirect-to-queue.
+ :param value: Action value.
+
+ :type node: dict
+ :type flow: str
+ :type flow_type: str
+ :type pattern: dict
+ :type action: str
+ :type value: int
+ :returns: flow_index.
+ :rtype: int
+ :raises ValueError: If action type is not supported.
+ """
+ cmd = u"flow_add_v2"
+
+ if action == u"redirect-to-queue":
+ flow_rule = {
+ u'type': flow_type,
+ u'actions': FlowAction.FLOW_ACTION_REDIRECT_TO_QUEUE,
+ u'redirect_queue': value,
+ u'flow': {flow : pattern}
+ }
+ elif action == u"mark":
+ flow_rule = {
+ u'type': flow_type,
+ u'actions': FlowAction.FLOW_ACTION_MARK,
+ u'mark_flow_id': value,
+ u'flow': {flow : pattern}
+ }
+ elif action == u"drop":
+ flow_rule = {
+ u'type': flow_type,
+ u'actions': FlowAction.FLOW_ACTION_DROP,
+ u'flow': {flow : pattern}
+ }
+ else:
+ raise ValueError(f"Unsupported action type: {action}")
+
+ err_msg = f"Failed to create {flow} flow on host {node[u'host']}."
+ args = dict(flow=flow_rule)
+ flow_index = -1
+ with PapiSocketExecutor(node) as papi_exec:
+ reply = papi_exec.add(cmd, **args).get_reply(err_msg)
+ flow_index = reply[u"flow_index"]
+
+ return flow_index
+
+ @staticmethod
+ def vpp_flow_enable(node, interface, flow_index=0):
+ """Flow enable.
+
+ :param node: DUT node.
+ :param interface: Interface sw_if_index.
+ :param flow_index: Flow index.
+
+ :type node: dict
+ :type interface: int
+ :type flow_index: int
+ :returns: Nothing.
+ """
+ cmd = u"flow_enable"
+ sw_if_index = Topology.get_interface_sw_index(node, interface)
+ args = dict(
+ flow_index=int(flow_index),
+ hw_if_index=int(sw_if_index)
+ )
+
+ err_msg = f"Failed to enable flow on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def vpp_flow_disable(node, interface, flow_index=0):
+ """Flow disable.
+
+ :param node: DUT node.
+ :param interface: Interface sw_if_index.
+ :param flow_index: Flow index.
+
+ :type node: dict
+ :type interface: int
+ :type flow_index: int
+ :returns: Nothing.
+ """
+ cmd = u"flow_disable"
+ sw_if_index = Topology.get_interface_sw_index(node, interface)
+ args = dict(
+ flow_index=int(flow_index),
+ hw_if_index=int(sw_if_index)
+ )
+
+ err_msg = u"Failed to disable flow on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def vpp_flow_del(node, flow_index=0):
+ """Flow delete.
+
+ :param node: DUT node.
+ :param flow_index: Flow index.
+
+ :type node: dict
+ :type flow_index: int
+ :returns: Nothing.
+ """
+ cmd = u"flow_del"
+ args = dict(
+ flow_index=int(flow_index)
+ )
+
+ err_msg = u"Failed to delete flow on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def vpp_show_flow_entry(node):
+ """Show flow entry.
+
+ :param node: DUT node.
+
+ :type node: dict
+ :returns: flow entry.
+ :rtype: str
+ """
+ cmd = u"vppctl show flow entry"
+
+ err_msg = u"Failed to show flow on host {node[u'host']}"
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, sudo=False, message=err_msg, retries=120
+ )
+
+ return stdout.strip()
+
+ @staticmethod
+ def vpp_verify_flow_action(
+ node, action, value,
+ src_mac=u"11:22:33:44:55:66", dst_mac=u"11:22:33:44:55:66",
+ src_ip=None, dst_ip=None):
+ """Verify the correctness of the flow action.
+
+ :param node: DUT node.
+ :param action: Action.
+ :param value: Action value.
+ :param src_mac: Source mac address.
+ :param dst_mac: Destination mac address.
+ :param src_ip: Source IP address.
+ :param dst_ip: Destination IP address.
+
+ :type node: dict
+ :type action: str
+ :type value: int
+ :type src_mac: str
+ :type dst_mac: str
+ :type src_ip: str
+ :type dst_ip: str
+ :returns: Nothing.
+ :raises RuntimeError: If the verification of flow action fails.
+ :raises ValueError: If action type is not supported.
+ """
+ err_msg = f"Failed to show trace on host {node[u'host']}"
+ cmd = u"vppctl show trace"
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, sudo=False, message=err_msg, retries=120
+ )
+
+ err_info = f"Verify flow {action} failed"
+
+ if src_ip is None:
+ expected_str = f"{src_mac} -> {dst_mac}"
+ else:
+ src_ip = ip_address(src_ip)
+ dst_ip = ip_address(dst_ip)
+ expected_str = f"{src_ip} -> {dst_ip}"
+
+ if action == u"drop":
+ if expected_str in stdout:
+ raise RuntimeError(err_info)
+ elif action == u"redirect-to-queue":
+ if f"queue {value}" not in stdout \
+ and f"qid {value}" not in stdout:
+ raise RuntimeError(err_info)
+ if expected_str not in stdout:
+ raise RuntimeError(err_info)
+ elif action == u"mark":
+ if u"PKT_RX_FDIR" not in stdout and u"flow-id 1" not in stdout:
+ raise RuntimeError(err_info)
+ if expected_str not in stdout:
+ raise RuntimeError(err_info)
+ else:
+ raise ValueError(f"Unsupported action type: {action}")
diff --git a/resources/libraries/python/GeneveUtil.py b/resources/libraries/python/GeneveUtil.py
index 3c8ebeebb3..d7266f58fa 100644
--- a/resources/libraries/python/GeneveUtil.py
+++ b/resources/libraries/python/GeneveUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -18,6 +18,7 @@ from ipaddress import ip_address
from resources.libraries.python.Constants import Constants
from resources.libraries.python.InterfaceUtil import InterfaceUtil
from resources.libraries.python.IPAddress import IPAddress
+from resources.libraries.python.IPUtil import IPUtil
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import Topology
@@ -124,3 +125,72 @@ class GeneveUtil:
u"geneve_tunnel_dump",
]
PapiSocketExecutor.dump_and_log(node, cmds)
+
+ @staticmethod
+ def vpp_geneve_add_multiple_tunnels(
+ node, gen_tunnel, n_tunnels, dut_if1, dut_if2, tg_if1_ip4,
+ tg_if2_ip4, tg_pf2_mac, next_idx):
+ """Create multiple GENEVE tunnels.
+
+ :param node: DUT node.
+ :param gen_tunnel: Parameters of the GENEVE tunnel.
+ :param n_tunnels: Number of tunnels.
+ :param dut_if1: The first DUT interface.
+ :param dut_if2: The second DUT interface.
+ :param tg_if1_ip4: TG interface 1 IP address.
+ :param tg_if2_ip4: TG interface 2 IP address.
+ :param tg_pf2_mac: TG interface 2 MAC address.
+ :param next_idx: The index of the next node.
+ :type node: dict
+ :type gen_tunnel: dict
+ :type n_tunnels: int
+ :type dut_if1: str
+ :type dut_if2: str
+ :type tg_if1_ip4: str
+ :type tg_if2_ip4: str
+ :type tg_pf2_mac: str
+ :type next_idx: int
+ """
+
+ src_ip_int = IPUtil.ip_to_int(gen_tunnel[u"src_ip"])
+ dst_ip_int = IPUtil.ip_to_int(gen_tunnel[u"dst_ip"])
+ if_ip_int = IPUtil.ip_to_int(gen_tunnel[u"if_ip"])
+
+ for idx in range(n_tunnels):
+ src_ip = IPUtil.int_to_ip(src_ip_int + idx * 256)
+ dst_ip = IPUtil.int_to_ip(dst_ip_int + idx * 256)
+ if_ip = IPUtil.int_to_ip(if_ip_int + idx * 256)
+
+ IPUtil.vpp_route_add(
+ node, src_ip, gen_tunnel[u"ip_mask"],
+ gateway=tg_if1_ip4, interface=dut_if1
+ )
+ tunnel_sw_index = GeneveUtil.add_geneve_tunnel(
+ node, gen_tunnel[u"local"], gen_tunnel[u"remote"],
+ gen_tunnel[u"vni"] + idx, l3_mode=True, next_index=next_idx
+ )
+ tunnel_if_key = Topology.get_interface_by_sw_index(
+ node, tunnel_sw_index
+ )
+ tunnel_if_mac = Topology.get_interface_mac(
+ node, tunnel_if_key
+ )
+ IPUtil.vpp_interface_set_ip_address(node, tunnel_if_key, if_ip, 24)
+ IPUtil.vpp_add_ip_neighbor(
+ node, tunnel_if_key, tg_if2_ip4, tg_pf2_mac
+ )
+ IPUtil.vpp_route_add(
+ node, dst_ip, gen_tunnel[u"ip_mask"],
+ gateway=tg_if2_ip4, interface=tunnel_if_key
+ )
+ IPUtil.vpp_route_add(
+ node, gen_tunnel[u"remote"], 32,
+ gateway=tg_if2_ip4, interface=dut_if2
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ node, tunnel_if_key, gen_tunnel[u"local"], tunnel_if_mac
+ )
+ IPUtil.vpp_route_add(
+ node, gen_tunnel[u"local"], 32, gateway=if_ip
+ )
+ InterfaceUtil.set_interface_state(node, tunnel_if_key, u"up")
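For reference, the gen_tunnel parameter consumed above is a dictionary along these lines; the concrete values are assumptions chosen to match the per-tunnel /24 stepping in the loop:

    gen_tunnel = {
        "local": "1.1.1.2",      # GENEVE tunnel local (DUT-side) endpoint
        "remote": "1.1.1.1",     # GENEVE tunnel remote endpoint
        "vni": 1,                # base VNI, incremented per tunnel
        "src_ip": "10.128.1.0",  # base source prefix routed via dut_if1
        "dst_ip": "10.0.1.0",    # base destination prefix routed via the tunnel
        "if_ip": "11.0.1.2",     # base address assigned to each tunnel interface
        "ip_mask": 24,           # prefix length used for the routes
    }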
diff --git a/resources/libraries/python/HoststackUtil.py b/resources/libraries/python/HoststackUtil.py
index c307946698..399395d41a 100644
--- a/resources/libraries/python/HoststackUtil.py
+++ b/resources/libraries/python/HoststackUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -17,9 +17,12 @@ from time import sleep
from robot.api import logger
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
-from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.model.ExportResult import (
+ export_hoststack_results
+)
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
class HoststackUtil():
"""Utilities for Host Stack tests."""
@@ -35,14 +38,13 @@ class HoststackUtil():
'args' - command arguments.
:rtype: dict
"""
- # TODO: Use a python class instead of dictionary for the return type
proto = vpp_echo_attributes[u"uri_protocol"]
addr = vpp_echo_attributes[u"uri_ip4_addr"]
port = vpp_echo_attributes[u"uri_port"]
vpp_echo_cmd = {}
vpp_echo_cmd[u"name"] = u"vpp_echo"
vpp_echo_cmd[u"args"] = f"{vpp_echo_attributes[u'role']} " \
- f"socket-name {vpp_echo_attributes[u'vpp_api_socket']} " \
+ f"socket-name {vpp_echo_attributes[u'app_api_socket']} " \
f"{vpp_echo_attributes[u'json_output']} " \
f"uri {proto}://{addr}/{port} " \
f"nthreads {vpp_echo_attributes[u'nthreads']} " \
@@ -57,6 +59,8 @@ class HoststackUtil():
vpp_echo_cmd[u"args"] += u" rx-results-diff"
if vpp_echo_attributes[u"tx_results_diff"]:
vpp_echo_cmd[u"args"] += u" tx-results-diff"
+ if vpp_echo_attributes[u"use_app_socket_api"]:
+ vpp_echo_cmd[u"args"] += u" use-app-socket-api"
return vpp_echo_cmd
@staticmethod
@@ -71,7 +75,6 @@ class HoststackUtil():
'args' - command arguments.
:rtype: dict
"""
- # TODO: Use a python class instead of dictionary for the return type
iperf3_cmd = {}
iperf3_cmd[u"env_vars"] = f"VCL_CONFIG={Constants.REMOTE_FW_DIR}/" \
f"{Constants.RESOURCES_TPL_VCL}/" \
@@ -100,6 +103,13 @@ class HoststackUtil():
if u"time" in iperf3_attributes:
iperf3_cmd[u"args"] += \
f" --time {iperf3_attributes[u'time']}"
+ if iperf3_attributes[u"udp"]:
+ iperf3_cmd[u"args"] += u" --udp"
+ iperf3_cmd[u"args"] += \
+ f" --bandwidth {iperf3_attributes[u'bandwidth']}"
+ if iperf3_attributes[u"length"] > 0:
+ iperf3_cmd[u"args"] += \
+ f" --length {iperf3_attributes[u'length']}"
return iperf3_cmd
@staticmethod
@@ -146,15 +156,14 @@ class HoststackUtil():
raise
@staticmethod
- def get_hoststack_test_program_logs(node, program):
+ def _get_hoststack_test_program_logs(node, program_name):
"""Get HostStack test program stdout log.
:param node: DUT node.
- :param program: test program.
+ :param program_name: test program.
:type node: dict
- :type program: dict
+ :type program_name: str
"""
- program_name = program[u"name"]
cmd = f"sh -c \'cat /tmp/{program_name}_stdout.log\'"
stdout_log, _ = exec_cmd_no_error(node, cmd, sudo=True, \
message=f"Get {program_name} stdout log failed!")
@@ -162,9 +171,63 @@ class HoststackUtil():
cmd = f"sh -c \'cat /tmp/{program_name}_stderr.log\'"
stderr_log, _ = exec_cmd_no_error(node, cmd, sudo=True, \
message=f"Get {program_name} stderr log failed!")
+
return stdout_log, stderr_log
@staticmethod
+ def get_hoststack_test_program_logs(node, program):
+ """Get HostStack test program stdout log.
+
+ :param node: DUT node.
+ :param program: test program.
+ :type node: dict
+ :type program: dict
+ """
+ program_name = program[u"name"]
+ program_stdout_log, program_stderr_log = \
+ HoststackUtil._get_hoststack_test_program_logs(node,
+ program_name)
+ if len(program_stdout_log) == 0 and len(program_stderr_log) == 0:
+ logger.trace(f"Retrying {program_name} log retrieval")
+ program_stdout_log, program_stderr_log = \
+ HoststackUtil._get_hoststack_test_program_logs(node,
+ program_name)
+ return program_stdout_log, program_stderr_log
+
+ @staticmethod
+ def get_nginx_command(nginx_attributes, nginx_version, nginx_ins_dir):
+ """Construct the NGINX command using the specified attributes.
+
+ :param nginx_attributes: NGINX test program attributes.
+ :param nginx_version: NGINX version.
+ :param nginx_ins_dir: NGINX install dir.
+ :type nginx_attributes: dict
+ :type nginx_version: str
+ :type nginx_ins_dir: str
+ :returns: Command line components of the NGINX command
+ 'env_vars' - environment variables
+ 'name' - program name
+ 'args' - command arguments.
+ 'path' - program path.
+ :rtype: dict
+ """
+ nginx_cmd = dict()
+ nginx_cmd[u"env_vars"] = f"VCL_CONFIG={Constants.REMOTE_FW_DIR}/" \
+ f"{Constants.RESOURCES_TPL_VCL}/" \
+ f"{nginx_attributes[u'vcl_config']}"
+ if nginx_attributes[u"ld_preload"]:
+ nginx_cmd[u"env_vars"] += \
+ f" LD_PRELOAD={Constants.VCL_LDPRELOAD_LIBRARY}"
+ if nginx_attributes[u'transparent_tls']:
+ nginx_cmd[u"env_vars"] += u" LDP_ENV_TLS_TRANS=1"
+
+ nginx_cmd[u"name"] = u"nginx"
+ nginx_cmd[u"path"] = f"{nginx_ins_dir}nginx-{nginx_version}/sbin/"
+ nginx_cmd[u"args"] = f"-c {nginx_ins_dir}/" \
+ f"nginx-{nginx_version}/conf/nginx.conf"
+ return nginx_cmd
+
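# Editorial sketch (not part of the patch): the dict returned above feeds
# start_hoststack_test_program below, which for nginx skips taskset because
# CPU pinning comes from worker_cpu_affinity in nginx.conf. The attribute
# values, version, install dir and 'dut_node' are illustrative placeholders.
nginx_attrs = {
    "vcl_config": "vcl.conf", "ld_preload": True, "transparent_tls": False
}
nginx_cmd = HoststackUtil.get_nginx_command(nginx_attrs, "1.21.5", "/opt/")
nginx_pid = HoststackUtil.start_hoststack_test_program(
    dut_node, namespace="default", core_list="1", program=nginx_cmd
)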
+ @staticmethod
def start_hoststack_test_program(node, namespace, core_list, program):
"""Start the specified HostStack test program.
@@ -194,9 +257,13 @@ class HoststackUtil():
env_vars = f"{program[u'env_vars']} " if u"env_vars" in program else u""
args = program[u"args"]
- cmd = f"nohup {shell_cmd} \'{env_vars}taskset --cpu-list {core_list} " \
- f"{program_name} {args} >/tmp/{program_name}_stdout.log " \
- f"2>/tmp/{program_name}_stderr.log &\'"
+ program_path = program.get(u"path", u"")
+ # NGINX uses `worker_cpu_affinity` in its configuration file.
+ taskset_cmd = u"" if program_name == u"nginx" else \
+ f"taskset --cpu-list {core_list}"
+ cmd = f"nohup {shell_cmd} \'{env_vars}{taskset_cmd} " \
+ f"{program_path}{program_name} {args} >/tmp/{program_name}_" \
+ f"stdout.log 2>/tmp/{program_name}_stderr.log &\'"
try:
exec_cmd_no_error(node, cmd, sudo=True)
return DUTSetup.get_pid(node, program_name)[0]
@@ -231,22 +298,69 @@ class HoststackUtil():
exec_cmd_no_error(node, cmd, message=errmsg, sudo=True)
@staticmethod
- def hoststack_test_program_finished(node, program_pid):
+ def hoststack_test_program_finished(node, program_pid, program,
+ other_node, other_program):
"""Wait for the specified HostStack test program process to complete.
:param node: DUT node.
:param program_pid: test program pid.
+ :param program: Test program.
+ :param other_node: DUT node of the other HostStack program.
+ :param other_program: The other test program.
:type node: dict
:type program_pid: str
+ :type program: dict
+ :type other_node: dict
+ :type other_program: dict
:raises RuntimeError: If node subtype is not a DUT.
"""
if node[u"type"] != u"DUT":
raise RuntimeError(u"Node type is not a DUT!")
+ if other_node[u"type"] != u"DUT":
+ raise RuntimeError(u"Other node type is not a DUT!")
cmd = f"sh -c 'strace -qqe trace=none -p {program_pid}'"
- exec_cmd(node, cmd, sudo=True)
+ try:
+ exec_cmd(node, cmd, sudo=True)
+ except:
+ sleep(180)
+ if u"client" in program[u"args"]:
+ role = u"client"
+ else:
+ role = u"server"
+ program_stdout, program_stderr = \
+ HoststackUtil.get_hoststack_test_program_logs(node, program)
+ if len(program_stdout) > 0:
+ logger.debug(f"{program[u'name']} {role} stdout log:\n"
+ f"{program_stdout}")
+ else:
+ logger.debug(f"Empty {program[u'name']} {role} stdout log :(")
+ if len(program_stderr) > 0:
+ logger.debug(f"{program[u'name']} stderr log:\n"
+ f"{program_stderr}")
+ else:
+ logger.debug(f"Empty {program[u'name']} stderr log :(")
+ if u"client" in other_program[u"args"]:
+ role = u"client"
+ else:
+ role = u"server"
+ program_stdout, program_stderr = \
+ HoststackUtil.get_hoststack_test_program_logs(other_node,
+ other_program)
+ if len(program_stdout) > 0:
+ logger.debug(f"{other_program[u'name']} {role} stdout log:\n"
+ f"{program_stdout}")
+ else:
+ logger.debug(f"Empty {other_program[u'name']} "
+ f"{role} stdout log :(")
+ if len(program_stderr) > 0:
+ logger.debug(f"{other_program[u'name']} {role} stderr log:\n"
+ f"{program_stderr}")
+ else:
+ logger.debug(f"Empty {other_program[u'name']} "
+ f"{role} stderr log :(")
+ raise
# Wait a bit for stdout/stderr to be flushed to log files
- # TODO: see if sub-second sleep works e.g. sleep(0.1)
sleep(1)
@staticmethod
@@ -280,10 +394,6 @@ class HoststackUtil():
program_name = program[u"name"]
program_stdout, program_stderr = \
HoststackUtil.get_hoststack_test_program_logs(node, program)
- if len(program_stdout) == 0 and len(program_stderr) == 0:
- logger.trace(f"Retrying {program_name} log retrieval")
- program_stdout, program_stderr = \
- HoststackUtil.get_hoststack_test_program_logs(node, program)
env_vars = f"{program[u'env_vars']} " if u"env_vars" in program else u""
program_cmd = f"{env_vars}{program_name} {program[u'args']}"
@@ -303,7 +413,6 @@ class HoststackUtil():
f"bits/sec, pkt-drop-rate {nsim_attr[u'packets_per_drop']} " \
f"pkts/drop\n"
- # TODO: Incorporate show error stats into results analysis
test_results += \
f"\n{role} VPP 'show errors' on host {node[u'host']}:\n" \
f"{PapiSocketExecutor.run_cli_cmd(node, u'show error')}\n"
@@ -321,18 +430,28 @@ class HoststackUtil():
if u"JSON stats" in program_stdout and \
u'"has_failed": "0"' in program_stdout:
json_start = program_stdout.find(u"{")
- #TODO: Fix parsing once vpp_echo produces valid
- # JSON output. Truncate for now.
json_end = program_stdout.find(u',\n "closing"')
json_results = f"{program_stdout[json_start:json_end]}\n}}"
program_json = json.loads(json_results)
+ export_hoststack_results(
+ bandwidth=program_json["rx_bits_per_second"],
+ duration=float(program_json["time"])
+ )
else:
test_results += u"Invalid test data output!\n" + program_stdout
return (True, test_results)
elif program[u"name"] == u"iperf3":
test_results += program_stdout
- iperf3_json = json.loads(program_stdout)
- program_json = iperf3_json[u"intervals"][0][u"sum"]
+ program_json = json.loads(program_stdout)[u"intervals"][0][u"sum"]
+ try:
+ retransmits = program_json["retransmits"]
+ except KeyError:
+ retransmits = None
+ export_hoststack_results(
+ bandwidth=program_json["bits_per_second"],
+ duration=program_json["seconds"],
+ retransmits=retransmits
+ )
else:
test_results += u"Unknown HostStack Test Program!\n" + \
program_stdout
@@ -350,3 +469,18 @@ class HoststackUtil():
:rtype: bool
"""
return server_defer_fail and client_defer_fail
+
+ @staticmethod
+ def log_vpp_hoststack_data(node):
+ """Retrieve and log VPP HostStack data.
+
+ :param node: DUT node.
+ :type node: dict
+ :raises RuntimeError: If node subtype is not a DUT or startup failed.
+ """
+
+ if node[u"type"] != u"DUT":
+ raise RuntimeError(u"Node type is not a DUT!")
+
+ PapiSocketExecutor.run_cli_cmd(node, u"show error")
+ PapiSocketExecutor.run_cli_cmd(node, u"show interface")
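# Editorial sketch (not part of the patch) of how the HoststackUtil keywords
# chain together after this change. The dut1/dut2 node dicts and the
# echo_server/echo_client program dicts are placeholders that would normally
# come from the topology and from the get_*_command helpers above.
server_pid = HoststackUtil.start_hoststack_test_program(
    dut1, namespace="default", core_list="2", program=echo_server
)
client_pid = HoststackUtil.start_hoststack_test_program(
    dut2, namespace="default", core_list="2", program=echo_client
)
# Blocks until the client exits; on failure it logs both sides and re-raises.
HoststackUtil.hoststack_test_program_finished(
    dut2, client_pid, echo_client, dut1, echo_server
)
stdout_log, stderr_log = HoststackUtil.get_hoststack_test_program_logs(
    dut2, echo_client
)
HoststackUtil.log_vpp_hoststack_data(dut1)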
diff --git a/resources/libraries/python/IPAddress.py b/resources/libraries/python/IPAddress.py
index b8a4d7443d..8f8ebbffc6 100644
--- a/resources/libraries/python/IPAddress.py
+++ b/resources/libraries/python/IPAddress.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/IPTopology.py b/resources/libraries/python/IPTopology.py
new file mode 100644
index 0000000000..3b459cd156
--- /dev/null
+++ b/resources/libraries/python/IPTopology.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""IP Topology Library."""
+
+from robot.libraries.BuiltIn import BuiltIn
+
+from resources.libraries.python.IPUtil import IPUtil
+
+
+class IPTopology:
+ """IP Topology Library."""
+
+ @staticmethod
+ def initialize_ipv4_forwarding(count=1, pfs=2, route_prefix=32):
+ """
+ Custom setup of IPv4 forwarding with scalability of IP routes on all
+ DUT nodes in 2-node / 3-node circular topology.
+
+ :param count: Number of routes to configure.
+ :param pfs: Number of physical interfaces to configure.
+ :param route_prefix: Route prefix to configure.
+ :type count: int
+ :type pfs: int
+ :type route_prefix: int
+ """
+ topology = BuiltIn().get_variable_value("&{topology_info}")
+ dut = topology["duts"][-1]
+ ifl = BuiltIn().get_variable_value("${int}")
+
+ for l, i in zip(range(pfs // 2), range(1, pfs, 2)):
+ dut1_int1 = BuiltIn().get_variable_value(f"${{DUT1_{ifl}{i}}}[0]")
+ dut1_int2 = BuiltIn().get_variable_value(f"${{DUT1_{ifl}{i+1}}}[0]")
+ dut_int1 = BuiltIn().get_variable_value(f"${{{dut}_{ifl}{i}}}[0]")
+ dut_int2 = BuiltIn().get_variable_value(f"${{{dut}_{ifl}{i+1}}}[0]")
+
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT1"], dut1_int1, f"1.{l}.1.1",
+ topology[f"TG_pf{i}_mac"][0]
+ )
+ if dut == "DUT2":
+ dut_mac1 = BuiltIn().get_variable_value(
+ f"${{{dut}_{ifl}{i}_mac}}[0]"
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT1"], dut1_int2, f"3.{l}.3.2", dut_mac1
+ )
+ dut_mac2 = BuiltIn().get_variable_value(
+ f"${{DUT1_{ifl}{i+1}_mac}}[0]"
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT2"], dut_int1, f"3.{l}.3.1", dut_mac2
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology[dut], dut_int2, f"2.{l}.2.1",
+ topology[f"TG_pf{i+1}_mac"][0]
+ )
+
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT1"], dut1_int1, f"1.{l}.1.2", 30
+ )
+ if dut == "DUT2":
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT1"], dut1_int2, f"3.{l}.3.1", 30
+ )
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT2"], dut_int1, f"3.{l}.3.2", 30
+ )
+ IPUtil.vpp_interface_set_ip_address(
+ topology[dut], dut_int2, f"2.{l}.2.2", 30
+ )
+
+ IPUtil.vpp_route_add(
+ topology["DUT1"], f"{i}0.0.0.0", route_prefix,
+ gateway=f"1.{l}.1.1", interface=dut1_int1, count=count
+ )
+ if dut == "DUT2":
+ IPUtil.vpp_route_add(
+ topology["DUT1"], f"{i+1}0.0.0.0", route_prefix,
+ gateway=f"3.{l}.3.2", interface=dut1_int2, count=count
+ )
+ IPUtil.vpp_route_add(
+ topology["DUT2"], f"{i}0.0.0.0", route_prefix,
+ gateway=f"3.{l}.3.1", interface=dut_int1, count=count
+ )
+ IPUtil.vpp_route_add(
+ topology[dut], f"{i+1}0.0.0.0", route_prefix,
+ gateway=f"2.{l}.2.1", interface=dut_int2, count=count
+ )
+
+
+ @staticmethod
+ def initialize_ipv6_forwarding(count=1, pfs=2, route_prefix=128):
+ """
+ Custom setup of IPv6 forwarding with scalability of IP routes on all
+ DUT nodes in 2-node / 3-node circular topology.
+
+ :param count: Number of routes to configure.
+ :param pfs: Number of physical interfaces to configure.
+ :param route_prefix: Route prefix to configure.
+ :type count: int
+ :type pfs: int
+ :type route_prefix: int
+ """
+ topology = BuiltIn().get_variable_value("&{topology_info}")
+ dut = topology["duts"][-1]
+ ifl = BuiltIn().get_variable_value("${int}")
+
+ for l, i in zip(range(pfs // 2), range(1, pfs, 2)):
+ dut1_int1 = BuiltIn().get_variable_value(f"${{DUT1_{ifl}{i}}}[0]")
+ dut1_int2 = BuiltIn().get_variable_value(f"${{DUT1_{ifl}{i+1}}}[0]")
+ dut_int1 = BuiltIn().get_variable_value(f"${{{dut}_{ifl}{i}}}[0]")
+ dut_int2 = BuiltIn().get_variable_value(f"${{{dut}_{ifl}{i+1}}}[0]")
+
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT1"], dut1_int1, f"2001:{l}::1",
+ topology[f"TG_pf{i}_mac"][0]
+ )
+ if dut == "DUT2":
+ dut_mac1 = BuiltIn().get_variable_value(
+ f"${{{dut}_{ifl}{i}_mac}}[0]"
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT1"], dut1_int2, f"2003:{l}::2", dut_mac1
+ )
+ dut_mac2 = BuiltIn().get_variable_value(
+ f"${{DUT1_{ifl}{i+1}_mac}}[0]"
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT2"], dut_int1, f"2003:{l}::1", dut_mac2
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology[dut], dut_int2, f"2002:{l}::1",
+ topology[f"TG_pf{i+1}_mac"][0]
+ )
+
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT1"], dut1_int1, f"2001:{l}::2", 64
+ )
+ if dut == "DUT2":
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT1"], dut1_int2, f"2003:{l}::1", 64
+ )
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT2"], dut_int1, f"2003:{l}::2", 64
+ )
+ IPUtil.vpp_interface_set_ip_address(
+ topology[dut], dut_int2, f"2002:{l}::2", 64
+ )
+
+ IPUtil.vpp_route_add(
+ topology["DUT1"], f"2{i}00::0", route_prefix,
+ gateway=f"2001:{l}::1", interface=dut1_int1, count=count
+ )
+ if dut == "DUT2":
+ IPUtil.vpp_route_add(
+ topology["DUT1"], f"2{i+1}00::0", route_prefix,
+ gateway=f"2003:{l}::2", interface=dut1_int2, count=count
+ )
+ IPUtil.vpp_route_add(
+ topology["DUT2"], f"2{i}00::0", route_prefix,
+ gateway=f"2003:{l}::1", interface=dut_int1, count=count
+ )
+ IPUtil.vpp_route_add(
+ topology[dut], f"2{i+1}00::0", route_prefix,
+ gateway=f"2002:{l}::1", interface=dut_int2, count=count
+ )
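# Editorial illustration (not part of the patch) of the loop indexing used in
# both keywords above: physical interfaces are walked in odd/even pairs, with
# 'l' selecting the subnet octet and 'i' the first interface of the pair.
pfs = 4
pairs = list(zip(range(pfs // 2), range(1, pfs, 2)))
# pairs == [(0, 1), (1, 3)], i.e. pf1/pf2 use subnets 1.0.1.0/30 and
# 2.0.2.0/30, while pf3/pf4 use 1.1.1.0/30 and 2.1.2.0/30 in the IPv4 case.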
diff --git a/resources/libraries/python/IPUtil.py b/resources/libraries/python/IPUtil.py
index 4d5753ea92..933fa34211 100644
--- a/resources/libraries/python/IPUtil.py
+++ b/resources/libraries/python/IPUtil.py
@@ -1,4 +1,5 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2023 PANTHEON.tech s.r.o.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -12,19 +13,20 @@
# limitations under the License.
"""Common IP utilities library."""
+
import re
from enum import IntEnum
-from ipaddress import ip_address
+from ipaddress import ip_address, ip_network
from resources.libraries.python.Constants import Constants
+from resources.libraries.python.IncrementUtil import ObjIncrement
from resources.libraries.python.InterfaceUtil import InterfaceUtil
from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import exec_cmd_no_error, exec_cmd
from resources.libraries.python.topology import Topology
-from resources.libraries.python.VatExecutor import VatTerminal
from resources.libraries.python.Namespaces import Namespaces
@@ -51,7 +53,6 @@ class FibPathType(IntEnum):
class FibPathFlags(IntEnum):
"""FIB path flags."""
FIB_PATH_FLAG_NONE = 0
- # TODO: Name too long for pylint, fix in VPP.
FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED = 1
FIB_PATH_FLAG_RESOLVE_VIA_HOST = 2
@@ -67,29 +68,95 @@ class FibPathNhProto(IntEnum):
class IpDscp(IntEnum):
"""DSCP code points."""
- IP_API_DSCP_CS0 = 0,
- IP_API_DSCP_CS1 = 8,
- IP_API_DSCP_AF11 = 10,
- IP_API_DSCP_AF12 = 12,
- IP_API_DSCP_AF13 = 14,
- IP_API_DSCP_CS2 = 16,
- IP_API_DSCP_AF21 = 18,
- IP_API_DSCP_AF22 = 20,
- IP_API_DSCP_AF23 = 22,
- IP_API_DSCP_CS3 = 24,
- IP_API_DSCP_AF31 = 26,
- IP_API_DSCP_AF32 = 28,
- IP_API_DSCP_AF33 = 30,
- IP_API_DSCP_CS4 = 32,
- IP_API_DSCP_AF41 = 34,
- IP_API_DSCP_AF42 = 36,
- IP_API_DSCP_AF43 = 38,
- IP_API_DSCP_CS5 = 40,
- IP_API_DSCP_EF = 46,
- IP_API_DSCP_CS6 = 48,
+ IP_API_DSCP_CS0 = 0
+ IP_API_DSCP_CS1 = 8
+ IP_API_DSCP_AF11 = 10
+ IP_API_DSCP_AF12 = 12
+ IP_API_DSCP_AF13 = 14
+ IP_API_DSCP_CS2 = 16
+ IP_API_DSCP_AF21 = 18
+ IP_API_DSCP_AF22 = 20
+ IP_API_DSCP_AF23 = 22
+ IP_API_DSCP_CS3 = 24
+ IP_API_DSCP_AF31 = 26
+ IP_API_DSCP_AF32 = 28
+ IP_API_DSCP_AF33 = 30
+ IP_API_DSCP_CS4 = 32
+ IP_API_DSCP_AF41 = 34
+ IP_API_DSCP_AF42 = 36
+ IP_API_DSCP_AF43 = 38
+ IP_API_DSCP_CS5 = 40
+ IP_API_DSCP_EF = 46
+ IP_API_DSCP_CS6 = 48
IP_API_DSCP_CS7 = 50
+class NetworkIncrement(ObjIncrement):
+ """
+ An iterator object which accepts an IPv4Network or IPv6Network and,
+ each time it is iterated or inc_fmt is called, returns a new network
+ with its address part advanced by the increment number of network sizes.
+ The increment may be positive, negative or 0
+ (in which case the network is always the same).
+
+ Both the initial and subsequent IP addresses can have host bits set;
+ check the initial value before creating an instance if needed.
+ String formatting is configurable via constructor argument.
+ """
+ def __init__(self, initial_value, increment=1, format=u"dash"):
+ """
+ :param initial_value: The initial network. Can have host bits set.
+ :param increment: The current network will be incremented by this
+ amount of network sizes in each iteration / inc_fmt call.
+ :param format: Type of formatting to use, "dash" or "slash" or "addr".
+ :type initial_value: Union[ipaddress.IPv4Network, ipaddress.IPv6Network]
+ :type increment: int
+ :type format: str
+ """
+ super().__init__(initial_value, increment)
+ self._prefix_len = self._value.prefixlen
+ host_len = self._value.max_prefixlen - self._prefix_len
+ self._net_increment = self._increment * (1 << host_len)
+ self._format = str(format).lower()
+
+ def _incr(self):
+ """
+ Increment the network, e.g.:
+ '30.0.0.0/24' incremented by 1 (the next network) is '30.0.1.0/24'.
+ '30.0.0.0/24' incremented by 2 is '30.0.2.0/24'.
+ """
+ self._value = ip_network(
+ f"{self._value.network_address + self._net_increment}"
+ f"/{self._prefix_len}", strict=False
+ )
+
+ def _str_fmt(self):
+ """
+ The string representation of the network depends on format.
+
+ Dash format is '<ip_address_start> - <ip_address_stop>',
+ useful for 'ipsec policy add spd' CLI.
+
+ Slash format is '<ip_address_start>/<prefix_length>',
+ useful for other CLI.
+
+ Addr format is '<ip_address_start>', useful for PAPI.
+
+ :returns: Current value converted to string according to format.
+ :rtype: str
+ :raises RuntimeError: If the format is not supported.
+ """
+ if self._format == u"dash":
+ return f"{self._value.network_address} - " \
+ f"{self._value.broadcast_address}"
+ elif self._format == u"slash":
+ return f"{self._value.network_address}/{self._prefix_len}"
+ elif self._format == u"addr":
+ return f"{self._value.network_address}"
+
+ raise RuntimeError(f"Unsupported format {self._format}")
+
+
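# Editorial behavior sketch based only on the docstrings above (ObjIncrement
# internals live in IncrementUtil.py and are assumed here, not shown):
ni = NetworkIncrement(ip_network("30.0.0.0/24"), increment=1, format="slash")
# Successive values: 30.0.0.0/24 -> 30.0.1.0/24 -> 30.0.2.0/24 -> ...
# "dash"  format -> "30.0.0.0 - 30.0.0.255"  (for 'ipsec policy add spd' CLI)
# "slash" format -> "30.0.0.0/24"            (for other CLI)
# "addr"  format -> "30.0.0.0"               (for PAPI)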
class IPUtil:
"""Common IP utilities"""
@@ -147,9 +214,6 @@ class IPUtil:
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
- # TODO: CSIT currently looks only whether the list is empty.
- # Add proper value processing if values become important.
-
return details
@staticmethod
@@ -411,8 +475,6 @@ class IPUtil:
:type namespace: str
:raises RuntimeError: IP could not be deleted.
"""
- # TODO: Refactor command execution in namespaces into central
- # methods (e.g. Namespace.exec_cmd_in_namespace)
if namespace is not None:
cmd = f"ip netns exec {namespace} ip addr del " \
f"{ip_addr}/{prefix_length} dev {interface}"
@@ -436,7 +498,7 @@ class IPUtil:
:type ip_addr: str
:type prefix_length: int
:type namespace: str
- :rtype boolean
+ :rtype: boolean
:raises RuntimeError: Request fails.
"""
ip_addr_with_prefix = f"{ip_addr}/{prefix_length}"
@@ -588,10 +650,10 @@ class IPUtil:
vrf: VRF table ID. (int)
count: number of IP addresses to add starting from network IP (int)
local: The route is local with same prefix (increment is 1).
- If None, then is not used. (bool)
+ If None, then is not used. (bool)
lookup_vrf: VRF table ID for lookup. (int)
- multipath: Enable multipath routing. (bool)
weight: Weight value for unequal cost multipath routing. (int)
+ (The multipath flag is applied at a higher level.)
:type node: dict
:type network: str
@@ -646,68 +708,52 @@ class IPUtil:
return route
@staticmethod
- def vpp_route_add(node, network, prefix_len, **kwargs):
- """Add route to the VPP node.
+ def vpp_route_add(node, network, prefix_len, strict=True, **kwargs):
+ """Add route to the VPP node. Prefer multipath behavior.
:param node: VPP node.
:param network: Route destination network address.
:param prefix_len: Route destination network prefix length.
+ :param strict: If True, fail when the address has host bits set.
:param kwargs: Optional key-value arguments:
gateway: Route gateway address. (str)
interface: Route interface. (str)
vrf: VRF table ID. (int)
count: number of IP addresses to add starting from network IP (int)
- local: The route is local with same prefix (increment is 1).
- If None, then is not used. (bool)
+ local: The route is local with the same prefix (increment is 1 network).
+ If None, it is not used. (bool)
lookup_vrf: VRF table ID for lookup. (int)
- multipath: Enable multipath routing. (bool)
+ multipath: Enable multipath routing. (bool) Default: True.
weight: Weight value for unequal cost multipath routing. (int)
:type node: dict
:type network: str
:type prefix_len: int
+ :type strict: bool
:type kwargs: dict
+ :raises RuntimeError: If the argument combination is not supported.
"""
count = kwargs.get(u"count", 1)
- if count > 100:
- gateway = kwargs.get(u"gateway", '')
- interface = kwargs.get(u"interface", '')
- vrf = kwargs.get(u"vrf", None)
- multipath = kwargs.get(u"multipath", False)
-
- with VatTerminal(node, json_param=False) as vat:
-
- vat.vat_terminal_exec_cmd_from_template(
- u"vpp_route_add.vat",
- network=network,
- prefix_length=prefix_len,
- via=f"via {gateway}" if gateway else u"",
- sw_if_index=f"sw_if_index "
- f"{InterfaceUtil.get_interface_index(node, interface)}"
- if interface else u"",
- vrf=f"vrf {vrf}" if vrf else u"",
- count=f"count {count}" if count else u"",
- multipath=u"multipath" if multipath else u""
- )
- return
-
- net_addr = ip_address(network)
cmd = u"ip_route_add_del"
args = dict(
is_add=True,
- is_multipath=kwargs.get(u"multipath", False),
+ is_multipath=kwargs.get(u"multipath", True),
route=None
)
err_msg = f"Failed to add route(s) on host {node[u'host']}"
- with PapiSocketExecutor(node) as papi_exec:
- for i in range(kwargs.get(u"count", 1)):
+ netiter = NetworkIncrement(
+ ip_network(f"{network}/{prefix_len}", strict=strict),
+ format=u"addr"
+ )
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
+ for i in range(count):
args[u"route"] = IPUtil.compose_vpp_route_structure(
- node, net_addr + i, prefix_len, **kwargs
+ node, netiter.inc_fmt(), prefix_len, **kwargs
)
- history = bool(not 1 < i < kwargs.get(u"count", 1))
+ history = bool(not 0 < i < count - 1)
papi_exec.add(cmd, history=history, **args)
papi_exec.get_replies(err_msg)
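# Editorial usage sketch (not part of the patch): scaled route addition now
# always goes through a single async PAPI connection instead of the removed
# VAT script path. 'dut_node' and the interface key are topology placeholders.
IPUtil.vpp_route_add(
    dut_node, "10.0.0.0", 32, count=1000,
    gateway="1.1.1.1", interface="port1",
)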
diff --git a/resources/libraries/python/IPsecUtil.py b/resources/libraries/python/IPsecUtil.py
index 3c3997ab53..59374ab73f 100644
--- a/resources/libraries/python/IPsecUtil.py
+++ b/resources/libraries/python/IPsecUtil.py
@@ -1,4 +1,5 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Copyright (c) 2024 PANTHEON.tech s.r.o.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,30 +14,41 @@
"""IPsec utilities library."""
-import os
-
from enum import Enum, IntEnum
-from io import open
+from io import open, TextIOWrapper
+from ipaddress import ip_network, ip_address, IPv4Address, IPv6Address
from random import choice
from string import ascii_letters
+from typing import Iterable, List, Optional, Sequence, Tuple, Union
-from ipaddress import ip_network, ip_address
+from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.InterfaceUtil import InterfaceUtil, \
- InterfaceStatusFlags
+from resources.libraries.python.enum_util import get_enum_instance
+from resources.libraries.python.IncrementUtil import ObjIncrement
+from resources.libraries.python.InterfaceUtil import (
+ InterfaceUtil,
+ InterfaceStatusFlags,
+)
from resources.libraries.python.IPAddress import IPAddress
-from resources.libraries.python.IPUtil import IPUtil, IpDscp, MPLS_LABEL_INVALID
+from resources.libraries.python.IPUtil import (
+ IPUtil,
+ IpDscp,
+ MPLS_LABEL_INVALID,
+ NetworkIncrement,
+)
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import scp_node
-from resources.libraries.python.topology import Topology
-from resources.libraries.python.VatExecutor import VatExecutor
+from resources.libraries.python.topology import Topology, NodeType
+from resources.libraries.python.VPPUtil import VPPUtil
+from resources.libraries.python.FlowUtil import FlowUtil
-IPSEC_UDP_PORT_NONE = 0xffff
+IPSEC_UDP_PORT_DEFAULT = 4500
+IPSEC_REPLAY_WINDOW_DEFAULT = 64
-def gen_key(length):
+def gen_key(length: int) -> bytes:
"""Generate random string as a key.
:param length: Length of generated payload.
@@ -44,75 +56,129 @@ def gen_key(length):
:returns: The generated payload.
:rtype: bytes
"""
- return u"".join(
- choice(ascii_letters) for _ in range(length)
- ).encode(encoding=u"utf-8")
+ return "".join(choice(ascii_letters) for _ in range(length)).encode(
+ encoding="utf-8"
+ )
+
+
+# TODO: Introduce a metaclass that adds .find and .InputType automatically?
+class IpsecSpdAction(Enum):
+ """IPsec SPD actions.
+
+ Mirroring VPP: src/vnet/ipsec/ipsec_types.api enum ipsec_spd_action.
+ """
+
+ BYPASS = NONE = ("bypass", 0)
+ DISCARD = ("discard", 1)
+ RESOLVE = ("resolve", 2)
+ PROTECT = ("protect", 3)
+ def __init__(self, action_name: str, action_int_repr: int):
+ self.action_name = action_name
+ self.action_int_repr = action_int_repr
-class PolicyAction(Enum):
- """Policy actions."""
- BYPASS = (u"bypass", 0)
- DISCARD = (u"discard", 1)
- PROTECT = (u"protect", 3)
+ def __str__(self) -> str:
+ return self.action_name
- def __init__(self, policy_name, policy_int_repr):
- self.policy_name = policy_name
- self.policy_int_repr = policy_int_repr
+ def __int__(self) -> int:
+ return self.action_int_repr
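# Editorial illustration (not part of the patch) of the conversions above:
# str() yields the CLI token, int() the API value, and NONE aliases BYPASS.
assert str(IpsecSpdAction.DISCARD) == "discard"
assert int(IpsecSpdAction.PROTECT) == 3
assert IpsecSpdAction.NONE is IpsecSpdAction.BYPASS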
class CryptoAlg(Enum):
"""Encryption algorithms."""
- AES_CBC_128 = (u"aes-cbc-128", 1, u"AES-CBC", 16)
- AES_CBC_256 = (u"aes-cbc-256", 3, u"AES-CBC", 32)
- AES_GCM_128 = (u"aes-gcm-128", 7, u"AES-GCM", 16)
- AES_GCM_256 = (u"aes-gcm-256", 9, u"AES-GCM", 32)
- def __init__(self, alg_name, alg_int_repr, scapy_name, key_len):
+ NONE = ("none", 0, "none", 0)
+ AES_CBC_128 = ("aes-cbc-128", 1, "AES-CBC", 16)
+ AES_CBC_256 = ("aes-cbc-256", 3, "AES-CBC", 32)
+ AES_GCM_128 = ("aes-gcm-128", 7, "AES-GCM", 16)
+ AES_GCM_256 = ("aes-gcm-256", 9, "AES-GCM", 32)
+
+ def __init__(
+ self, alg_name: str, alg_int_repr: int, scapy_name: str, key_len: int
+ ):
self.alg_name = alg_name
self.alg_int_repr = alg_int_repr
self.scapy_name = scapy_name
self.key_len = key_len
+ # TODO: Investigate if __int__ works with PAPI. It was not enough for "if".
+ def __bool__(self):
+ """A shorthand to enable "if crypto_alg:" constructs."""
+ return self.alg_int_repr != 0
+
class IntegAlg(Enum):
"""Integrity algorithm."""
- SHA_256_128 = (u"sha-256-128", 4, u"SHA2-256-128", 32)
- SHA_512_256 = (u"sha-512-256", 6, u"SHA2-512-256", 64)
- def __init__(self, alg_name, alg_int_repr, scapy_name, key_len):
+ NONE = ("none", 0, "none", 0)
+ SHA_256_128 = ("sha-256-128", 4, "SHA2-256-128", 32)
+ SHA_512_256 = ("sha-512-256", 6, "SHA2-512-256", 64)
+
+ def __init__(
+ self, alg_name: str, alg_int_repr: int, scapy_name: str, key_len: int
+ ):
self.alg_name = alg_name
self.alg_int_repr = alg_int_repr
self.scapy_name = scapy_name
self.key_len = key_len
+ def __bool__(self):
+ """A shorthand to enable "if integ_alg:" constructs."""
+ return self.alg_int_repr != 0
+
+# TODO: Base on Enum, so str values can be defined as in alg enums?
class IPsecProto(IntEnum):
- """IPsec protocol."""
- IPSEC_API_PROTO_ESP = 50
- IPSEC_API_PROTO_AH = 51
+ """IPsec protocol.
+
+ Mirroring VPP: src/vnet/ipsec/ipsec_types.api enum ipsec_proto.
+ """
+
+ ESP = 50
+ AH = 51
+ NONE = 255
+
+ def __str__(self) -> str:
+ """Return string suitable for CLI commands.
+
+ None is not supported.
+ :returns: Lowercase name of the proto.
+ :rtype: str
+ :raises ValueError: If the numeric value is not recognized.
+ """
+ num = int(self)
+ if num == 50:
+ return "esp"
+ if num == 51:
+ return "ah"
+ raise ValueError(f"String form not defined for IPsecProto {num}")
+
+# The rest of the enums do not appear outside this file, so no change is needed yet.
class IPsecSadFlags(IntEnum):
"""IPsec Security Association Database flags."""
- IPSEC_API_SAD_FLAG_NONE = 0,
+
+ IPSEC_API_SAD_FLAG_NONE = NONE = 0
# Enable extended sequence numbers
- IPSEC_API_SAD_FLAG_USE_ESN = 0x01,
+ IPSEC_API_SAD_FLAG_USE_ESN = 0x01
# Enable Anti - replay
- IPSEC_API_SAD_FLAG_USE_ANTI_REPLAY = 0x02,
+ IPSEC_API_SAD_FLAG_USE_ANTI_REPLAY = 0x02
# IPsec tunnel mode if non-zero, else transport mode
- IPSEC_API_SAD_FLAG_IS_TUNNEL = 0x04,
+ IPSEC_API_SAD_FLAG_IS_TUNNEL = 0x04
# IPsec tunnel mode is IPv6 if non-zero, else IPv4 tunnel
# only valid if is_tunnel is non-zero
- IPSEC_API_SAD_FLAG_IS_TUNNEL_V6 = 0x08,
+ IPSEC_API_SAD_FLAG_IS_TUNNEL_V6 = 0x08
# Enable UDP encapsulation for NAT traversal
- IPSEC_API_SAD_FLAG_UDP_ENCAP = 0x10,
+ IPSEC_API_SAD_FLAG_UDP_ENCAP = 0x10
# IPsec SA is for inbound traffic
IPSEC_API_SAD_FLAG_IS_INBOUND = 0x40
class TunnelEncpaDecapFlags(IntEnum):
"""Flags controlling tunnel behaviour."""
- TUNNEL_API_ENCAP_DECAP_FLAG_NONE = 0
+
+ TUNNEL_API_ENCAP_DECAP_FLAG_NONE = NONE = 0
# at encap, copy the DF bit of the payload into the tunnel header
TUNNEL_API_ENCAP_DECAP_FLAG_ENCAP_COPY_DF = 1
# at encap, set the DF bit in the tunnel header
@@ -127,184 +193,104 @@ class TunnelEncpaDecapFlags(IntEnum):
class TunnelMode(IntEnum):
"""Tunnel modes."""
+
# point-to-point
- TUNNEL_API_MODE_P2P = 0
+ TUNNEL_API_MODE_P2P = NONE = 0
# multi-point
TUNNEL_API_MODE_MP = 1
-class IPsecUtil:
- """IPsec utilities."""
+# Derived types for type hints, based on capabilities of get_enum_instance.
+IpsecSpdAction.InputType = Union[IpsecSpdAction, str, None]
+CryptoAlg.InputType = Union[CryptoAlg, str, None]
+IntegAlg.InputType = Union[IntegAlg, str, None]
+IPsecProto.InputType = Union[IPsecProto, str, int, None]
+# TODO: Introduce a metaclass that adds .find and .InputType automatically?
- @staticmethod
- def policy_action_bypass():
- """Return policy action bypass.
-
- :returns: PolicyAction enum BYPASS object.
- :rtype: PolicyAction
- """
- return PolicyAction.BYPASS
-
- @staticmethod
- def policy_action_discard():
- """Return policy action discard.
-
- :returns: PolicyAction enum DISCARD object.
- :rtype: PolicyAction
- """
- return PolicyAction.DISCARD
- @staticmethod
- def policy_action_protect():
- """Return policy action protect.
-
- :returns: PolicyAction enum PROTECT object.
- :rtype: PolicyAction
- """
- return PolicyAction.PROTECT
-
- @staticmethod
- def crypto_alg_aes_cbc_128():
- """Return encryption algorithm aes-cbc-128.
-
- :returns: CryptoAlg enum AES_CBC_128 object.
- :rtype: CryptoAlg
- """
- return CryptoAlg.AES_CBC_128
-
- @staticmethod
- def crypto_alg_aes_cbc_256():
- """Return encryption algorithm aes-cbc-256.
-
- :returns: CryptoAlg enum AES_CBC_256 object.
- :rtype: CryptoAlg
- """
- return CryptoAlg.AES_CBC_256
-
- @staticmethod
- def crypto_alg_aes_gcm_128():
- """Return encryption algorithm aes-gcm-128.
-
- :returns: CryptoAlg enum AES_GCM_128 object.
- :rtype: CryptoAlg
- """
- return CryptoAlg.AES_GCM_128
+class IPsecUtil:
+ """IPsec utilities."""
- @staticmethod
- def crypto_alg_aes_gcm_256():
- """Return encryption algorithm aes-gcm-256.
-
- :returns: CryptoAlg enum AES_GCM_128 object.
- :rtype: CryptoAlg
- """
- return CryptoAlg.AES_GCM_256
+ # The following 4 methods are Python one-liners,
+ # but they are useful when called as a Robot keyword.
@staticmethod
- def get_crypto_alg_key_len(crypto_alg):
+ def get_crypto_alg_key_len(crypto_alg: CryptoAlg.InputType) -> int:
"""Return encryption algorithm key length.
+ This is a Python one-liner, but useful when called as a Robot keyword.
+
:param crypto_alg: Encryption algorithm.
- :type crypto_alg: CryptoAlg
+ :type crypto_alg: CryptoAlg.InputType
:returns: Key length.
:rtype: int
"""
- return crypto_alg.key_len
+ return get_enum_instance(CryptoAlg, crypto_alg).key_len
@staticmethod
- def get_crypto_alg_scapy_name(crypto_alg):
+ def get_crypto_alg_scapy_name(crypto_alg: CryptoAlg.InputType) -> str:
"""Return encryption algorithm scapy name.
+ This is a Python one-liner, but useful when called as a Robot keyword.
+
:param crypto_alg: Encryption algorithm.
- :type crypto_alg: CryptoAlg
+ :type crypto_alg: CryptoAlg.InputType
:returns: Algorithm scapy name.
:rtype: str
"""
- return crypto_alg.scapy_name
-
- @staticmethod
- def integ_alg_sha_256_128():
- """Return integrity algorithm SHA-256-128.
-
- :returns: IntegAlg enum SHA_256_128 object.
- :rtype: IntegAlg
- """
- return IntegAlg.SHA_256_128
-
- @staticmethod
- def integ_alg_sha_512_256():
- """Return integrity algorithm SHA-512-256.
-
- :returns: IntegAlg enum SHA_512_256 object.
- :rtype: IntegAlg
- """
- return IntegAlg.SHA_512_256
+ return get_enum_instance(CryptoAlg, crypto_alg).scapy_name
+ # The two keywords below differ only by enum type conversion from str.
@staticmethod
- def get_integ_alg_key_len(integ_alg):
+ def get_integ_alg_key_len(integ_alg: IntegAlg.InputType) -> int:
"""Return integrity algorithm key length.
:param integ_alg: Integrity algorithm.
- :type integ_alg: IntegAlg
+ :type integ_alg: IntegAlg.InputType
:returns: Key length.
:rtype: int
"""
- return integ_alg.key_len
+ return get_enum_instance(IntegAlg, integ_alg).key_len
@staticmethod
- def get_integ_alg_scapy_name(integ_alg):
+ def get_integ_alg_scapy_name(integ_alg: IntegAlg.InputType) -> str:
"""Return integrity algorithm scapy name.
:param integ_alg: Integrity algorithm.
- :type integ_alg: IntegAlg
+ :type integ_alg: IntegAlg.InputType
:returns: Algorithm scapy name.
:rtype: str
"""
- return integ_alg.scapy_name
-
- @staticmethod
- def ipsec_proto_esp():
- """Return IPSec protocol ESP.
-
- :returns: IPsecProto enum ESP object.
- :rtype: IPsecProto
- """
- return int(IPsecProto.IPSEC_API_PROTO_ESP)
+ return get_enum_instance(IntegAlg, integ_alg).scapy_name
@staticmethod
- def ipsec_proto_ah():
- """Return IPSec protocol AH.
-
- :returns: IPsecProto enum AH object.
- :rtype: IPsecProto
- """
- return int(IPsecProto.IPSEC_API_PROTO_AH)
-
- @staticmethod
- def vpp_ipsec_select_backend(node, protocol, index=1):
+ def vpp_ipsec_select_backend(
+ node: dict, proto: IPsecProto.InputType, index: int = 1
+ ) -> None:
"""Select IPsec backend.
:param node: VPP node to select IPsec backend on.
- :param protocol: IPsec protocol.
+ :param proto: IPsec protocol.
:param index: Backend index.
:type node: dict
- :type protocol: IPsecProto
+ :type proto: IPsecProto.InputType
:type index: int
:raises RuntimeError: If failed to select IPsec backend or if no API
reply received.
"""
- cmd = u"ipsec_select_backend"
- err_msg = f"Failed to select IPsec backend on host {node[u'host']}"
- args = dict(
- protocol=protocol,
- index=index
- )
+ proto = get_enum_instance(IPsecProto, proto)
+ cmd = "ipsec_select_backend"
+ err_msg = f"Failed to select IPsec backend on host {node['host']}"
+ args = dict(protocol=proto, index=index)
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def vpp_ipsec_set_async_mode(node, async_enable=1):
+ def vpp_ipsec_set_async_mode(node: dict, async_enable: int = 1) -> None:
"""Set IPsec async mode on|off.
+ Unconditionally attempt to switch crypto dispatch into polling mode.
+
:param node: VPP node to set IPsec async mode.
:param async_enable: Async mode on or off.
:type node: dict
@@ -312,18 +298,90 @@ class IPsecUtil:
:raises RuntimeError: If failed to set IPsec async mode or if no API
reply received.
"""
- cmd = u"ipsec_set_async_mode"
- err_msg = f"Failed to set IPsec async mode on host {node[u'host']}"
- args = dict(
- async_enable=async_enable
- )
with PapiSocketExecutor(node) as papi_exec:
+ cmd = "ipsec_set_async_mode"
+ err_msg = f"Failed to set IPsec async mode on host {node['host']}"
+ args = dict(async_enable=async_enable)
papi_exec.add(cmd, **args).get_reply(err_msg)
+ cmd = "crypto_set_async_dispatch_v2"
+ err_msg = "Failed to set dispatch mode."
+ args = dict(mode=0, adaptive=False)
+ try:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+ except (AttributeError, RuntimeError):
+ # Expected when VPP build does not have the _v2 yet
+ # (after and before the first CRC check).
+ # TODO: Fail here when testing of pre-23.10 builds is over.
+ pass
+
+ @staticmethod
+ def vpp_ipsec_crypto_sw_scheduler_set_worker(
+ node: dict, workers: Iterable[int], crypto_enable: bool = False
+ ) -> None:
+ """Enable or disable crypto on specific vpp worker threads.
+
+ :param node: VPP node to enable or disable crypto for worker threads.
+ :param workers: List of VPP thread numbers.
+ :param crypto_enable: Disable or enable crypto work.
+ :type node: dict
+ :type workers: Iterable[int]
+ :type crypto_enable: bool
+ :raises RuntimeError: If failed to enable or disable crypto for worker
+ thread or if no API reply received.
+ """
+ for worker in workers:
+ cmd = "crypto_sw_scheduler_set_worker"
+ err_msg = (
+ "Failed to disable/enable crypto for worker thread"
+ f" on host {node['host']}"
+ )
+ args = dict(worker_index=worker - 1, crypto_enable=crypto_enable)
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def vpp_ipsec_crypto_sw_scheduler_set_worker_on_all_duts(
+ nodes: dict, crypto_enable: bool = False
+ ) -> None:
+ """Enable or disable crypto on specific vpp worker threads.
+
+ :param nodes: Topology nodes to enable or disable crypto workers on.
+ :param crypto_enable: Disable or enable crypto work.
+ :type nodes: dict
+ :type crypto_enable: bool
+ :raises RuntimeError: If failed to enable or disable crypto for worker
+ thread or if no API reply received.
+ """
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
+ thread_data = VPPUtil.vpp_show_threads(node)
+ worker_cnt = len(thread_data) - 1
+ if not worker_cnt:
+ return
+ worker_ids = list()
+ workers = BuiltIn().get_variable_value(
+ f"${{{node_name}_cpu_dp}}"
+ )
+ for item in thread_data:
+ if str(item.cpu_id) in workers.split(","):
+ worker_ids.append(item.id)
+
+ IPsecUtil.vpp_ipsec_crypto_sw_scheduler_set_worker(
+ node, workers=worker_ids, crypto_enable=crypto_enable
+ )
@staticmethod
def vpp_ipsec_add_sad_entry(
- node, sad_id, spi, crypto_alg, crypto_key, integ_alg=None,
- integ_key=u"", tunnel_src=None, tunnel_dst=None):
+ node: dict,
+ sad_id: int,
+ spi: int,
+ crypto_alg: CryptoAlg.InputType = None,
+ crypto_key: str = "",
+ integ_alg: IntegAlg.InputType = None,
+ integ_key: str = "",
+ tunnel_src: Optional[str] = None,
+ tunnel_dst: Optional[str] = None,
+ ) -> None:
"""Create Security Association Database entry on the VPP node.
:param node: VPP node to add SAD entry on.
@@ -340,25 +398,21 @@ class IPsecUtil:
:type node: dict
:type sad_id: int
:type spi: int
- :type crypto_alg: CryptoAlg
+ :type crypto_alg: CryptoAlg.InputType
:type crypto_key: str
- :type integ_alg: IntegAlg
+ :type integ_alg: IntegAlg.InputType
:type integ_key: str
- :type tunnel_src: str
- :type tunnel_dst: str
+ :type tunnel_src: Optional[str]
+ :type tunnel_dst: Optional[str]
"""
+ crypto_alg = get_enum_instance(CryptoAlg, crypto_alg)
+ integ_alg = get_enum_instance(IntegAlg, integ_alg)
if isinstance(crypto_key, str):
- crypto_key = crypto_key.encode(encoding=u"utf-8")
+ crypto_key = crypto_key.encode(encoding="utf-8")
if isinstance(integ_key, str):
- integ_key = integ_key.encode(encoding=u"utf-8")
- ckey = dict(
- length=len(crypto_key),
- data=crypto_key
- )
- ikey = dict(
- length=len(integ_key),
- data=integ_key if integ_key else 0
- )
+ integ_key = integ_key.encode(encoding="utf-8")
+ ckey = dict(length=len(crypto_key), data=crypto_key)
+ ikey = dict(length=len(integ_key), data=integ_key if integ_key else 0)
flags = int(IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE)
if tunnel_src and tunnel_dst:
@@ -366,40 +420,58 @@ class IPsecUtil:
src_addr = ip_address(tunnel_src)
dst_addr = ip_address(tunnel_dst)
if src_addr.version == 6:
- flags = \
- flags | int(IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6)
+ flags = flags | int(
+ IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6
+ )
else:
- src_addr = u""
- dst_addr = u""
+ src_addr = ""
+ dst_addr = ""
- cmd = u"ipsec_sad_entry_add_del"
- err_msg = f"Failed to add Security Association Database entry " \
- f"on host {node[u'host']}"
+ cmd = "ipsec_sad_entry_add_v2"
+ err_msg = (
+ "Failed to add Security Association Database entry"
+ f" on host {node['host']}"
+ )
sad_entry = dict(
sad_id=int(sad_id),
spi=int(spi),
crypto_algorithm=crypto_alg.alg_int_repr,
crypto_key=ckey,
- integrity_algorithm=integ_alg.alg_int_repr if integ_alg else 0,
+ integrity_algorithm=integ_alg.alg_int_repr,
integrity_key=ikey,
flags=flags,
- tunnel_src=str(src_addr),
- tunnel_dst=str(dst_addr),
- protocol=int(IPsecProto.IPSEC_API_PROTO_ESP),
- udp_src_port=4500, # default value in api
- udp_dst_port=4500 # default value in api
- )
- args = dict(
- is_add=True,
- entry=sad_entry
+ tunnel=dict(
+ src=str(src_addr),
+ dst=str(dst_addr),
+ table_id=0,
+ encap_decap_flags=int(
+ TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
+ ),
+ dscp=int(IpDscp.IP_API_DSCP_CS0),
+ ),
+ protocol=IPsecProto.ESP,
+ udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+ udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+ anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
)
+ args = dict(entry=sad_entry)
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vpp_ipsec_add_sad_entries(
- node, n_entries, sad_id, spi, crypto_alg, crypto_key,
- integ_alg=None, integ_key=u"", tunnel_src=None, tunnel_dst=None):
+ node: dict,
+ n_entries: int,
+ sad_id: int,
+ spi: int,
+ crypto_alg: CryptoAlg.InputType = None,
+ crypto_key: str = "",
+ integ_alg: IntegAlg.InputType = None,
+ integ_key: str = "",
+ tunnel_src: Optional[str] = None,
+ tunnel_dst: Optional[str] = None,
+ tunnel_addr_incr: bool = True,
+ ) -> None:
"""Create multiple Security Association Database entries on VPP node.
:param node: VPP node to add SAD entry on.
@@ -416,63 +488,42 @@ class IPsecUtil:
specified ESP transport mode is used.
:param tunnel_dst: Tunnel header destination IPv4 or IPv6 address. If
not specified ESP transport mode is used.
+ :param tunnel_addr_incr: Enable or disable tunnel IP address
+ incremental step.
:type node: dict
:type n_entries: int
:type sad_id: int
:type spi: int
- :type crypto_alg: CryptoAlg
+ :type crypto_alg: CryptoAlg.InputType
:type crypto_key: str
- :type integ_alg: IntegAlg
+ :type integ_alg: IntegAlg.InputType
:type integ_key: str
- :type tunnel_src: str
- :type tunnel_dst: str
+ :type tunnel_src: Optional[str]
+ :type tunnel_dst: Optional[str]
+ :type tunnel_addr_incr: bool
"""
+ crypto_alg = get_enum_instance(CryptoAlg, crypto_alg)
+ integ_alg = get_enum_instance(IntegAlg, integ_alg)
if isinstance(crypto_key, str):
- crypto_key = crypto_key.encode(encoding=u"utf-8")
+ crypto_key = crypto_key.encode(encoding="utf-8")
if isinstance(integ_key, str):
- integ_key = integ_key.encode(encoding=u"utf-8")
+ integ_key = integ_key.encode(encoding="utf-8")
if tunnel_src and tunnel_dst:
src_addr = ip_address(tunnel_src)
dst_addr = ip_address(tunnel_dst)
else:
- src_addr = u""
- dst_addr = u""
-
- addr_incr = 1 << (128 - 96) if src_addr.version == 6 \
- else 1 << (32 - 24)
+ src_addr = ""
+ dst_addr = ""
- if int(n_entries) > 10:
- tmp_filename = f"/tmp/ipsec_sad_{sad_id}_add_del_entry.script"
-
- with open(tmp_filename, 'w') as tmp_file:
- for i in range(n_entries):
- integ = f"integ-alg {integ_alg.alg_name} " \
- f"integ-key {integ_key.hex()}" \
- if integ_alg else u""
- tunnel = f"tunnel-src {src_addr + i * addr_incr} " \
- f"tunnel-dst {dst_addr + i * addr_incr}" \
- if tunnel_src and tunnel_dst else u""
- conf = f"exec ipsec sa add {sad_id + i} esp spi {spi + i} "\
- f"crypto-alg {crypto_alg.alg_name} " \
- f"crypto-key {crypto_key.hex()} " \
- f"{integ} {tunnel}\n"
- tmp_file.write(conf)
- vat = VatExecutor()
- vat.execute_script(
- tmp_filename, node, timeout=300, json_out=False,
- copy_on_execute=True
+ if tunnel_addr_incr:
+ addr_incr = (
+ 1 << (128 - 96) if src_addr.version == 6 else 1 << (32 - 24)
)
- os.remove(tmp_filename)
- return
+ else:
+ addr_incr = 0
- ckey = dict(
- length=len(crypto_key),
- data=crypto_key
- )
- ikey = dict(
- length=len(integ_key),
- data=integ_key if integ_key else 0
- )
+ ckey = dict(length=len(crypto_key), data=crypto_key)
+ ikey = dict(length=len(integ_key), data=integ_key if integ_key else 0)
flags = int(IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE)
if tunnel_src and tunnel_dst:
@@ -482,44 +533,64 @@ class IPsecUtil:
IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6
)
- cmd = u"ipsec_sad_entry_add_del"
- err_msg = f"Failed to add Security Association Database entry " \
- f"on host {node[u'host']}"
+ cmd = "ipsec_sad_entry_add_v2"
+ err_msg = (
+ "Failed to add Security Association Database entry"
+ f" on host {node['host']}"
+ )
sad_entry = dict(
sad_id=int(sad_id),
spi=int(spi),
crypto_algorithm=crypto_alg.alg_int_repr,
crypto_key=ckey,
- integrity_algorithm=integ_alg.alg_int_repr if integ_alg else 0,
+ integrity_algorithm=integ_alg.alg_int_repr,
integrity_key=ikey,
flags=flags,
- tunnel_src=str(src_addr),
- tunnel_dst=str(dst_addr),
- protocol=int(IPsecProto.IPSEC_API_PROTO_ESP),
- udp_src_port=4500, # default value in api
- udp_dst_port=4500 # default value in api
- )
- args = dict(
- is_add=True,
- entry=sad_entry
+ tunnel=dict(
+ src=str(src_addr),
+ dst=str(dst_addr),
+ table_id=0,
+ encap_decap_flags=int(
+ TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
+ ),
+ dscp=int(IpDscp.IP_API_DSCP_CS0),
+ ),
+ protocol=IPsecProto.ESP,
+ udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+ udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+ anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
)
- with PapiSocketExecutor(node) as papi_exec:
+ args = dict(entry=sad_entry)
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(n_entries):
- args[u"entry"][u"sad_id"] = int(sad_id) + i
- args[u"entry"][u"spi"] = int(spi) + i
- args[u"entry"][u"tunnel_src"] = str(src_addr + i * addr_incr) \
- if tunnel_src and tunnel_dst else src_addr
- args[u"entry"][u"tunnel_dst"] = str(dst_addr + i * addr_incr) \
- if tunnel_src and tunnel_dst else dst_addr
+ args["entry"]["sad_id"] = int(sad_id) + i
+ args["entry"]["spi"] = int(spi) + i
+ args["entry"]["tunnel"]["src"] = (
+ str(src_addr + i * addr_incr)
+ if tunnel_src and tunnel_dst
+ else src_addr
+ )
+ args["entry"]["tunnel"]["dst"] = (
+ str(dst_addr + i * addr_incr)
+ if tunnel_src and tunnel_dst
+ else dst_addr
+ )
history = bool(not 1 < i < n_entries - 2)
papi_exec.add(cmd, history=history, **args)
papi_exec.get_replies(err_msg)
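# Editorial usage sketch (not part of the patch): two tunnel-mode SAs whose
# tunnel endpoints step by one /24, matching the addr_incr logic above.
# 'dut_node' and all key/address values are illustrative placeholders.
IPsecUtil.vpp_ipsec_add_sad_entries(
    dut_node, n_entries=2, sad_id=10, spi=1000,
    crypto_alg="aes-gcm-128", crypto_key=gen_key(16),
    tunnel_src="100.0.0.1", tunnel_dst="200.0.0.1",
)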
@staticmethod
def vpp_ipsec_set_ip_route(
- node, n_tunnels, tunnel_src, traffic_addr, tunnel_dst, interface,
- raddr_range):
+ node: dict,
+ n_tunnels: int,
+ tunnel_src: str,
+ traffic_addr: str,
+ tunnel_dst: str,
+ interface: str,
+ raddr_range: int,
+ dst_mac: Optional[str] = None,
+ ) -> None:
"""Set IP address and route on interface.
:param node: VPP node to add config on.
@@ -531,6 +602,7 @@ class IPsecUtil:
:param raddr_range: Mask specifying range of Policy selector Remote IP
addresses. Valid values are from 1 to 32 in case of IPv4 and to 128
in case of IPv6.
+ :param dst_mac: The MAC address of destination tunnels.
:type node: dict
:type n_tunnels: int
:type tunnel_src: str
@@ -538,65 +610,80 @@ class IPsecUtil:
:type tunnel_dst: str
:type interface: str
:type raddr_range: int
+ :type dst_mac: Optional[str]
"""
tunnel_src = ip_address(tunnel_src)
tunnel_dst = ip_address(tunnel_dst)
traffic_addr = ip_address(traffic_addr)
- addr_incr = 1 << (128 - raddr_range) if tunnel_src.version == 6 \
+ tunnel_dst_prefix = 128 if tunnel_dst.version == 6 else 32
+ addr_incr = (
+ 1 << (128 - raddr_range)
+ if tunnel_src.version == 6
else 1 << (32 - raddr_range)
+ )
- if int(n_tunnels) > 10:
- tmp_filename = u"/tmp/ipsec_set_ip.script"
-
- with open(tmp_filename, 'w') as tmp_file:
- if_name = Topology.get_interface_name(node, interface)
- for i in range(n_tunnels):
- conf = f"exec set interface ip address {if_name} " \
- f"{tunnel_src + i * addr_incr}/{raddr_range}\n" \
- f"exec ip route add {traffic_addr + i}/" \
- f"{128 if traffic_addr.version == 6 else 32} " \
- f"via {tunnel_dst + i * addr_incr} {if_name}\n"
- tmp_file.write(conf)
- VatExecutor().execute_script(
- tmp_filename, node, timeout=300, json_out=False,
- copy_on_execute=True
- )
- os.remove(tmp_filename)
- return
-
- cmd1 = u"sw_interface_add_del_address"
+ cmd1 = "sw_interface_add_del_address"
args1 = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
is_add=True,
del_all=False,
- prefix=None
+ prefix=None,
)
- cmd2 = u"ip_route_add_del"
- args2 = dict(
- is_add=1,
- is_multipath=0,
- route=None
+ cmd2 = "ip_route_add_del"
+ args2 = dict(is_add=1, is_multipath=0, route=None)
+ cmd3 = "ip_neighbor_add_del"
+ args3 = dict(
+ is_add=True,
+ neighbor=dict(
+ sw_if_index=Topology.get_interface_sw_index(node, interface),
+ flags=0,
+ mac_address=str(dst_mac),
+ ip_address=None,
+ ),
+ )
+ err_msg = (
+ "Failed to configure IP addresses, IP routes and"
+ f" IP neighbor on interface {interface} on host {node['host']}"
+ if dst_mac
+ else "Failed to configure IP addresses and IP routes"
+ f" on interface {interface} on host {node['host']}"
)
- err_msg = f"Failed to configure IP addresses and IP routes " \
- f"on interface {interface} on host {node[u'host']}"
- with PapiSocketExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(n_tunnels):
- args1[u"prefix"] = IPUtil.create_prefix_object(
+ tunnel_dst_addr = tunnel_dst + i * addr_incr
+ args1["prefix"] = IPUtil.create_prefix_object(
tunnel_src + i * addr_incr, raddr_range
)
- args2[u"route"] = IPUtil.compose_vpp_route_structure(
- node, traffic_addr + i,
- prefix_len=128 if traffic_addr.version == 6 else 32,
- interface=interface, gateway=tunnel_dst + i * addr_incr
+ args2["route"] = IPUtil.compose_vpp_route_structure(
+ node,
+ traffic_addr + i,
+ prefix_len=tunnel_dst_prefix,
+ interface=interface,
+ gateway=tunnel_dst_addr,
)
history = bool(not 1 < i < n_tunnels - 2)
- papi_exec.add(cmd1, history=history, **args1).\
- add(cmd2, history=history, **args2)
+ papi_exec.add(cmd1, history=history, **args1)
+ papi_exec.add(cmd2, history=history, **args2)
+
+ args2["route"] = IPUtil.compose_vpp_route_structure(
+ node,
+ tunnel_dst_addr,
+ prefix_len=tunnel_dst_prefix,
+ interface=interface,
+ gateway=tunnel_dst_addr,
+ )
+ papi_exec.add(cmd2, history=history, **args2)
+
+ if dst_mac:
+ args3["neighbor"]["ip_address"] = ip_address(
+ tunnel_dst_addr
+ )
+ papi_exec.add(cmd3, history=history, **args3)
papi_exec.get_replies(err_msg)
@staticmethod
- def vpp_ipsec_add_spd(node, spd_id):
+ def vpp_ipsec_add_spd(node: dict, spd_id: int) -> None:
"""Create Security Policy Database on the VPP node.
:param node: VPP node to add SPD on.
@@ -604,18 +691,18 @@ class IPsecUtil:
:type node: dict
:type spd_id: int
"""
- cmd = u"ipsec_spd_add_del"
- err_msg = f"Failed to add Security Policy Database " \
- f"on host {node[u'host']}"
- args = dict(
- is_add=True,
- spd_id=int(spd_id)
+ cmd = "ipsec_spd_add_del"
+ err_msg = (
+ f"Failed to add Security Policy Database on host {node['host']}"
)
+ args = dict(is_add=True, spd_id=int(spd_id))
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def vpp_ipsec_spd_add_if(node, spd_id, interface):
+ def vpp_ipsec_spd_add_if(
+ node: dict, spd_id: int, interface: Union[str, int]
+ ) -> None:
"""Add interface to the Security Policy Database.
:param node: VPP node.
@@ -625,409 +712,427 @@ class IPsecUtil:
:type spd_id: int
:type interface: str or int
"""
- cmd = u"ipsec_interface_add_del_spd"
- err_msg = f"Failed to add interface {interface} to Security Policy " \
- f"Database {spd_id} on host {node[u'host']}"
+ cmd = "ipsec_interface_add_del_spd"
+ err_msg = (
+ f"Failed to add interface {interface} to Security Policy"
+ f" Database {spd_id} on host {node['host']}"
+ )
args = dict(
is_add=True,
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
- spd_id=int(spd_id)
+ spd_id=int(spd_id),
)
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def vpp_ipsec_policy_add(
- node, spd_id, priority, action, inbound=True, sa_id=None,
- laddr_range=None, raddr_range=None, proto=None, lport_range=None,
- rport_range=None, is_ipv6=False):
- """Create Security Policy Database entry on the VPP node.
+ def vpp_ipsec_create_spds_match_nth_entry(
+ node: dict,
+ dir1_interface: Union[str, int],
+ dir2_interface: Union[str, int],
+ entry_amount: int,
+ local_addr_range: Union[str, IPv4Address, IPv6Address],
+ remote_addr_range: Union[str, IPv4Address, IPv6Address],
+ action: IpsecSpdAction.InputType = IpsecSpdAction.BYPASS,
+ inbound: bool = False,
+ bidirectional: bool = True,
+ ) -> None:
+ """Create one matching SPD entry for inbound or outbound traffic on
+ a DUT for each traffic direction and also create entry_amount - 1
+ non-matching SPD entries. Create a Security Policy Database on each
+ outbound interface where these entries will be configured.
+ The matching SPD entry will have the lowest priority, input action and
+ will be configured to match the IP flow. The non-matching entries will
+ be the same, except with higher priority and non-matching IP flows.
+
+ Action Protect is currently not supported.
+
+ :param node: VPP node to configured the SPDs and their entries.
+ :param dir1_interface: The interface in direction 1 where the entries
+ will be checked.
+ :param dir2_interface: The interface in direction 2 where the entries
+ will be checked.
+ :param entry_amount: The number of SPD entries to configure. If
+ entry_amount == 1, no non-matching entries will be configured.
+ :param local_addr_range: Matching local address range in direction 1
+ in format IP/prefix or IP/mask. If no mask is provided, it's
+ considered to be /32.
+ :param remote_addr_range: Matching remote address range in
+ direction 1 in format IP/prefix or IP/mask. If no mask is
+ provided, it's considered to be /32.
+ :param action: IPsec SPD action.
+ :param inbound: If True policy is for inbound traffic, otherwise
+ outbound.
+ :param bidirectional: When True, will create SPDs in both directions
+ of traffic. When False, only in one direction.
+ :type node: dict
+ :type dir1_interface: Union[str, int]
+ :type dir2_interface: Union[str, int]
+ :type entry_amount: int
+ :type local_addr_range:
+ Union[str, IPv4Address, IPv6Address]
+ :type remote_addr_range:
+ Union[str, IPv4Address, IPv6Address]
+ :type action: IpsecSpdAction.InputType
+ :type inbound: bool
+ :type bidirectional: bool
+ :raises NotImplementedError: When the action is IpsecSpdAction.PROTECT.
+ """
+ action = get_enum_instance(IpsecSpdAction, action)
+ if action == IpsecSpdAction.PROTECT:
+ raise NotImplementedError(
+ "IPsec SPD action PROTECT is not supported."
+ )
- :param node: VPP node to add SPD entry on.
+ spd_id_dir1 = 1
+ spd_id_dir2 = 2
+ matching_priority = 1
+
+ IPsecUtil.vpp_ipsec_add_spd(node, spd_id_dir1)
+ IPsecUtil.vpp_ipsec_spd_add_if(node, spd_id_dir1, dir1_interface)
+ # matching entry direction 1
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ node,
+ spd_id_dir1,
+ matching_priority,
+ action,
+ inbound=inbound,
+ laddr_range=local_addr_range,
+ raddr_range=remote_addr_range,
+ )
+
+ if bidirectional:
+ IPsecUtil.vpp_ipsec_add_spd(node, spd_id_dir2)
+ IPsecUtil.vpp_ipsec_spd_add_if(node, spd_id_dir2, dir2_interface)
+
+ # matching entry direction 2, the address ranges are switched
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ node,
+ spd_id_dir2,
+ matching_priority,
+ action,
+ inbound=inbound,
+ laddr_range=remote_addr_range,
+ raddr_range=local_addr_range,
+ )
+
+ # non-matching entries
+ no_match_entry_amount = entry_amount - 1
+ if no_match_entry_amount > 0:
+ # create a NetworkIncrement representation of the network,
+ # then skip the matching network
+ no_match_local_addr_range = NetworkIncrement(
+ ip_network(local_addr_range)
+ )
+ next(no_match_local_addr_range)
+
+ no_match_remote_addr_range = NetworkIncrement(
+ ip_network(remote_addr_range)
+ )
+ next(no_match_remote_addr_range)
+
+ # non-matching entries direction 1
+ IPsecUtil.vpp_ipsec_add_spd_entries(
+ node,
+ no_match_entry_amount,
+ spd_id_dir1,
+ ObjIncrement(matching_priority + 1, 1),
+ action,
+ inbound=inbound,
+ laddr_range=no_match_local_addr_range,
+ raddr_range=no_match_remote_addr_range,
+ )
+
+ if bidirectional:
+ # reset the networks so that we're using a unified config
+ # the address ranges are switched
+ no_match_remote_addr_range = NetworkIncrement(
+ ip_network(local_addr_range)
+ )
+ next(no_match_remote_addr_range)
+
+ no_match_local_addr_range = NetworkIncrement(
+ ip_network(remote_addr_range)
+ )
+ next(no_match_local_addr_range)
+ # non-matching entries direction 2
+ IPsecUtil.vpp_ipsec_add_spd_entries(
+ node,
+ no_match_entry_amount,
+ spd_id_dir2,
+ ObjIncrement(matching_priority + 1, 1),
+ action,
+ inbound=inbound,
+ laddr_range=no_match_local_addr_range,
+ raddr_range=no_match_remote_addr_range,
+ )
+
+ IPsecUtil.vpp_ipsec_show_all(node)
+
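# Editorial usage sketch (not part of the patch): one matching BYPASS entry
# per direction plus nine non-matching entries on each of the two SPDs.
# 'dut_node' and the interface keys are topology placeholders.
IPsecUtil.vpp_ipsec_create_spds_match_nth_entry(
    dut_node, dir1_interface="port1", dir2_interface="port2",
    entry_amount=10, local_addr_range="192.168.0.0/24",
    remote_addr_range="10.0.0.0/24", action="bypass",
)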
+ @staticmethod
+ def _vpp_ipsec_add_spd_entry_internal(
+ executor: PapiSocketExecutor,
+ spd_id: int,
+ priority: int,
+ action: IpsecSpdAction.InputType,
+ inbound: bool = True,
+ sa_id: Optional[int] = None,
+ proto: IPsecProto.InputType = None,
+ laddr_range: Optional[str] = None,
+ raddr_range: Optional[str] = None,
+ lport_range: Optional[str] = None,
+ rport_range: Optional[str] = None,
+ is_ipv6: bool = False,
+ ) -> None:
+ """Prepare to create Security Policy Database entry on the VPP node.
+
+ This just adds one more command to the executor.
+ The call site shall get replies once all entries are added,
+ in order to gain the speed benefit of async PAPI.
+
+ :param executor: Open PAPI executor (async handling) to add commands to.
:param spd_id: SPD ID to add entry on.
:param priority: SPD entry priority, higher number = higher priority.
- :param action: Policy action.
+ :param action: IPsec SPD action.
:param inbound: If True policy is for inbound traffic, otherwise
outbound.
- :param sa_id: SAD entry ID for protect action.
- :param laddr_range: Policy selector local IPv4 or IPv6 address range in
- format IP/prefix or IP/mask. If no mask is provided,
+ :param sa_id: SAD entry ID for action IpsecSpdAction.PROTECT.
+ :param proto: Policy selector next layer protocol number.
+ :param laddr_range: Policy selector local IPv4 or IPv6 address range
+ in format IP/prefix or IP/mask. If no mask is provided,
it's considered to be /32.
- :param raddr_range: Policy selector remote IPv4 or IPv6 address range in
- format IP/prefix or IP/mask. If no mask is provided,
+ :param raddr_range: Policy selector remote IPv4 or IPv6 address range
+ in format IP/prefix or IP/mask. If no mask is provided,
it's considered to be /32.
- :param proto: Policy selector next layer protocol number.
:param lport_range: Policy selector local TCP/UDP port range in format
<port_start>-<port_end>.
:param rport_range: Policy selector remote TCP/UDP port range in format
<port_start>-<port_end>.
:param is_ipv6: True in case of IPv6 policy when IPv6 address range is
not defined so it will default to address ::/0, otherwise False.
- :type node: dict
+ :type executor: PapiSocketExecutor
:type spd_id: int
:type priority: int
- :type action: PolicyAction
+ :type action: IpsecSpdAction.InputType
:type inbound: bool
- :type sa_id: int
- :type laddr_range: string
- :type raddr_range: string
- :type proto: int
- :type lport_range: string
- :type rport_range: string
+ :type sa_id: Optional[int]
+ :type proto: IPsecProto.InputType
+ :type laddr_range: Optional[str]
+ :type raddr_range: Optional[str]
+ :type lport_range: Optional[str]
+ :type rport_range: Optional[str]
:type is_ipv6: bool
"""
+ action = get_enum_instance(IpsecSpdAction, action)
+ proto = get_enum_instance(IPsecProto, proto)
if laddr_range is None:
- laddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0"
+ laddr_range = "::/0" if is_ipv6 else "0.0.0.0/0"
if raddr_range is None:
- raddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0"
+ raddr_range = "::/0" if is_ipv6 else "0.0.0.0/0"
+
+ local_net = ip_network(laddr_range, strict=False)
+ remote_net = ip_network(raddr_range, strict=False)
- cmd = u"ipsec_spd_entry_add_del"
- err_msg = f"Failed to add entry to Security Policy Database {spd_id} " \
- f"on host {node[u'host']}"
+ cmd = "ipsec_spd_entry_add_del_v2"
spd_entry = dict(
spd_id=int(spd_id),
priority=int(priority),
is_outbound=not inbound,
sa_id=int(sa_id) if sa_id else 0,
- policy=action.policy_int_repr,
- protocol=int(proto) if proto else 0,
+ policy=int(action),
+ protocol=proto,
remote_address_start=IPAddress.create_ip_address_object(
- ip_network(raddr_range, strict=False).network_address
+ remote_net.network_address
),
remote_address_stop=IPAddress.create_ip_address_object(
- ip_network(raddr_range, strict=False).broadcast_address
+ remote_net.broadcast_address
),
local_address_start=IPAddress.create_ip_address_object(
- ip_network(laddr_range, strict=False).network_address
+ local_net.network_address
),
local_address_stop=IPAddress.create_ip_address_object(
- ip_network(laddr_range, strict=False).broadcast_address
+ local_net.broadcast_address
+ ),
+ remote_port_start=(
+ int(rport_range.split("-")[0]) if rport_range else 0
+ ),
+ remote_port_stop=(
+ int(rport_range.split("-")[1]) if rport_range else 65535
+ ),
+ local_port_start=(
+ int(lport_range.split("-")[0]) if lport_range else 0
+ ),
+ local_port_stop=(
+ int(lport_range.split("-")[1]) if lport_range else 65535
),
- remote_port_start=int(rport_range.split(u"-")[0]) if rport_range
- else 0,
- remote_port_stop=int(rport_range.split(u"-")[1]) if rport_range
- else 65535,
- local_port_start=int(lport_range.split(u"-")[0]) if lport_range
- else 0,
- local_port_stop=int(lport_range.split(u"-")[1]) if rport_range
- else 65535
- )
- args = dict(
- is_add=True,
- entry=spd_entry
)
- with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_reply(err_msg)
+ args = dict(is_add=True, entry=spd_entry)
+ executor.add(cmd, **args)
@staticmethod
- def vpp_ipsec_spd_add_entries(
- node, n_entries, spd_id, priority, inbound, sa_id, raddr_ip,
- raddr_range=0):
- """Create multiple Security Policy Database entries on the VPP node.
+ def vpp_ipsec_add_spd_entry(
+ node: dict,
+ spd_id: int,
+ priority: int,
+ action: IpsecSpdAction.InputType,
+ inbound: bool = True,
+ sa_id: Optional[int] = None,
+ proto: IPsecProto.InputType = None,
+ laddr_range: Optional[str] = None,
+ raddr_range: Optional[str] = None,
+ lport_range: Optional[str] = None,
+ rport_range: Optional[str] = None,
+ is_ipv6: bool = False,
+ ) -> None:
+ """Create Security Policy Database entry on the VPP node.
- :param node: VPP node to add SPD entries on.
- :param n_entries: Number of SPD entries to be added.
- :param spd_id: SPD ID to add entries on.
- :param priority: SPD entries priority, higher number = higher priority.
+ :param node: VPP node to add SPD entry on.
+ :param spd_id: SPD ID to add entry on.
+ :param priority: SPD entry priority, higher number = higher priority.
+ :param action: IPsec SPD action.
:param inbound: If True policy is for inbound traffic, otherwise
outbound.
- :param sa_id: SAD entry ID for first entry. Each subsequent entry will
- SAD entry ID incremented by 1.
- :param raddr_ip: Policy selector remote IPv4 start address for the first
- entry. Remote IPv4 end address will be calculated depending on
- raddr_range parameter. Each subsequent entry will have start address
- next after IPv4 end address of previous entry.
- :param raddr_range: Required IP addres range.
+ :param sa_id: SAD entry ID for action IpsecSpdAction.PROTECT.
+ :param proto: Policy selector next layer protocol number.
+ :param laddr_range: Policy selector local IPv4 or IPv6 address range
+ in format IP/prefix or IP/mask. If no mask is provided,
+ it's considered to be /32.
+ :param raddr_range: Policy selector remote IPv4 or IPv6 address range
+ in format IP/prefix or IP/mask. If no mask is provided,
+ it's considered to be /32.
+ :param lport_range: Policy selector local TCP/UDP port range in format
+ <port_start>-<port_end>.
+ :param rport_range: Policy selector remote TCP/UDP port range in format
+ <port_start>-<port_end>.
+ :param is_ipv6: True in case of IPv6 policy when IPv6 address range is
+ not defined so it will default to address ::/0, otherwise False.
:type node: dict
- :type n_entries: int
:type spd_id: int
:type priority: int
+ :type action: IpsecSpdAction.InputType
:type inbound: bool
- :type sa_id: int
- :type raddr_ip: str
- :type raddr_range: int
+ :type sa_id: Optional[int]
+ :type proto: IPsecProto.InputType
+ :type laddr_range: Optional[str]
+ :type raddr_range: Optional[str]
+ :type lport_range: Optional[str]
+ :type rport_range: Optional[str]
+ :type is_ipv6: bool
"""
- raddr_ip = ip_address(raddr_ip)
- if int(n_entries) > 10:
- tmp_filename = f"/tmp/ipsec_spd_{sa_id}_add_del_entry.script"
-
- with open(tmp_filename, 'w') as tmp_file:
- for i in range(n_entries):
- direction = u'inbound' if inbound else u'outbound'
- tunnel = f"exec ipsec policy add spd {spd_id} " \
- f"priority {priority} {direction} " \
- f"action protect sa {sa_id+i} " \
- f"remote-ip-range {raddr_ip + i * (raddr_range + 1)} " \
- f"- {raddr_ip + (i + 1) * raddr_range + i} " \
- f"local-ip-range 0.0.0.0 - 255.255.255.255\n"
- tmp_file.write(tunnel)
- VatExecutor().execute_script(
- tmp_filename, node, timeout=300, json_out=False,
- copy_on_execute=True
- )
- os.remove(tmp_filename)
- return
-
- laddr_range = u"::/0" if raddr_ip.version == 6 else u"0.0.0.0/0"
-
- cmd = u"ipsec_spd_entry_add_del"
- err_msg = f"ailed to add entry to Security Policy Database '{spd_id} " \
- f"on host {node[u'host']}"
-
- spd_entry = dict(
- spd_id=int(spd_id),
- priority=int(priority),
- is_outbound=not inbound,
- sa_id=int(sa_id) if sa_id else 0,
- policy=getattr(PolicyAction.PROTECT, u"policy_int_repr"),
- protocol=0,
- remote_address_start=IPAddress.create_ip_address_object(raddr_ip),
- remote_address_stop=IPAddress.create_ip_address_object(raddr_ip),
- local_address_start=IPAddress.create_ip_address_object(
- ip_network(laddr_range, strict=False).network_address
- ),
- local_address_stop=IPAddress.create_ip_address_object(
- ip_network(laddr_range, strict=False).broadcast_address
- ),
- remote_port_start=0,
- remote_port_stop=65535,
- local_port_start=0,
- local_port_stop=65535
- )
- args = dict(
- is_add=True,
- entry=spd_entry
+ action = get_enum_instance(IpsecSpdAction, action)
+ proto = get_enum_instance(IPsecProto, proto)
+ err_msg = (
+ "Failed to add entry to Security Policy Database"
+ f" {spd_id} on host {node['host']}"
)
-
- with PapiSocketExecutor(node) as papi_exec:
- for i in range(n_entries):
- args[u"entry"][u"remote_address_start"][u"un"] = \
- IPAddress.union_addr(raddr_ip + i)
- args[u"entry"][u"remote_address_stop"][u"un"] = \
- IPAddress.union_addr(raddr_ip + i)
- history = bool(not 1 < i < n_entries - 2)
- papi_exec.add(cmd, history=history, **args)
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
+ IPsecUtil._vpp_ipsec_add_spd_entry_internal(
+ papi_exec,
+ spd_id,
+ priority,
+ action,
+ inbound,
+ sa_id,
+ proto,
+ laddr_range,
+ raddr_range,
+ lport_range,
+ rport_range,
+ is_ipv6,
+ )
papi_exec.get_replies(err_msg)
@staticmethod
- def _ipsec_create_tunnel_interfaces_dut1_vat(
- nodes, tun_ips, if1_key, if2_key, n_tunnels, crypto_alg, integ_alg,
- raddr_ip2, addr_incr, spi_d, existing_tunnels=0):
- """Create multiple IPsec tunnel interfaces on DUT1 node using VAT.
+ def vpp_ipsec_add_spd_entries(
+ node: dict,
+ n_entries: int,
+ spd_id: int,
+ priority: Optional[ObjIncrement],
+ action: IpsecSpdAction.InputType,
+ inbound: bool,
+ sa_id: Optional[ObjIncrement] = None,
+ proto: IPsecProto.InputType = None,
+ laddr_range: Optional[NetworkIncrement] = None,
+ raddr_range: Optional[NetworkIncrement] = None,
+ lport_range: Optional[str] = None,
+ rport_range: Optional[str] = None,
+ is_ipv6: bool = False,
+ ) -> None:
+ """Create multiple Security Policy Database entries on the VPP node.
- :param nodes: VPP nodes to create tunnel interfaces.
- :param tun_ips: Dictionary with VPP node 1 ipsec tunnel interface
- IPv4/IPv6 address (ip1) and VPP node 2 ipsec tunnel interface
- IPv4/IPv6 address (ip2).
- :param if1_key: VPP node 1 interface key from topology file.
- :param if2_key: VPP node 2 / TG node (in case of 2-node topology)
- interface key from topology file.
- :param n_tunnels: Number of tunnel interfaces to be there at the end.
- :param crypto_alg: The encryption algorithm name.
- :param integ_alg: The integrity algorithm name.
- :param raddr_ip2: Policy selector remote IPv4/IPv6 start address for the
- first tunnel in direction node2->node1.
- :param spi_d: Dictionary with SPIs for VPP node 1 and VPP node 2.
- :param addr_incr: IP / IPv6 address incremental step.
- :param existing_tunnels: Number of tunnel interfaces before creation.
- Useful mainly for reconf tests. Default 0.
- :type nodes: dict
- :type tun_ips: dict
- :type if1_key: str
- :type if2_key: str
- :type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type integ_alg: IntegAlg
- :type raddr_ip2: IPv4Address or IPv6Address
- :type addr_incr: int
- :type spi_d: dict
- :type existing_tunnels: int
+ :param node: VPP node to add SPD entries on.
+ :param n_entries: Number of SPD entries to be added.
+ :param spd_id: SPD ID to add entries on.
+ :param priority: SPD entries priority, higher number = higher priority.
+ :param action: IPsec SPD action.
+ :param inbound: If True policy is for inbound traffic, otherwise
+ outbound.
+ :param sa_id: SAD entry ID for action IpsecSpdAction.PROTECT.
+ :param proto: Policy selector next layer protocol number.
+ :param laddr_range: Policy selector local IPv4 or IPv6 address range
+ in format IP/prefix or IP/mask. If no mask is provided,
+ it's considered to be /32.
+ :param raddr_range: Policy selector remote IPv4 or IPv6 address range
+ in format IP/prefix or IP/mask. If no mask is provided,
+ it's considered to be /32.
+ :param lport_range: Policy selector local TCP/UDP port range in format
+ <port_start>-<port_end>.
+ :param rport_range: Policy selector remote TCP/UDP port range in format
+ <port_start>-<port_end>.
+ :param is_ipv6: True in case of IPv6 policy when IPv6 address range is
+ not defined so it will default to address ::/0, otherwise False.
+ :type node: dict
+ :type n_entries: int
+ :type spd_id: int
+ :type priority: Optional[ObjIncrement]
+ :type action: IpsecSpdAction.InputType
+ :type inbound: bool
+ :type sa_id: Optional[ObjIncrement]
+ :type proto: IPsecProto.InputType
+ :type laddr_range: Optional[NetworkIncrement]
+ :type raddr_range: Optional[NetworkIncrement]
+ :type lport_range: Optional[str]
+ :type rport_range: Optional[str]
+ :type is_ipv6: bool
"""
- tmp_fn1 = u"/tmp/ipsec_create_tunnel_dut1.config"
- if1_n = Topology.get_interface_name(nodes[u"DUT1"], if1_key)
-
- ckeys = [bytes()] * existing_tunnels
- ikeys = [bytes()] * existing_tunnels
-
- vat = VatExecutor()
- with open(tmp_fn1, u"w") as tmp_f1:
- rmac = Topology.get_interface_mac(nodes[u"DUT2"], if2_key) \
- if u"DUT2" in nodes.keys() \
- else Topology.get_interface_mac(nodes[u"TG"], if2_key)
- if not existing_tunnels:
- tmp_f1.write(
- f"exec create loopback interface\n"
- f"exec set interface state loop0 up\n"
- f"exec set interface ip address {if1_n} "
- f"{tun_ips[u'ip2'] - 1}/"
- f"{len(tun_ips[u'ip2'].packed)*8*3//4}\n"
- f"exec set ip neighbor {if1_n} {tun_ips[u'ip2']} {rmac} "
- f"static\n"
- )
- for i in range(existing_tunnels, n_tunnels):
- ckeys.append(
- gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg))
- )
- if integ_alg:
- ikeys.append(
- gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg))
- )
- integ = f"integ-alg {integ_alg.alg_name} " \
- f"integ-key {ikeys[i].hex()} "
- else:
- integ = u""
- tmp_f1.write(
- f"exec set interface ip address loop0 "
- f"{tun_ips[u'ip1'] + i * addr_incr}/32\n"
- f"exec create ipip tunnel "
- f"src {tun_ips[u'ip1'] + i * addr_incr} "
- f"dst {tun_ips[u'ip2']} "
- f"p2p\n"
- f"exec ipsec sa add {i} "
- f"spi {spi_d[u'spi_1'] + i} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"crypto-key {ckeys[i].hex()} "
- f"{integ}"
- f"esp\n"
- f"exec ipsec sa add {100000 + i} "
- f"spi {spi_d[u'spi_2'] + i} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"crypto-key {ckeys[i].hex()} "
- f"{integ}"
- f"esp\n"
- f"exec ipsec tunnel protect ipip{i} "
- f"sa-out {i} "
- f"sa-in {100000 + i} "
- f"add\n"
- )
- vat.execute_script(
- tmp_fn1, nodes[u"DUT1"], timeout=1800, json_out=False,
- copy_on_execute=True,
- history=bool(n_tunnels < 100)
- )
- os.remove(tmp_fn1)
-
- with open(tmp_fn1, 'w') as tmp_f1:
- for i in range(existing_tunnels, n_tunnels):
- tmp_f1.write(
- f"exec set interface unnumbered ipip{i} use {if1_n}\n"
- f"exec set interface state ipip{i} up\n"
- f"exec ip route add "
- f"{raddr_ip2 + i}/{len(raddr_ip2.packed)*8} "
- f"via ipip{i}\n"
- )
- vat.execute_script(
- tmp_fn1, nodes[u"DUT1"], timeout=1800, json_out=False,
- copy_on_execute=True,
- history=bool(n_tunnels < 100)
- )
- os.remove(tmp_fn1)
-
- return ckeys, ikeys
-
- @staticmethod
- def _ipsec_create_tunnel_interfaces_dut2_vat(
- nodes, tun_ips, if2_key, n_tunnels, crypto_alg, ckeys, integ_alg,
- ikeys, raddr_ip1, addr_incr, spi_d, existing_tunnels=0):
- """Create multiple IPsec tunnel interfaces on DUT2 node using VAT.
+ action = get_enum_instance(IpsecSpdAction, action)
+ proto = get_enum_instance(IPsecProto, proto)
+ if laddr_range is None:
+ laddr_range = "::/0" if is_ipv6 else "0.0.0.0/0"
+ laddr_range = NetworkIncrement(ip_network(laddr_range), 0)
- :param nodes: VPP nodes to create tunnel interfaces.
- :param tun_ips: Dictionary with VPP node 1 ipsec tunnel interface
- IPv4/IPv6 address (ip1) and VPP node 2 ipsec tunnel interface
- IPv4/IPv6 address (ip2).
- :param if2_key: VPP node 2 / TG node (in case of 2-node topology)
- interface key from topology file.
- :param n_tunnels: Number of tunnel interfaces to be there at the end.
- :param crypto_alg: The encryption algorithm name.
- :param ckeys: List of encryption keys.
- :param integ_alg: The integrity algorithm name.
- :param ikeys: List of integrity keys.
- :param spi_d: Dictionary with SPIs for VPP node 1 and VPP node 2.
- :param addr_incr: IP / IPv6 address incremental step.
- :param existing_tunnels: Number of tunnel interfaces before creation.
- Useful mainly for reconf tests. Default 0.
- :type nodes: dict
- :type tun_ips: dict
- :type if2_key: str
- :type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type ckeys: list
- :type integ_alg: IntegAlg
- :type ikeys: list
- :type addr_incr: int
- :type spi_d: dict
- :type existing_tunnels: int
- """
- tmp_fn2 = u"/tmp/ipsec_create_tunnel_dut2.config"
- if2_n = Topology.get_interface_name(nodes[u"DUT2"], if2_key)
+ if raddr_range is None:
+ raddr_range = "::/0" if is_ipv6 else "0.0.0.0/0"
+ raddr_range = NetworkIncrement(ip_network(raddr_range), 0)
- vat = VatExecutor()
- with open(tmp_fn2, 'w') as tmp_f2:
- if not existing_tunnels:
- tmp_f2.write(
- f"exec set interface ip address {if2_n}"
- f" {tun_ips[u'ip2']}/{len(tun_ips[u'ip2'].packed)*8*3/4}\n"
- )
- for i in range(existing_tunnels, n_tunnels):
- if integ_alg:
- integ = f"integ-alg {integ_alg.alg_name} " \
- f"integ-key {ikeys[i].hex()} "
- else:
- integ = u""
- tmp_f2.write(
- f"exec create ipip tunnel "
- f"src {tun_ips[u'ip2']} "
- f"dst {tun_ips[u'ip1'] + i * addr_incr} "
- f"p2p\n"
- f"exec ipsec sa add {100000 + i} "
- f"spi {spi_d[u'spi_2'] + i} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"crypto-key {ckeys[i].hex()} "
- f"{integ}"
- f"esp\n"
- f"exec ipsec sa add {i} "
- f"spi {spi_d[u'spi_1'] + i} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"crypto-key {ckeys[i].hex()} "
- f"{integ}"
- f"esp\n"
- f"exec ipsec tunnel protect ipip{i} "
- f"sa-out {100000 + i} "
- f"sa-in {i} "
- f"add\n"
- )
- vat.execute_script(
- tmp_fn2, nodes[u"DUT2"], timeout=1800, json_out=False,
- copy_on_execute=True,
- history=bool(n_tunnels < 100)
+ err_msg = (
+ "Failed to add entry to Security Policy Database"
+ f" {spd_id} on host {node['host']}"
)
- os.remove(tmp_fn2)
-
- with open(tmp_fn2, 'w') as tmp_f2:
- if not existing_tunnels:
- tmp_f2.write(
- f"exec ip route add {tun_ips[u'ip1']}/8 "
- f"via {tun_ips[u'ip2'] - 1} {if2_n}\n"
- )
- for i in range(existing_tunnels, n_tunnels):
- tmp_f2.write(
- f"exec set interface unnumbered ipip{i} use {if2_n}\n"
- f"exec set interface state ipip{i} up\n"
- f"exec ip route add "
- f"{raddr_ip1 + i}/{len(raddr_ip1.packed)*8} "
- f"via ipip{i}\n"
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
+ for _ in range(n_entries):
+ IPsecUtil._vpp_ipsec_add_spd_entry_internal(
+ papi_exec,
+ spd_id,
+ next(priority),
+ action,
+ inbound,
+ next(sa_id) if sa_id is not None else sa_id,
+ proto,
+ next(laddr_range),
+ next(raddr_range),
+ lport_range,
+ rport_range,
+ is_ipv6,
)
- vat.execute_script(
- tmp_fn2, nodes[u"DUT2"], timeout=1800, json_out=False,
- copy_on_execute=True,
- history=bool(n_tunnels < 100)
- )
- os.remove(tmp_fn2)
+ papi_exec.get_replies(err_msg)
@staticmethod
- def _ipsec_create_loopback_dut1_papi(nodes, tun_ips, if1_key, if2_key):
+ def _ipsec_create_loopback_dut1_papi(
+ nodes: dict, tun_ips: dict, if1_key: str, if2_key: str
+ ) -> int:
"""Create loopback interface and set IP address on VPP node 1 interface
using PAPI.
@@ -1042,58 +1147,66 @@ class IPsecUtil:
:type tun_ips: dict
:type if1_key: str
:type if2_key: str
+ :returns: sw_if_idx Of the created loopback interface.
+ :rtype: int
"""
- with PapiSocketExecutor(nodes[u"DUT1"]) as papi_exec:
+ with PapiSocketExecutor(nodes["DUT1"]) as papi_exec:
# Create loopback interface on DUT1, set it to up state
- cmd = u"create_loopback"
+ cmd = "create_loopback_instance"
args = dict(
- mac_address=0
+ mac_address=0,
+ is_specified=False,
+ user_instance=0,
)
- err_msg = f"Failed to create loopback interface " \
- f"on host {nodes[u'DUT1'][u'host']}"
- loop_sw_if_idx = papi_exec.add(cmd, **args). \
- get_sw_if_index(err_msg)
- cmd = u"sw_interface_set_flags"
+ err_msg = (
+ "Failed to create loopback interface"
+ f" on host {nodes['DUT1']['host']}"
+ )
+ papi_exec.add(cmd, **args)
+ loop_sw_if_idx = papi_exec.get_sw_if_index(err_msg)
+ cmd = "sw_interface_set_flags"
args = dict(
sw_if_index=loop_sw_if_idx,
- flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value,
+ )
+ err_msg = (
+ "Failed to set loopback interface state up"
+ f" on host {nodes['DUT1']['host']}"
)
- err_msg = f"Failed to set loopback interface state up " \
- f"on host {nodes[u'DUT1'][u'host']}"
papi_exec.add(cmd, **args).get_reply(err_msg)
# Set IP address on VPP node 1 interface
- cmd = u"sw_interface_add_del_address"
+ cmd = "sw_interface_add_del_address"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(
- nodes[u"DUT1"], if1_key
+ nodes["DUT1"], if1_key
),
is_add=True,
del_all=False,
prefix=IPUtil.create_prefix_object(
- tun_ips[u"ip2"] - 1, 96 if tun_ips[u"ip2"].version == 6
- else 24
- )
+ tun_ips["ip2"] - 1,
+ 96 if tun_ips["ip2"].version == 6 else 24,
+ ),
+ )
+ err_msg = (
+ f"Failed to set IP address on interface {if1_key}"
+ f" on host {nodes['DUT1']['host']}"
)
- err_msg = f"Failed to set IP address on interface {if1_key} " \
- f"on host {nodes[u'DUT1'][u'host']}"
papi_exec.add(cmd, **args).get_reply(err_msg)
- cmd2 = u"ip_neighbor_add_del"
+ cmd2 = "ip_neighbor_add_del"
args2 = dict(
is_add=1,
neighbor=dict(
sw_if_index=Topology.get_interface_sw_index(
- nodes[u"DUT1"], if1_key
+ nodes["DUT1"], if1_key
),
flags=1,
mac_address=str(
- Topology.get_interface_mac(nodes[u"DUT2"], if2_key)
- if u"DUT2" in nodes.keys()
- else Topology.get_interface_mac(
- nodes[u"TG"], if2_key
- )
+ Topology.get_interface_mac(nodes["DUT2"], if2_key)
+ if "DUT2" in nodes.keys()
+ else Topology.get_interface_mac(nodes["TG"], if2_key)
),
- ip_address=tun_ips[u"ip2"].compressed
- )
+ ip_address=tun_ips["ip2"].compressed,
+ ),
)
err_msg = f"Failed to add IP neighbor on interface {if1_key}"
papi_exec.add(cmd2, **args2).get_reply(err_msg)
@@ -1102,10 +1215,22 @@ class IPsecUtil:
@staticmethod
def _ipsec_create_tunnel_interfaces_dut1_papi(
- nodes, tun_ips, if1_key, if2_key, n_tunnels, crypto_alg, integ_alg,
- raddr_ip2, addr_incr, spi_d, existing_tunnels=0):
+ nodes: dict,
+ tun_ips: dict,
+ if1_key: str,
+ if2_key: str,
+ n_tunnels: int,
+ crypto_alg: CryptoAlg.InputType,
+ integ_alg: IntegAlg.InputType,
+ raddr_ip2: Union[IPv4Address, IPv6Address],
+ addr_incr: int,
+ spi_d: dict,
+ existing_tunnels: int = 0,
+ ) -> Tuple[List[bytes], List[bytes]]:
"""Create multiple IPsec tunnel interfaces on DUT1 node using PAPI.
+ Generate random keys and return them (so DUT2 or TG can decrypt).
+
:param nodes: VPP nodes to create tunnel interfaces.
:param tun_ips: Dictionary with VPP node 1 ipsec tunnel interface
IPv4/IPv6 address (ip1) and VPP node 2 ipsec tunnel interface
@@ -1127,40 +1252,44 @@ class IPsecUtil:
:type if1_key: str
:type if2_key: str
:type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type integ_alg: IntegAlg
- :type raddr_ip2: IPv4Address or IPv6Address
+ :type crypto_alg: CryptoAlg.InputType
+ :type integ_alg: IntegAlg.InputType
+ :type raddr_ip2: Union[IPv4Address, IPv6Address]
:type addr_incr: int
:type spi_d: dict
:type existing_tunnels: int
+ :returns: Generated ckeys and ikeys.
+ :rtype: List[bytes], List[bytes]
"""
+ crypto_alg = get_enum_instance(CryptoAlg, crypto_alg)
+ integ_alg = get_enum_instance(IntegAlg, integ_alg)
if not existing_tunnels:
loop_sw_if_idx = IPsecUtil._ipsec_create_loopback_dut1_papi(
nodes, tun_ips, if1_key, if2_key
)
else:
loop_sw_if_idx = InterfaceUtil.vpp_get_interface_sw_index(
- nodes[u"DUT1"], u"loop0"
+ nodes["DUT1"], "loop0"
)
- with PapiSocketExecutor(nodes[u"DUT1"]) as papi_exec:
+ with PapiSocketExecutor(nodes["DUT1"], is_async=True) as papi_exec:
# Configure IP addresses on loop0 interface
- cmd = u"sw_interface_add_del_address"
+ cmd = "sw_interface_add_del_address"
args = dict(
sw_if_index=loop_sw_if_idx,
is_add=True,
del_all=False,
- prefix=None
+ prefix=None,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"prefix"] = IPUtil.create_prefix_object(
- tun_ips[u"ip1"] + i * addr_incr,
- 128 if tun_ips[u"ip1"].version == 6 else 32
+ args["prefix"] = IPUtil.create_prefix_object(
+ tun_ips["ip1"] + i * addr_incr,
+ 128 if tun_ips["ip1"].version == 6 else 32,
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Configure IPIP tunnel interfaces
- cmd = u"ipip_add_tunnel"
+ cmd = "ipip_add_tunnel"
ipip_tunnel = dict(
instance=Constants.BITWISE_NON_ZERO,
src=None,
@@ -1170,190 +1299,183 @@ class IPsecUtil:
TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
),
mode=int(TunnelMode.TUNNEL_API_MODE_P2P),
- dscp=int(IpDscp.IP_API_DSCP_CS0)
- )
- args = dict(
- tunnel=ipip_tunnel
+ dscp=int(IpDscp.IP_API_DSCP_CS0),
)
+ args = dict(tunnel=ipip_tunnel)
ipip_tunnels = [None] * existing_tunnels
for i in range(existing_tunnels, n_tunnels):
- args[u"tunnel"][u"src"] = IPAddress.create_ip_address_object(
- tun_ips[u"ip1"] + i * addr_incr
+ ipip_tunnel["src"] = IPAddress.create_ip_address_object(
+ tun_ips["ip1"] + i * addr_incr
)
- args[u"tunnel"][u"dst"] = IPAddress.create_ip_address_object(
- tun_ips[u"ip2"]
+ ipip_tunnel["dst"] = IPAddress.create_ip_address_object(
+ tun_ips["ip2"]
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IPIP tunnel interfaces on host" \
- f" {nodes[u'DUT1'][u'host']}"
+ err_msg = (
+ "Failed to add IPIP tunnel interfaces on host"
+ f" {nodes['DUT1']['host']}"
+ )
ipip_tunnels.extend(
[
- reply[u"sw_if_index"]
+ reply["sw_if_index"]
for reply in papi_exec.get_replies(err_msg)
- if u"sw_if_index" in reply
+ if "sw_if_index" in reply
]
)
# Configure IPSec SAD entries
ckeys = [bytes()] * existing_tunnels
ikeys = [bytes()] * existing_tunnels
- cmd = u"ipsec_sad_entry_add_del_v2"
- c_key = dict(
- length=0,
- data=None
- )
- i_key = dict(
- length=0,
- data=None
- )
+ cmd = "ipsec_sad_entry_add_v2"
+ c_key = dict(length=0, data=None)
+ i_key = dict(length=0, data=None)
+ common_flags = IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE
sad_entry = dict(
sad_id=None,
spi=None,
- protocol=int(IPsecProto.IPSEC_API_PROTO_ESP),
+ protocol=IPsecProto.ESP,
crypto_algorithm=crypto_alg.alg_int_repr,
crypto_key=c_key,
- integrity_algorithm=integ_alg.alg_int_repr if integ_alg else 0,
+ integrity_algorithm=integ_alg.alg_int_repr,
integrity_key=i_key,
- flags=None,
- tunnel_src=0,
- tunnel_dst=0,
- tunnel_flags=int(
- TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
+ flags=common_flags,
+ tunnel=dict(
+ src=0,
+ dst=0,
+ table_id=0,
+ encap_decap_flags=int(
+ TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
+ ),
+ dscp=int(IpDscp.IP_API_DSCP_CS0),
),
- dscp=int(IpDscp.IP_API_DSCP_CS0),
- table_id=0,
salt=0,
- udp_src_port=IPSEC_UDP_PORT_NONE,
- udp_dst_port=IPSEC_UDP_PORT_NONE
- )
- args = dict(
- is_add=True,
- entry=sad_entry
+ udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+ udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+ anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
)
+ args = dict(entry=sad_entry)
for i in range(existing_tunnels, n_tunnels):
- ckeys.append(
- gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg))
- )
- if integ_alg:
- ikeys.append(
- gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg))
- )
+ ckeys.append(gen_key(crypto_alg.key_len))
+ ikeys.append(gen_key(integ_alg.key_len))
# SAD entry for outband / tx path
- args[u"entry"][u"sad_id"] = i
- args[u"entry"][u"spi"] = spi_d[u"spi_1"] + i
+ sad_entry["sad_id"] = i
+ sad_entry["spi"] = spi_d["spi_1"] + i
- args[u"entry"][u"crypto_key"][u"length"] = len(ckeys[i])
- args[u"entry"][u"crypto_key"][u"data"] = ckeys[i]
+ sad_entry["crypto_key"]["length"] = len(ckeys[i])
+ sad_entry["crypto_key"]["data"] = ckeys[i]
if integ_alg:
- args[u"entry"][u"integrity_key"][u"length"] = len(ikeys[i])
- args[u"entry"][u"integrity_key"][u"data"] = ikeys[i]
- args[u"entry"][u"flags"] = int(
- IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE
- )
+ sad_entry["integrity_key"]["length"] = len(ikeys[i])
+ sad_entry["integrity_key"]["data"] = ikeys[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
+ sad_entry["flags"] |= IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_INBOUND
+ for i in range(existing_tunnels, n_tunnels):
# SAD entry for inband / rx path
- args[u"entry"][u"sad_id"] = 100000 + i
- args[u"entry"][u"spi"] = spi_d[u"spi_2"] + i
+ sad_entry["sad_id"] = 100000 + i
+ sad_entry["spi"] = spi_d["spi_2"] + i
- args[u"entry"][u"crypto_key"][u"length"] = len(ckeys[i])
- args[u"entry"][u"crypto_key"][u"data"] = ckeys[i]
+ sad_entry["crypto_key"]["length"] = len(ckeys[i])
+ sad_entry["crypto_key"]["data"] = ckeys[i]
if integ_alg:
- args[u"entry"][u"integrity_key"][u"length"] = len(ikeys[i])
- args[u"entry"][u"integrity_key"][u"data"] = ikeys[i]
- args[u"entry"][u"flags"] = int(
- IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE |
- IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_INBOUND
- )
+ sad_entry["integrity_key"]["length"] = len(ikeys[i])
+ sad_entry["integrity_key"]["data"] = ikeys[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IPsec SAD entries on host" \
- f" {nodes[u'DUT1'][u'host']}"
+ err_msg = (
+ "Failed to add IPsec SAD entries on host"
+ f" {nodes['DUT1']['host']}"
+ )
papi_exec.get_replies(err_msg)
# Add protection for tunnels with IPSEC
- cmd = u"ipsec_tunnel_protect_update"
+ cmd = "ipsec_tunnel_protect_update"
n_hop = dict(
address=0,
via_label=MPLS_LABEL_INVALID,
- obj_id=Constants.BITWISE_NON_ZERO
+ obj_id=Constants.BITWISE_NON_ZERO,
)
ipsec_tunnel_protect = dict(
- sw_if_index=None,
- nh=n_hop,
- sa_out=None,
- n_sa_in=1,
- sa_in=None
- )
- args = dict(
- tunnel=ipsec_tunnel_protect
+ sw_if_index=None, nh=n_hop, sa_out=None, n_sa_in=1, sa_in=None
)
+ args = dict(tunnel=ipsec_tunnel_protect)
for i in range(existing_tunnels, n_tunnels):
- args[u"tunnel"][u"sw_if_index"] = ipip_tunnels[i]
- args[u"tunnel"][u"sa_out"] = i
- args[u"tunnel"][u"sa_in"] = [100000 + i]
+ args["tunnel"]["sw_if_index"] = ipip_tunnels[i]
+ args["tunnel"]["sa_out"] = i
+ args["tunnel"]["sa_in"] = [100000 + i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add protection for tunnels with IPSEC " \
- f"on host {nodes[u'DUT1'][u'host']}"
+ err_msg = (
+ "Failed to add protection for tunnels with IPSEC"
+ f" on host {nodes['DUT1']['host']}"
+ )
papi_exec.get_replies(err_msg)
# Configure unnumbered interfaces
- cmd = u"sw_interface_set_unnumbered"
+ cmd = "sw_interface_set_unnumbered"
args = dict(
is_add=True,
sw_if_index=InterfaceUtil.get_interface_index(
- nodes[u"DUT1"], if1_key
+ nodes["DUT1"], if1_key
),
- unnumbered_sw_if_index=0
+ unnumbered_sw_if_index=0,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"unnumbered_sw_if_index"] = ipip_tunnels[i]
+ args["unnumbered_sw_if_index"] = ipip_tunnels[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Set interfaces up
- cmd = u"sw_interface_set_flags"
+ cmd = "sw_interface_set_flags"
args = dict(
sw_if_index=0,
- flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"sw_if_index"] = ipip_tunnels[i]
+ args["sw_if_index"] = ipip_tunnels[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Configure IP routes
- cmd = u"ip_route_add_del"
- args = dict(
- is_add=1,
- is_multipath=0,
- route=None
- )
+ cmd = "ip_route_add_del"
+ args = dict(is_add=1, is_multipath=0, route=None)
for i in range(existing_tunnels, n_tunnels):
- args[u"route"] = IPUtil.compose_vpp_route_structure(
- nodes[u"DUT1"], (raddr_ip2 + i).compressed,
+ args["route"] = IPUtil.compose_vpp_route_structure(
+ nodes["DUT1"],
+ (raddr_ip2 + i).compressed,
prefix_len=128 if raddr_ip2.version == 6 else 32,
- interface=ipip_tunnels[i]
+ interface=ipip_tunnels[i],
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IP routes on host " \
- f"{nodes[u'DUT1'][u'host']}"
+ err_msg = f"Failed to add IP routes on host {nodes['DUT1']['host']}"
papi_exec.get_replies(err_msg)
return ckeys, ikeys
@staticmethod
def _ipsec_create_tunnel_interfaces_dut2_papi(
- nodes, tun_ips, if2_key, n_tunnels, crypto_alg, ckeys, integ_alg,
- ikeys, raddr_ip1, addr_incr, spi_d, existing_tunnels=0):
+ nodes: dict,
+ tun_ips: dict,
+ if2_key: str,
+ n_tunnels: int,
+ crypto_alg: CryptoAlg.InputType,
+ ckeys: Sequence[bytes],
+ integ_alg: IntegAlg.InputType,
+ ikeys: Sequence[bytes],
+ raddr_ip1: Union[IPv4Address, IPv6Address],
+ addr_incr: int,
+ spi_d: dict,
+ existing_tunnels: int = 0,
+ ) -> None:
"""Create multiple IPsec tunnel interfaces on DUT2 node using PAPI.
+ This method accesses keys generated by DUT1 method
+ and does not return anything.
+
:param nodes: VPP nodes to create tunnel interfaces.
:param tun_ips: Dictionary with VPP node 1 ipsec tunnel interface
IPv4/IPv6 address (ip1) and VPP node 2 ipsec tunnel interface
@@ -1365,6 +1487,8 @@ class IPsecUtil:
:param ckeys: List of encryption keys.
:param integ_alg: The integrity algorithm name.
:param ikeys: List of integrity keys.
+ :param raddr_ip1: Policy selector remote IPv4/IPv6 start address for the
+ first tunnel in direction node1->node2.
:param spi_d: Dictionary with SPIs for VPP node 1 and VPP node 2.
:param addr_incr: IP / IPv6 address incremental step.
:param existing_tunnels: Number of tunnel interfaces before creation.
@@ -1373,34 +1497,39 @@ class IPsecUtil:
:type tun_ips: dict
:type if2_key: str
:type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type ckeys: list
- :type integ_alg: IntegAlg
- :type ikeys: list
+ :type crypto_alg: CryptoAlg.InputType
+ :type ckeys: Sequence[bytes]
+ :type integ_alg: IntegAlg.InputType
+ :type ikeys: Sequence[bytes]
+ :type raddr_ip1: Union[IPv4Address, IPv6Address]
:type addr_incr: int
:type spi_d: dict
:type existing_tunnels: int
"""
- with PapiSocketExecutor(nodes[u"DUT2"]) as papi_exec:
+ crypto_alg = get_enum_instance(CryptoAlg, crypto_alg)
+ integ_alg = get_enum_instance(IntegAlg, integ_alg)
+ with PapiSocketExecutor(nodes["DUT2"], is_async=True) as papi_exec:
if not existing_tunnels:
# Set IP address on VPP node 2 interface
- cmd = u"sw_interface_add_del_address"
+ cmd = "sw_interface_add_del_address"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(
- nodes[u"DUT2"], if2_key
+ nodes["DUT2"], if2_key
),
is_add=True,
del_all=False,
prefix=IPUtil.create_prefix_object(
- tun_ips[u"ip2"], 96 if tun_ips[u"ip2"].version == 6
- else 24
- )
+ tun_ips["ip2"],
+ 96 if tun_ips["ip2"].version == 6 else 24,
+ ),
)
- err_msg = f"Failed to set IP address on interface {if2_key} " \
- f"on host {nodes[u'DUT2'][u'host']}"
- papi_exec.add(cmd, **args).get_reply(err_msg)
+ err_msg = (
+ f"Failed to set IP address on interface {if2_key}"
+ f" on host {nodes['DUT2']['host']}"
+ )
+ papi_exec.add(cmd, **args).get_replies(err_msg)
# Configure IPIP tunnel interfaces
- cmd = u"ipip_add_tunnel"
+ cmd = "ipip_add_tunnel"
ipip_tunnel = dict(
instance=Constants.BITWISE_NON_ZERO,
src=None,
@@ -1410,204 +1539,193 @@ class IPsecUtil:
TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
),
mode=int(TunnelMode.TUNNEL_API_MODE_P2P),
- dscp=int(IpDscp.IP_API_DSCP_CS0)
- )
- args = dict(
- tunnel=ipip_tunnel
+ dscp=int(IpDscp.IP_API_DSCP_CS0),
)
+ args = dict(tunnel=ipip_tunnel)
ipip_tunnels = [None] * existing_tunnels
for i in range(existing_tunnels, n_tunnels):
- args[u"tunnel"][u"src"] = IPAddress.create_ip_address_object(
- tun_ips[u"ip2"]
+ ipip_tunnel["src"] = IPAddress.create_ip_address_object(
+ tun_ips["ip2"]
)
- args[u"tunnel"][u"dst"] = IPAddress.create_ip_address_object(
- tun_ips[u"ip1"] + i * addr_incr
+ ipip_tunnel["dst"] = IPAddress.create_ip_address_object(
+ tun_ips["ip1"] + i * addr_incr
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IPIP tunnel interfaces on host" \
- f" {nodes[u'DUT2'][u'host']}"
+ err_msg = (
+ "Failed to add IPIP tunnel interfaces on host"
+ f" {nodes['DUT2']['host']}"
+ )
ipip_tunnels.extend(
[
- reply[u"sw_if_index"]
+ reply["sw_if_index"]
for reply in papi_exec.get_replies(err_msg)
- if u"sw_if_index" in reply
+ if "sw_if_index" in reply
]
)
# Configure IPSec SAD entries
- cmd = u"ipsec_sad_entry_add_del_v2"
- c_key = dict(
- length=0,
- data=None
- )
- i_key = dict(
- length=0,
- data=None
- )
+ cmd = "ipsec_sad_entry_add_v2"
+ c_key = dict(length=0, data=None)
+ i_key = dict(length=0, data=None)
+ common_flags = IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE
sad_entry = dict(
sad_id=None,
spi=None,
- protocol=int(IPsecProto.IPSEC_API_PROTO_ESP),
-
+ protocol=IPsecProto.ESP,
crypto_algorithm=crypto_alg.alg_int_repr,
crypto_key=c_key,
- integrity_algorithm=integ_alg.alg_int_repr if integ_alg else 0,
+ integrity_algorithm=integ_alg.alg_int_repr,
integrity_key=i_key,
-
- flags=None,
- tunnel_src=0,
- tunnel_dst=0,
- tunnel_flags=int(
- TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
+ flags=common_flags,
+ tunnel=dict(
+ src=0,
+ dst=0,
+ table_id=0,
+ encap_decap_flags=int(
+ TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
+ ),
+ dscp=int(IpDscp.IP_API_DSCP_CS0),
),
- dscp=int(IpDscp.IP_API_DSCP_CS0),
- table_id=0,
salt=0,
- udp_src_port=IPSEC_UDP_PORT_NONE,
- udp_dst_port=IPSEC_UDP_PORT_NONE
- )
- args = dict(
- is_add=True,
- entry=sad_entry
+ udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+ udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+ anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
)
+ args = dict(entry=sad_entry)
for i in range(existing_tunnels, n_tunnels):
- ckeys.append(
- gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg))
- )
- if integ_alg:
- ikeys.append(
- gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg))
- )
+ ckeys.append(gen_key(crypto_alg.key_len))
+ ikeys.append(gen_key(integ_alg.key_len))
# SAD entry for outband / tx path
- args[u"entry"][u"sad_id"] = 100000 + i
- args[u"entry"][u"spi"] = spi_d[u"spi_2"] + i
+ sad_entry["sad_id"] = 100000 + i
+ sad_entry["spi"] = spi_d["spi_2"] + i
- args[u"entry"][u"crypto_key"][u"length"] = len(ckeys[i])
- args[u"entry"][u"crypto_key"][u"data"] = ckeys[i]
+ sad_entry["crypto_key"]["length"] = len(ckeys[i])
+ sad_entry["crypto_key"]["data"] = ckeys[i]
if integ_alg:
- args[u"entry"][u"integrity_key"][u"length"] = len(ikeys[i])
- args[u"entry"][u"integrity_key"][u"data"] = ikeys[i]
- args[u"entry"][u"flags"] = int(
- IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE
- )
+ sad_entry["integrity_key"]["length"] = len(ikeys[i])
+ sad_entry["integrity_key"]["data"] = ikeys[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
+ sad_entry["flags"] |= IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_INBOUND
+ for i in range(existing_tunnels, n_tunnels):
# SAD entry for inband / rx path
- args[u"entry"][u"sad_id"] = i
- args[u"entry"][u"spi"] = spi_d[u"spi_1"] + i
+ sad_entry["sad_id"] = i
+ sad_entry["spi"] = spi_d["spi_1"] + i
- args[u"entry"][u"crypto_key"][u"length"] = len(ckeys[i])
- args[u"entry"][u"crypto_key"][u"data"] = ckeys[i]
+ sad_entry["crypto_key"]["length"] = len(ckeys[i])
+ sad_entry["crypto_key"]["data"] = ckeys[i]
if integ_alg:
- args[u"entry"][u"integrity_key"][u"length"] = len(ikeys[i])
- args[u"entry"][u"integrity_key"][u"data"] = ikeys[i]
- args[u"entry"][u"flags"] = int(
- IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE |
- IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_INBOUND
- )
+ sad_entry["integrity_key"]["length"] = len(ikeys[i])
+ sad_entry["integrity_key"]["data"] = ikeys[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IPsec SAD entries on host" \
- f" {nodes[u'DUT2'][u'host']}"
+ err_msg = (
+ f"Failed to add IPsec SAD entries on host"
+ f" {nodes['DUT2']['host']}"
+ )
papi_exec.get_replies(err_msg)
# Add protection for tunnels with IPSEC
- cmd = u"ipsec_tunnel_protect_update"
+ cmd = "ipsec_tunnel_protect_update"
n_hop = dict(
address=0,
via_label=MPLS_LABEL_INVALID,
- obj_id=Constants.BITWISE_NON_ZERO
+ obj_id=Constants.BITWISE_NON_ZERO,
)
ipsec_tunnel_protect = dict(
- sw_if_index=None,
- nh=n_hop,
- sa_out=None,
- n_sa_in=1,
- sa_in=None
- )
- args = dict(
- tunnel=ipsec_tunnel_protect
+ sw_if_index=None, nh=n_hop, sa_out=None, n_sa_in=1, sa_in=None
)
+ args = dict(tunnel=ipsec_tunnel_protect)
for i in range(existing_tunnels, n_tunnels):
- args[u"tunnel"][u"sw_if_index"] = ipip_tunnels[i]
- args[u"tunnel"][u"sa_out"] = 100000 + i
- args[u"tunnel"][u"sa_in"] = [i]
+ args["tunnel"]["sw_if_index"] = ipip_tunnels[i]
+ args["tunnel"]["sa_out"] = 100000 + i
+ args["tunnel"]["sa_in"] = [i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add protection for tunnels with IPSEC " \
- f"on host {nodes[u'DUT2'][u'host']}"
+ err_msg = (
+ "Failed to add protection for tunnels with IPSEC"
+ f" on host {nodes['DUT2']['host']}"
+ )
papi_exec.get_replies(err_msg)
if not existing_tunnels:
# Configure IP route
- cmd = u"ip_route_add_del"
+ cmd = "ip_route_add_del"
route = IPUtil.compose_vpp_route_structure(
- nodes[u"DUT2"], tun_ips[u"ip1"].compressed,
- prefix_len=32 if tun_ips[u"ip1"].version == 6 else 8,
+ nodes["DUT2"],
+ tun_ips["ip1"].compressed,
+ prefix_len=32 if tun_ips["ip1"].version == 6 else 8,
interface=if2_key,
- gateway=(tun_ips[u"ip2"] - 1).compressed
- )
- args = dict(
- is_add=1,
- is_multipath=0,
- route=route
+ gateway=(tun_ips["ip2"] - 1).compressed,
)
+ args = dict(is_add=1, is_multipath=0, route=route)
papi_exec.add(cmd, **args)
# Configure unnumbered interfaces
- cmd = u"sw_interface_set_unnumbered"
+ cmd = "sw_interface_set_unnumbered"
args = dict(
is_add=True,
sw_if_index=InterfaceUtil.get_interface_index(
- nodes[u"DUT2"], if2_key
+ nodes["DUT2"], if2_key
),
- unnumbered_sw_if_index=0
+ unnumbered_sw_if_index=0,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"unnumbered_sw_if_index"] = ipip_tunnels[i]
+ args["unnumbered_sw_if_index"] = ipip_tunnels[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Set interfaces up
- cmd = u"sw_interface_set_flags"
+ cmd = "sw_interface_set_flags"
args = dict(
sw_if_index=0,
- flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"sw_if_index"] = ipip_tunnels[i]
+ args["sw_if_index"] = ipip_tunnels[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Configure IP routes
- cmd = u"ip_route_add_del"
- args = dict(
- is_add=1,
- is_multipath=0,
- route=None
- )
+ cmd = "ip_route_add_del"
+ args = dict(is_add=1, is_multipath=0, route=None)
for i in range(existing_tunnels, n_tunnels):
- args[u"route"] = IPUtil.compose_vpp_route_structure(
- nodes[u"DUT1"], (raddr_ip1 + i).compressed,
+ args["route"] = IPUtil.compose_vpp_route_structure(
+ nodes["DUT1"],
+ (raddr_ip1 + i).compressed,
prefix_len=128 if raddr_ip1.version == 6 else 32,
- interface=ipip_tunnels[i]
+ interface=ipip_tunnels[i],
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IP routes " \
- f"on host {nodes[u'DUT2'][u'host']}"
+ err_msg = f"Failed to add IP routes on host {nodes['DUT2']['host']}"
papi_exec.get_replies(err_msg)
@staticmethod
def vpp_ipsec_create_tunnel_interfaces(
- nodes, tun_if1_ip_addr, tun_if2_ip_addr, if1_key, if2_key,
- n_tunnels, crypto_alg, integ_alg, raddr_ip1, raddr_ip2, raddr_range,
- existing_tunnels=0):
+ nodes: dict,
+ tun_if1_ip_addr: str,
+ tun_if2_ip_addr: str,
+ if1_key: str,
+ if2_key: str,
+ n_tunnels: int,
+ crypto_alg: CryptoAlg.InputType,
+ integ_alg: IntegAlg.InputType,
+ raddr_ip1: str,
+ raddr_ip2: str,
+ raddr_range: int,
+ existing_tunnels: int = 0,
+ return_keys: bool = False,
+ ) -> Optional[Tuple[List[bytes], List[bytes], int, int]]:
"""Create multiple IPsec tunnel interfaces between two VPP nodes.
+ Some deployments (e.g. devicetest) need to know the generated keys,
+ but other deployments (e.g. scale perf tests) would be flooded
+ with output if keys were returned every time, so keys are returned
+ only when return_keys is True.
+
:param nodes: VPP nodes to create tunnel interfaces.
:param tun_if1_ip_addr: VPP node 1 ipsec tunnel interface IPv4/IPv6
address.
@@ -1628,86 +1746,104 @@ class IPsecUtil:
and to 128 in case of IPv6.
:param existing_tunnels: Number of tunnel interfaces before creation.
Useful mainly for reconf tests. Default 0.
+ :param return_keys: Whether generated keys should be returned.
:type nodes: dict
:type tun_if1_ip_addr: str
:type tun_if2_ip_addr: str
:type if1_key: str
:type if2_key: str
:type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type integ_alg: IntegAlg
- :type raddr_ip1: string
- :type raddr_ip2: string
+ :type crypto_alg: CryptoAlg.InputType
+ :type integ_alg: IntegAlg.InputType
+ :type raddr_ip1: str
+ :type raddr_ip2: str
:type raddr_range: int
:type existing_tunnels: int
+ :type return_keys: bool
+ :returns: Ckeys, ikeys, spi_1, spi_2.
+ :rtype: Optional[Tuple[List[bytes], List[bytes], int, int]]
"""
+ crypto_alg = get_enum_instance(CryptoAlg, crypto_alg)
+ integ_alg = get_enum_instance(IntegAlg, integ_alg)
n_tunnels = int(n_tunnels)
existing_tunnels = int(existing_tunnels)
- spi_d = dict(
- spi_1=100000,
- spi_2=200000
- )
+ spi_d = dict(spi_1=100000, spi_2=200000)
tun_ips = dict(
- ip1=ip_address(tun_if1_ip_addr),
- ip2=ip_address(tun_if2_ip_addr)
+ ip1=ip_address(tun_if1_ip_addr), ip2=ip_address(tun_if2_ip_addr)
)
raddr_ip1 = ip_address(raddr_ip1)
raddr_ip2 = ip_address(raddr_ip2)
- addr_incr = 1 << (128 - raddr_range) if tun_ips[u"ip1"].version == 6 \
+ addr_incr = (
+ 1 << (128 - raddr_range)
+ if tun_ips["ip1"].version == 6
else 1 << (32 - raddr_range)
+ )
- if n_tunnels - existing_tunnels > 10:
- ckeys, ikeys = IPsecUtil._ipsec_create_tunnel_interfaces_dut1_vat(
- nodes, tun_ips, if1_key, if2_key, n_tunnels, crypto_alg,
- integ_alg, raddr_ip2, addr_incr, spi_d, existing_tunnels
- )
- if u"DUT2" not in nodes.keys():
- return ckeys[0], ikeys[0], spi_d[u"spi_1"], spi_d[u"spi_2"]
- IPsecUtil._ipsec_create_tunnel_interfaces_dut2_vat(
- nodes, tun_ips, if2_key, n_tunnels, crypto_alg, ckeys,
- integ_alg, ikeys, raddr_ip1, addr_incr, spi_d, existing_tunnels
- )
- else:
- ckeys, ikeys = IPsecUtil._ipsec_create_tunnel_interfaces_dut1_papi(
- nodes, tun_ips, if1_key, if2_key, n_tunnels, crypto_alg,
- integ_alg, raddr_ip2, addr_incr, spi_d, existing_tunnels
- )
- if u"DUT2" not in nodes.keys():
- return ckeys[0], ikeys[0], spi_d[u"spi_1"], spi_d[u"spi_2"]
+ ckeys, ikeys = IPsecUtil._ipsec_create_tunnel_interfaces_dut1_papi(
+ nodes,
+ tun_ips,
+ if1_key,
+ if2_key,
+ n_tunnels,
+ crypto_alg,
+ integ_alg,
+ raddr_ip2,
+ addr_incr,
+ spi_d,
+ existing_tunnels,
+ )
+ if "DUT2" in nodes.keys():
IPsecUtil._ipsec_create_tunnel_interfaces_dut2_papi(
- nodes, tun_ips, if2_key, n_tunnels, crypto_alg, ckeys,
- integ_alg, ikeys, raddr_ip1, addr_incr, spi_d, existing_tunnels
+ nodes,
+ tun_ips,
+ if2_key,
+ n_tunnels,
+ crypto_alg,
+ ckeys,
+ integ_alg,
+ ikeys,
+ raddr_ip1,
+ addr_incr,
+ spi_d,
+ existing_tunnels,
)
- return None, None, None, None
+ if return_keys:
+ return ckeys, ikeys, spi_d["spi_1"], spi_d["spi_2"]
+ return None
@staticmethod
- def _create_ipsec_script_files(dut, instances):
+ def _create_ipsec_script_files(
+ dut: str, instances: int
+ ) -> List[TextIOWrapper]:
"""Create script files for configuring IPsec in containers
:param dut: DUT node on which to create the script files
:param instances: number of containers on DUT node
- :type dut: string
+ :type dut: str
:type instances: int
+ :returns: Created opened file handles.
+ :rtype: List[TextIOWrapper]
"""
scripts = []
for cnf in range(0, instances):
script_filename = (
f"/tmp/ipsec_create_tunnel_cnf_{dut}_{cnf + 1}.config"
)
- scripts.append(open(script_filename, 'w'))
+ scripts.append(open(script_filename, "w", encoding="utf-8"))
return scripts
@staticmethod
def _close_and_copy_ipsec_script_files(
- dut, nodes, instances, scripts):
+ dut: str, nodes: dict, instances: int, scripts: Sequence[TextIOWrapper]
+ ) -> None:
"""Close created scripts and copy them to containers
:param dut: DUT node on which to create the script files
:param nodes: VPP nodes
:param instances: number of containers on DUT node
:param scripts: dictionary holding the script files
- :type dut: string
+ :type dut: str
:type nodes: dict
:type instances: int
:type scripts: dict
@@ -1719,124 +1855,21 @@ class IPsecUtil:
)
scp_node(nodes[dut], script_filename, script_filename)
-
- @staticmethod
- def vpp_ipsec_create_tunnel_interfaces_in_containers(
- nodes, if1_ip_addr, if2_ip_addr, n_tunnels, crypto_alg, integ_alg,
- raddr_ip1, raddr_ip2, raddr_range, n_instances):
- """Create multiple IPsec tunnel interfaces between two VPP nodes.
-
- :param nodes: VPP nodes to create tunnel interfaces.
- :param if1_ip_addr: VPP node 1 interface IP4 address.
- :param if2_ip_addr: VPP node 2 interface IP4 address.
- :param n_tunnels: Number of tunnell interfaces to create.
- :param crypto_alg: The encryption algorithm name.
- :param integ_alg: The integrity algorithm name.
- :param raddr_ip1: Policy selector remote IPv4 start address for the
- first tunnel in direction node1->node2.
- :param raddr_ip2: Policy selector remote IPv4 start address for the
- first tunnel in direction node2->node1.
- :param raddr_range: Mask specifying range of Policy selector Remote
- IPv4 addresses. Valid values are from 1 to 32.
- :param n_instances: Number of containers.
- :type nodes: dict
- :type if1_ip_addr: str
- :type if2_ip_addr: str
- :type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type integ_alg: IntegAlg
- :type raddr_ip1: string
- :type raddr_ip2: string
- :type raddr_range: int
- :type n_instances: int
- """
- spi_1 = 100000
- spi_2 = 200000
- addr_incr = 1 << (32 - raddr_range)
-
- dut1_scripts = IPsecUtil._create_ipsec_script_files(
- u"DUT1", n_instances
- )
- dut2_scripts = IPsecUtil._create_ipsec_script_files(
- u"DUT2", n_instances
- )
-
- for cnf in range(0, n_instances):
- dut1_scripts[cnf].write(
- u"create loopback interface\n"
- u"set interface state loop0 up\n\n"
- )
- dut2_scripts[cnf].write(
- f"ip route add {if1_ip_addr}/8 via "
- f"{ip_address(if2_ip_addr) + cnf + 100} memif1/{cnf + 1}\n\n"
- )
-
- for tnl in range(0, n_tunnels):
- cnf = tnl % n_instances
- ckey = getattr(
- gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg)), u"hex"
- )
- integ = u""
- if integ_alg:
- ikey = getattr(
- gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg)), u"hex"
- )
- integ = (
- f"integ-alg {integ_alg.alg_name} "
- f"local-integ-key {ikey} "
- f"remote-integ-key {ikey} "
- )
- # Configure tunnel end point(s) on left side
- dut1_scripts[cnf].write(
- u"set interface ip address loop0 "
- f"{ip_address(if1_ip_addr) + tnl * addr_incr}/32\n"
- f"create ipsec tunnel "
- f"local-ip {ip_address(if1_ip_addr) + tnl * addr_incr} "
- f"local-spi {spi_1 + tnl} "
- f"remote-ip {ip_address(if2_ip_addr) + cnf} "
- f"remote-spi {spi_2 + tnl} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"local-crypto-key {ckey} "
- f"remote-crypto-key {ckey} "
- f"instance {tnl // n_instances} "
- f"salt 0x0 "
- f"{integ} \n"
- f"set interface unnumbered ipip{tnl // n_instances} use loop0\n"
- f"set interface state ipip{tnl // n_instances} up\n"
- f"ip route add {ip_address(raddr_ip2)+tnl}/32 "
- f"via ipip{tnl // n_instances}\n\n"
- )
- # Configure tunnel end point(s) on right side
- dut2_scripts[cnf].write(
- f"set ip neighbor memif1/{cnf + 1} "
- f"{ip_address(if1_ip_addr) + tnl * addr_incr} "
- f"02:02:00:00:{17:02X}:{cnf:02X} static\n"
- f"create ipsec tunnel local-ip {ip_address(if2_ip_addr) + cnf} "
- f"local-spi {spi_2 + tnl} "
- f"remote-ip {ip_address(if1_ip_addr) + tnl * addr_incr} "
- f"remote-spi {spi_1 + tnl} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"local-crypto-key {ckey} "
- f"remote-crypto-key {ckey} "
- f"instance {tnl // n_instances} "
- f"salt 0x0 "
- f"{integ}\n"
- f"set interface unnumbered ipip{tnl // n_instances} "
- f"use memif1/{cnf + 1}\n"
- f"set interface state ipip{tnl // n_instances} up\n"
- f"ip route add {ip_address(raddr_ip1) + tnl}/32 "
- f"via ipip{tnl // n_instances}\n\n"
- )
-
- IPsecUtil._close_and_copy_ipsec_script_files(
- u"DUT1", nodes, n_instances, dut1_scripts)
- IPsecUtil._close_and_copy_ipsec_script_files(
- u"DUT2", nodes, n_instances, dut2_scripts)
-
@staticmethod
def vpp_ipsec_add_multiple_tunnels(
- nodes, interface1, interface2, n_tunnels, crypto_alg, integ_alg,
- tunnel_ip1, tunnel_ip2, raddr_ip1, raddr_ip2, raddr_range):
+ nodes: dict,
+ interface1: Union[str, int],
+ interface2: Union[str, int],
+ n_tunnels: int,
+ crypto_alg: CryptoAlg.InputType,
+ integ_alg: IntegAlg.InputType,
+ tunnel_ip1: str,
+ tunnel_ip2: str,
+ raddr_ip1: str,
+ raddr_ip2: str,
+ raddr_range: int,
+ tunnel_addr_incr: bool = True,
+ ) -> None:
"""Create multiple IPsec tunnels between two VPP nodes.
:param nodes: VPP nodes to create tunnels.
@@ -1853,18 +1886,24 @@ class IPsecUtil:
first tunnel in direction node2->node1.
:param raddr_range: Mask specifying range of Policy selector Remote
IPv4 addresses. Valid values are from 1 to 32.
+ :param tunnel_addr_incr: Enable or disable tunnel IP address
+ incremental step.
:type nodes: dict
- :type interface1: str or int
- :type interface2: str or int
+ :type interface1: Union[str, int]
+ :type interface2: Union[str, int]
:type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type integ_alg: IntegAlg
+ :type crypto_alg: CryptoAlg.InputType
+ :type integ_alg: IntegAlg.InputType
:type tunnel_ip1: str
:type tunnel_ip2: str
- :type raddr_ip1: string
- :type raddr_ip2: string
+ :type raddr_ip1: str
+ :type raddr_ip2: str
:type raddr_range: int
+ :type tunnel_addr_incr: bool
"""
+ crypto_alg = get_enum_instance(CryptoAlg, crypto_alg)
+ integ_alg = get_enum_instance(IntegAlg, integ_alg)
+
spd_id = 1
p_hi = 100
p_lo = 10
@@ -1873,93 +1912,279 @@ class IPsecUtil:
spi_1 = 300000
spi_2 = 400000
- crypto_key = gen_key(
- IPsecUtil.get_crypto_alg_key_len(crypto_alg)
- ).decode()
- integ_key = gen_key(
- IPsecUtil.get_integ_alg_key_len(integ_alg)
- ).decode() if integ_alg else u""
-
- IPsecUtil.vpp_ipsec_set_ip_route(
- nodes[u"DUT1"], n_tunnels, tunnel_ip1, raddr_ip2, tunnel_ip2,
- interface1, raddr_range)
- IPsecUtil.vpp_ipsec_set_ip_route(
- nodes[u"DUT2"], n_tunnels, tunnel_ip2, raddr_ip1, tunnel_ip1,
- interface2, raddr_range)
-
- IPsecUtil.vpp_ipsec_add_spd(nodes[u"DUT1"], spd_id)
- IPsecUtil.vpp_ipsec_spd_add_if(nodes[u"DUT1"], spd_id, interface1)
- IPsecUtil.vpp_ipsec_policy_add(
- nodes[u"DUT1"], spd_id, p_hi, PolicyAction.BYPASS, inbound=False,
- proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8"
+ crypto_key = gen_key(crypto_alg.key_len).decode()
+ integ_key = gen_key(integ_alg.key_len).decode()
+ rmac = (
+ Topology.get_interface_mac(nodes["DUT2"], interface2)
+ if "DUT2" in nodes.keys()
+ else Topology.get_interface_mac(nodes["TG"], interface2)
)
- IPsecUtil.vpp_ipsec_policy_add(
- nodes[u"DUT1"], spd_id, p_hi, PolicyAction.BYPASS, inbound=True,
- proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8"
+ IPsecUtil.vpp_ipsec_set_ip_route(
+ nodes["DUT1"],
+ n_tunnels,
+ tunnel_ip1,
+ raddr_ip2,
+ tunnel_ip2,
+ interface1,
+ raddr_range,
+ rmac,
)
- IPsecUtil.vpp_ipsec_add_spd(nodes[u"DUT2"], spd_id)
- IPsecUtil.vpp_ipsec_spd_add_if(nodes[u"DUT2"], spd_id, interface2)
- IPsecUtil.vpp_ipsec_policy_add(
- nodes[u"DUT2"], spd_id, p_hi, PolicyAction.BYPASS, inbound=False,
- proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8"
- )
- IPsecUtil.vpp_ipsec_policy_add(
- nodes[u"DUT2"], spd_id, p_hi, PolicyAction.BYPASS, inbound=True,
- proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8"
+ IPsecUtil.vpp_ipsec_add_spd(nodes["DUT1"], spd_id)
+ IPsecUtil.vpp_ipsec_spd_add_if(nodes["DUT1"], spd_id, interface1)
+
+ addr_incr = (
+ 1 << (128 - 96)
+ if ip_address(tunnel_ip1).version == 6
+ else 1 << (32 - 24)
)
+ for i in range(n_tunnels // (addr_incr**2) + 1):
+ dut1_local_outbound_range = ip_network(
+ f"{ip_address(tunnel_ip1) + i*(addr_incr**3)}/8", False
+ ).with_prefixlen
+ dut1_remote_outbound_range = ip_network(
+ f"{ip_address(tunnel_ip2) + i*(addr_incr**3)}/8", False
+ ).with_prefixlen
+
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ nodes["DUT1"],
+ spd_id,
+ p_hi,
+ IpsecSpdAction.BYPASS,
+ inbound=False,
+ proto=IPsecProto.ESP,
+ laddr_range=dut1_local_outbound_range,
+ raddr_range=dut1_remote_outbound_range,
+ )
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ nodes["DUT1"],
+ spd_id,
+ p_hi,
+ IpsecSpdAction.BYPASS,
+ inbound=True,
+ proto=IPsecProto.ESP,
+ laddr_range=dut1_remote_outbound_range,
+ raddr_range=dut1_local_outbound_range,
+ )
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes[u"DUT1"], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip1, tunnel_ip2
+ nodes["DUT1"],
+ n_tunnels,
+ sa_id_1,
+ spi_1,
+ crypto_alg,
+ crypto_key,
+ integ_alg,
+ integ_key,
+ tunnel_ip1,
+ tunnel_ip2,
+ tunnel_addr_incr,
)
- IPsecUtil.vpp_ipsec_spd_add_entries(
- nodes[u"DUT1"], n_tunnels, spd_id, p_lo, False, sa_id_1, raddr_ip2
+
+ IPsecUtil.vpp_ipsec_add_spd_entries(
+ nodes["DUT1"],
+ n_tunnels,
+ spd_id,
+ priority=ObjIncrement(p_lo, 0),
+ action=IpsecSpdAction.PROTECT,
+ inbound=False,
+ sa_id=ObjIncrement(sa_id_1, 1),
+ raddr_range=NetworkIncrement(ip_network(raddr_ip2)),
)
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes[u"DUT2"], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip1, tunnel_ip2
+ nodes["DUT1"],
+ n_tunnels,
+ sa_id_2,
+ spi_2,
+ crypto_alg,
+ crypto_key,
+ integ_alg,
+ integ_key,
+ tunnel_ip2,
+ tunnel_ip1,
+ tunnel_addr_incr,
)
- IPsecUtil.vpp_ipsec_spd_add_entries(
- nodes[u"DUT2"], n_tunnels, spd_id, p_lo, True, sa_id_1, raddr_ip2
+ IPsecUtil.vpp_ipsec_add_spd_entries(
+ nodes["DUT1"],
+ n_tunnels,
+ spd_id,
+ priority=ObjIncrement(p_lo, 0),
+ action=IpsecSpdAction.PROTECT,
+ inbound=True,
+ sa_id=ObjIncrement(sa_id_2, 1),
+ raddr_range=NetworkIncrement(ip_network(raddr_ip1)),
)
- IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes[u"DUT2"], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip2, tunnel_ip1
- )
+ if "DUT2" in nodes.keys():
+ rmac = Topology.get_interface_mac(nodes["DUT1"], interface1)
+ IPsecUtil.vpp_ipsec_set_ip_route(
+ nodes["DUT2"],
+ n_tunnels,
+ tunnel_ip2,
+ raddr_ip1,
+ tunnel_ip1,
+ interface2,
+ raddr_range,
+ rmac,
+ )
- IPsecUtil.vpp_ipsec_spd_add_entries(
- nodes[u"DUT2"], n_tunnels, spd_id, p_lo, False, sa_id_2, raddr_ip1
- )
+ IPsecUtil.vpp_ipsec_add_spd(nodes["DUT2"], spd_id)
+ IPsecUtil.vpp_ipsec_spd_add_if(nodes["DUT2"], spd_id, interface2)
+ for i in range(n_tunnels // (addr_incr**2) + 1):
+ dut2_local_outbound_range = ip_network(
+ f"{ip_address(tunnel_ip1) + i*(addr_incr**3)}/8", False
+ ).with_prefixlen
+ dut2_remote_outbound_range = ip_network(
+ f"{ip_address(tunnel_ip2) + i*(addr_incr**3)}/8", False
+ ).with_prefixlen
+
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ nodes["DUT2"],
+ spd_id,
+ p_hi,
+ IpsecSpdAction.BYPASS,
+ inbound=False,
+ proto=IPsecProto.ESP,
+ laddr_range=dut2_remote_outbound_range,
+ raddr_range=dut2_local_outbound_range,
+ )
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ nodes["DUT2"],
+ spd_id,
+ p_hi,
+ IpsecSpdAction.BYPASS,
+ inbound=True,
+ proto=IPsecProto.ESP,
+ laddr_range=dut2_local_outbound_range,
+ raddr_range=dut2_remote_outbound_range,
+ )
- IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes[u"DUT1"], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip2, tunnel_ip1
- )
+ IPsecUtil.vpp_ipsec_add_sad_entries(
+ nodes["DUT2"],
+ n_tunnels,
+ sa_id_1,
+ spi_1,
+ crypto_alg,
+ crypto_key,
+ integ_alg,
+ integ_key,
+ tunnel_ip1,
+ tunnel_ip2,
+ tunnel_addr_incr,
+ )
+ IPsecUtil.vpp_ipsec_add_spd_entries(
+ nodes["DUT2"],
+ n_tunnels,
+ spd_id,
+ priority=ObjIncrement(p_lo, 0),
+ action=IpsecSpdAction.PROTECT,
+ inbound=True,
+ sa_id=ObjIncrement(sa_id_1, 1),
+ raddr_range=NetworkIncrement(ip_network(raddr_ip2)),
+ )
- IPsecUtil.vpp_ipsec_spd_add_entries(
- nodes[u"DUT1"], n_tunnels, spd_id, p_lo, True, sa_id_2, raddr_ip1
- )
+ IPsecUtil.vpp_ipsec_add_sad_entries(
+ nodes["DUT2"],
+ n_tunnels,
+ sa_id_2,
+ spi_2,
+ crypto_alg,
+ crypto_key,
+ integ_alg,
+ integ_key,
+ tunnel_ip2,
+ tunnel_ip1,
+ tunnel_addr_incr,
+ )
+ IPsecUtil.vpp_ipsec_add_spd_entries(
+ nodes["DUT2"],
+ n_tunnels,
+ spd_id,
+ priority=ObjIncrement(p_lo, 0),
+ action=IpsecSpdAction.PROTECT,
+ inbound=False,
+ sa_id=ObjIncrement(sa_id_2, 1),
+ raddr_range=NetworkIncrement(ip_network(raddr_ip1)),
+ )
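Editor's note: for orientation, a standalone sketch of the BYPASS range arithmetic used in the loops above; the endpoint addresses and tunnel count are illustrative, not taken from any suite.

    from ipaddress import ip_address, ip_network

    # Illustrative inputs; real values come from the test topology.
    tunnel_ip1, tunnel_ip2, n_tunnels = "100.0.0.1", "200.0.0.2", 10

    addr_incr = 1 << (32 - 24)  # IPv4 endpoints, as in the code above.
    for i in range(n_tunnels // (addr_incr**2) + 1):
        local_range = ip_network(
            f"{ip_address(tunnel_ip1) + i * (addr_incr**3)}/8", False
        ).with_prefixlen
        remote_range = ip_network(
            f"{ip_address(tunnel_ip2) + i * (addr_incr**3)}/8", False
        ).with_prefixlen
        print(local_range, remote_range)  # 100.0.0.0/8 200.0.0.0/8

For tunnel counts below 65536 the loop runs once, producing a single /8 pair per direction.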
@staticmethod
- def vpp_ipsec_show(node):
- """Run "show ipsec" debug CLI command.
+ def vpp_ipsec_show_all(node: dict) -> None:
+ """Run "show ipsec all" debug CLI command.
:param node: Node to run command on.
:type node: dict
"""
- PapiSocketExecutor.run_cli_cmd(node, u"show ipsec")
+ PapiSocketExecutor.run_cli_cmd(node, "show ipsec all")
@staticmethod
- def show_ipsec_security_association(node):
+ def show_ipsec_security_association(node: dict) -> None:
"""Show IPSec security association.
:param node: DUT node.
:type node: dict
"""
- cmds = [
- u"ipsec_sa_v2_dump"
- ]
- PapiSocketExecutor.dump_and_log(node, cmds)
+ cmd = "ipsec_sa_v5_dump"
+ PapiSocketExecutor.dump_and_log(node, [cmd])
+
+ @staticmethod
+ def vpp_ipsec_flow_enable_rss(
+ node: dict,
+ proto: str = "IPSEC_ESP",
+ rss_type: str = "esp",
+ function: str = "default",
+ ) -> int:
+ """Enable RSS action for an IPsec flow.
+
+ :param node: DUT node.
+ :param proto: The flow protocol.
+ :param rss_type: RSS type.
+ :param function: RSS function.
+ :type node: dict
+ :type proto: str
+ :type rss_type: str
+ :type function: str
+ :returns: flow_index.
+ :rtype: int
+ """
+ # The proto argument does not correspond to IPsecProto.
+ # The allowed values come from src/vnet/ip/protocols.def
+ # and we do not have a good enum for that yet.
+ # The enums in FlowUtil are close but not exactly the same.
+
+ # TODO: to be fixed to use full PAPI when it is ready in VPP
+ cmd = (
+ f"test flow add src-ip any proto {proto} rss function"
+ f" {function} rss types {rss_type}"
+ )
+ stdout = PapiSocketExecutor.run_cli_cmd(node, cmd)
+ flow_index = int(stdout.split()[1])
+
+ return flow_index
+
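Editor's note: a rough illustration of the CLI string composed above, using the function's own default arguments; no node handle is needed to see the text.

    proto, rss_type, function = "IPSEC_ESP", "esp", "default"
    cmd = (
        f"test flow add src-ip any proto {proto} rss function"
        f" {function} rss types {rss_type}"
    )
    print(cmd)
    # test flow add src-ip any proto IPSEC_ESP rss function default rss types esp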
+ @staticmethod
+ def vpp_create_ipsec_flows_on_dut(
+ node: dict, n_flows: int, rx_queues: int, spi_start: int, interface: str
+ ) -> None:
+ """Create multiple IPsec flows and enable them on an interface.
+
+ :param node: DUT node.
+ :param n_flows: Number of flows to create.
+ :param rx_queues: Number of RX queues.
+ :param spi_start: The start spi.
+ :param interface: Name of the interface.
+
+ :type node: dict
+ :type n_flows: int
+ :type rx_queues: int
+ :type spi_start: int
+ :type interface: str
+ """
+
+ for i in range(0, n_flows):
+ rx_queue = i % rx_queues
+ spi = spi_start + i
+ flow_index = FlowUtil.vpp_create_ip4_ipsec_flow(
+ node, "ESP", spi, "redirect-to-queue", value=rx_queue
+ )
+ FlowUtil.vpp_flow_enable(node, interface, flow_index)
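Editor's note: a minimal sketch of how the loop above spreads flows over RX queues and SPIs; the numbers are illustrative only.

    n_flows, rx_queues, spi_start = 4, 2, 1000
    for i in range(n_flows):
        rx_queue = i % rx_queues
        spi = spi_start + i
        print(f"flow {i}: spi={spi} -> queue {rx_queue}")
    # flow 0: spi=1000 -> queue 0
    # flow 1: spi=1001 -> queue 1
    # flow 2: spi=1002 -> queue 0
    # flow 3: spi=1003 -> queue 1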
diff --git a/resources/libraries/python/IPv6Util.py b/resources/libraries/python/IPv6Util.py
index 883304487d..b383695480 100644
--- a/resources/libraries/python/IPv6Util.py
+++ b/resources/libraries/python/IPv6Util.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/IncrementUtil.py b/resources/libraries/python/IncrementUtil.py
new file mode 100644
index 0000000000..45260ae06f
--- /dev/null
+++ b/resources/libraries/python/IncrementUtil.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2021 PANTHEON.tech s.r.o.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Increment utilities library."""
+
+
+class ObjIncrement():
+ """
+ An iterator class used to generate incremented values in each iteration
+ or when inc_fmt is called.
+
+ Subclasses should override:
+ _incr: when a simple '+' binary operation isn't sufficient.
+ _str_fmt: when a simple str representation of the incremented object
+ isn't the proper format.
+ """
+ def __init__(self, initial_value, increment):
+ """
+ :param initial_value: The first value to be returned.
+ :param increment: Each iteration/inc_fmt call will return the previous
+ value incremented by this.
+ :type initial_value: object supporting the '+' binary operation
+ :type increment: object supporting the '+' binary operation
+ """
+ self._value = initial_value
+ self._increment = increment
+
+ def _incr(self):
+ """
+ This function will be called in each iteration/inc_fmt call. Subclasses
+ should override this when their object is incremented differently.
+ The function must compute the next iterated value and store it in
+ self._value.
+ """
+ self._value += self._increment
+
+ def __next__(self):
+ """
+ Each iteration returns the current object and stores the incremented
+ object (which will be returned in the next iteration). The first
+ iteration returns the initial value.
+ """
+ return_value = self._value
+ self._incr()
+ return return_value
+
+ def __iter__(self):
+ return self
+
+ def _str_fmt(self):
+ """
+ The string representation is a standard string representation of the
+ incremented object. Subclasses may override this for a different
+ string representation.
+ """
+ return str(self._value)
+
+ def inc_fmt(self):
+ """
+ Return a string representation and increment the current value.
+ """
+ return_value = self._str_fmt()
+ self._incr()
+ return return_value
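Editor's note: a short usage sketch for ObjIncrement as IPsecUtil uses it above; a zero increment yields a constant priority, a unit increment yields consecutive SA ids (the starting values are illustrative).

    priority = ObjIncrement(10, 0)   # constant: 10, 10, 10, ...
    sa_id = ObjIncrement(20, 1)      # consecutive: 20, 21, 22, ...
    print([next(priority) for _ in range(3)])   # [10, 10, 10]
    print([sa_id.inc_fmt() for _ in range(3)])  # ['20', '21', '22']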
diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 10778ed49a..ff013307bc 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,18 +13,19 @@
"""Interface util library."""
+from json import loads
from time import sleep
from enum import IntEnum
from ipaddress import ip_address
from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.L2Util import L2Util
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
-from resources.libraries.python.parsers.JsonParser import JsonParser
from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.VPPUtil import VPPUtil
@@ -110,6 +111,13 @@ class RdmaMode(IntEnum):
RDMA_API_MODE_DV = 2
+class AfXdpMode(IntEnum):
+ """AF_XDP interface mode."""
+ AF_XDP_API_MODE_AUTO = 0
+ AF_XDP_API_MODE_COPY = 1
+ AF_XDP_API_MODE_ZERO_COPY = 2
+
+
class InterfaceUtil:
"""General utilities for managing interfaces"""
@@ -205,6 +213,10 @@ class InterfaceUtil:
raise ValueError(f"Unknown if_type: {if_type}")
if node[u"type"] == NodeType.DUT:
+ if sw_if_index is None:
+ raise ValueError(
+ f"Interface index for {interface} not assigned by VPP."
+ )
if state == u"up":
flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
elif state == u"down":
@@ -228,6 +240,26 @@ class InterfaceUtil:
)
@staticmethod
+ def set_interface_state_pci(
+ node, pf_pcis, namespace=None, state=u"up"):
+ """Set operational state for interface specified by PCI address.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param namespace: Exec command in namespace. (Optional, Default: none)
+ :param state: Up/Down. (Optional, default: up)
+ :type node: dict
+ :type pf_pcis: list
+ :type namespace: str
+ :type state: str
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ InterfaceUtil.set_linux_interface_state(
+ node, pf_eth, namespace=namespace, state=state
+ )
+
+ @staticmethod
def set_interface_mtu(node, pf_pcis, mtu=9200):
"""Set Ethernet MTU for specified interfaces.
@@ -245,25 +277,58 @@ class InterfaceUtil:
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def set_interface_flow_control(node, pf_pcis, rx=u"off", tx=u"off"):
+ def set_interface_channels(
+ node, pf_pcis, num_queues=1, channel=u"combined"):
+ """Set interface channels for specified interfaces.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :param num_queues: Number of channels. (Optional, Default: 1)
+ :param channel: Channel type. (Optional, Default: combined)
+ :type node: dict
+ :type pf_pcis: list
+ :type num_queues: int
+ :type channel: str
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ cmd = f"ethtool --set-channels {pf_eth} {channel} {num_queues}"
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
+ def set_interface_xdp_off(node, pf_pcis):
+ """Detaches any currently attached XDP/BPF program from the specified
+ interfaces.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+ :type node: dict
+ :type pf_pcis: list
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ cmd = f"ip link set dev {pf_eth} xdp off"
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
+ def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
"""Set Ethernet flow control for specified interfaces.
:param node: Topology node.
:param pf_pcis: List of node's interfaces PCI addresses.
- :param rx: RX flow. Default: off.
- :param tx: TX flow. Default: off.
+ :param rxf: RX flow. (Optional, Default: off).
+ :param txf: TX flow. (Optional, Default: off).
:type nodes: dict
:type pf_pcis: list
- :type rx: str
- :type tx: str
+ :type rxf: str
+ :type txf: str
"""
for pf_pci in pf_pcis:
pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
- cmd = f"ethtool -A {pf_eth} rx off tx off"
+ cmd = f"ethtool -A {pf_eth} rx {rxf} tx {txf}"
ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
if int(ret_code) not in (0, 78):
- raise RuntimeError("Failed to set MTU on {pf_eth}!")
-
+ raise RuntimeError(f"Failed to set flow control on {pf_eth}!")
@staticmethod
def set_pci_parameter(node, pf_pcis, key, value):
@@ -283,11 +348,13 @@ class InterfaceUtil:
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def vpp_set_interface_mtu(node, interface, mtu=9200):
- """Set Ethernet MTU on interface.
+ def vpp_set_interface_mtu(node, interface, mtu):
+ """Apply new MTU value to a VPP hardware interface.
+
+ The interface should be down when this is called.
:param node: VPP node.
- :param interface: Interface to setup MTU. Default: 9200.
+ :param interface: Interface to set MTU on.
:param mtu: Ethernet MTU size in Bytes.
:type node: dict
:type interface: str or int
@@ -297,44 +364,11 @@ class InterfaceUtil:
sw_if_index = Topology.get_interface_sw_index(node, interface)
else:
sw_if_index = interface
-
cmd = u"hw_interface_set_mtu"
err_msg = f"Failed to set interface MTU on host {node[u'host']}"
- args = dict(
- sw_if_index=sw_if_index,
- mtu=int(mtu)
- )
- try:
- with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_reply(err_msg)
- except AssertionError as err:
- # TODO: Make failure tolerance optional.
- logger.debug(f"Setting MTU failed. Expected?\n{err}")
-
- @staticmethod
- def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
- """Set Ethernet MTU on all interfaces.
-
- :param node: VPP node.
- :param mtu: Ethernet MTU size in Bytes. Default: 9200.
- :type node: dict
- :type mtu: int
- """
- for interface in node[u"interfaces"]:
- InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
-
- @staticmethod
- def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
- """Set Ethernet MTU on all interfaces on all DUTs.
-
- :param nodes: VPP nodes.
- :param mtu: Ethernet MTU size in Bytes. Default: 9200.
- :type nodes: dict
- :type mtu: int
- """
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
- InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
+ args = dict(sw_if_index=sw_if_index, mtu=int(mtu))
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vpp_node_interfaces_ready_wait(node, retries=15):
@@ -689,9 +723,8 @@ class InterfaceUtil:
ret_code, stdout, _ = ssh.exec_command(cmd)
if int(ret_code) != 0:
raise RuntimeError(u"Get interface name and MAC failed")
- tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
- interfaces = JsonParser().parse_data(tmp)
+ interfaces = loads("{" + stdout.rstrip().replace("\n", ",") + "}")
for interface in node[u"interfaces"].values():
name = interfaces.get(interface[u"mac_address"])
if name is None:
@@ -805,7 +838,7 @@ class InterfaceUtil:
:raises RuntimeError: if it is unable to create VxLAN interface on the
node.
"""
- cmd = u"vxlan_add_del_tunnel"
+ cmd = u"vxlan_add_del_tunnel_v3"
args = dict(
is_add=True,
instance=Constants.BITWISE_NON_ZERO,
@@ -859,7 +892,7 @@ class InterfaceUtil:
err_msg = f"Failed to set VXLAN bypass on interface " \
f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_replies(err_msg)
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vxlan_dump(node, interface=None):
@@ -1017,6 +1050,76 @@ class InterfaceUtil:
return ifc_name, sw_if_index
@staticmethod
+ def create_gtpu_tunnel_interface(node, teid, source_ip, destination_ip):
+ """Create GTPU interface and return sw if index of created interface.
+
+ :param node: Node where to create GTPU interface.
+ :param teid: GTPU Tunnel Endpoint Identifier.
+ :param source_ip: Source IP of a GTPU Tunnel End Point.
+ :param destination_ip: Destination IP of a GTPU Tunnel End Point.
+ :type node: dict
+ :type teid: int
+ :type source_ip: str
+ :type destination_ip: str
+ :returns: SW IF INDEX of created interface.
+ :rtype: int
+ :raises RuntimeError: if it is unable to create GTPU interface on the
+ node.
+ """
+ cmd = u"gtpu_add_del_tunnel_v2"
+ args = dict(
+ is_add=True,
+ src_address=IPAddress.create_ip_address_object(
+ ip_address(source_ip)
+ ),
+ dst_address=IPAddress.create_ip_address_object(
+ ip_address(destination_ip)
+ ),
+ mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+ encap_vrf_id=0,
+ decap_next_index=2, # ipv4
+ teid=teid,
+ # pdu_extension: Unused, false by default.
+ # qfi: Irrelevant when pdu_extension is not used.
+ )
+ err_msg = f"Failed to create GTPU tunnel interface " \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+
+ if_key = Topology.add_new_port(node, u"gtpu_tunnel")
+ Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
+ ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
+ Topology.update_interface_name(node, if_key, ifc_name)
+
+ return sw_if_index
+
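Editor's note: a hedged usage sketch of the two GTPU helpers above; `node` is assumed to be a connected topology node dict, and the TEID, addresses and interface key are hypothetical.

    # Hypothetical values; real ones come from the topology file.
    gtpu_if_index = InterfaceUtil.create_gtpu_tunnel_interface(
        node, teid=10, source_ip="192.168.1.1", destination_ip="192.168.1.2"
    )
    InterfaceUtil.vpp_enable_gtpu_offload_rx(node, "port1", gtpu_if_index)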
+ @staticmethod
+ def vpp_enable_gtpu_offload_rx(node, interface, gtpu_if_index):
+ """Enable GTPU offload RX onto interface.
+
+ :param node: Node to run command on.
+ :param interface: Name of the specific interface.
+ :param gtpu_if_index: Index of GTPU tunnel interface.
+
+ :type node: dict
+ :type interface: str
+ :type gtpu_if_index: int
+ """
+ sw_if_index = Topology.get_interface_sw_index(node, interface)
+
+ cmd = u"gtpu_offload_rx"
+ args = dict(
+ hw_if_index=sw_if_index,
+ sw_if_index=gtpu_if_index,
+ enable=True
+ )
+
+ err_msg = f"Failed to enable GTPU offload RX on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
def vpp_create_loopback(node, mac=None):
"""Create loopback interface on VPP node.
@@ -1029,9 +1132,11 @@ class InterfaceUtil:
:raises RuntimeError: If it is not possible to create loopback on the
node.
"""
- cmd = u"create_loopback"
+ cmd = u"create_loopback_instance"
args = dict(
- mac_address=L2Util.mac_to_bin(mac) if mac else 0
+ mac_address=L2Util.mac_to_bin(mac) if mac else 0,
+ is_specified=False,
+ user_instance=0,
)
err_msg = f"Failed to create loopback interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
@@ -1169,11 +1274,74 @@ class InterfaceUtil:
txq_size=txq_size
)
err_msg = f"Failed to create AVF interface on host {node[u'host']}"
+
+ # FIXME: Remove once the fw/driver is upgraded.
+ for _ in range(10):
+ with PapiSocketExecutor(node) as papi_exec:
+ try:
+ sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(
+ err_msg
+ )
+ break
+ except AssertionError:
+ logger.error(err_msg)
+ else:
+ raise AssertionError(err_msg)
+
+ InterfaceUtil.add_eth_interface(
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
+ host_if_key=if_key
+ )
+
+ return Topology.get_interface_by_sw_index(node, sw_if_index)
+
+ @staticmethod
+ def vpp_create_af_xdp_interface(
+ node, if_key, num_rx_queues=None, rxq_size=0, txq_size=0,
+ mode=u"auto"):
+ """Create AF_XDP interface on VPP node.
+
+ :param node: DUT node from topology.
+ :param if_key: Physical interface key from topology file of interface
+ to be bound to compatible driver.
+ :param num_rx_queues: Number of RX queues. (Optional, Default: none)
+ :param rxq_size: Size of RXQ (0 = Default API; 512 = Default VPP).
+ :param txq_size: Size of TXQ (0 = Default API; 512 = Default VPP).
+ :param mode: AF_XDP interface mode. (Optional, Default: auto).
+ :type node: dict
+ :type if_key: str
+ :type num_rx_queues: int
+ :type rxq_size: int
+ :type txq_size: int
+ :type mode: str
+ :returns: Interface key (name) in topology file.
+ :rtype: str
+ :raises RuntimeError: If it is not possible to create AF_XDP interface
+ on the node.
+ """
+ PapiSocketExecutor.run_cli_cmd(
+ node, u"set logging class af_xdp level debug"
+ )
+
+ cmd = u"af_xdp_create_v3"
+ pci_addr = Topology.get_interface_pci_addr(node, if_key)
+ args = dict(
+ name=InterfaceUtil.pci_to_eth(node, pci_addr),
+ host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
+ rxq_num=int(num_rx_queues) if num_rx_queues else 0,
+ rxq_size=rxq_size,
+ txq_size=txq_size,
+ mode=getattr(AfXdpMode, f"AF_XDP_API_MODE_{mode.upper()}").value
+ )
+ err_msg = f"Failed to create AF_XDP interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+ InterfaceUtil.vpp_set_interface_mac(
+ node, sw_if_index, Topology.get_interface_mac(node, if_key)
+ )
InterfaceUtil.add_eth_interface(
- node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf",
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_af_xdp",
host_if_key=if_key
)
@@ -1207,7 +1375,7 @@ class InterfaceUtil:
node, u"set logging class rdma level debug"
)
- cmd = u"rdma_create"
+ cmd = u"rdma_create_v4"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1216,6 +1384,12 @@ class InterfaceUtil:
rxq_size=rxq_size,
txq_size=txq_size,
mode=getattr(RdmaMode, f"RDMA_API_MODE_{mode.upper()}").value,
+ # Note: Set True for non-jumbo packets.
+ no_multi_seg=False,
+ max_pktlen=0,
+ # TODO: Apply desired RSS flags.
+ # rss4 kept 0 (auto) as API default.
+ # rss6 kept 0 (auto) as API default.
)
err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
@@ -1511,6 +1685,29 @@ class InterfaceUtil:
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
+ def set_linux_interface_promisc(
+ node, interface, namespace=None, vf_id=None, state=u"on"):
+ """Set promisc state for interface in linux.
+
+ :param node: Node where to execute command.
+ :param interface: Interface in namespace.
+ :param namespace: Exec command in namespace. (Optional, Default: None)
+ :param vf_id: Virtual Function id. (Optional, Default: None)
+ :param state: State of feature. (Optional, Default: on)
+ :type node: dict
+ :type interface: str
+ :type namespace: str
+ :type vf_id: int
+ :type state: str
+ """
+ promisc_str = f"vf {vf_id} promisc {state}" if vf_id is not None \
+ else f"promisc {state}"
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
+
+ cmd = f"{ns_str} ip link set dev {interface} {promisc_str}"
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
def set_linux_interface_trust_on(
node, interface, namespace=None, vf_id=None):
"""Set trust on (promisc) for interface in linux.
@@ -1571,9 +1768,57 @@ class InterfaceUtil:
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
- """Init PCI device by creating VIFs and bind them to vfio-pci for AVF
- driver testing on DUT.
+ def init_interface(node, ifc_key, driver, numvfs=0, osi_layer=u"L2"):
+ """Init PCI device. Check driver compatibility and bind to proper
+ drivers. Optionally create NIC VFs.
+
+ :param node: DUT node.
+ :param ifc_key: Interface key from topology file.
+ :param driver: Base driver to use.
+ :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
+ :param osi_layer: OSI Layer type to initialize TG with.
+ Default value "L2" sets linux interface spoof off.
+ :type node: dict
+ :type ifc_key: str
+ :type driver: str
+ :type numvfs: int
+ :type osi_layer: str
+ :returns: Virtual Function topology interface keys.
+ :rtype: list
+ :raises RuntimeError: If a reason preventing initialization is found.
+ """
+ kernel_driver = Topology.get_interface_driver(node, ifc_key)
+ vf_keys = []
+ if driver == u"avf":
+ if kernel_driver not in (
+ u"ice", u"iavf", u"i40e", u"i40evf"):
+ raise RuntimeError(
+ f"AVF needs ice or i40e compatible driver, not "
+ f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
+ )
+ vf_keys = InterfaceUtil.init_generic_interface(
+ node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+ )
+ elif driver == u"af_xdp":
+ if kernel_driver not in (
+ u"ice", u"iavf", u"i40e", u"i40evf", u"mlx5_core",
+ u"ixgbe"):
+ raise RuntimeError(
+ f"AF_XDP needs ice/i40e/rdma/ixgbe compatible driver, not "
+ f"{kernel_driver} at node {node[u'host']} ifc {ifc_key}"
+ )
+ vf_keys = InterfaceUtil.init_generic_interface(
+ node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+ )
+ elif driver == u"rdma-core":
+ vf_keys = InterfaceUtil.init_generic_interface(
+ node, ifc_key, numvfs=numvfs, osi_layer=osi_layer
+ )
+ return vf_keys
+
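Editor's note: a brief usage sketch of the driver dispatch above; the topology key is hypothetical and `node` is assumed to be a DUT node dict.

    # numvfs=1 creates one VF bound to the uio driver,
    # numvfs=0 keeps the PF on the kernel driver.
    vf_keys = InterfaceUtil.init_interface(node, "port1", driver="avf", numvfs=1)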
+ @staticmethod
+ def init_generic_interface(node, ifc_key, numvfs=0, osi_layer=u"L2"):
+ """Init PCI device. Bind to proper drivers. Optionally create NIC VFs.
:param node: DUT node.
:param ifc_key: Interface key from topology file.
@@ -1593,26 +1838,27 @@ class InterfaceUtil:
pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
uio_driver = Topology.get_uio_driver(node)
kernel_driver = Topology.get_interface_driver(node, ifc_key)
- if kernel_driver not in (u"ice", u"iavf", u"i40e", u"i40evf"):
- raise RuntimeError(
- f"AVF needs ice or i40e compatible driver, not {kernel_driver}"
- f"at node {node[u'host']} ifc {ifc_key}"
- )
current_driver = DUTSetup.get_pci_dev_driver(
node, pf_pci_addr.replace(u":", r"\:"))
+ pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
VPPUtil.stop_vpp_service(node)
if current_driver != kernel_driver:
# PCI device must be re-bound to kernel driver before creating VFs.
DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
# Stop VPP to prevent deadlock.
- # Unbind from current driver.
- DUTSetup.pci_driver_unbind(node, pf_pci_addr)
+ # Unbind from current driver if bound.
+ if current_driver:
+ DUTSetup.pci_driver_unbind(node, pf_pci_addr)
# Bind to kernel driver.
DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
# Initialize PCI VFs.
- DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
+ DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs=numvfs)
+
+ if not numvfs:
+ if osi_layer == u"L2":
+ InterfaceUtil.set_linux_interface_promisc(node, pf_dev)
vf_ifc_keys = []
# Set MAC address and bind each virtual function to uio driver.
@@ -1623,7 +1869,6 @@ class InterfaceUtil:
]
)
- pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
InterfaceUtil.set_linux_interface_trust_on(
node, pf_dev, vf_id=vf_id
)
@@ -1638,12 +1883,20 @@ class InterfaceUtil:
node, pf_dev, state=u"up"
)
- DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
- DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
+ vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
+ current_driver = DUTSetup.get_pci_dev_driver(
+ node, vf_pci_addr.replace(":", r"\:")
+ )
+ if current_driver:
+ DUTSetup.pci_vf_driver_unbind(
+ node, pf_pci_addr, vf_id
+ )
+ DUTSetup.pci_vf_driver_bind(
+ node, pf_pci_addr, vf_id, uio_driver
+ )
# Add newly created ports into topology file
vf_ifc_name = f"{ifc_key}_vif"
- vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
Topology.update_interface_name(
node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
@@ -1678,6 +1931,19 @@ class InterfaceUtil:
return sorted(details, key=lambda k: k[u"sw_if_index"])
@staticmethod
+ def vpp_sw_interface_rx_placement_dump_on_all_duts(nodes):
+ """Dump VPP interface RX placement on all given nodes.
+
+ :param nodes: Nodes to run command on.
+ :type nodes: dict
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ InterfaceUtil.vpp_sw_interface_rx_placement_dump(node)
+
+ @staticmethod
def vpp_sw_interface_set_rx_placement(
node, sw_if_index, queue_id, worker_id):
"""Set interface RX placement to worker on node.
@@ -1706,39 +1972,74 @@ class InterfaceUtil:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def vpp_round_robin_rx_placement(node, prefix):
+ def vpp_round_robin_rx_placement(
+ node, prefix, workers=None):
"""Set Round Robin interface RX placement on all worker threads
on node.
+ If specified, workers limits the number of physical cores used
+ for data plane I/O work. Other cores are presumed to do something else,
+ e.g. asynchronous crypto processing.
+ None means all workers are used for data plane work.
+
:param node: Topology nodes.
:param prefix: Interface name prefix.
+ :param workers: Comma separated CPU numbers of the workers intended for
+ dataplane work.
:type node: dict
:type prefix: str
+ :type workers: str
"""
- worker_id = 0
- worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
+ thread_data = VPPUtil.vpp_show_threads(node)
+ worker_cnt = len(thread_data) - 1
if not worker_cnt:
return
+ worker_ids = list()
+ if workers:
+ for item in thread_data:
+ if str(item.cpu_id) in workers.split(u","):
+ worker_ids.append(item.id)
+ else:
+ for item in thread_data:
+ if u"vpp_main" not in item.name:
+ worker_ids.append(item.id)
+
+ worker_idx = 0
for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
for interface in node[u"interfaces"].values():
if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
and prefix in interface[u"name"]:
InterfaceUtil.vpp_sw_interface_set_rx_placement(
node, placement[u"sw_if_index"], placement[u"queue_id"],
- worker_id % worker_cnt
+ worker_ids[worker_idx % len(worker_ids)] - 1
)
- worker_id += 1
+ worker_idx += 1
@staticmethod
- def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
- """Set Round Robin interface RX placement on all worker threads
+ def vpp_round_robin_rx_placement_on_all_duts(
+ nodes, prefix, use_dp_cores=False):
+ """Set Round Robin interface RX placement on worker threads
on all DUTs.
+ If specified, workers limits the number of physical cores used
+ for data plane I/O work. Other cores are presumed to do something else,
+ e.g. asynchronous crypto processing.
+ None means all cores are used for data plane work.
+
:param nodes: Topology nodes.
:param prefix: Interface name prefix.
+ :param use_dp_cores: Limit to dataplane cores.
:type nodes: dict
:type prefix: str
+ :type use_dp_cores: bool
"""
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
- InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
+ workers = None
+ if use_dp_cores:
+ workers = BuiltIn().get_variable_value(
+ f"${{{node_name}_cpu_dp}}"
+ )
+ InterfaceUtil.vpp_round_robin_rx_placement(
+ node, prefix, workers
+ )
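Editor's note: a standalone sketch of the worker selection logic above, assuming a made-up thread layout (vpp_main plus four workers on CPUs 2-5) and workers="2,4"; the namedtuple stands in for whatever vpp_show_threads returns, which the code accesses via .id, .name and .cpu_id.

    from collections import namedtuple

    Thread = namedtuple("Thread", "id name cpu_id")
    thread_data = [
        Thread(0, "vpp_main", 1),
        Thread(1, "vpp_wk_0", 2),
        Thread(2, "vpp_wk_1", 3),
        Thread(3, "vpp_wk_2", 4),
        Thread(4, "vpp_wk_3", 5),
    ]
    workers = "2,4"
    worker_ids = [
        item.id for item in thread_data if str(item.cpu_id) in workers.split(",")
    ]
    print(worker_ids)  # [1, 3]
    # Queues are then assigned round-robin to rx placement workers id - 1:
    for worker_idx in range(4):
        print(worker_ids[worker_idx % len(worker_ids)] - 1)  # 0, 2, 0, 2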
diff --git a/resources/libraries/python/Iperf3.py b/resources/libraries/python/Iperf3.py
index ed186f0757..a881ec9f06 100644
--- a/resources/libraries/python/Iperf3.py
+++ b/resources/libraries/python/Iperf3.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -33,6 +33,29 @@ class Iperf3:
# Computed affinity for iPerf client.
self._c_affinity = None
+ @staticmethod
+ def get_iperf_type():
+ """Log and return the installed traffic generator type.
+
+ :returns: Traffic generator type string.
+ :rtype: str
+ """
+ return "IPERF"
+
+ @staticmethod
+ def get_iperf_version(node):
+ """Log and return the installed traffic generator version.
+
+ :param node: Node from topology file.
+ :type node: dict
+ :returns: Traffic generator version string.
+ :rtype: str
+ """
+ command = "iperf3 --version | head -1"
+ message = u"Get iPerf version failed!"
+ stdout, _ = exec_cmd_no_error(node, command, message=message)
+ return stdout.strip()
+
def initialize_iperf_server(
self, node, pf_key, interface, bind, bind_gw, bind_mask,
namespace=None, cpu_skip_cnt=0, cpu_cnt=1, instances=1):
diff --git a/resources/libraries/python/IrqUtil.py b/resources/libraries/python/IrqUtil.py
new file mode 100644
index 0000000000..1ef228eeac
--- /dev/null
+++ b/resources/libraries/python/IrqUtil.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""IRQ handling library."""
+
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.InterfaceUtil import InterfaceUtil
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.topology import Topology
+
+
+class IrqUtil:
+ """Contains methods for managing IRQs."""
+
+ @staticmethod
+ def get_pci_interface_irqs(node, pci_addr):
+ """Get IRQs for interface in linux specified by PCI address.
+
+ :param node: Topology node.
+ :param pci_addr: Linux interface PCI address.
+ :type node: dict
+ :type pci_addr: str
+ :returns: List of IRQs attached to specified interface.
+ :rtype: list
+ """
+ interface = InterfaceUtil.pci_to_eth(node, pci_addr)
+ return IrqUtil.get_interface_irqs(node, interface)
+
+ @staticmethod
+ def get_interface_irqs(node, interface):
+ """Get IRQs for interface in linux.
+
+ :param node: Topology node.
+ :param interface: Linux interface name.
+ :type node: dict
+ :type interface: str
+ :returns: List of IRQs attached to specified interface.
+ :rtype: list
+ """
+ irqs = []
+
+ command = f"grep '{interface}-.*TxRx' /proc/interrupts | cut -f1 -d:"
+ message = f"Failed to get IRQs for {interface} on {node['host']}!"
+ stdout, _ = exec_cmd_no_error(
+ node, command, timeout=30, sudo=True, message=message
+ )
+
+ for line in stdout.splitlines():
+ irqs.append(int(line.strip()))
+
+ return irqs
+
+ @staticmethod
+ def set_interface_irqs_affinity(node, interface, cpu_skip_cnt=0, cpu_cnt=1):
+ """Set IRQs affinity for interface in linux.
+
+ :param node: Topology node.
+ :param interface: Topology interface.
+ :param cpu_skip_cnt: Amount of CPU cores to skip. (Optional, Default: 0)
+ :param cpu_cnt: CPU threads count. (Optional, Default: 1)
+ :type node: dict
+ :type interface: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+ """
+ cpu_list = CpuUtils.get_affinity_af_xdp(
+ node, interface, cpu_skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt
+ )
+ interface = Topology.get_interface_name(node, interface)
+ irq_list = IrqUtil.get_interface_irqs(node, interface)
+
+ for irq, cpu in zip(irq_list, cpu_list):
+ if cpu < 32:
+ mask = 1 << cpu
+ mask = f"{mask:x}"
+ else:
+ groups = int(cpu/32)
+ mask_fill = u""
+ for _ in range(groups):
+ mask_fill = f"{mask_fill},00000000"
+ mask = 1 << (cpu - (32 * groups))
+ mask = f"{mask:x}{mask_fill}"
+
+ command = f"sh -c 'echo {mask} > /proc/irq/{irq}/smp_affinity'"
+ message = f"Failed to set IRQ affinity for {irq} on {node['host']}!"
+ exec_cmd_no_error(
+ node, command, timeout=30, sudo=True, message=message
+ )
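Editor's note: a standalone sketch of the smp_affinity mask arithmetic used above, for one CPU below 32 and one above; the CPU numbers are illustrative.

    def cpu_to_mask(cpu):
        # Mirrors the mask computation in set_interface_irqs_affinity.
        if cpu < 32:
            return f"{1 << cpu:x}"
        groups = int(cpu / 32)
        mask_fill = ""
        for _ in range(groups):
            mask_fill = f"{mask_fill},00000000"
        mask = 1 << (cpu - (32 * groups))
        return f"{mask:x}{mask_fill}"

    print(cpu_to_mask(3))   # 8
    print(cpu_to_mask(35))  # 8,00000000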
diff --git a/resources/libraries/python/KubernetesUtils.py b/resources/libraries/python/KubernetesUtils.py
index d0d72a39a1..9ded0e8b9e 100644
--- a/resources/libraries/python/KubernetesUtils.py
+++ b/resources/libraries/python/KubernetesUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -488,8 +488,6 @@ class KubernetesUtils:
vpp_config.add_unix_cli_listen(value=u"0.0.0.0:5002")
vpp_config.add_unix_nodaemon()
vpp_config.add_socksvr()
- vpp_config.add_heapsize(u"4G")
- vpp_config.add_ip_heap_size(u"4G")
vpp_config.add_ip6_heap_size(u"4G")
vpp_config.add_ip6_hash_buckets(u"2000000")
if not kwargs[u"jumbo"]:
diff --git a/resources/libraries/python/L2Util.py b/resources/libraries/python/L2Util.py
index eea66b82b1..92c93ed9dd 100644
--- a/resources/libraries/python/L2Util.py
+++ b/resources/libraries/python/L2Util.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -156,7 +156,7 @@ class L2Util:
:type learn: bool
:type arp_term: bool
"""
- cmd = u"bridge_domain_add_del"
+ cmd = u"bridge_domain_add_del_v2"
err_msg = f"Failed to create L2 bridge domain on host {node[u'host']}"
args = dict(
bd_id=int(bd_id),
@@ -222,7 +222,7 @@ class L2Util:
sw_if_index1 = Topology.get_interface_sw_index(node, port_1)
sw_if_index2 = Topology.get_interface_sw_index(node, port_2)
- cmd1 = u"bridge_domain_add_del"
+ cmd1 = u"bridge_domain_add_del_v2"
args1 = dict(
bd_id=int(bd_id),
flood=True,
@@ -254,8 +254,10 @@ class L2Util:
f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd1, **args1).add(cmd2, **args2).add(cmd2, **args3)
- papi_exec.get_replies(err_msg)
+ # Cannot use get_replies due to VPP-2203.
+ papi_exec.add(cmd1, **args1).get_reply(err_msg)
+ papi_exec.add(cmd2, **args2).get_reply(err_msg)
+ papi_exec.add(cmd2, **args3).get_reply(err_msg)
@staticmethod
def vpp_setup_bidirectional_cross_connect(node, interface1, interface2):
@@ -293,7 +295,9 @@ class L2Util:
f"on host {node['host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg)
+ # Cannot use get_replies due to VPP-2203.
+ papi_exec.add(cmd, **args1).get_reply(err_msg)
+ papi_exec.add(cmd, **args2).get_reply(err_msg)
@staticmethod
def vpp_setup_bidirectional_l2_patch(node, interface1, interface2):
@@ -331,7 +335,9 @@ class L2Util:
f"on host {node['host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg)
+ # Cannot use get_replies due to VPP-2203.
+ papi_exec.add(cmd, **args1).get_reply(err_msg)
+ papi_exec.add(cmd, **args2).get_reply(err_msg)
@staticmethod
def linux_add_bridge(node, br_name, if_1, if_2, set_up=True):
diff --git a/resources/libraries/python/LimitUtil.py b/resources/libraries/python/LimitUtil.py
index c34109c2b7..6559f6aeb0 100644
--- a/resources/libraries/python/LimitUtil.py
+++ b/resources/libraries/python/LimitUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/LispSetup.py b/resources/libraries/python/LispSetup.py
index 6579764596..9e3ef97aa3 100644
--- a/resources/libraries/python/LispSetup.py
+++ b/resources/libraries/python/LispSetup.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/LoadBalancerUtil.py b/resources/libraries/python/LoadBalancerUtil.py
index 340afe50a3..471bc87e80 100644
--- a/resources/libraries/python/LoadBalancerUtil.py
+++ b/resources/libraries/python/LoadBalancerUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Intel and/or its affiliates.
+# Copyright (c) 2023 Intel and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -84,13 +84,13 @@ class LoadBalancerUtil:
protocol: tcp or udp. (int)
port: destination port. (int)
encap: encap is ip4 GRE(0) or ip6 (1GRE) or L3DSR(2) or NAT4(3) or
- NAT6(4). (int)
+ NAT6(4). (int)
dscp: dscp bit corresponding to VIP
type: service type
target_port: Pod's port corresponding to specific service
node_port: Node's port
new_len: Size of the new connections flow table used
- for this VIP
+ for this VIP
is_del: 1 if the VIP should be removed otherwise 0.
:type node: dict
@@ -108,9 +108,10 @@ class LoadBalancerUtil:
target_port = kwargs.pop(u"target_port", 0)
node_port = kwargs.pop(u"node_port", 0)
new_len = kwargs.pop(u"new_len", 1024)
+ src_ip_sticky = kwargs.pop(u"src_ip_sticky", 0)
is_del = kwargs.pop(u"is_del", 0)
- cmd = u"lb_add_del_vip"
+ cmd = u"lb_add_del_vip_v2"
err_msg = f"Failed to add vip on host {node[u'host']}"
vip_addr = ip_address(vip_addr).packed
@@ -127,7 +128,8 @@ class LoadBalancerUtil:
target_port=target_port,
node_port=node_port,
new_flows_table_length=int(new_len),
- is_del=is_del
+ src_ip_sticky=src_ip_sticky,
+ is_del=is_del,
)
with PapiSocketExecutor(node) as papi_exec:
@@ -150,7 +152,7 @@ class LoadBalancerUtil:
as_addr: The application server address. (str)
is_del: 1 if the VIP should be removed otherwise 0. (int)
is_flush: 1 if the sessions related to this AS should be flushed
- otherwise 0. (int)
+ otherwise 0. (int)
:type node: dict
:type kwargs: dict
diff --git a/resources/libraries/python/LocalExecution.py b/resources/libraries/python/LocalExecution.py
index ea40156404..98dfce1375 100644
--- a/resources/libraries/python/LocalExecution.py
+++ b/resources/libraries/python/LocalExecution.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/MLRsearch/AbstractMeasurer.py b/resources/libraries/python/MLRsearch/AbstractMeasurer.py
deleted file mode 100644
index 82116f2e43..0000000000
--- a/resources/libraries/python/MLRsearch/AbstractMeasurer.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining AbstractMeasurer class."""
-
-from abc import ABCMeta, abstractmethod
-
-
-class AbstractMeasurer(metaclass=ABCMeta):
- """Abstract class defining common API for measurement providers."""
-
- @abstractmethod
- def measure(self, duration, transmit_rate):
- """Perform trial measurement and return the result.
-
- :param duration: Trial duration [s].
- :param transmit_rate: Target transmit rate [tps].
- :type duration: float
- :type transmit_rate: float
- :returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement.ReceiveRateMeasurement
- """
diff --git a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py b/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py
deleted file mode 100644
index f2bf04e1b1..0000000000
--- a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining AbstractSearchAlgorithm class."""
-
-from abc import ABCMeta, abstractmethod
-
-
-class AbstractSearchAlgorithm(metaclass=ABCMeta):
- """Abstract class defining common API for search algorithms."""
-
- def __init__(self, measurer):
- """Store the rate provider.
-
- :param measurer: Object able to perform trial or composite measurements.
- :type measurer: AbstractMeasurer.AbstractMeasurer
- """
- # TODO: Type check for AbstractMeasurer?
- self.measurer = measurer
-
- @abstractmethod
- def narrow_down_ndr_and_pdr(
- self, min_rate, max_rate, packet_loss_ratio):
- """Perform measurements to narrow down intervals, return them.
-
- This will be renamed when custom loss ratio lists are supported.
-
- :param min_rate: Minimal target transmit rate [tps].
- Usually, tests are set to fail if search reaches this or below.
- :param max_rate: Maximal target transmit rate [tps].
- Usually computed from line rate and various other limits,
- to prevent failures or duration stretching in Traffic Generator.
- :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
- :type min_rate: float
- :type max_rate: float
- :type packet_loss_ratio: float
- :returns: Structure containing narrowed down intervals
- and their measurements.
- :rtype: NdrPdrResult.NdrPdrResult
- """
- # TODO: Do we agree on arguments related to precision or trial duration?
diff --git a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py b/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py
deleted file mode 100644
index 87dc784cbc..0000000000
--- a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py
+++ /dev/null
@@ -1,645 +0,0 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining MultipleLossRatioSearch class."""
-
-import logging
-import math
-import time
-
-from .AbstractSearchAlgorithm import AbstractSearchAlgorithm
-from .NdrPdrResult import NdrPdrResult
-from .ReceiveRateInterval import ReceiveRateInterval
-
-
-class MultipleLossRatioSearch(AbstractSearchAlgorithm):
- """Optimized binary search algorithm for finding NDR and PDR bounds.
-
- Traditional binary search algorithm needs initial interval
- (lower and upper bound), and returns final interval after bisecting
- (until some exit condition is met).
- The exit condition is usually related to the interval width,
- (upper bound value minus lower bound value).
-
- The optimized algorithm contains several improvements
- aimed to reduce overall search time.
-
- One improvement is searching for two intervals at once.
- The intervals are for NDR (No Drop Rate) and PDR (Partial Drop Rate).
-
- Next improvement is that the initial interval does not need to be valid.
- Imagine initial interval (10, 11) where 11 is smaller
- than the searched value.
- The algorithm will try (11, 13) interval next, and if 13 is still smaller,
- (13, 17) and so on, doubling width until the upper bound is valid.
- The part when interval expands is called external search,
- the part when interval is bisected is called internal search.
-
- Next improvement is that trial measurements at small trial duration
- can be used to find a reasonable interval for full trial duration search.
- This results in more trials performed, but smaller overall duration
- in general.
-
- Next improvement is bisecting in logarithmic quantities,
- so that exit criteria can be independent of measurement units.
-
- Next improvement is basing the initial interval on receive rates.
-
- Final improvement is exiting early if the minimal value
- is not a valid lower bound.
-
- The complete search consist of several phases,
- each phase performing several trial measurements.
- Initial phase creates initial interval based on receive rates
- at maximum rate and at maximum receive rate (MRR).
- Final phase and preceding intermediate phases are performing
- external and internal search steps,
- each resulting interval is the starting point for the next phase.
- The resulting interval of final phase is the result of the whole algorithm.
-
- Each non-initial phase uses its own trial duration and width goal.
- Any non-initial phase stops searching (for NDR or PDR independently)
- when minimum is not a valid lower bound (at current duration),
- or all of the following is true:
- Both bounds are valid, bound bounds are measured at the current phase
- trial duration, interval width is less than the width goal
- for current phase.
-
- TODO: Review and update this docstring according to rst docs.
- TODO: Support configurable number of Packet Loss Ratios.
- """
-
- class ProgressState:
- """Structure containing data to be passed around in recursion."""
-
- def __init__(
- self, result, phases, duration, width_goal, packet_loss_ratio,
- minimum_transmit_rate, maximum_transmit_rate):
- """Convert and store the argument values.
-
- :param result: Current measured NDR and PDR intervals.
- :param phases: How many intermediate phases to perform
- before the current one.
- :param duration: Trial duration to use in the current phase [s].
- :param width_goal: The goal relative width for the curreent phase.
- :param packet_loss_ratio: PDR fraction for the current search.
- :param minimum_transmit_rate: Minimum target transmit rate
- for the current search [pps].
- :param maximum_transmit_rate: Maximum target transmit rate
- for the current search [pps].
- :type result: NdrPdrResult.NdrPdrResult
- :type phases: int
- :type duration: float
- :type width_goal: float
- :type packet_loss_ratio: float
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
- """
- self.result = result
- self.phases = int(phases)
- self.duration = float(duration)
- self.width_goal = float(width_goal)
- self.packet_loss_ratio = float(packet_loss_ratio)
- self.minimum_transmit_rate = float(minimum_transmit_rate)
- self.maximum_transmit_rate = float(maximum_transmit_rate)
-
- def __init__(
- self, measurer, final_relative_width=0.005,
- final_trial_duration=30.0, initial_trial_duration=1.0,
- number_of_intermediate_phases=2, timeout=600.0, doublings=1):
- """Store the measurer object and additional arguments.
-
- :param measurer: Rate provider to use by this search object.
- :param final_relative_width: Final lower bound transmit rate
- cannot be more distant that this multiple of upper bound [1].
- :param final_trial_duration: Trial duration for the final phase [s].
- :param initial_trial_duration: Trial duration for the initial phase
- and also for the first intermediate phase [s].
- :param number_of_intermediate_phases: Number of intermediate phases
- to perform before the final phase [1].
- :param timeout: The search will fail itself when not finished
- before this overall time [s].
- :param doublings: How many doublings to do in external search step.
- Default 1 is suitable for fairly stable tests,
- less stable tests might get better overal duration with 2 or more.
- :type measurer: AbstractMeasurer.AbstractMeasurer
- :type final_relative_width: float
- :type final_trial_duration: float
- :type initial_trial_duration: float
- :type number_of_intermediate_phases: int
- :type timeout: float
- :type doublings: int
- """
- super(MultipleLossRatioSearch, self).__init__(measurer)
- self.final_trial_duration = float(final_trial_duration)
- self.final_relative_width = float(final_relative_width)
- self.number_of_intermediate_phases = int(number_of_intermediate_phases)
- self.initial_trial_duration = float(initial_trial_duration)
- self.timeout = float(timeout)
- self.doublings = int(doublings)
-
- @staticmethod
- def double_relative_width(relative_width):
- """Return relative width corresponding to double logarithmic width.
-
- :param relative_width: The base relative width to double.
- :type relative_width: float
- :returns: The relative width of double logarithmic size.
- :rtype: float
- """
- return 1.99999 * relative_width - relative_width * relative_width
- # The number should be 2.0, but we want to avoid rounding errors,
- # and ensure half of double is not larger than the original value.
-
- @staticmethod
- def double_step_down(relative_width, current_bound):
- """Return rate of double logarithmic width below.
-
- :param relative_width: The base relative width to double.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type current_bound: float
- :returns: Transmit rate smaller by logarithmically double width [pps].
- :rtype: float
- """
- return current_bound * (
- 1.0 - MultipleLossRatioSearch.double_relative_width(relative_width)
- )
-
- @staticmethod
- def expand_down(relative_width, doublings, current_bound):
- """Return rate of expanded logarithmic width below.
-
- :param relative_width: The base relative width to double.
- :param doublings: How many doublings to do for expansion.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type doublings: int
- :type current_bound: float
- :returns: Transmit rate smaller by logarithmically double width [pps].
- :rtype: float
- """
- for _ in range(doublings):
- relative_width = MultipleLossRatioSearch.double_relative_width(
- relative_width
- )
- return current_bound * (1.0 - relative_width)
-
- @staticmethod
- def double_step_up(relative_width, current_bound):
- """Return rate of double logarithmic width above.
-
- :param relative_width: The base relative width to double.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type current_bound: float
- :returns: Transmit rate larger by logarithmically double width [pps].
- :rtype: float
- """
- return current_bound / (
- 1.0 - MultipleLossRatioSearch.double_relative_width(relative_width)
- )
-
- @staticmethod
- def expand_up(relative_width, doublings, current_bound):
- """Return rate of expanded logarithmic width above.
-
- :param relative_width: The base relative width to double.
- :param doublings: How many doublings to do for expansion.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type doublings: int
- :type current_bound: float
- :returns: Transmit rate larger by logarithmically expanded width [pps].
- :rtype: float
- """
- for _ in range(doublings):
- relative_width = MultipleLossRatioSearch.double_relative_width(
- relative_width
- )
- return current_bound / (1.0 - relative_width)
-
- @staticmethod
- def half_relative_width(relative_width):
- """Return relative width corresponding to half logarithmic width.
-
- :param relative_width: The base relative width to halve.
- :type relative_width: float
- :returns: The relative width of half logarithmic size.
- :rtype: float
- """
- return 1.0 - math.sqrt(1.0 - relative_width)
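The relationship between the doubling and halving above is easiest to see numerically; a small sketch with an illustrative width:

    import math

    def double_relative_width(w):
        # 1.99999 instead of 2.0, so halving the doubled width
        # never exceeds the original width despite rounding.
        return 1.99999 * w - w * w

    def half_relative_width(w):
        return 1.0 - math.sqrt(1.0 - w)

    w = 0.005
    doubled = double_relative_width(w)        # ~0.0099750
    assert half_relative_width(doubled) <= w  # halving never overshoots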
-
- @staticmethod
- def half_step_up(relative_width, current_bound):
- """Return rate of half logarithmic width above.
-
- :param relative_width: The base relative width to halve.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type current_bound: float
- :returns: Transmit rate larger by logarithmically half width [pps].
- :rtype: float
- """
- return current_bound / (
- 1.0 - MultipleLossRatioSearch.half_relative_width(relative_width)
- )
-
- def narrow_down_ndr_and_pdr(self, min_rate, max_rate, packet_loss_ratio):
- """Perform initial phase, create state object, proceed with next phases.
-
- :param min_rate: Minimal target transmit rate [tps].
- :param max_rate: Maximal target transmit rate [tps].
- :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
- :type min_rate: float
- :type max_rate: float
- :type packet_loss_ratio: float
- :returns: Structure containing narrowed down intervals
- and their measurements.
- :rtype: NdrPdrResult.NdrPdrResult
- :raises RuntimeError: If total duration is larger than timeout.
- """
- minimum_transmit_rate = float(min_rate)
- maximum_transmit_rate = float(max_rate)
- packet_loss_ratio = float(packet_loss_ratio)
- max_measurement = self.measurer.measure(
- self.initial_trial_duration, maximum_transmit_rate)
- initial_width_goal = self.final_relative_width
- for _ in range(self.number_of_intermediate_phases):
- initial_width_goal = self.double_relative_width(initial_width_goal)
- max_lo = maximum_transmit_rate * (1.0 - initial_width_goal)
- mrr = max(minimum_transmit_rate, min(
- max_lo, max_measurement.relative_receive_rate
- ))
- mrr_measurement = self.measurer.measure(
- self.initial_trial_duration, mrr
- )
- # Attempt to get narrower width.
- if mrr_measurement.loss_fraction > 0.0:
- max2_lo = mrr * (1.0 - initial_width_goal)
- mrr2 = min(max2_lo, mrr_measurement.relative_receive_rate)
- else:
- mrr2 = mrr / (1.0 - initial_width_goal)
- if minimum_transmit_rate < mrr2 < maximum_transmit_rate:
- max_measurement = mrr_measurement
- mrr_measurement = self.measurer.measure(
- self.initial_trial_duration, mrr2)
- if mrr2 > mrr:
- max_measurement, mrr_measurement = \
- (mrr_measurement, max_measurement)
- starting_interval = ReceiveRateInterval(
- mrr_measurement, max_measurement)
- starting_result = NdrPdrResult(starting_interval, starting_interval)
- state = self.ProgressState(
- starting_result, self.number_of_intermediate_phases,
- self.final_trial_duration, self.final_relative_width,
- packet_loss_ratio, minimum_transmit_rate, maximum_transmit_rate
- )
- state = self.ndrpdr(state)
- return state.result
-
- def _measure_and_update_state(self, state, transmit_rate):
- """Perform trial measurement, update bounds, return new state.
-
- :param state: State before this measurement.
- :param transmit_rate: Target transmit rate for this measurement [pps].
- :type state: ProgressState
- :type transmit_rate: float
- :returns: State after the measurement.
- :rtype: ProgressState
- """
- # TODO: Implement https://stackoverflow.com/a/24683360
- # to avoid the string manipulation if log verbosity is too low.
- logging.info(f"result before update: {state.result}")
- logging.debug(
- f"relative widths in goals: "
- f"{state.result.width_in_goals(self.final_relative_width)}"
- )
- measurement = self.measurer.measure(state.duration, transmit_rate)
- ndr_interval = self._new_interval(
- state.result.ndr_interval, measurement, 0.0
- )
- pdr_interval = self._new_interval(
- state.result.pdr_interval, measurement, state.packet_loss_ratio
- )
- state.result = NdrPdrResult(ndr_interval, pdr_interval)
- return state
-
- @staticmethod
- def _new_interval(old_interval, measurement, packet_loss_ratio):
- """Return new interval with bounds updated according to the measurement.
-
- :param old_interval: The current interval before the measurement.
- :param measurement: The new measurement to take into account.
- :param packet_loss_ratio: Fraction for PDR (or zero for NDR).
- :type old_interval: ReceiveRateInterval.ReceiveRateInterval
- :type measurement: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type packet_loss_ratio: float
- :returns: The updated interval.
- :rtype: ReceiveRateInterval.ReceiveRateInterval
- """
- old_lo, old_hi = old_interval.measured_low, old_interval.measured_high
- new_lo = new_hi = None
- # Priority zero: direct replace if the target Tr is the same.
- if measurement.target_tr in (old_lo.target_tr, old_hi.target_tr):
- if measurement.target_tr == old_lo.target_tr:
- new_lo = measurement
- else:
- new_hi = measurement
- # Priority one: invalid lower bound allows only one type of update.
- elif old_lo.loss_fraction > packet_loss_ratio:
- # We can only expand down, old bound becomes valid upper one.
- if measurement.target_tr < old_lo.target_tr:
- new_lo, new_hi = measurement, old_lo
- else:
- return old_interval
-
- # Lower bound is now valid.
- # Next priorities depend on target Tr.
- elif measurement.target_tr < old_lo.target_tr:
- # Lower external measurement, relevant only
- # if the new measurement has high loss rate.
- if measurement.loss_fraction > packet_loss_ratio:
- # Returning the broader interval as old_lo
- # would be invalid upper bound.
- new_lo = measurement
- elif measurement.target_tr > old_hi.target_tr:
- # Upper external measurement, only relevant for invalid upper bound.
- if old_hi.loss_fraction <= packet_loss_ratio:
- # Old upper bound becomes valid new lower bound.
- new_lo, new_hi = old_hi, measurement
- else:
- # Internal measurement, replaced boundary
- # depends on measured loss fraction.
- if measurement.loss_fraction > packet_loss_ratio:
- # We have found a narrow valid interval,
- # regardless of whether old upper bound was valid.
- new_hi = measurement
- else:
- # In ideal world, we would not want to shrink interval
- # if upper bound is not valid.
- # In the real world, we want to shrink it for
- # "invalid upper bound at maximal rate" case.
- new_lo = measurement
-
- return ReceiveRateInterval(
- old_lo if new_lo is None else new_lo,
- old_hi if new_hi is None else new_hi
- )
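A worked example of the priorities above, using the constructors shown later in this diff; the traffic numbers are made up:

    lo = ReceiveRateMeasurement(1.0, 1e6, 1_000_000, 0)        # no loss at 1 Mpps
    hi = ReceiveRateMeasurement(1.0, 2e6, 2_000_000, 100_000)  # 5% loss at 2 Mpps
    old = ReceiveRateInterval(lo, hi)
    mid = ReceiveRateMeasurement(1.0, 1.5e6, 1_500_000, 0)     # internal, no loss
    new = MultipleLossRatioSearch._new_interval(old, mid, 0.0)
    # Internal measurement without loss replaces the lower bound: [1.5e6; 2e6)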
-
- def ndrpdr(self, state):
- """Perform trials for this phase. Return the new state when done.
-
- :param state: State before this phase.
- :type state: ProgressState
- :returns: The updated state.
- :rtype: ProgressState
- :raises RuntimeError: If total duration is larger than timeout.
- """
- start_time = time.time()
- if state.phases > 0:
- # We need to finish preceding intermediate phases first.
- saved_phases = state.phases
- state.phases -= 1
- # Preceding phases have shorter duration.
- saved_duration = state.duration
- duration_multiplier = state.duration / self.initial_trial_duration
- phase_exponent = float(state.phases) / saved_phases
- state.duration = self.initial_trial_duration * math.pow(
- duration_multiplier, phase_exponent
- )
- # Shorter durations do not need that narrow widths.
- saved_width = state.width_goal
- state.width_goal = self.double_relative_width(state.width_goal)
- # Recurse.
- state = self.ndrpdr(state)
- # Restore the state for current phase.
- state.duration = saved_duration
- state.width_goal = saved_width
- state.phases = saved_phases # Not needed, but just in case.
-
- logging.info(
- f"starting iterations with duration {state.duration} and relative "
- f"width goal {state.width_goal}"
- )
- while 1:
- if time.time() > start_time + self.timeout:
- raise RuntimeError(u"Optimized search takes too long.")
- # Order of priorities: invalid bounds (nl, pl, nh, ph),
- # then narrowing relative Tr widths.
- # Durations are not priorities yet,
- # they will settle on their own hopefully.
- ndr_lo = state.result.ndr_interval.measured_low
- ndr_hi = state.result.ndr_interval.measured_high
- pdr_lo = state.result.pdr_interval.measured_low
- pdr_hi = state.result.pdr_interval.measured_high
- ndr_rel_width = max(
- state.width_goal, state.result.ndr_interval.rel_tr_width
- )
- pdr_rel_width = max(
- state.width_goal, state.result.pdr_interval.rel_tr_width
- )
- # If we are hitting maximal or minimal rate, we cannot shift,
- # but we can re-measure.
- new_tr = self._ndrpdr_loss_fraction(
- state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
- pdr_rel_width
- )
-
- if new_tr is not None:
- state = self._measure_and_update_state(state, new_tr)
- continue
-
- # If we are hitting maximum_transmit_rate,
- # it is still worth narrowing width,
- # hoping large enough loss fraction will happen.
- # But if we are hitting the minimal rate (at current duration),
- # no additional measurement will help with that,
- # so we can stop narrowing in this phase.
- if (ndr_lo.target_tr <= state.minimum_transmit_rate
- and ndr_lo.loss_fraction > 0.0):
- ndr_rel_width = 0.0
- if (pdr_lo.target_tr <= state.minimum_transmit_rate
- and pdr_lo.loss_fraction > state.packet_loss_ratio):
- pdr_rel_width = 0.0
-
- new_tr = self._ndrpdr_width_goal(
- state, ndr_lo, pdr_lo, ndr_rel_width, pdr_rel_width
- )
-
- if new_tr is not None:
- state = self._measure_and_update_state(state, new_tr)
- continue
-
- # We do not need to improve width, but there still might be
- # some measurements with smaller duration.
- new_tr = self._ndrpdr_duration(
- state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
- pdr_rel_width
- )
-
- if new_tr is not None:
- state = self._measure_and_update_state(state, new_tr)
- continue
-
- # Widths are narrow (or lower bound minimal), bound measurements
- # are long enough, we can return.
- logging.info(u"phase done")
- break
- return state
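To make the phase recursion above concrete, here is the closed form of the trial durations it produces, using the default settings from __init__ (1 s initial, 30 s final, 2 intermediate phases):

    initial, final, phases = 1.0, 30.0, 2
    durations = [
        initial * (final / initial) ** (k / phases) for k in range(phases + 1)
    ]
    print(durations)  # [1.0, 5.477..., 30.0]
    # Each earlier phase also doubles the width goal, so cheap short trials
    # narrow the interval before the expensive 30 s trials start.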
-
- def _ndrpdr_loss_fraction(
- self, state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
- pdr_rel_width):
- """Perform loss_fraction-based trials within a ndrpdr phase
-
- :param state: current state
- :param ndr_lo: ndr interval measured low
- :param ndr_hi: ndr interval measured high
- :param pdr_lo: pdr interval measured low
- :param pdr_hi: pdr interval measured high
- :param ndr_rel_width: ndr interval relative width
- :param pdr_rel_width: pdr interval relative width
- :type state: ProgressState
- :type ndr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_hi: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_hi: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_rel_width: float
- :type pdr_rel_width: float
- :returns: a new transmit rate if one should be applied
- :rtype: float
- """
- result = None
- if ndr_lo.loss_fraction > 0.0:
- if ndr_lo.target_tr > state.minimum_transmit_rate:
- result = max(
- state.minimum_transmit_rate, self.expand_down(
- ndr_rel_width, self.doublings, ndr_lo.target_tr
- )
- )
- logging.info(f"ndr lo external {result}")
- elif ndr_lo.duration < state.duration:
- result = state.minimum_transmit_rate
- logging.info(u"ndr lo minimal re-measure")
-
- if result is None and pdr_lo.loss_fraction > state.packet_loss_ratio:
- if pdr_lo.target_tr > state.minimum_transmit_rate:
- result = max(
- state.minimum_transmit_rate, self.expand_down(
- pdr_rel_width, self.doublings, pdr_lo.target_tr
- )
- )
- logging.info(f"pdr lo external {result}")
- elif pdr_lo.duration < state.duration:
- result = state.minimum_transmit_rate
- logging.info(u"pdr lo minimal re-measure")
-
- if result is None and ndr_hi.loss_fraction <= 0.0:
- if ndr_hi.target_tr < state.maximum_transmit_rate:
- result = min(
- state.maximum_transmit_rate, self.expand_up(
- ndr_rel_width, self.doublings, ndr_hi.target_tr
- )
- )
- logging.info(f"ndr hi external {result}")
- elif ndr_hi.duration < state.duration:
- result = state.maximum_transmit_rate
- logging.info(u"ndr hi maximal re-measure")
-
- if result is None and pdr_hi.loss_fraction <= state.packet_loss_ratio:
- if pdr_hi.target_tr < state.maximum_transmit_rate:
- result = min(
- state.maximum_transmit_rate, self.expand_up(
- pdr_rel_width, self.doublings, pdr_hi.target_tr
- )
- )
- logging.info(f"pdr hi external {result}")
- elif pdr_hi.duration < state.duration:
- result = state.maximum_transmit_rate
- logging.info(u"ndr hi maximal re-measure")
- return result
-
- def _ndrpdr_width_goal(
- self, state, ndr_lo, pdr_lo, ndr_rel_width, pdr_rel_width):
- """Perform width_goal-based trials within a ndrpdr phase
-
- :param state: current state
- :param ndr_lo: ndr interval measured low
- :param pdr_lo: pdr interval measured low
- :param ndr_rel_width: ndr interval relative width
- :param pdr_rel_width: pdr interval relative width
- :type state: ProgressState
- :type ndr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_rel_width: float
- :type pdr_rel_width: float
- :returns: a new transmit rate if one should be applied
- :rtype: float
- """
- if ndr_rel_width > state.width_goal:
- # We have to narrow NDR width first, as NDR internal search
- # can invalidate PDR (but not vice versa).
- result = self.half_step_up(ndr_rel_width, ndr_lo.target_tr)
- logging.info(f"Bisecting for NDR at {result}")
- elif pdr_rel_width > state.width_goal:
- # PDR internal search.
- result = self.half_step_up(pdr_rel_width, pdr_lo.target_tr)
- logging.info(f"Bisecting for PDR at {result}")
- else:
- result = None
- return result
-
- @staticmethod
- def _ndrpdr_duration(
- state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
- pdr_rel_width):
- """Perform duration-based trials within a ndrpdr phase
-
- :param state: current state
- :param ndr_lo: ndr interval measured low
- :param ndr_hi: ndr interval measured high
- :param pdr_lo: pdr interval measured low
- :param pdr_hi: pdr interval measured high
- :param ndr_rel_width: ndr interval relative width
- :param pdr_rel_width: pdr interval relative width
- :type state: ProgressState
- :type ndr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_hi: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_hi: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_rel_width: float
- :type pdr_rel_width: float
- :returns: a new transmit rate if one should be applied
- :rtype: float
- """
- # We need to re-measure with full duration, possibly
- # creating invalid bounds to resolve (thus broadening width).
- if ndr_lo.duration < state.duration:
- result = ndr_lo.target_tr
- logging.info(u"re-measuring NDR lower bound")
- elif pdr_lo.duration < state.duration:
- result = pdr_lo.target_tr
- logging.info(u"re-measuring PDR lower bound")
- # Except when lower bounds have high loss fraction, in that case
- # we do not need to re-measure _upper_ bounds.
- elif ndr_hi.duration < state.duration and ndr_rel_width > 0.0:
- result = ndr_hi.target_tr
- logging.info(u"re-measuring NDR upper bound")
- elif pdr_hi.duration < state.duration and pdr_rel_width > 0.0:
- result = pdr_hi.target_tr
- logging.info(u"re-measuring PDR upper bound")
- else:
- result = None
- return result
diff --git a/resources/libraries/python/MLRsearch/NdrPdrResult.py b/resources/libraries/python/MLRsearch/NdrPdrResult.py
deleted file mode 100644
index 3454ef1957..0000000000
--- a/resources/libraries/python/MLRsearch/NdrPdrResult.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining NdrPdrResult class."""
-
-from .ReceiveRateInterval import ReceiveRateInterval
-
-
-class NdrPdrResult:
- """Two measurement intervals, return value of search algorithms.
-
- Partial fraction is NOT part of the result. Pdr interval should be valid
- for all partial fractions implied by the interval."""
-
- def __init__(self, ndr_interval, pdr_interval):
- """Store the measured intervals after checking argument types.
-
- :param ndr_interval: Object containing data for NDR part of the result.
- :param pdr_interval: Object containing data for PDR part of the result.
- :type ndr_interval: ReceiveRateInterval.ReceiveRateInterval
- :type pdr_interval: ReceiveRateInterval.ReceiveRateInterval
- """
- # TODO: Type checking is not very pythonic,
- # perhaps users can fix wrong usage without it?
- if not isinstance(ndr_interval, ReceiveRateInterval):
- raise TypeError(
- f"ndr_interval, is not a ReceiveRateInterval: {ndr_interval!r}"
- )
- if not isinstance(pdr_interval, ReceiveRateInterval):
- raise TypeError(
- f"pdr_interval, is not a ReceiveRateInterval: {pdr_interval!r}"
- )
- self.ndr_interval = ndr_interval
- self.pdr_interval = pdr_interval
-
- def width_in_goals(self, relative_width_goal):
- """Return a debug string related to current widths in logarithmic scale.
-
- :param relative_width_goal: The goal difference between upper bound
- and lower bound, expressed as a fraction of the upper bound.
- :type relative_width_goal: float
- :returns: Message containing NDR and PDR widths in goals.
- :rtype: str
- """
- return f"ndr {self.ndr_interval.width_in_goals(relative_width_goal)};" \
- f" pdr {self.pdr_interval.width_in_goals(relative_width_goal)}"
-
- def __str__(self):
- """Return string as tuple of named values."""
- return f"NDR={self.ndr_interval!s};PDR={self.pdr_interval!s}"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"NdrPdrResult(ndr_interval={self.ndr_interval!r}," \
- f"pdr_interval={self.pdr_interval!r})"
diff --git a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py b/resources/libraries/python/MLRsearch/ReceiveRateInterval.py
deleted file mode 100644
index eff23e8bcc..0000000000
--- a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ReceiveRateInterval class."""
-
-import math
-
-from .ReceiveRateMeasurement import ReceiveRateMeasurement
-
-
-class ReceiveRateInterval:
- """Structure defining two Rr measurements, and their relation."""
-
- def __init__(self, measured_low, measured_high):
- """Store the bound measurements after checking argument types.
-
- :param measured_low: Measurement for the lower bound.
- :param measured_high: Measurement for the upper bound.
- :type measured_low: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type measured_high: ReceiveRateMeasurement.ReceiveRateMeasurement
- """
- # TODO: Type checking is not very pythonic,
- # perhaps users can fix wrong usage without it?
- if not isinstance(measured_low, ReceiveRateMeasurement):
- raise TypeError(
- f"measured_low is not a ReceiveRateMeasurement: "
- f"{measured_low!r}"
- )
- if not isinstance(measured_high, ReceiveRateMeasurement):
- raise TypeError(
- f"measured_high is not a ReceiveRateMeasurement: "
- f"{measured_high!r}"
- )
- self.measured_low = measured_low
- self.measured_high = measured_high
- # Declare secondary quantities to appease pylint.
- self.abs_tr_width = None
- """Absolute width of target transmit rate. Upper minus lower."""
- self.rel_tr_width = None
- """Relative width of target transmit rate. Absolute divided by upper."""
- self.sort()
-
- def sort(self):
- """Sort bounds by target Tr, compute secondary quantities."""
- if self.measured_low.target_tr > self.measured_high.target_tr:
- self.measured_low, self.measured_high = (
- self.measured_high, self.measured_low
- )
- self.abs_tr_width = (
- self.measured_high.target_tr - self.measured_low.target_tr
- )
- self.rel_tr_width = self.abs_tr_width / self.measured_high.target_tr
-
- def width_in_goals(self, relative_width_goal):
- """Return float value.
-
- Relative width goal is some (negative) value on logarithmic scale.
- Current relative width is another logarithmic value.
- Return the latter divided by the former.
- This is useful when investigating how surprising widths came to be.
-
- :param relative_width_goal: The goal difference between upper bound
- and lower bound, expressed as a fraction of the upper bound.
- :type relative_width_goal: float
- :returns: Current width as logarithmic multiple of goal width [1].
- :rtype: float
- """
- return math.log(1.0 - self.rel_tr_width) / math.log(
- 1.0 - relative_width_goal)
-
- def __str__(self):
- """Return string as half-open interval."""
- return f"[{self.measured_low!s};{self.measured_high!s})"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"ReceiveRateInterval(measured_low={self.measured_low!r}," \
- f"measured_high={self.measured_high!r})"
diff --git a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py b/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py
deleted file mode 100644
index c732e66026..0000000000
--- a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ReceiveRateMeasurement class."""
-
-
-class ReceiveRateMeasurement:
- """Structure defining the result of single Rr measurement."""
-
- def __init__(
- self, duration, target_tr, transmit_count, loss_count,
- approximated_duration=0.0, partial_transmit_count=0):
- """Constructor, normalize primary and compute secondary quantities.
-
- If approximated_duration is nonzero, it is stored.
- If approximated_duration is zero, duration value is stored.
- Either way, additional secondary quantities are computed
- from the stored value.
-
- If transmit_count is zero, the loss fraction is set to one
- and the receive fractions are set to zero.
-
- In some cases, traffic generator does not attempt all the needed
- transactions. In that case, nonzero partial_transmit_count
- holds (an estimate of) count of the actually attempted transactions.
- This is used to populate some secondary quantities.
-
- TODO: Use None instead of zero?
-
- :param duration: Measurement duration [s].
- :param target_tr: Target transmit rate [pps].
- If bidirectional traffic is measured, this is bidirectional rate.
- :param transmit_count: Number of packets transmitted [1].
- :param loss_count: Number of packets transmitted but not received [1].
- :param approximated_duration: Estimate of the actual time of the trial.
- :param partial_transmit_count: Estimate count of actually attempted
- transactions.
- :type duration: float
- :type target_tr: float
- :type transmit_count: int
- :type loss_count: int
- :type approximated_duration: float
- :type partial_transmit_count: int
- """
- self.duration = float(duration)
- self.target_tr = float(target_tr)
- self.transmit_count = int(transmit_count)
- self.loss_count = int(loss_count)
- self.receive_count = transmit_count - loss_count
- self.transmit_rate = transmit_count / self.duration
- self.loss_rate = loss_count / self.duration
- self.receive_rate = self.receive_count / self.duration
- self.loss_fraction = (
- float(self.loss_count) / self.transmit_count
- if self.transmit_count > 0 else 1.0
- )
- self.receive_fraction = (
- float(self.receive_count) / self.transmit_count
- if self.transmit_count > 0 else 0.0
- )
- self.approximated_duration = (
- float(approximated_duration) if approximated_duration
- else self.duration
- )
- self.approximated_receive_rate = (
- self.receive_count / self.approximated_duration
- if self.approximated_duration > 0.0 else 0.0
- )
- # If the traffic generator is unreliable and sends fewer packets,
- # the absolute receive rate might be too low for next target.
- self.partial_transmit_count = (
- int(partial_transmit_count) if partial_transmit_count
- else self.transmit_count
- )
- self.partial_receive_fraction = (
- float(self.receive_count) / self.partial_transmit_count
- if self.partial_transmit_count > 0 else 0.0
- )
- self.partial_receive_rate = (
- self.target_tr * self.partial_receive_fraction
- )
- # We use relative packet ratios in order to support cases
- # where target_tr is in transactions per second,
- # but there are multiple packets per transaction.
- self.relative_receive_rate = (
- self.target_tr * self.receive_count / self.transmit_count
- )
-
- def __str__(self):
- """Return string reporting input and loss fraction."""
- return f"d={self.duration!s},Tr={self.target_tr!s}," \
- f"Df={self.loss_fraction!s}"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"ReceiveRateMeasurement(duration={self.duration!r}," \
- f"target_tr={self.target_tr!r}," \
- f"transmit_count={self.transmit_count!r}," \
- f"loss_count={self.loss_count!r}," \
- f"approximated_duration={self.approximated_duration!r}," \
- f"partial_transmit_count={self.partial_transmit_count!r})"
diff --git a/resources/libraries/python/MLRsearch/__init__.py b/resources/libraries/python/MLRsearch/__init__.py
index 70c713eaa0..09ce7e6719 100644
--- a/resources/libraries/python/MLRsearch/__init__.py
+++ b/resources/libraries/python/MLRsearch/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -14,3 +14,17 @@
"""
__init__ file for Python package "MLRsearch".
"""
+
+# TODO: Move submodules to separate modules.
+# Not obvious how to do that from PyPI point of view
+# without affecting the current CSIT global "resources" package root.
+# Probably it can be done by specifying multiple directories
+# in PYTHONPATH used throughout CSIT.
+
+# Import user-facing (API) stuff, so users do not need to know submodules.
+from .config import Config
+from .goal_result import GoalResult
+from .multiple_loss_ratio_search import MultipleLossRatioSearch
+from .pep3140 import Pep3140Dict
+from .search_goal import SearchGoal
+from .trial_measurement import AbstractMeasurer, MeasurementResult
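A brief illustration of the import style these re-exports enable; the names are exactly those listed above, so callers no longer need to know the submodules:

    from resources.libraries.python.MLRsearch import (
        AbstractMeasurer, Config, GoalResult, MeasurementResult,
        MultipleLossRatioSearch, Pep3140Dict, SearchGoal,
    )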
diff --git a/resources/libraries/python/MLRsearch/candidate.py b/resources/libraries/python/MLRsearch/candidate.py
new file mode 100644
index 0000000000..16bbe60bae
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/candidate.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Candidate class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from functools import total_ordering
+from typing import Optional
+
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .selector import Selector
+
+
+@total_ordering
+@dataclass(frozen=True)
+class Candidate:
+ """Class describing next trial inputs, as nominated by a selector.
+
+ As each selector is notified by the controller when its nominated load
+ becomes the winner, a reference to the selector is also included here.
+
+ The rest of the code focuses on defining the ordering between candidates.
+ When two instances are compared, the lesser has higher priority
+ for choosing which trial is actually performed next.
+
+ As Python implicitly converts values to bool in many places
+ (e.g. in "if" statement), any instance is called "truthy" if it converts
+ to True, and "falsy" if it converts to False.
+ To make such places nice and readable, __bool__ method is implemented
+ in a way that a candidate instance is falsy if its load is None.
+ As a falsy candidate never gets measured,
+ other fields of a falsy instance are irrelevant.
+ """
+
+ load: Optional[DiscreteLoad] = None
+ """Measure at this intended load. None if no load nominated by selector."""
+ duration: float = None
+ """Trial duration as chosen by the selector."""
+ width: Optional[DiscreteWidth] = None
+ """Set the global width to this when this candidate becomes the winner."""
+ selector: Selector = None
+ """Reference to the selector instance which nominated this candidate."""
+
+ def __str__(self) -> str:
+ """Convert trial inputs into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"d={self.duration},l={self.load}"
+
+ def __eq__(self, other: Candidate) -> bool:
+ """Return wheter self is identical to the other candidate.
+
+ This is just a pretense for total ordering wrapper to work.
+ In reality, MLRsearch shall never test equivalence,
+ so we save space by just raising RuntimeError if this is ever called.
+
+ :param other: The other instance to compare to.
+ :type other: Candidate
+ :returns: True if the instances are equivalent.
+ :rtype: bool
+ :raises RuntimeError: Always, to prevent unintended usage.
+ """
+ raise RuntimeError("Candidate equality comparison shall not be needed.")
+
+ def __lt__(self, other: Candidate) -> bool:
+ """Return whether self should be measured before other.
+
+ In the decreasing order of importance:
+ Non-None load is preferred.
+ Self is less than other when both loads are None.
+ Lower offered load is preferred.
+ Longer trial duration is preferred.
+ Non-None width is preferred.
+ Larger width is preferred.
+ Self is preferred.
+
+ The logic comes from the desire to save time and being conservative.
+
+ :param other: The other instance to compare to.
+ :type other: Candidate
+ :returns: True if self should be measured sooner.
+ :rtype: bool
+ """
+ if not self.load:
+ if other.load:
+ return False
+ return True
+ if not other.load:
+ return True
+ if self.load < other.load:
+ return True
+ if self.load > other.load:
+ return False
+ if self.duration > other.duration:
+ return True
+ if self.duration < other.duration:
+ return False
+ if not self.width:
+ if other.width:
+ return False
+ return True
+ if not other.width:
+ return True
+ return self.width >= other.width
+
+ def __bool__(self) -> bool:
+ """Does this candidate choose to perform any trial measurement?
+
+ :returns: True if yes, it does choose to perform.
+ :rtype: bool
+ """
+ return bool(self.load)
+
+ @staticmethod
+ def nomination_from(selector: Selector) -> Candidate:
+ """Call nominate on selector, wrap into Candidate instance to return.
+
+ To avoid a dependency cycle, only candidate depends on selector;
+ therefore the selector cannot know how to wrap its nomination
+ into a full candidate instance.
+ This factory method finishes the wrapping.
+
+ :param selector: Selector to call.
+ :type selector: Selector
+ :returns: Newly created Candidate instance with nominated trial inputs.
+ :rtype: Candidate
+ """
+ load, duration, width = selector.nominate()
+ return Candidate(
+ load=load,
+ duration=duration,
+ width=width,
+ selector=selector,
+ )
+
+ def won(self) -> None:
+ """Inform selector its candidate became a winner."""
+ self.selector.won(self.load)
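A minimal sketch of the ordering defined above; plain numbers stand in for DiscreteLoad instances here, which is a simplification for illustration only:

    a = Candidate(load=100.0, duration=10.0)
    b = Candidate(load=100.0, duration=30.0)
    c = Candidate()          # no load nominated, falsy
    winner = min(a, b, c)    # b: same load, longer duration preferred
    print(winner, bool(c))   # d=30.0,l=100.0 False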
diff --git a/resources/libraries/python/MLRsearch/config.py b/resources/libraries/python/MLRsearch/config.py
new file mode 100644
index 0000000000..7aa8ed75a8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/config.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Config class."""
+
+from collections.abc import Iterable
+from dataclasses import dataclass
+from typing import Optional
+
+from .dataclass import DataclassProperty
+from .search_goal import SearchGoal
+from .search_goal_tuple import SearchGoalTuple
+
+
+@dataclass
+class Config:
+ """Structure containing several static config items.
+
+ The main MLRsearch algorithm uses multiple customizable values.
+ Pylint complains if the values appear as long argument lists
+ or multiple local variables.
+
+ This class offers a storage for values which do not contain
+ internally mutable state and are set at an unknown time
+ before the search starts. This way users can override only some values,
+ and do it over multiple calls.
+ All "official" user inputs are contained here.
+
+ Properties are defined to enforce the requirements on allowed values.
+ All fields have default values, so instances can be created without any.
+ It is still recommended to set all values after instantiation,
+ as the defaults may change in the next version.
+
+ As some relations between values of different fields are required,
+ users must take care to set them in the correct order.
+
+ For example, min_load has to be set to a value smaller
+ than the current value of max_load.
+ """
+
+ # Externally visible "fields" (but in fact redefined as properties).
+ goals: SearchGoalTuple = SearchGoalTuple((SearchGoal(),))
+ """Container holding search goals."""
+ min_load: float = 9001.0
+ """Each trial measurement must have intended load at least this [tps]."""
+ max_load: float = 1e9
+ """Each trial measurement must have intended load at most this [tps]."""
+ search_duration_max: float = 1200.0
+ """The search will end as a failure this long [s] after it is started."""
+ warmup_duration: float = 1.0
+ """If specified, one trial at max load and this duration is performed
+ before the usual search starts. None converts to zero and means no warmup.
+ The results of that one trial are ignored."""
+
+ @DataclassProperty
+ def goals(self) -> SearchGoalTuple:
+ """Return the reference to the current container of goals.
+
+ :returns: The current container instance.
+ :rtype: SearchGoalTuple
+ """
+ return self._goals
+
+ @goals.setter
+ def goals(self, goals: Iterable[SearchGoal]) -> None:
+ """Create and store the goal container.
+
+ :param goals: Search goals to add to the container to store.
+ :type goals: Iterable[SearchGoal]
+ :raises ValueError: If there are no goals.
+ :raises TypeError: If any of the goals is not a SearchGoal.
+ """
+ self._goals = SearchGoalTuple(goals)
+
+ @DataclassProperty
+ def min_load(self) -> float:
+ """Getter for min load, no logic here.
+
+ :returns: Currently set minimal intended load [tps].
+ :rtype: float
+ """
+ return self._min_load
+
+ @min_load.setter
+ def min_load(self, load: float) -> None:
+ """Set min load after converting type and checking value.
+
+ :param load: Minimal intended load [tps] to set.
+ :type load: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ load = float(load)
+ if load <= 0.0:
+ raise ValueError(f"Min load {load} must be positive.")
+ # At the time init is first called, _max_load is not set yet.
+ if hasattr(self, "_max_load") and load >= self.max_load:
+ raise ValueError(f"Min load {load} must be smaller.")
+ self._min_load = load
+
+ @DataclassProperty
+ def max_load(self) -> float:
+ """Getter for max load, no logic here.
+
+ :returns: Currently set maximal intended load [tps].
+ :rtype: float
+ """
+ return self._max_load
+
+ @max_load.setter
+ def max_load(self, load: float) -> None:
+ """Set max load after converting type and checking value.
+
+ :param load: Maximal intended load [tps] to set.
+ :type load: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ load = float(load)
+ if load <= self.min_load:
+ raise ValueError(f"Max load {load} must be bigger.")
+ self._max_load = load
+
+ @DataclassProperty
+ def search_duration_max(self) -> float:
+ """Getter for max search duration, no logic here.
+
+ :returns: Currently set max search duration [s].
+ :rtype: float
+ """
+ return self._search_duration_max
+
+ @search_duration_max.setter
+ def search_duration_max(self, duration: float) -> None:
+ """Set max search duration after converting and checking value.
+
+ :param duration: Search duration maximum [s] to set.
+ :type duration: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ duration = float(duration)
+ if duration <= 0.0:
+ raise ValueError(f"Search duration max too small: {duration}")
+ self._search_duration_max = duration
+
+ @DataclassProperty
+ def warmup_duration(self) -> float:
+ """Getter for warmup duration, no logic here.
+
+ :returns: Currently set warmup duration [s].
+ :rtype: float
+ """
+ return self._warmup_duration
+
+ @warmup_duration.setter
+ def warmup_duration(self, duration: Optional[float]) -> None:
+ """Set warmup duration after converting and checking value.
+
+ Zero duration is treated as None, meaning no warmup trial.
+
+ :param duration: Warmup duration [s] to set.
+ :type duration: Optional(float)
+ :raises ValueError: If the argument is found invalid.
+ """
+ if duration:
+ duration = float(duration)
+ if duration < 0.0:
+ raise ValueError(f"Warmup duration too small: {duration}")
+ else:
+ duration = 0.0
+ self._warmup_duration = duration
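Because the setters validate each assignment against the current values, order matters; a short usage sketch with illustrative numbers:

    config = Config()                # all defaults
    config.max_load = 4e6            # adjust max first ...
    config.min_load = 2e4            # ... so the min check against max passes
    config.search_duration_max = 600.0
    config.warmup_duration = 0.0     # falsy, stored as 0.0: no warmup trial
    config.goals = [SearchGoal()]    # any iterable of SearchGoal is accepted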
diff --git a/resources/libraries/python/MLRsearch/dataclass/__init__.py b/resources/libraries/python/MLRsearch/dataclass/__init__.py
new file mode 100644
index 0000000000..e546b090c9
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "dataclass".
+"""
+
+from .dc_property import DataclassProperty
+from .field import secondary_field
diff --git a/resources/libraries/python/MLRsearch/dataclass/dc_property.py b/resources/libraries/python/MLRsearch/dataclass/dc_property.py
new file mode 100644
index 0000000000..7f3b49aeb8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/dc_property.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DataclassProperty class.
+
+The main issue that needs support is dataclasses with properties
+(including setters) and with (immutable) default values.
+
+First, this explains how property ends up passed as default constructor value:
+https://florimond.dev/en/posts/2018/10/
+/reconciling-dataclasses-and-properties-in-python/
+TL;DR: By the time __init__ is generated, original class variable (type hint)
+is replaced by property (method definition).
+
+Second, there are ways to deal with that:
+https://stackoverflow.com/a/61480946
+TL;DR: It relies on the underscored field being replaced by the value.
+
+But that does not work for field which use default_factory (or no default)
+(the underscored class field is deleted instead).
+So another way is needed to cover those cases,
+ideally without the need to define both original and underscored field.
+
+This implementation relies on the fact that decorators are executed
+while the class fields still exist, and the decorated function
+knows its own name, so the decorator can get the value stored in
+the class field and store it as an additional attribute of the getter function.
+Then for setter, the property contains the getter (as an unbound function),
+so it can access the additional attribute to get the value.
+
+This approach circumvents the precautions dataclasses take to prevent mishaps
+when a single mutable object is shared between multiple instances.
+So it is up to setters to create an appropriate copy of the default object
+if the default value is mutable.
+
+The default value cannot be MISSING nor Field nor DataclassProperty,
+otherwise the intended logic breaks.
+"""
+
+from __future__ import annotations
+
+from dataclasses import Field, MISSING
+from functools import wraps
+from inspect import stack
+from typing import Callable, Optional, TypeVar, Union
+
+
+Self = TypeVar("Self")
+"""Type for the dataclass instances being created using properties."""
+Value = TypeVar("Value")
+"""Type for the value the property (getter, setter) handles."""
+
+
+def _calling_scope_variable(name: str) -> Value:
+ """Get a variable from a higher scope.
+
+ This feels dirty, but without this the syntactic sugar
+ would not be sweet enough.
+
+ The implementation is copied from https://stackoverflow.com/a/14694234
+ with the difference of raising RuntimeError (instead of returning None)
+ if no variable of that name is found in any of the scopes.
+
+ :param name: Name of the variable to access.
+ :type name: str
+ :returns: The value of the found variable.
+ :rtype: Value
+ :raises RuntimeError: If the variable is not found in any calling scope.
+ """
+ frame = stack()[1][0]
+ while name not in frame.f_locals:
+ frame = frame.f_back
+ if frame is None:
+ raise RuntimeError(f"Field {name} value not found.")
+ return frame.f_locals[name]
+
+
+class DataclassProperty(property):
+ """Subclass of property, handles default values for dataclass fields.
+
+ If a dataclass field does not specify a default value (nor default_factory),
+ this is not needed, and in fact it will not work (so use built-in property).
+
+ This implementation seamlessly finds and inserts the default value
+ (can be mutable) into a new attribute of the getter function.
+ Before calling a setter function in init (recognized by type),
+ the default value is retrieved and passed transparently to the setter.
+ It is the responsibility of the setter to appropriately clone the value,
+ in order to prevent multiple instances sharing the same mutable value.
+ """
+
+ def __init__(
+ self,
+ fget: Optional[Callable[[], Value]] = None,
+ fset: Optional[Callable[[Self, Value], None]] = None,
+ fdel: Optional[Callable[[], None]] = None,
+ doc: Optional[str] = None,
+ ):
+ """Find and store the default value, construct the property.
+
+ See this for how the superclass property works:
+ https://docs.python.org/3/howto/descriptor.html#properties
+
+ :param fget: Getter (unbound) function to use, if any.
+ :param fset: Setter (unbound) function to use, if any.
+ :param fdel: Deleter (unbound) function to use, if any.
+ :param doc: Docstring to display when examining the property.
+ :type fget: Optional[Callable[[Self], Value]]
+ :type fset: Optional[Callable[[Self, Value], None]]
+ :type fdel: Optional[Callable[[Self], None]]
+ :type doc: Optional[str]
+ """
+ variable_found = _calling_scope_variable(fget.__name__)
+ if not isinstance(variable_found, DataclassProperty):
+ if isinstance(variable_found, Field):
+ if variable_found.default is not MISSING:
+ fget.default_value = variable_found.default
+ # Else do not store any default value.
+ else:
+ fget.default_value = variable_found
+ # Else this is the second time init is called (when setting setter),
+ # in which case the default is already stored into fget.
+ super().__init__(fget=fget, fset=fset, fdel=fdel, doc=doc)
+
+ def setter(
+ self,
+ fset: Optional[Callable[[Self, Value], None]],
+ ) -> DataclassProperty:
+ """Return new instance with a wrapped setter function set.
+
+ If the argument is None, call superclass method.
+
+ The wrapped function recognizes when it is called in init
+ (by the fact the value argument is of type DataclassProperty)
+ and in that case it extracts the stored default and passes that
+ to the user-defined setter function.
+
+ :param fset: Setter function to wrap and apply.
+ :type fset: Optional[Callable[[Self, Value], None]]
+ :returns: New property instance with correct setter function set.
+ :rtype: DataclassProperty
+ """
+ if fset is None:
+ return super().setter(fset)
+
+ @wraps(fset)
+ def wrapped(sel_: Self, val: Union[Value, DataclassProperty]) -> None:
+ """Extract default from getter if needed, call the user setter.
+
+ The sel_ parameter is listed explicitly, to signify
+ this is an unbound function, not a bound method yet.
+
+ :param sel_: Instance of dataclass (not of DataclassProperty)
+ to set the value on.
+ :param val: Set this value, or the default value stored there.
+ :type sel_: Self
+ :type val: Union[Value, DataclassProperty]
+ """
+ if isinstance(val, DataclassProperty):
+ val = val.fget.default_value
+ fset(sel_, val)
+
+ return super().setter(wrapped)
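A minimal sketch of the pattern this class enables, mirroring how Config above uses it; the Example class and its rate field are made up for illustration:

    from dataclasses import dataclass
    from resources.libraries.python.MLRsearch.dataclass import DataclassProperty

    @dataclass
    class Example:
        rate: float = 1.0            # immutable default, shadowed by the property

        @DataclassProperty
        def rate(self) -> float:
            return self._rate

        @rate.setter
        def rate(self, value: float) -> None:
            value = float(value)
            if value <= 0.0:
                raise ValueError(f"Rate must be positive: {value}")
            self._rate = value

    example = Example()              # setter receives the stored default 1.0
    example.rate = 2.5               # explicit assignment uses the same setter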
diff --git a/resources/libraries/python/MLRsearch/dataclass/field.py b/resources/libraries/python/MLRsearch/dataclass/field.py
new file mode 100644
index 0000000000..55d9d0879f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/field.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining secondary_field function.
+
+Just a shorthand for a frequently repeated expression.
+
+The main point is that this dataclass field is not used in init.
+Maybe it is a derived value of a frozen dataclass.
+Maybe it is a cache to help avoiding repeated computation.
+Maybe it is a temporary value stored in one method and read in another method.
+In any case, the caller does not need to know it is here,
+so it is excluded from repr, hashing, ordering and similar.
+"""
+
+from dataclasses import Field, field
+
+
+def secondary_field() -> Field:
+ """Return newly created Field with non-default arguments
+
+ In practice, it seems to be fine to reuse the resulting Field instance
+ when defining multiple dataclass fields,
+ but we keep this as a function to improve readability.
+
+ :returns: A new Field instance useful for secondary fields.
+ :rtype: Field
+ """
+ return field(
+ default=None,
+ init=False,
+ repr=False,
+ compare=False,
+ )
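A small sketch of the intended use, mirroring DiscreteInterval below; the Span class is made up for illustration:

    from dataclasses import dataclass
    from resources.libraries.python.MLRsearch.dataclass import secondary_field

    @dataclass
    class Span:
        low: float
        high: float
        width: float = secondary_field()   # derived, excluded from init/repr/compare

        def __post_init__(self) -> None:
            self.width = self.high - self.low

    print(Span(2.0, 5.0))   # Span(low=2.0, high=5.0), width not shown in repr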
diff --git a/resources/libraries/python/MLRsearch/discrete_interval.py b/resources/libraries/python/MLRsearch/discrete_interval.py
new file mode 100644
index 0000000000..0a3bf443a8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_interval.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteInterval class."""
+
+from dataclasses import dataclass
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+
+
+# TODO: Can this be frozen?
+@dataclass
+class DiscreteInterval:
+ """Interval class with more computations available.
+
+ Along discrete form of width,
+ a MLR specific way for halving the interval is also included.
+
+ The two primary field values do not have to be valid relevant bounds,
+ but at the end of the search, they usually are.
+
+ The load values must be round.
+ """
+
+ lower_bound: DiscreteLoad
+ """Value for the lower intended load (or load stats or similar)."""
+ upper_bound: DiscreteLoad
+ """Value for the higher intended load (or load stats or similar)."""
+ # Primary fields above, derived below.
+ discrete_width: DiscreteWidth = secondary_field()
+ """Discrete width between intended loads (upper_bound minus lower_bound)."""
+
+ def __post_init__(self) -> None:
+ """Sort bounds by intended load, compute secondary quantities.
+
+ :raises RuntimeError: If a result used non-rounded load.
+ """
+ if not self.lower_bound.is_round:
+ raise RuntimeError(f"Non-round lower bound: {self.lower_bound!r}")
+ if not self.upper_bound.is_round:
+ raise RuntimeError(f"Non-round upper bound: {self.upper_bound!r}")
+ if self.lower_bound > self.upper_bound:
+ tmp = self.lower_bound
+ self.lower_bound = self.upper_bound
+ self.upper_bound = tmp
+ self.discrete_width = self.upper_bound - self.lower_bound
+
+ def __str__(self) -> str:
+ """Convert to a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"lower_bound=({self.lower_bound}),upper_bound=({self.upper_bound})"
+ )
+
+ # TODO: Use "target" instad of "goal" in argument and variable names.
+
+ def width_in_goals(self, goal: DiscreteWidth) -> float:
+ """Return relative width as a multiple of the given goal (int form).
+
+ Integer forms are used for computation, safe as loads are rounded.
+ The result is a float, as self int may not be divisible by goal int.
+
+ :param goal: A relative width amount to be used as a unit.
+ :type goal: DiscreteWidth
+ :returns: Self width in multiples of (integer form of) goal width.
+ :rtype: float
+ """
+ return int(self.discrete_width) / int(goal)
+
+ def middle(self, goal: DiscreteWidth) -> DiscreteLoad:
+ """Return new intended load (discrete form) in the middle.
+
+ All calculations are based on int forms.
+
+ One of the halves is rounded to a power-of-two multiple of the goal.
+ The power that leads to most even split is used.
+ Lower width is the smaller one (if not exactly even).
+
+ This approach prefers lower loads (to remain conservative) and can save
+ some measurements (when all middle measurements have high loss).
+ Note that when competing with external search from above,
+ that search is already likely to produce widths that are
+ power-of-two multiples of the target width.
+
+ If the interval width is one goal (or less), RuntimeError is raised.
+ If the interval width is between one and two goals (exclusive),
+ a more even split is attempted (using half the goal value).
+
+ :param goal: Target width goal to use for uneven halving.
+ :type goal: DiscreteWidth
+ :returns: New load to use for bisecting.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ int_self, int_goal = int(self.discrete_width), int(goal)
+ if int_self <= int_goal:
+ raise RuntimeError(f"Do not halve small enough interval: {self!r}")
+ if int_self == 2 * int_goal:
+ # Even split, return here simplifies the while loop below.
+ return self.lower_bound + goal
+ if int_self < 2 * int_goal:
+ # This can only happen when int_goal >= 2.
+ # In this case, we do not have good enough split at this width goal,
+ # but maybe this is not the final target, so we can attempt
+ # a split at half width goal.
+ if not int_goal % 2:
+ return self.middle(goal=goal.half_rounded_down())
+ # Odd int_goal, so this must be the last phase. Do even split.
+ lo_width = self.discrete_width.half_rounded_down()
+ return self.lower_bound + lo_width
+ hi_width = goal
+ lo_width = self.discrete_width - hi_width
+ # We know lo_width > hi_width because we did not do the even split.
+ while 1:
+ hi2_width = hi_width * 2
+ lo2_width = self.discrete_width - hi2_width
+ if lo2_width <= hi2_width:
+ break
+ hi_width, lo_width = hi2_width, lo2_width
+ # Which of the two options is more even? Product decides.
+ if int(hi_width) * int(lo_width) > int(hi2_width) * int(lo2_width):
+ # Previous attempt was more even, but hi_width was the smaller one.
+ lo2_width = hi_width
+ # Else lo2_width is more even and no larger than hi2_width.
+ return self.lower_bound + lo2_width
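The uneven halving above may be easier to follow with plain integers standing in for the discrete widths; this sketch mirrors only the main loop (the small-width special cases are omitted) and is not the real API:

    def split_low_part(width: int, goal: int) -> int:
        """Plain-int illustration: lower part of the uneven split."""
        assert width >= 2 * goal
        if width == 2 * goal:
            return goal
        hi, lo = goal, width - goal
        while True:
            hi2, lo2 = 2 * hi, width - 2 * hi
            if lo2 <= hi2:
                break
            hi, lo = hi2, lo2
        # Keep the more even of the last two candidate splits,
        # returning its smaller part as the lower width.
        if hi * lo > hi2 * lo2:
            lo2 = hi
        return lo2

    print(split_low_part(100, 7))   # 44: the split is 44 + 56, where 56 == 7 * 2**3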
diff --git a/resources/libraries/python/MLRsearch/discrete_load.py b/resources/libraries/python/MLRsearch/discrete_load.py
new file mode 100644
index 0000000000..a75b4acf96
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_load.py
@@ -0,0 +1,316 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteLoad class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from functools import total_ordering
+from typing import Callable, Optional, Union
+
+from .load_rounding import LoadRounding
+from .discrete_width import DiscreteWidth
+
+
+@total_ordering
+@dataclass
+class DiscreteLoad:
+ """Structure to store load value together with its rounded integer form.
+
+ LoadRounding instance is needed to enable conversion between two forms.
+ Conversion methods and factories are added for convenience.
+
+ In general, the float form is allowed to differ from conversion from int.
+
+ Comparisons are supported, acting on the float load component.
+ Additive operations are supported, acting on int form.
+ Multiplication by a float constant is supported, acting on float form.
+
+ As with all user-defined classes by default, all instances are truthy.
+ That is useful when dealing with Optional values, as None is falsy.
+
+ This dataclass is effectively frozen, but cannot be marked as such
+ as that would prevent LoadStats from being its subclass.
+ """
+
+ # For most debugs, rounding in repr just takes space.
+ rounding: LoadRounding = field(repr=False, compare=False)
+ """Rounding instance to use for conversion."""
+ float_load: float = None
+ """Float form of intended load [tps], usable for measurer."""
+ int_load: int = field(compare=False, default=None)
+ """Integer form, usable for exact computations."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, compute missing information.
+
+ At this point, it is allowed for float load to differ from
+ conversion from int load. MLRsearch should round explicitly later,
+ based on its additional information.
+
+ :raises RuntimeError: If both init arguments are None.
+ """
+ if self.float_load is None and self.int_load is None:
+ raise RuntimeError("Float or int value is needed.")
+ if self.float_load is None:
+ self.int_load = int(self.int_load)
+ self.float_load = self.rounding.int2float(self.int_load)
+ else:
+ self.float_load = float(self.float_load)
+ self.int_load = self.rounding.float2int(self.float_load)
+
+ def __str__(self) -> str:
+ """Convert to a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"int_load={int(self)}"
+
+ # Explicit comparison operators.
+ # Those generated with dataclass order=True do not allow subclass instances.
+
+ def __eq__(self, other: Optional[DiscreteLoad]) -> bool:
+ """Return whether the other instance has the same float form.
+
+ None is effectively considered to be an unequal instance.
+
+ :param other: Other instance to compare to, or None.
+ :type other: Optional[DiscreteLoad]
+ :returns: True only if float forms are exactly equal.
+ :rtype: bool
+ """
+ if other is None:
+ return False
+ return float(self) == float(other)
+
+ def __lt__(self, other: DiscreteLoad) -> bool:
+ """Return whether self has smaller float form than the other instance.
+
+ None is not supported, as MLRsearch does not need that
+ (so when None appears we want to raise).
+
+ :param other: Other instance to compare to.
+ :type other: DiscreteLoad
+ :returns: True only if the float form of self is strictly smaller.
+ :rtype: bool
+ """
+ return float(self) < float(other)
+
+ def __hash__(self) -> int:
+ """Return a hash based on the float value.
+
+ With this, the instance can be used as if it was immutable and hashable,
+ e.g. it can be a key in a dict.
+
+ :returns: Hash value for this instance.
+ :rtype: int
+ """
+ return hash(float(self))
+
+ @property
+ def is_round(self) -> bool:
+ """Return whether float load matches converted int load.
+
+ :returns: False if float load is not rounded.
+ :rtype: bool
+ """
+ expected = self.rounding.int2float(self.int_load)
+ return expected == self.float_load
+
+ def __int__(self) -> int:
+ """Return the int value.
+
+ :returns: The int field value.
+ :rtype: int
+ """
+ return self.int_load
+
+ def __float__(self) -> float:
+ """Return the float value.
+
+ :returns: The float field value [tps].
+ :rtype: float
+ """
+ return self.float_load
+
+ @staticmethod
+ def int_conver(rounding: LoadRounding) -> Callable[[int], DiscreteLoad]:
+ """Return a factory that turns an int load into a discrete load.
+
+ :param rounding: Rounding instance needed.
+ :type rounding: LoadRounding
+ :returns: Factory to use when converting from int.
+ :rtype: Callable[[int], DiscreteLoad]
+ """
+
+ def factory_int(int_load: int) -> DiscreteLoad:
+ """Use rounding and int load to create discrete load.
+
+ :param int_load: Intended load in integer form.
+ :type int_load: int
+ :returns: New discrete load instance matching the int load.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=rounding, int_load=int_load)
+
+ return factory_int
+
+ @staticmethod
+ def float_conver(rounding: LoadRounding) -> Callable[[float], DiscreteLoad]:
+ """Return a factory that turns a float load into a discrete load.
+
+ :param rounding: Rounding instance needed.
+ :type rounding: LoadRounding
+ :returns: Factory to use when converting from float.
+ :rtype: Callable[[float], DiscreteLoad]
+ """
+
+ def factory_float(float_load: float) -> DiscreteLoad:
+ """Use rounding instance and float load to create discrete load.
+
+ The float form is not rounded yet.
+
+ :param float_load: Intended load in float form [tps].
+ :type float_load: float
+ :returns: New discrete load instance matching the float load.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=rounding, float_load=float_load)
+
+ return factory_float
+
+ def rounded_down(self) -> DiscreteLoad:
+ """Create and return new instance with float form matching int.
+
+ :returns: New instance with same int form and float form rounded down.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=self.rounding, int_load=int(self))
+
+ def hashable(self) -> DiscreteLoad:
+ """Return new equivalent instance.
+
+ This is mainly useful for conversion from unhashable subclasses,
+ such as LoadStats.
+ Rounding instance (reference) is copied from self.
+
+ :returns: New instance with values based on float form of self.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=self.rounding, float_load=float(self))
+
+ def __add__(self, width: DiscreteWidth) -> DiscreteLoad:
+ """Return newly constructed instance with width added to int load.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding two loads by mistake
+ (or adding int to load and similar).
+
+ :param width: Value to add to int load.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {width!r}")
+ return DiscreteLoad(
+ rounding=self.rounding,
+ int_load=self.int_load + int(width),
+ )
+
+ def __sub__(
+ self, other: Union[DiscreteWidth, DiscreteLoad]
+ ) -> Union[DiscreteLoad, DiscreteWidth]:
+ """Return result based on the argument type.
+
+ Load minus load is width, load minus width is load.
+ This allows the same operator to support both operations.
+
+ Rounding instance (reference) is copied from self.
+
+ :param other: Value to subtract from int load.
+ :type other: Union[DiscreteWidth, DiscreteLoad]
+ :returns: Resulting width or load.
+ :rtype: Union[DiscreteLoad, DiscreteWidth]
+ :raises RuntimeError: If the argument type is not supported.
+ """
+ if isinstance(other, DiscreteWidth):
+ return self._minus_width(other)
+ if isinstance(other, DiscreteLoad):
+ return self._minus_load(other)
+ raise RuntimeError(f"Unsupported type {other!r}")
+
+ def _minus_width(self, width: DiscreteWidth) -> DiscreteLoad:
+ """Return newly constructed instance, width subtracted from int load.
+
+ Rounding instance (reference) is copied from self.
+
+ :param width: Value to subtract from int load.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(
+ rounding=self.rounding,
+ int_load=self.int_load - int(width),
+ )
+
+ def _minus_load(self, other: DiscreteLoad) -> DiscreteWidth:
+ """Return newly constructed width instance, difference of int loads.
+
+ Rounding instance (reference) is copied from self.
+
+ :param other: Value to subtract from int load.
+ :type other: DiscreteLoad
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ """
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_load - int(other),
+ )
+
+ def __mul__(self, coefficient: float) -> DiscreteLoad:
+ """Return newly constructed instance, float load multiplied by argument.
+
+ Rounding instance (reference) is copied from self.
+
+ :param coefficient: Value to multiply float load with.
+ :type coefficient: float
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If argument is unsupported.
+ """
+ if not isinstance(coefficient, float):
+ raise RuntimeError(f"Not float: {coefficient!r}")
+ if coefficient <= 0.0:
+ raise RuntimeError(f"Not positive: {coefficient!r}")
+ return DiscreteLoad(
+ rounding=self.rounding,
+ float_load=self.float_load * coefficient,
+ )
+
+ def __truediv__(self, coefficient: float) -> DiscreteLoad:
+ """Call multiplication with inverse argument.
+
+ :param coefficient: Value to divide float load with.
+ :type coefficient: float
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If argument is unsupported.
+ """
+ return self * (1.0 / coefficient)
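
A minimal usage sketch of the class above (load and goal values are arbitrary examples; only constructors and operators defined in this diff are assumed):

    rounding = LoadRounding(min_load=10000.0, max_load=1e7, float_goals=(0.005,))
    load = DiscreteLoad(rounding=rounding, float_load=1e6).rounded_down()
    width = DiscreteWidth(rounding=rounding, float_width=0.005).rounded_down()
    higher = load + width                              # addition acts on exact int forms
    assert int(higher) - int(load) == int(width)       # so it is exactly reversible
    assert isinstance(higher - load, DiscreteWidth)    # load minus load is a width
    assert float(load * 2.0) == 2.0 * float(load)      # multiplication acts on the float form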
diff --git a/resources/libraries/python/MLRsearch/discrete_result.py b/resources/libraries/python/MLRsearch/discrete_result.py
new file mode 100644
index 0000000000..882b6081c6
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_result.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteResult class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .discrete_load import DiscreteLoad
+from .trial_measurement import MeasurementResult
+
+
+@dataclass
+class DiscreteResult(MeasurementResult):
+ """A measurement result where intended load is also given as discrete load.
+
+ The discrete load has to be round and has to match the intended load.
+ """
+
+ # Must have default as superclass has fields with default values.
+ discrete_load: DiscreteLoad = None
+ """Intended load [tps]; discrete, round and equal to intended load."""
+
+ def __post_init__(self) -> None:
+ """Call super, verify intended and discrete loads are the same.
+
+ :raises TypeError: If discrete load is not DiscreteLoad.
+ :raises ValueError: If the discrete load is not round.
+ :raises ValueError: If the load does not match intended load.
+ """
+ super().__post_init__()
+ if not isinstance(self.discrete_load, DiscreteLoad):
+ raise TypeError(f"Not a discrete load: {self.discrete_load!r}")
+ if not self.discrete_load.is_round:
+ raise ValueError(f"Discrete load not round: {self.discrete_load!r}")
+ if float(self.discrete_load) != self.intended_load:
+ raise ValueError(f"Load mismatch: {self!r}")
+
+ @staticmethod
+ def with_load(
+ result: MeasurementResult, load: DiscreteLoad
+ ) -> DiscreteResult:
+ """Return result with added load.
+
+ :param result: A result, possibly without discrete load.
+ :param load: Discrete load to add.
+ :type result: MeasurementResult
+ :type load: DiscreteLoad
+ :returns: Equivalent result with matching discrete load.
+ :rtype: DiscreteResult
+ :raises TypeError: If discrete load is not DiscreteLoad.
+ :raises ValueError: If the discrete load is not round.
+ :raises ValueError: If the load does not match intended load.
+ """
+ return DiscreteResult(
+ intended_duration=result.intended_duration,
+ intended_load=result.intended_load,
+ offered_count=result.offered_count,
+ loss_count=result.loss_count,
+ forwarding_count=result.forwarding_count,
+ offered_duration=result.offered_duration,
+ duration_with_overheads=result.duration_with_overheads,
+ intended_count=result.intended_count,
+ discrete_load=load,
+ )
diff --git a/resources/libraries/python/MLRsearch/discrete_width.py b/resources/libraries/python/MLRsearch/discrete_width.py
new file mode 100644
index 0000000000..8a4845a83f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_width.py
@@ -0,0 +1,197 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteWidth class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+
+from .load_rounding import LoadRounding
+
+
+# TODO: Make properly frozen.
+@dataclass(order=True)
+class DiscreteWidth:
+ """Structure to store float width together with its rounded integer form.
+
+ The width does not have to be positive, i.e. the computed integer width
+ does not have to be larger than zero.
+
+ LoadRounding instance is needed to enable conversion between two forms.
+
+ Conversion and arithmetic methods are added for convenience.
+ Division and non-integer multiplication are intentionally not supported,
+ as MLRsearch should not seek unround widths when round ones are available.
+
+ The instance is effectively immutable, but not hashable as it refers
+ to the rounding instance, which is implemented as mutable
+ (although the mutations are not visible).
+ """
+
+ # For most debugs, rounding in repr just takes space.
+ rounding: LoadRounding = field(repr=False, compare=False)
+ """Rounding instance to use for conversion."""
+ float_width: float = None
+ """Relative width of float intended load.
+ This is treated as a constructor argument, and does not need to match
+ the int width. Int width is computed to be no wider than this."""
+ int_width: int = field(compare=False, default=None)
+ """Integer form, difference of integer loads.
+ This is the primary quantity used by most computations."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, compute missing information.
+
+ At this point, it is allowed for float width to be slightly larger
+ than the implied int width.
+
+ If both forms are specified, the float form is taken as primary
+ (thus the integer form is recomputed to match).
+
+ :raises RuntimeError: If both init arguments are None.
+ """
+ if self.float_width is None and self.int_width is None:
+ raise RuntimeError("Float or int value is needed.")
+ if self.float_width is None:
+ self.int_width = int(self.int_width)
+ min_load = self.rounding.int2float(0)
+ increased_load = self.rounding.int2float(self.int_width)
+ self.float_width = (increased_load - min_load) / increased_load
+ return
+ self.float_width = float(self.float_width)
+ min_load = self.rounding.int2float(0)
+ increased_load = min_load / (1.0 - self.float_width)
+ int_load = self.rounding.float2int(increased_load)
+ verify_load = self.rounding.int2float(int_load)
+ if verify_load > increased_load:
+ int_load -= 1
+ self.int_width = int_load
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"int_width={int(self)}"
+
+ def __int__(self) -> int:
+ """Return the integer form.
+
+ :returns: The int field value.
+ :rtype: int
+ """
+ return self.int_width
+
+ def __float__(self) -> float:
+ """Return the float form.
+
+ :returns: The float field value.
+ :rtype: float
+ """
+ return self.float_width
+
+ def __hash__(self) -> int:
+ """Return a hash based on the float value.
+
+ With this, the instance can be used as if it was immutable and hashable,
+ e.g. it can be a key in a dict.
+
+ :returns: Hash value for this instance.
+ :rtype: int
+ """
+ return hash(float(self))
+
+ def rounded_down(self) -> DiscreteWidth:
+ """Create and return new instance with float form matching int.
+
+ :returns: New instance with same int form and float form rounded down.
+ :rtype: DiscreteWidth
+ """
+ return DiscreteWidth(rounding=self.rounding, int_width=int(self))
+
+ def __add__(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return newly constructed instance with int widths added.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding something unsupported.
+
+ :param width: Value to add to int width.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {width!r}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width + int(width),
+ )
+
+ def __sub__(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return newly constructed instance with int widths subtracted.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding something unsupported.
+ Non-positive results are disallowed by constructor.
+
+ :param width: Value to subtract from int width.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {type(width)}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width - int(width),
+ )
+
+ def __mul__(self, coefficient: int) -> DiscreteWidth:
+ """Construct new instance with int value multiplied.
+
+ Rounding instance (reference) is copied from self.
+
+ :param coefficient: Constant to multiply int width with.
+ :type coefficient: int
+ :returns: New instance with multiplied int width.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: If argument value does not meet requirements.
+ """
+ if not isinstance(coefficient, int):
+ raise RuntimeError(f"Coefficient not int: {coefficient!r}")
+ if coefficient < 1:
+ raise RuntimeError(f"Coefficient not positive: {coefficient!r}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width * coefficient,
+ )
+
+ def half_rounded_down(self) -> DiscreteWidth:
+ """Contruct new instance of half the integer width.
+
+ If the current integer width is odd, round the half width down.
+
+ :returns: New instance with half int width.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: If the resulting integer width is not positive.
+ """
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width // 2,
+ )
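
The integer arithmetic is exact, which is what the reversibility of the search relies on. A small sketch (reusing the rounding instance from the previous sketch):

    w = DiscreteWidth(rounding=rounding, int_width=5)
    assert int(w * 2) == 10                       # integer multiplication, exact
    assert int((w * 2).half_rounded_down()) == 5  # halving reverses the doubling
    assert int(w.half_rounded_down()) == 2        # odd widths round the half down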
diff --git a/resources/libraries/python/MLRsearch/expander.py b/resources/libraries/python/MLRsearch/expander.py
new file mode 100644
index 0000000000..0e6800477e
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/expander.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetExpander class."""
+
+
+from dataclasses import dataclass, field
+from typing import Callable, Optional
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .global_width import GlobalWidth
+from .limit_handler import LimitHandler
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TargetedExpander:
+ """Utility class to track expanding width during external search.
+
+ One instance per selector but takes into consideration global current width.
+
+ Generally, many strategies may limit next_width immediately,
+ but next_width expands only after measurement
+ when external search fails to find its bound (global width is also bumped).
+ See strategy classes for specific details on external and internal search.
+ """
+
+ target: TargetSpec
+ """The target this strategy is focusing on."""
+ global_width: GlobalWidth
+ """Reference to the global width tracking instance."""
+ initial_lower_load: Optional[DiscreteLoad]
+ """Smaller of the two loads distinguished at instance creation.
+ Can be None if initial upper bound is the min load."""
+ initial_upper_load: Optional[DiscreteLoad]
+ """Larger of the two loads distinguished at instance creation.
+ Can be None if initial lower bound is the max load."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the class used to avoid too narrow intervals."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+ # Primary above, derived below.
+ next_width: DiscreteWidth = secondary_field()
+ """This will be used in next search step if no strategy intervenes."""
+
+ def __post_init__(self) -> None:
+ """Prepare next width."""
+ self.next_width = self.target.discrete_width
+ if self.initial_lower_load and self.initial_upper_load:
+ interval_width = self.initial_upper_load - self.initial_lower_load
+ self.next_width = max(self.next_width, interval_width)
+ self.expand(bump_global=False)
+
+ def expand(self, bump_global: bool = True) -> None:
+ """Multiply next width by expansion coefficient.
+
+ The global current width should be bumped when external search
+ is done but load is not the bound we were looking for.
+
+ For global width shrinking, set the field directly.
+
+ :param bump_global: False if called from limit or post init.
+ :type bump_global: bool
+ """
+ self.next_width *= self.target.expansion_coefficient
+ if bump_global:
+ self.global_width.width = self.next_width
+
+ def get_width(self) -> DiscreteWidth:
+ """Return next width corrected by global current width.
+
+ :returns: The width to use, see GlobalWidth.
+ :rtype: DiscreteWidth
+ """
+ return self.global_width.or_larger(self.next_width)
+
+ def limit(self, last_width: DiscreteWidth) -> None:
+ """Decrease the prepared next width.
+
+ This is called by other strategies when bounds are getting narrower.
+
+ Global current width is not updated yet,
+ as the other strategy may not end up becoming the winner
+ and we want to avoid interfering with other selector strategies.
+
+ :param last_width: As applied by other strategy, smaller of two halves.
+ :type last_width: DiscreteWidth
+ """
+ self.next_width = max(last_width, self.target.discrete_width)
+ self.expand(bump_global=False)
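
The net effect is geometric growth of the width used by external search. A hypothetical trace (the coefficient 4 and the one-unit goal width are made-up values, not defaults taken from this diff):

    goal_width, coefficient = 1, 4
    width = goal_width * coefficient   # __post_init__ already expands once
    trace = []
    for _ in range(3):                 # three external search steps find no bound
        trace.append(width)
        width *= coefficient           # expand(bump_global=True) after each miss
    assert trace == [4, 16, 64]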
diff --git a/resources/libraries/python/MLRsearch/global_width.py b/resources/libraries/python/MLRsearch/global_width.py
new file mode 100644
index 0000000000..6f7df8b894
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/global_width.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining GlobalWidth class."""
+
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+
+
+@dataclass
+class GlobalWidth:
+ """Primarily used to synchronize external search steps across selectors.
+
+ The full name is global current width, but that is too long for identifiers.
+
+ While each selector tracks its "local" (per goal) width using expander,
+ it is important we do not interleave upper external search for two goals.
+ That is why all selector instances refer to a singleton instance of this.
+
+ In general, this value remains constant when main loop iterates over
+ selectors and when selector iterates over strategies.
+ After winner is measured, this width is set to winner width value
+ and for some strategies that width is expanded when external search says so.
+
+ The two methods alone are not really worth creating a new class;
+ the main reason is having a name for type hints
+ that distinguishes this from various other "width" and "current" values.
+ """
+
+ width: DiscreteWidth
+ """Minimum width to apply at next external search step."""
+ # TODO: Add a setter, so it is easier to add debug logging.
+
+ @staticmethod
+ def from_loads(load0: DiscreteLoad, load1: DiscreteLoad) -> GlobalWidth:
+ """Initialize the value based on two loads from initial trials.
+
+ :param load0: Lower (or equal) load from the two most recent trials.
+ :param load1: Higher (or equal) load from the two most recent trials.
+ :type load0: DiscreteLoad
+ :type load1: DiscreteLoad
+ :returns: Newly created instance with computed width.
+ :rtype: GlobalWidth
+ """
+ return GlobalWidth(load1 - load0)
+
+ def or_larger(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return width from argument or self, whichever is larger.
+
+ :param width: A selector (strategy) asks if this width is large enough.
+ :type width: DiscreteWidth
+ :returns: Argument or current width.
+ :rtype: DiscreteWidth
+ """
+ return width if width > self.width else self.width
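
A short sketch of the synchronization, reusing names from the earlier DiscreteLoad sketch:

    gw = GlobalWidth.from_loads(load, higher)           # width == higher - load
    local = DiscreteWidth(rounding=rounding, int_width=1)
    assert gw.or_larger(local) is gw.width               # local width is not larger, global one is kept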
diff --git a/resources/libraries/python/MLRsearch/goal_result.py b/resources/libraries/python/MLRsearch/goal_result.py
new file mode 100644
index 0000000000..91dccec0bb
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/goal_result.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining GoalResult class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .discrete_load import DiscreteLoad
+from .relevant_bounds import RelevantBounds
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class GoalResult:
+ """Composite to be mapped for each search goal at the end of the search.
+
+ The values are stored as trimmed stats,
+ the conditional throughput is returned as a discrete load.
+ Thus, users interested only in float values have to convert explicitly.
+
+ Irregular goal results are supported as instances with a bound missing.
+ """
+
+ relevant_lower_bound: Optional[TrimmedStat]
+ """The relevant lower bound for the search goal."""
+ relevant_upper_bound: Optional[TrimmedStat]
+ """The relevant lower upper for the search goal."""
+
+ @staticmethod
+ def from_bounds(bounds: RelevantBounds) -> GoalResult:
+ """Factory, so that the call site can be shorter.
+
+ :param bounds: The relevant bounds as found in measurement database.
+ :type bounds: RelevantBounds
+ :returns: Newly created instance based on the bounds.
+ :rtype: GoalResult
+ """
+ return GoalResult(
+ relevant_lower_bound=bounds.clo,
+ relevant_upper_bound=bounds.chi,
+ )
+
+ @property
+ def conditional_throughput(self) -> Optional[DiscreteLoad]:
+ """Compute conditional throughput from the relevant lower bound.
+
+ If the relevant lower bound is missing, None is returned.
+
+ The conditional throughput has the same semantics as load,
+ so if load is unidirectional and user wants bidirectional
+ throughput, the manager has to compensate.
+
+ :returns: Conditional throughput at the relevant lower bound.
+ :rtype: Optional[DiscreteLoad]
+ """
+ if not (rlb := self.relevant_lower_bound):
+ return None
+ stat = next(iter(rlb.target_to_stat.values()))
+ return rlb * (1.0 - stat.pessimistic_loss_ratio)
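
With hypothetical numbers: if the relevant lower bound sits at 1e6 tps and the trial picked by the exceed-ratio quantile saw a 0.2% loss ratio, the computation above reduces to:

    conditional_throughput = 1_000_000 * (1.0 - 0.002)   # 998000.0 tps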
diff --git a/resources/libraries/python/MLRsearch/limit_handler.py b/resources/libraries/python/MLRsearch/limit_handler.py
new file mode 100644
index 0000000000..5919f398f3
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/limit_handler.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LimitHandler class."""
+
+from dataclasses import dataclass
+from typing import Callable, Optional
+
+from .dataclass import secondary_field
+from .discrete_interval import DiscreteInterval
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .load_rounding import LoadRounding
+
+
+@dataclass
+class LimitHandler:
+ """Encapsulated methods for logic around handling limits.
+
+ In multiple places within MLRsearch code, an intended load value
+ is only useful if it is far enough from possible known values.
+ All such places can be served with the handle method
+ with appropriate arguments.
+ """
+
+ rounding: LoadRounding
+ """Rounding instance to use."""
+ debug: Callable[[str], None]
+ """Injectable logging function."""
+ # The two fields below are derived, extracted from rounding as a shortcut.
+ min_load: DiscreteLoad = secondary_field()
+ """Minimal load, as prescribed by Config."""
+ max_load: DiscreteLoad = secondary_field()
+ """Maximal load, as prescribed by Config."""
+
+ def __post_init__(self) -> None:
+ """Initialize derived quantities."""
+ from_float = DiscreteLoad.float_conver(rounding=self.rounding)
+ self.min_load = from_float(self.rounding.min_load)
+ self.max_load = from_float(self.rounding.max_load)
+
+ def handle(
+ self,
+ load: DiscreteLoad,
+ width: DiscreteWidth,
+ clo: Optional[DiscreteLoad],
+ chi: Optional[DiscreteLoad],
+ ) -> Optional[DiscreteLoad]:
+ """Return new intended load after considering limits and bounds.
+
+ Not only do we want to avoid measuring outside the minmax interval,
+ we also want to avoid measuring too close to known limits and bounds.
+ We either round or return None, depending on hints from bound loads.
+
+ When rounding away from hard limits, we may end up being
+ too close to an already measured bound.
+ In this case, pick a midpoint between the bound and the limit.
+
+ The last two arguments are just loads (not full measurement results)
+ to allow callers to exclude some loads without measuring them.
+ As a convenience, full results are also supported,
+ so that callers do not need to care about None when extracting load.
+
+ :param load: Intended load candidate, initial or from a load selector.
+ :param width: Relative width goal, considered narrow enough for now.
+ :param clo: Intended load of current relevant lower bound.
+ :param chi: Intended load of current relevant upper bound.
+ :type load: DiscreteLoad
+ :type width: DiscreteWidth
+ :type clo: Optional[DiscreteLoad]
+ :type chi: Optional[DiscreteLoad]
+ :returns: Adjusted load to measure at, or None if narrow enough already.
+ :rtype: Optional[DiscreteLoad]
+ :raises RuntimeError: If unsupported corner case is detected.
+ """
+ if not load:
+ raise RuntimeError("Got None load to handle.")
+ load = load.rounded_down()
+ min_load, max_load = self.min_load, self.max_load
+ if clo and not clo.is_round:
+ raise RuntimeError(f"Clo {clo} should have been round.")
+ if chi and not chi.is_round:
+ raise RuntimeError(f"Chi {chi} should have been round.")
+ if not clo and not chi:
+ load = self._handle_load_with_excludes(
+ load, width, min_load, max_load, min_ex=False, max_ex=False
+ )
+ # The "return load" lines are separate from load computation,
+ # so that logging can be added more easily when debugging.
+ return load
+ if chi and not clo:
+ if chi <= min_load:
+ # Expected when hitting the min load.
+ return None
+ if load >= chi:
+ # This can happen when the mrr2 forwarding rate is rounded to mrr2.
+ return None
+ load = self._handle_load_with_excludes(
+ load, width, min_load, chi, min_ex=False, max_ex=True
+ )
+ return load
+ if clo and not chi:
+ if clo >= max_load:
+ raise RuntimeError("Lower load expected.")
+ if load <= clo:
+ raise RuntimeError("Higher load expected.")
+ load = self._handle_load_with_excludes(
+ load, width, clo, max_load, min_ex=True, max_ex=False
+ )
+ return load
+ # We have both clo and chi defined.
+ if not clo < load < chi:
+ # Happens when bisect compares with bounded extend.
+ return None
+ load = self._handle_load_with_excludes(
+ load, width, clo, chi, min_ex=True, max_ex=True
+ )
+ return load
+
+ def _handle_load_with_excludes(
+ self,
+ load: DiscreteLoad,
+ width: DiscreteWidth,
+ minimum: DiscreteLoad,
+ maximum: DiscreteLoad,
+ min_ex: bool,
+ max_ex: bool,
+ ) -> Optional[DiscreteLoad]:
+ """Adjust load if too close to limits, respecting exclusions.
+
+ This is a reusable block.
+ Limits may come from previous bounds or from hard load limits.
+ When coming from bounds, rounding to that is not allowed.
+ When coming from hard limits, rounding to the limit value
+ is allowed in general (controlled by the corresponding _ex flag).
+
+ :param load: The candidate intended load before accounting for limits.
+ :param width: Relative width of area around the limits to avoid.
+ :param minimum: The lower limit to round around.
+ :param maximum: The upper limit to round around.
+ :param min_ex: If false, rounding to the minimum is allowed.
+ :param max_ex: If false, rounding to the maximum is allowed.
+ :type load: DiscreteLoad
+ :type width: DiscreteWidth
+ :type minimum: DiscreteLoad
+ :type maximum: DiscreteLoad
+ :type min_ex: bool
+ :type max_ex: bool
+ :returns: Adjusted load value, or None if narrow enough.
+ :rtype: Optional[DiscreteLoad]
+ :raises RuntimeError: If internal inconsistency is detected.
+ """
+ if not minimum <= load <= maximum:
+ raise RuntimeError(
+ "Internal error: load outside limits:"
+ f" load {load} min {minimum} max {maximum}"
+ )
+ max_width = maximum - minimum
+ if width >= max_width:
+ self.debug("Warning: Handling called with wide width.")
+ if not min_ex:
+ self.debug("Minimum not excluded, rounding to it.")
+ return minimum
+ if not max_ex:
+ self.debug("Maximum not excluded, rounding to it.")
+ return maximum
+ self.debug("Both limits excluded, narrow enough.")
+ return None
+ soft_min = minimum + width
+ soft_max = maximum - width
+ if soft_min > soft_max:
+ self.debug("Whole interval is less than two goals.")
+ middle = DiscreteInterval(minimum, maximum).middle(width)
+ soft_min = soft_max = middle
+ if load < soft_min:
+ if min_ex:
+ self.debug("Min excluded, rounding to soft min.")
+ return soft_min
+ self.debug("Min not excluded, rounding to minimum.")
+ return minimum
+ if load > soft_max:
+ if max_ex:
+ self.debug("Max excluded, rounding to soft max.")
+ return soft_max
+ self.debug("Max not excluded, rounding to maximum.")
+ return maximum
+ # Far enough from all limits, no additional adjustment is needed.
+ return load
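
To see the intent of the helper above, a plain-integer sketch under stated simplifications (the wide-interval and middle() branches are omitted; units, limits and the toy_handle name are made up for illustration):

    def toy_handle(load, width, lo, hi, min_ex, max_ex):
        """Round a candidate away from limits, as in _handle_load_with_excludes."""
        soft_lo, soft_hi = lo + width, hi - width
        if load < soft_lo:
            return soft_lo if min_ex else lo   # excluded bound: keep a width of distance
        if load > soft_hi:
            return soft_hi if max_ex else hi   # hard limit: rounding onto it is fine
        return load                            # far enough from both, keep as is

    assert toy_handle(3, 10, 0, 100, min_ex=False, max_ex=True) == 0
    assert toy_handle(3, 10, 0, 100, min_ex=True, max_ex=True) == 10
    assert toy_handle(97, 10, 0, 100, min_ex=True, max_ex=True) == 90
    assert toy_handle(50, 10, 0, 100, min_ex=True, max_ex=True) == 50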
diff --git a/resources/libraries/python/MLRsearch/load_rounding.py b/resources/libraries/python/MLRsearch/load_rounding.py
new file mode 100644
index 0000000000..0ac4487be9
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/load_rounding.py
@@ -0,0 +1,205 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadRounding class."""
+
+import math
+
+from dataclasses import dataclass
+from typing import List, Tuple
+
+from .dataclass import secondary_field
+
+
+@dataclass
+class LoadRounding:
+ """Class encapsulating stateful utilities that round intended load values.
+
+ For MLRsearch algorithm logic to be correct, it is important that
+ interval width expansion and narrowing are exactly reversible,
+ which is not true in general for floating point number arithmetics.
+
+ This class offers conversion to and from an integer quantity.
+ Operations in the integer realm are guaranteed to be reversible,
+ so the only risk is when converting between float and integer realm.
+
+ Which relative width corresponds to the unit integer
+ is computed in initialization from width goals,
+ striking a balance between memory requirements and precision.
+
+ There are two quality knobs. One restricts how far
+ an integer can be from the exact float value.
+ The other restricts how close it can be. That is to make sure
+ even with unpredictable rounding errors during the conversion,
+ the converted integer value is never bigger than the intended float value,
+ to ensure the intervals returned from MLRsearch will always
+ meet the relative width goal.
+
+ An instance of this class is mutable only in the sense it contains
+ a growing cache of previously computed values.
+ """
+
+ # TODO: Hide the cache and present as frozen hashable object.
+
+ min_load: float
+ """Minimal intended load [tps] to support, must be positive."""
+ max_load: float
+ """Maximal intended load [tps] to support, must be bigger than min load."""
+ float_goals: Tuple[float]
+ """Relative width goals to approximate, each must be positive
+ and smaller than one. Deduplicated and sorted in post init."""
+ quality_lower: float = 0.99
+ """Minimal multiple of each goal to be achievable."""
+ quality_upper: float = 0.999999
+ """Maximal multiple of each goal to be achievable."""
+ # Primary fields above, computed fields below.
+ max_int_load: int = secondary_field()
+ """Integer for max load (min load int is zero)."""
+ _int2load: List[Tuple[int, float]] = secondary_field()
+ """Known int values (sorted) and their float equivalents."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, perform checks, initialize conversion structures.
+
+ :raises RuntimeError: If a requirement is not met.
+ """
+ self.min_load = float(self.min_load)
+ self.max_load = float(self.max_load)
+ if not 0.0 < self.min_load < self.max_load:
+ raise RuntimeError("Load limits not supported: {self}")
+ self.quality_lower = float(self.quality_lower)
+ self.quality_upper = float(self.quality_upper)
+ if not 0.0 < self.quality_lower < self.quality_upper < 1.0:
+ raise RuntimeError("Qualities not supported: {self}")
+ goals = []
+ for goal in self.float_goals:
+ goal = float(goal)
+ if not 0.0 < goal < 1.0:
+ raise RuntimeError(f"Goal width {goal} is not supported.")
+ goals.append(goal)
+ self.float_goals = tuple(sorted(set(goals)))
+ self.max_int_load = self._find_ints()
+ self._int2load = []
+ self._int2load.append((0, self.min_load))
+ self._int2load.append((self.max_int_load, self.max_load))
+
+ def _find_ints(self) -> int:
+ """Find and return value for max_int_load.
+
+ Separated out of post init, as this is less conversion and checking,
+ and more math and searching.
+
+ A dumb implementation would start with 1 and keep increasing by 1
+ until all goals are within quality limits.
+ The actual implementation is smarter with the increment,
+ so it is expected to find the resulting values somewhat faster.
+
+ :returns: Value to be stored as max_int_load.
+ :rtype: int
+ """
+ minmax_log_width = math.log(self.max_load) - math.log(self.min_load)
+ log_goals = [-math.log1p(-goal) for goal in self.float_goals]
+ candidate = 1
+ while 1:
+ log_width_unit = minmax_log_width / candidate
+ # Fallback to increment by one if rounding errors make tries bad.
+ next_tries = [candidate + 1]
+ acceptable = True
+ for log_goal in log_goals:
+ units = log_goal / log_width_unit
+ int_units = math.floor(units)
+ quality = int_units / units
+ if not self.quality_lower <= quality <= self.quality_upper:
+ acceptable = False
+ target = (int_units + 1) / self.quality_upper
+ next_try = (target / units) * candidate
+ next_tries.append(next_try)
+ # Else quality acceptable, not bumping the candidate.
+ if acceptable:
+ return candidate
+ candidate = int(math.ceil(max(next_tries)))
+
+ def int2float(self, int_load: int) -> float:
+ """Convert from int to float tps load. Expand internal table as needed.
+
+ Too low or too high ints result in min or max load respectively.
+
+ :param int_load: Integer quantity to turn back into float load.
+ :type int_load: int
+ :returns: Converted load in tps.
+ :rtype: float
+ :raises RuntimeError: If internal inconsistency is detected.
+ """
+ if int_load <= 0:
+ return self.min_load
+ if int_load >= self.max_int_load:
+ return self.max_load
+ lo_index, hi_index = 0, len(self._int2load)
+ lo_int, hi_int = 0, self.max_int_load
+ lo_load, hi_load = self.min_load, self.max_load
+ while hi_int - lo_int >= 2:
+ mid_index = (hi_index + lo_index + 1) // 2
+ if mid_index >= hi_index:
+ mid_int = (hi_int + lo_int) // 2
+ log_coeff = math.log(hi_load) - math.log(lo_load)
+ log_coeff *= (mid_int - lo_int) / (hi_int - lo_int)
+ mid_load = lo_load * math.exp(log_coeff)
+ self._int2load.insert(mid_index, (mid_int, mid_load))
+ hi_index += 1
+ mid_int, mid_load = self._int2load[mid_index]
+ if mid_int < int_load:
+ lo_index, lo_int, lo_load = mid_index, mid_int, mid_load
+ continue
+ if mid_int > int_load:
+ hi_index, hi_int, hi_load = mid_index, mid_int, mid_load
+ continue
+ return mid_load
+ raise RuntimeError("Bisect in int2float failed.")
+
+ def float2int(self, float_load: float) -> int:
+ """Convert and round from tps load to int. Maybe expand internal table.
+
+ Too low or too high loads result in zero or max int respectively.
+
+ Result value is rounded down to an integer.
+
+ :param float_load: Tps quantity to convert into int.
+ :type float_load: float
+ :returns: Converted integer value suitable for halving.
+ :rtype: int
+ """
+ if float_load <= self.min_load:
+ return 0
+ if float_load >= self.max_load:
+ return self.max_int_load
+ lo_index, hi_index = 0, len(self._int2load)
+ lo_int, hi_int = 0, self.max_int_load
+ lo_load, hi_load = self.min_load, self.max_load
+ while hi_int - lo_int >= 2:
+ mid_index = (hi_index + lo_index + 1) // 2
+ if mid_index >= hi_index:
+ mid_int = (hi_int + lo_int) // 2
+ log_coeff = math.log(hi_load) - math.log(lo_load)
+ log_coeff *= (mid_int - lo_int) / (hi_int - lo_int)
+ mid_load = lo_load * math.exp(log_coeff)
+ self._int2load.insert(mid_index, (mid_int, mid_load))
+ hi_index += 1
+ mid_int, mid_load = self._int2load[mid_index]
+ if mid_load < float_load:
+ lo_index, lo_int, lo_load = mid_index, mid_int, mid_load
+ continue
+ if mid_load > float_load:
+ hi_index, hi_int, hi_load = mid_index, mid_int, mid_load
+ continue
+ return mid_int
+ return lo_int
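
Ignoring the lazily grown table and the rounding-down correction, the mapping the class maintains is a geometric interpolation between min and max load. A cache-free sketch of the same curve (illustrative only, with a hypothetical helper name; not the class's implementation):

    def int2float_sketch(int_load, min_load, max_load, max_int_load):
        """Geometric interpolation: int_load-th of max_int_load equal log-width steps."""
        ratio = max_load / min_load
        return min_load * ratio ** (int_load / max_int_load)

    # float2int rounds down, so converting a float load to int and back
    # never overshoots the original value, keeping reported intervals narrow enough.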
diff --git a/resources/libraries/python/MLRsearch/load_stats.py b/resources/libraries/python/MLRsearch/load_stats.py
new file mode 100644
index 0000000000..5f4757f488
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/load_stats.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadStats class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .target_spec import TargetSpec
+from .target_stat import TargetStat
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+
+
+# The eq=False part is needed to make sure comparison is inherited properly.
+@dataclass(eq=False)
+class LoadStats(DiscreteLoad):
+ """An offered load together with stats for all possible targets.
+
+ As LoadStats is frequently passed instead of plain DiscreteLoad,
+ equality and ordering are dictated by the float load.
+ """
+
+ target_to_stat: Dict[TargetSpec, TargetStat] = None
+ """Mapping from target specification to its current stat for this load."""
+
+ def __post_init__(self) -> None:
+ """Initialize load value and check there are targets to track."""
+ super().__post_init__()
+ if not self.target_to_stat:
+ raise ValueError(f"No targets: {self.target_to_stat!r}")
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ This works well only for trimmed stats,
+ as only the stat for the first target present is shown.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"fl={self.float_load}"
+ f",s=({next(iter(self.target_to_stat.values()))})"
+ )
+
+ def __hash__(self) -> int:
+ """Raise as stats are mutable by definition.
+
+ :returns: Hash value for this instance if possible.
+ :rtype: int
+ :raises TypeError: Not immutable.
+ """
+ raise TypeError("Loadstats are mutable so constant hash is impossible.")
+
+ def add(self, result: DiscreteResult) -> None:
+ """Take into account one more trial measurement result.
+
+ :param result: The result to take into account.
+ :type result: DiscreteResult
+ :raises RuntimeError: If the result load is not equal to the load of self.
+ """
+ if result.intended_load != float(self):
+ raise RuntimeError(
+ f"Attempting to add load {result.intended_load}"
+ f" to result set for {float(self)}"
+ )
+ for stat in self.target_to_stat.values():
+ stat.add(result)
+
+ @staticmethod
+ def new_empty(load: DiscreteLoad, targets: Tuple[TargetSpec]) -> LoadStats:
+ """Factory method to initialize mapping for given targets.
+
+ :param load: The intended load value for the new instance.
+ :param targets: The target specifications to track stats for.
+ :type load: DiscreteLoad
+ :type targets: Tuple[TargetSpec]
+ :returns: New instance with empty stats initialized.
+ :rtype: LoadStats
+ :raises ValueError: If the load is not round.
+ """
+ if not load.is_round:
+ raise ValueError(f"Not round: {load!r}")
+ return LoadStats(
+ rounding=load.rounding,
+ int_load=int(load),
+ target_to_stat={target: TargetStat(target) for target in targets},
+ )
+
+ def estimates(self, target: TargetSpec) -> Tuple[bool, bool]:
+ """Classify this load according to given target.
+
+ :param target: According to which target this should be classified.
+ :type target: TargetSpec
+ :returns: Tuple of two estimates whether load can be lower bound.
+ (True, False) means target is not reached yet.
+ :rtype: Tuple[bool, bool]
+ """
+ return self.target_to_stat[target].estimates()
diff --git a/resources/libraries/python/MLRsearch/measurement_database.py b/resources/libraries/python/MLRsearch/measurement_database.py
new file mode 100644
index 0000000000..7a6618c0da
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/measurement_database.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MeasurementDatabase class."""
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+from .load_stats import LoadStats
+from .relevant_bounds import RelevantBounds
+from .target_spec import TargetSpec
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class MeasurementDatabase:
+ """Structure holding measurement results for multiple durations and loads.
+
+ Several utility methods are added, accomplishing tasks useful for MLRsearch.
+
+ While TargetStat can decide when a single load is a lower bound (or upper),
+ it does not deal with loss inversion (a higher load with a smaller loss).
+
+ This class introduces the concept of relevant bounds.
+ Relevant upper bound is simply the lowest load classified as an upper bound.
+ But relevant lower bound is only chosen from lower bound loads
+ strictly smaller than the relevant upper bound.
+ This way any higher loads with good results are ignored,
+ so relevant bounds give a conservative estimate of the SUT's true performance.
+ """
+
+ targets: Tuple[TargetSpec] = None
+ """Targets to track stats for."""
+ load_to_stats: Dict[DiscreteLoad, LoadStats] = None
+ """Mapping from loads to stats."""
+
+ def __post_init__(self) -> None:
+ """Check and sort initial values.
+
+ If no stats yet, initialize empty ones.
+
+ :raises ValueError: If there are no targets.
+ """
+ if not self.targets:
+ raise ValueError(f"Database needs targets: {self.targets!r}")
+ if not self.load_to_stats:
+ self.load_to_stats = {}
+ self._sort()
+
+ def _sort(self) -> None:
+ """Sort keys from low to high load."""
+ self.load_to_stats = dict(sorted(self.load_to_stats.items()))
+
+ def __getitem__(self, key: DiscreteLoad) -> LoadStats:
+ """Allow access to stats as if self was load_to_stats.
+
+ This also accepts LoadStats as key, so callers do not need
+ to care about hashability.
+
+ :param key: The load to get stats for.
+ :type key: DiscreteLoad
+ :returns: Stats for the given load.
+ :rtype: LoadStats
+ """
+ return self.load_to_stats[key.hashable()]
+
+ def add(self, result: DiscreteResult) -> None:
+ """Incorporate given trial measurement result.
+
+ :param result: Measurement result to add to the database.
+ :type result: DiscreteResult
+ """
+ discrete_load = result.discrete_load.hashable()
+ if not discrete_load.is_round:
+ raise ValueError(f"Not round load: {discrete_load!r}")
+ if discrete_load not in self.load_to_stats:
+ self.load_to_stats[discrete_load] = LoadStats.new_empty(
+ load=discrete_load,
+ targets=self.targets,
+ )
+ self._sort()
+ self.load_to_stats[discrete_load].add(result)
+
+ def get_relevant_bounds(self, target: TargetSpec) -> RelevantBounds:
+ """Return None or a valid trimmed stat, for the two relevant bounds.
+
+ A load is valid only if both optimistic and pessimistic estimates agree.
+
+ If some value is not available, None is returned instead.
+ The returned stats are trimmed to the argument target.
+
+ The implementation starts from low loads
+ and the search stops at the lowest upper bound,
+ thus conforming to the conservative definition of relevant bounds.
+
+ :param target: Target to classify loads when finding bounds.
+ :type target: TargetSpec
+ :returns: Relevant lower bound, relevant upper bound.
+ :rtype: RelevantBounds
+ """
+ lower_bound, upper_bound = None, None
+ for load_stats in self.load_to_stats.values():
+ opt, pes = load_stats.estimates(target)
+ if opt != pes:
+ continue
+ if not opt:
+ upper_bound = load_stats
+ break
+ lower_bound = load_stats
+ if lower_bound:
+ lower_bound = TrimmedStat.for_target(lower_bound, target)
+ if upper_bound:
+ upper_bound = TrimmedStat.for_target(upper_bound, target)
+ return RelevantBounds(clo=lower_bound, chi=upper_bound)
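
The selection rule can be traced on made-up classifications (True marks a load both estimates agree is a lower bound, False an upper bound, None a disagreement):

    classified = [(10, True), (20, None), (30, True), (40, False), (50, True)]
    lower = upper = None
    for load, verdict in classified:
        if verdict is None:
            continue                  # optimistic and pessimistic estimates disagree
        if not verdict:
            upper = load              # the lowest upper bound wins ...
            break                     # ... and everything above it is ignored
        lower = load
    assert (lower, upper) == (30, 40)  # the "good" 50 is dropped due to loss inversion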
diff --git a/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
new file mode 100644
index 0000000000..4d3ff7c4cb
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
@@ -0,0 +1,325 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MultipleLossRatioSearch class."""
+
+import logging
+import time
+
+from dataclasses import dataclass
+from typing import Callable, Optional, Tuple
+
+from .candidate import Candidate
+from .config import Config
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+from .expander import GlobalWidth
+from .goal_result import GoalResult
+from .limit_handler import LimitHandler
+from .load_rounding import LoadRounding
+from .measurement_database import MeasurementDatabase
+from .pep3140 import Pep3140Dict
+from .search_goal import SearchGoal
+from .selector import Selector
+from .target_scaling import TargetScaling
+from .trial_measurement import AbstractMeasurer
+
+
+@dataclass
+class MultipleLossRatioSearch:
+ """Implementation of the controller part of MLRsearch algorithm.
+
+ The manager part is creating and calling this,
+ the measurer part is injected.
+
+ Traditional binary search algorithm needs initial interval
+ (lower and upper bound), and returns final narrow bounds
+ (related to its search goal) after bisecting
+ (until some exit condition is met).
+ The exit condition is usually related to the interval width,
+ (upper bound value minus lower bound value).
+
+ The optimized algorithm in this class contains several improvements
+ aimed to reduce overall search time.
+
+ One improvement is searching for bounds for multiple search goals at once.
+ Specifically, the trial measurement results influence bounds for all goals,
+ even though the selection of trial inputs for the next measurement
+ focuses only on one goal. The focus can switch between goals frequently.
+
+ Next improvement is that results of trial measurements
+ with small trial duration can be used to find a reasonable starting interval
+ for full trial duration search.
+ This results in more trials performed, but smaller overall duration
+ in general.
+ Internally, such shorter trials come from "preceding targets",
+ handled in the same way as a search goal "final target".
+ Related improvement is that the "current" interval does not need to be valid
+ (e.g. one of the bounds is missing).
+ In that case, this algorithm will move and expand the interval,
+ in a process called external search. Only when both bounds are found,
+ the interval bisection (called internal search) starts making it narrow.
+
+ Next improvement is bisecting in logarithmic quantities,
+ so that target relative width is independent of measurement units.
+
+ Next improvement is basing the initial interval on forwarding rates
+ of a few initial measurements, starting at max load and using forwarding rates
+ seen so far.
+
+ Next improvement is to allow the use of multiple shorter trials
+ instead of one big trial, allowing a percentage of trials
+ to exceed the loss ratio target.
+ This makes the result more stable in practice.
+ Conservative behavior (single long trial, zero exceed ratio)
+ is still available using corresponding goal definitions.
+
+ Final improvement is exiting early if the minimal load
+ is not a valid lower bound (at final duration)
+ and also exiting if the overall search duration is too long.
+
+ There are also subtle optimizations related to candidate selection
+ and uneven splitting of intervals, too numerous to list here.
+
+ The return values describe performance at the relevant lower bound
+    as "conditional throughput", which is based on the loss ratio of one of the trials
+    selected as a quantile based on the exceed ratio parameter.
+    This value is usually quite pessimistic, as MLRsearch stops
+    measuring a load as soon as it becomes a lower bound,
+    so the conditional throughput is usually based on the forwarding rate
+    of the worst of the good long trials.
+ """
+
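+    # Illustrative manager-side usage (a sketch added for clarity, not part of
+    # this change; Config field names are inferred from their use in search()):
+    #     config = Config(goals=goals, min_load=min_rate, max_load=max_rate)
+    #     controller = MultipleLossRatioSearch(config=config)
+    #     results = controller.search(measurer=measurer)
+    #     # results maps each SearchGoal to its GoalResult.
+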
+ config: Config
+ """Arguments required at construction time."""
+    # End of fields required at instance creation.
+ measurer: AbstractMeasurer = secondary_field()
+ """Measurer to use, set at calling search()."""
+ debug: Callable[[str], None] = secondary_field()
+ """Object to call for logging, None means logging.debug."""
+ # Fields below are computed from data above
+ rounding: LoadRounding = secondary_field()
+ """Derived from goals. Instance to use for intended load rounding."""
+ from_float: Callable[[float], DiscreteLoad] = secondary_field()
+ """Conversion method from float [tps] intended load values."""
+ limit_handler: LimitHandler = secondary_field()
+ """Load post-processing utility based on config and rounding."""
+ scaling: TargetScaling = secondary_field()
+ """Utility for creating target chains for search goals."""
+ database: MeasurementDatabase = secondary_field()
+ """Storage for (stats of) measurement results so far."""
+ stop_time: float = secondary_field()
+ """Monotonic time value at which the search should end with failure."""
+
+ def search(
+ self,
+ measurer: AbstractMeasurer,
+ debug: Optional[Callable[[str], None]] = None,
+ ) -> Pep3140Dict[SearchGoal, GoalResult]:
+ """Perform initial trials, create state object, proceed with main loop.
+
+ Stateful arguments (measurer and debug) are stored.
+ Derived objects are constructed from config.
+
+ :param measurer: Measurement provider to use by this search object.
+ :param debug: Callable to optionally use instead of logging.debug().
+ :type measurer: AbstractMeasurer
+ :type debug: Optional[Callable[[str], None]]
+ :returns: Structure containing conditional throughputs and other stats,
+ one for each search goal. If a value is None it means there is
+ no lower bound (min load turned out to be an upper bound).
+ :rtype: Pep3140Dict[SearchGoal, GoalResult]
+ :raises RuntimeError: If total duration is larger than timeout,
+ or if min load becomes an upper bound for a search goal
+            that has fail_fast set to True.
+ """
+ self.measurer = measurer
+ self.debug = logging.debug if debug is None else debug
+ self.rounding = LoadRounding(
+ min_load=self.config.min_load,
+ max_load=self.config.max_load,
+ float_goals=[goal.relative_width for goal in self.config.goals],
+ )
+ self.from_float = DiscreteLoad.float_conver(rounding=self.rounding)
+ self.limit_handler = LimitHandler(
+ rounding=self.rounding,
+ debug=self.debug,
+ )
+ self.scaling = TargetScaling(
+ goals=self.config.goals,
+ rounding=self.rounding,
+ )
+ self.database = MeasurementDatabase(self.scaling.targets)
+ self.stop_time = time.monotonic() + self.config.search_duration_max
+ result0, result1 = self.run_initial_trials()
+ self.main_loop(result0.discrete_load, result1.discrete_load)
+ ret_dict = Pep3140Dict()
+ for goal in self.config.goals:
+ target = self.scaling.goal_to_final_target[goal]
+ bounds = self.database.get_relevant_bounds(target=target)
+ ret_dict[goal] = GoalResult.from_bounds(bounds=bounds)
+ return ret_dict
+
+ def measure(self, duration: float, load: DiscreteLoad) -> DiscreteResult:
+ """Call measurer and put the result to appropriate form in database.
+
+ Also check the argument types and load roundness,
+ and return the result to the caller.
+
+ :param duration: Intended duration for the trial measurement.
+        :param load: Intended load for the trial measurement.
+ :type duration: float
+ :type load: DiscreteLoad
+ :returns: The trial results.
+ :rtype: DiscreteResult
+        :raises RuntimeError: If an argument does not have the required type.
+ """
+ if not isinstance(duration, float):
+ raise RuntimeError(f"Duration has to be float: {duration!r}")
+ if not isinstance(load, DiscreteLoad):
+ raise RuntimeError(f"Load has to be discrete: {load!r}")
+ if not load.is_round:
+ raise RuntimeError(f"Told to measure unrounded: {load!r}")
+ self.debug(f"Measuring at d={duration},il={int(load)}")
+ result = self.measurer.measure(
+ intended_duration=duration,
+ intended_load=float(load),
+ )
+ self.debug(f"Measured lr={result.loss_ratio}")
+ result = DiscreteResult.with_load(result=result, load=load)
+ self.database.add(result)
+ return result
+
+ def run_initial_trials(self) -> Tuple[DiscreteResult, DiscreteResult]:
+ """Perform trials to get enough data to start the selectors.
+
+ Measurements are done with all initial targets in mind,
+ based on smallest target loss ratio, largest initial trial duration,
+ and largest initial target width.
+
+ Forwarding rate is used as a hint for next intended load.
+ The relative quantity is used, as load can use different units.
+ When the smallest target loss ratio is non-zero, a correction is needed
+ (forwarding rate is only a good hint for zero loss ratio load).
+ The correction is conservative (all increase in load turns to losses).
+
+ Also, warmup trial (if configured) is performed,
+ all other trials are added to the database.
+
+        This could return the initial width, but from an implementation perspective
+ it is easier to return two measurements (or the same one twice) here
+ and compute width later. The "one value twice" happens when max load
+ has small loss, or when min load has big loss.
+
+ :returns: Two last measured values, in any order. Or one value twice.
+ :rtype: Tuple[DiscreteResult, DiscreteResult]
+ """
+ max_load = self.limit_handler.max_load
+ ratio, duration, width = None, None, None
+ for target in self.scaling.targets:
+ if target.preceding:
+ continue
+ if ratio is None or ratio > target.loss_ratio:
+ ratio = target.loss_ratio
+ if not duration or duration < target.trial_duration:
+ duration = target.trial_duration
+ if not width or width < target.discrete_width:
+ width = target.discrete_width
+ self.debug(f"Init ratio {ratio} duration {duration} width {width}")
+ if self.config.warmup_duration:
+ self.debug("Warmup trial.")
+ self.measure(self.config.warmup_duration, max_load)
+ # Warmup should not affect the real results, reset the database.
+ self.database = MeasurementDatabase(self.scaling.targets)
+ self.debug(f"First trial at max rate: {max_load}")
+ result0 = self.measure(duration, max_load)
+ rfr = result0.relative_forwarding_rate
+ corrected_rfr = (self.from_float(rfr) / (1.0 - ratio)).rounded_down()
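+        # Worked example (comment added for clarity): with a relative forwarding
+        # rate equivalent to 9.9 Mpps and a smallest goal loss ratio of 0.005,
+        # the corrected hint is 9.9 / (1 - 0.005) ~= 9.95 Mpps (then rounded down).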
+ if corrected_rfr >= max_load:
+ self.debug("Small loss, no other initial trials are needed.")
+ return result0, result0
+ mrr = self.limit_handler.handle(corrected_rfr, width, None, max_load)
+ self.debug(f"Second trial at (corrected) mrr: {mrr}")
+ result1 = self.measure(duration, mrr)
+ # Attempt to get narrower width.
+ result_ratio = result1.loss_ratio
+ if result_ratio > ratio:
+ rfr2 = result1.relative_forwarding_rate
+ crfr2 = (self.from_float(rfr2) / (1.0 - ratio)).rounded_down()
+ mrr2 = self.limit_handler.handle(crfr2, width, None, mrr)
+ else:
+ mrr2 = mrr + width
+ mrr2 = self.limit_handler.handle(mrr2, width, mrr, max_load)
+ if not mrr2:
+ self.debug("Close enough, measuring at mrr2 is not needed.")
+ return result1, result1
+ self.debug(f"Third trial at (corrected) mrr2: {mrr2}")
+ result2 = self.measure(duration, mrr2)
+ return result1, result2
+
+ def main_loop(self, load0: DiscreteLoad, load1: DiscreteLoad) -> None:
+ """Initialize selectors and keep measuring the winning candidate.
+
+ Selectors are created, the two input loads are useful starting points.
+
+ The search ends when no selector nominates any candidate,
+ or if the search takes too long (or if a selector raises).
+
+ Winner is selected according to ordering defined in Candidate class.
+ In case of a tie, selectors for earlier goals are preferred.
+
+ As a selector is only allowed to update current width as the winner,
+ the update is done here explicitly.
+
+ :param load0: Discrete load of one of results from run_initial_trials.
+ :param load1: Discrete load of other of results from run_initial_trials.
+ :type load0: DiscreteLoad
+ :type load1: DiscreteLoad
+ :raises RuntimeError: If the search takes too long,
+            or if min load becomes an upper bound for any search goal.
+ """
+ if load1 < load0:
+ load0, load1 = load1, load0
+ global_width = GlobalWidth.from_loads(load0, load1)
+ selectors = []
+ for target in self.scaling.goal_to_final_target.values():
+ selector = Selector(
+ final_target=target,
+ global_width=global_width,
+ initial_lower_load=load0,
+ initial_upper_load=load1,
+ database=self.database,
+ handler=self.limit_handler,
+ debug=self.debug,
+ )
+ selectors.append(selector)
+ while time.monotonic() < self.stop_time:
+ winner = Candidate()
+ for selector in selectors:
+ # Order of arguments is important
+ # when two targets nominate the same candidate.
+ winner = min(Candidate.nomination_from(selector), winner)
+ if not winner:
+ break
+ # We do not check duration versus stop_time here,
+ # as some measurers can be unpredictably faster
+ # than their intended duration suggests.
+ self.measure(duration=winner.duration, load=winner.load)
+ # Delayed updates.
+ if winner.width:
+ global_width.width = winner.width
+ winner.won()
+ else:
+ raise RuntimeError("Optimized search takes too long.")
+ self.debug("Search done.")
diff --git a/resources/libraries/python/MLRsearch/pep3140/__init__.py b/resources/libraries/python/MLRsearch/pep3140/__init__.py
new file mode 100644
index 0000000000..f8e2ffaa8f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/pep3140/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "pep3140".
+"""
+
+# TODO: Move submodules to separate modules.
+# Not obvious how to do that from PyPI point of view
+# without affecting the current CSIT global "resources" package root.
+# Probably it can be done by specifying multiple directories
+# in PYTHONPATH used throughout CSIT.
+
+from .classes import Pep3140Dict
diff --git a/resources/libraries/python/MLRsearch/pep3140/classes.py b/resources/libraries/python/MLRsearch/pep3140/classes.py
new file mode 100644
index 0000000000..9ab6e25c7c
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/pep3140/classes.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining a subclass of dict with an alternative str method."""
+
+
+class Pep3140Dict(dict):
+ """A dict with str support as proposed in PEP 3140.
+
+ Python implemented str acting on dict such that the resulting string
+ shows both keys and values in their repr form.
+ Therefore, str() of a dict gives the same result as repr().
+
+ This class shows both keys and values their str form instead.
+ """
+
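+    # Illustrative behaviour (comment added for clarity, not in the original change):
+    #     str(Pep3140Dict({"a": 1, "b": 2}))   returns  '{a: 1, b: 2}'
+    #     repr(Pep3140Dict({"a": 1, "b": 2}))  returns  "{'a': 1, 'b': 2}"
+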
+ def __str__(self) -> str:
+ """Return comma+space separated str of items in curly brackets.
+
+ :returns: PEP 3140 string form of the dict data.
+ :rtype: str
+ """
+ body = ", ".join(f"{key}: {value}" for key, value in self.items())
+ return f"{{{body}}}"
diff --git a/resources/libraries/python/MLRsearch/relevant_bounds.py b/resources/libraries/python/MLRsearch/relevant_bounds.py
new file mode 100644
index 0000000000..4bc6796f71
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/relevant_bounds.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RelevantBounds class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class RelevantBounds:
+ """Container for the pair of relevant bounds for a target.
+
+ If there is no valid bound, None is used.
+
+ Relevant upper bound is smallest load acting as an upper bound.
+ Relevant lower bound acts as a lower bound, has to be strictly smaller
+ than the relevant upper bound, and is largest among such loads.
+
+ The short names "clo" and "chi" are also commonly used
+ in logging and technical comments.
+
+    Trimming could be done here, but it needs to know the target explicitly,
+ so it is done in MeasurementDatabase instead.
+ """
+
+ clo: Optional[TrimmedStat]
+ """The relevant lower bound (trimmed) for the current target."""
+ chi: Optional[TrimmedStat]
+ """The relevant upper bound (trimmed) for the current target."""
+
+ # TODO: Check types in post init?
+
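+    # Illustrative rendering (comment added for clarity, not in the original
+    # change): with only a lower bound found, str() yields e.g. "clo=14880952,chi=None".
+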
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ clo = int(self.clo) if self.clo else None
+ chi = int(self.chi) if self.chi else None
+ return f"clo={clo},chi={chi}"
diff --git a/resources/libraries/python/MLRsearch/search_goal.py b/resources/libraries/python/MLRsearch/search_goal.py
new file mode 100644
index 0000000000..777ad5b991
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/search_goal.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining SearchGoal class."""
+
+from dataclasses import dataclass
+
+
+@dataclass(frozen=True, eq=True)
+class SearchGoal:
+ """Storage class for search goal attributes.
+
+ This is the part of controller inputs that can be repeated
+ with different values. MLRsearch saves time by searching
+ for conditional throughput for each goal at the same time,
+ compared to repeated calls with separate goals.
+
+ Most fields (called attributes) of this composite
+ are relevant to the definition of conditional throughput.
+    The rest do not, but they can affect the overall search time.
+ """
+
+ loss_ratio: float = 0.0
+ """The goal loss ratio.
+ A trial can satisfy the goal only when its trial loss ratio is not higher
+ than this. See MeasurementResult.loss_ratio for details.
+ A trial that does not satisfy this goal is called a bad trial."""
+ exceed_ratio: float = 0.5
+    """What portion of the duration sum can consist of bad trial seconds
+    while the load is still classified as a lower bound (assuming no short trials)."""
+ relative_width: float = 0.005
+    """The target is achieved when the relevant lower bound
+    is no farther than this (in units of the tightest upper bound)
+    from the relevant upper bound."""
+ initial_trial_duration: float = 1.0
+ """Shortest trial duration employed when searching for this goal."""
+ final_trial_duration: float = 1.0
+ """Longest trial duration employed when searching for this goal."""
+ duration_sum: float = 21.0
+ """Minimal sum of durations of relevant trials sufficient to declare a load
+ to be upper or lower bound for this goal."""
+ preceding_targets: int = 2
+ """Number of increasingly coarser search targets to insert,
+ hoping to speed up searching for the final target of this goal."""
+ expansion_coefficient: int = 2
+ """External search multiplies width (in logarithmic space) by this."""
+ fail_fast: bool = True
+ """If true and min load is not an upper bound, raise.
+ If false, search will return None instead of lower bound."""
+
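+    # Illustrative instantiation (comment added for clarity, not in the original
+    # change): a conservative goal using a single long trial and zero exceed ratio.
+    #     SearchGoal(loss_ratio=0.0, exceed_ratio=0.0, relative_width=0.005,
+    #                initial_trial_duration=60.0, final_trial_duration=60.0,
+    #                duration_sum=60.0, preceding_targets=0)
+    # These values pass validate(): both ratios are in [0, 1), the width is in
+    # (0, 1), and the duration sum is not smaller than the final trial duration.
+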
+ def __post_init__(self) -> None:
+ """Convert fields to correct types and call validate."""
+ super().__setattr__("loss_ratio", float(self.loss_ratio))
+ super().__setattr__("exceed_ratio", float(self.exceed_ratio))
+ super().__setattr__("relative_width", float(self.relative_width))
+ super().__setattr__(
+ "final_trial_duration", float(self.final_trial_duration)
+ )
+ super().__setattr__(
+ "initial_trial_duration", float(self.initial_trial_duration)
+ )
+ super().__setattr__("duration_sum", float(self.duration_sum))
+ super().__setattr__("preceding_targets", int(self.preceding_targets))
+ super().__setattr__(
+ "expansion_coefficient", int(self.expansion_coefficient)
+ )
+ super().__setattr__("fail_fast", bool(self.fail_fast))
+ self.validate()
+
+ def validate(self) -> None:
+ """Make sure the initialized values conform to requirements.
+
+ :raises ValueError: If a field value is outside allowed bounds.
+ """
+ if self.loss_ratio < 0.0:
+ raise ValueError(f"Loss ratio cannot be negative: {self}")
+ if self.loss_ratio >= 1.0:
+ raise ValueError(f"Loss ratio must be lower than 1: {self}")
+ if self.exceed_ratio < 0.0:
+ raise ValueError(f"Exceed ratio cannot be negative: {self}")
+ if self.exceed_ratio >= 1.0:
+ raise ValueError(f"Exceed ratio must be lower than 1: {self}")
+ if self.relative_width <= 0.0:
+ raise ValueError(f"Relative width must be positive: {self}")
+ if self.relative_width >= 1.0:
+ raise ValueError(f"Relative width must be less than 1: {self}")
+ if self.initial_trial_duration <= 0.0:
+ raise ValueError(f"Initial trial duration must be positive: {self}")
+ if self.final_trial_duration < self.initial_trial_duration:
+ raise ValueError(
+ f"Single duration max must be at least initial: {self}"
+ )
+ if self.duration_sum < self.final_trial_duration:
+ raise ValueError(
+ "Min duration sum cannot be smaller"
+ f" than final trial duration: {self}"
+ )
+ if self.expansion_coefficient <= 1:
+ raise ValueError(f"Expansion coefficient is too small: {self}")
+ too_small = False
+ if self.preceding_targets < 0:
+ too_small = True
+ elif self.preceding_targets < 1:
+ if self.initial_trial_duration < self.duration_sum:
+ too_small = True
+ if too_small:
+ raise ValueError(
+ f"Number of preceding targets is too small: {self}"
+ )
diff --git a/resources/libraries/python/MLRsearch/search_goal_tuple.py b/resources/libraries/python/MLRsearch/search_goal_tuple.py
new file mode 100644
index 0000000000..d40ba99b4b
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/search_goal_tuple.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining SearchGoalTuple class."""
+
+from collections.abc import Iterator
+from dataclasses import dataclass
+from typing import Tuple
+
+from .search_goal import SearchGoal
+
+
+@dataclass(frozen=True)
+class SearchGoalTuple:
+ """Container class holding multiple search goals.
+
+ Just a convenience for checking their number and types.
+ """
+
+ goals: Tuple[SearchGoal, ...]
+ """Goals extracted from user-provided Iterable of search goals."""
+
+ def __post_init__(self) -> None:
+ """Check type and number of search goals.
+
+ :raises ValueError: If there are no goals.
+ :raises TypeError: If a goal is not a SearchGoal.
+ """
+ super().__setattr__("goals", tuple(self.goals))
+ if not self.goals:
+ raise ValueError(f"Cannot be empty: {self.goals}")
+ for goal in self.goals:
+ if not isinstance(goal, SearchGoal):
+ raise TypeError(f"Must be a SearchGoal instance: {goal}")
+ copied = list(self.goals)
+ deduplicated = set(self.goals)
+ for goal in copied:
+ if goal not in deduplicated:
+ raise ValueError(f"Duplicate goal: {goal}")
+ deduplicated.remove(goal)
+ if deduplicated:
+ raise ValueError(f"Error processing goals: {deduplicated}")
+
+ def __iter__(self) -> Iterator[SearchGoal]:
+        """Enable iteration over goals.
+
+        :returns: Iterator iterating over contained goals.
+ :rtype: Iterator[SearchGoal]
+ """
+ return iter(self.goals)
diff --git a/resources/libraries/python/MLRsearch/selector.py b/resources/libraries/python/MLRsearch/selector.py
new file mode 100644
index 0000000000..4a6d2e2574
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/selector.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Selector class."""
+
+
+from dataclasses import dataclass, field
+from typing import Callable, List, Optional, Tuple
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .expander import TargetedExpander
+from .global_width import GlobalWidth
+from .limit_handler import LimitHandler
+from .measurement_database import MeasurementDatabase
+from .relevant_bounds import RelevantBounds
+from .target_spec import TargetSpec
+from .strategy import StrategyBase, STRATEGY_CLASSES
+
+
+@dataclass
+class Selector:
+    """A selector is an abstraction that focuses on only one of the search goals.
+
+ While lower-level logic is hidden in strategy classes,
+ the code in this class is responsible for initializing strategies
+ and shifting targets towards the final target.
+
+ While the public methods have the same names and meaning as the ones
+ in strategy classes, their signature is different.
+ Selector adds the current target trial duration to the output of nominate(),
+ and adds the current bounds to the input of won().
+
+ The nominate method does not return a complete Candidate instance,
+ as we need to avoid circular dependencies
+ (candidate will refer to selector).
+ """
+
+ final_target: TargetSpec
+ """The target this selector is trying to ultimately achieve."""
+ global_width: GlobalWidth
+ """Reference to the global width tracking instance."""
+ initial_lower_load: DiscreteLoad
+ """Smaller of the two loads distinguished at instance creation.
+ During operation, this field is reused to store preceding target bound."""
+ initial_upper_load: DiscreteLoad
+ """Larger of the two loads distinguished at instance creation.
+ During operation, this field is reused to store preceding target bound."""
+ database: MeasurementDatabase = field(repr=False)
+ """Reference to the common database used by all selectors."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the class used to avoid too narrow intervals."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+ # Primary above, derived below.
+ current_target: TargetSpec = secondary_field()
+ """The target the selector is focusing on currently."""
+ target_stack: List[TargetSpec] = secondary_field()
+ """Stack of targets. When current target is achieved, next is popped."""
+ strategies: Tuple[StrategyBase] = secondary_field()
+ """Instances implementing particular selection strategies."""
+ current_strategy: Optional[StrategyBase] = secondary_field()
+ """Reference to strategy used for last nomination, needed for won()."""
+ # Cache.
+ bounds: RelevantBounds = secondary_field()
+ """New relevant bounds for this round of candidate selection."""
+
+ def __post_init__(self) -> None:
+ """Initialize derived values."""
+ self.target_stack = [self.final_target]
+ while preceding_target := self.target_stack[-1].preceding:
+ self.target_stack.append(preceding_target)
+ self.current_target = self.target_stack.pop()
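+        # Note (comment added for clarity): the stack ends with the coarsest
+        # preceding target, so pop() starts the search there and later shifts
+        # towards the final target as each intermediate target is achieved.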
+ self._recreate_strategies()
+
+ def _recreate_strategies(self) -> None:
+ """Recreate strategies after current target has changed.
+
+ Width expander is recreated as target width is now smaller.
+ For convenience, strategies get injectable debug
+ which prints also the current target.
+ """
+ expander = TargetedExpander(
+ target=self.current_target,
+ global_width=self.global_width,
+ initial_lower_load=self.initial_lower_load,
+ initial_upper_load=self.initial_upper_load,
+ handler=self.handler,
+ debug=self.debug,
+ )
+
+ def wrapped_debug(text: str) -> None:
+ """Call self debug with current target info prepended.
+
+ :param text: Message to log at debug level.
+ :type text: str
+ """
+ self.debug(f"Target {self.current_target}: {text}")
+
+ self.strategies = tuple(
+ cls(
+ target=self.current_target,
+ expander=expander,
+ initial_lower_load=self.initial_lower_load,
+ initial_upper_load=self.initial_upper_load,
+ handler=self.handler,
+ debug=wrapped_debug,
+ )
+ for cls in STRATEGY_CLASSES
+ )
+ self.current_strategy = None
+ self.debug(f"Created strategies for: {self.current_target}")
+
+ def _update_bounds(self) -> None:
+ """Before each iteration, call this to update bounds cache."""
+ self.bounds = self.database.get_relevant_bounds(self.current_target)
+
+ def nominate(
+ self,
+ ) -> Tuple[Optional[DiscreteLoad], float, Optional[DiscreteWidth]]:
+ """Find first strategy that wants to nominate, return trial inputs.
+
+ Returned load is None if no strategy wants to nominate.
+
+ Current target is shifted when (now preceding) target is reached.
+ As each strategy never becomes done before at least one
+ bound relevant to the current target becomes available,
+        it is never necessary to revert to the preceding target after the shift.
+
+ As the initial trials had inputs relevant to all initial targets,
+ the only way for this not to nominate a load
+ is when the final target is reached (including hitting min or max load).
+ The case of hitting min load raises, so search fails early.
+
+ :returns: Nominated load, duration, and global width to set if winning.
+ :rtype: Tuple[Optional[DiscreteLoad], float, Optional[DiscreteWidth]]
+ :raises RuntimeError: If internal inconsistency is detected,
+ or if min load becomes an upper bound.
+ """
+ self._update_bounds()
+ self.current_strategy = None
+ while 1:
+ for strategy in self.strategies:
+ load, width = strategy.nominate(self.bounds)
+ if load:
+ self.current_strategy = strategy
+ return load, self.current_target.trial_duration, width
+ if not self.bounds.clo and not self.bounds.chi:
+ raise RuntimeError("Internal error: no clo nor chi.")
+ if not self.target_stack:
+ if not self.bounds.clo and self.current_target.fail_fast:
+ raise RuntimeError(f"No lower bound: {self.bounds.chi!r}")
+ self.debug(f"Goal {self.current_target} reached: {self.bounds}")
+ return None, self.current_target.trial_duration, None
+ # Everything is ready for next target in the chain.
+ self.current_target = self.target_stack.pop()
+ # Debug logs look better if we forget bounds are TrimmedStat.
+ # Abuse rounding (if not None) to convert to pure DiscreteLoad.
+ clo, chi = self.bounds.clo, self.bounds.chi
+ self.initial_lower_load = clo.rounded_down() if clo else clo
+ self.initial_upper_load = chi.rounded_down() if chi else chi
+ self._update_bounds()
+ self._recreate_strategies()
+
+ def won(self, load: DiscreteLoad) -> None:
+ """Update any private info when candidate became a winner.
+
+ :param load: The load previously nominated by current strategy.
+ :type load: DiscreteLoad
+ """
+ self._update_bounds()
+ self.current_strategy.won(bounds=self.bounds, load=load)
diff --git a/resources/libraries/python/MLRsearch/strategy/__init__.py b/resources/libraries/python/MLRsearch/strategy/__init__.py
new file mode 100644
index 0000000000..a1e0225a17
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "strategy".
+"""
+
+from .base import StrategyBase
+from .bisect import BisectStrategy
+from .extend_hi import ExtendHiStrategy
+from .extend_lo import ExtendLoStrategy
+from .halve import HalveStrategy
+from .refine_hi import RefineHiStrategy
+from .refine_lo import RefineLoStrategy
+
+
+STRATEGY_CLASSES = (
+ HalveStrategy,
+ RefineLoStrategy,
+ RefineHiStrategy,
+ ExtendLoStrategy,
+ ExtendHiStrategy,
+ BisectStrategy,
+)
+"""Tuple of strategy constructors, in order of priority decreasing."""
diff --git a/resources/libraries/python/MLRsearch/strategy/base.py b/resources/libraries/python/MLRsearch/strategy/base.py
new file mode 100644
index 0000000000..0724f882bf
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/base.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining StrategyBase class."""
+
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import Callable, Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..expander import TargetedExpander
+from ..limit_handler import LimitHandler
+from ..relevant_bounds import RelevantBounds
+from ..target_spec import TargetSpec
+
+
+@dataclass
+class StrategyBase(ABC):
+ """Abstract class encompassing data common to most strategies.
+
+ A strategy is one piece of logic a selector may use
+ when nominating a candidate according to its current target.
+
+ The two initial bound arguments may not be bounds at all.
+ For initial targets, the two values are usually mrr and mrr2.
+ For subsequent targets, the initial values are usually
+ the relevant bounds of the preceding target,
+ but one of them may be None if hitting min or max load.
+
+ The initial values are mainly used as stable alternatives
+ to relevant bounds of preceding target,
+ because those bounds may have been unpredictably altered
+ by nominations from unrelated search goals.
+ This greatly simplifies reasoning about strategies making progress.
+ """
+
+ target: TargetSpec
+ """The target this strategy is focusing on."""
+ expander: TargetedExpander
+ """Instance to track width expansion during search (if applicable)."""
+ initial_lower_load: Optional[DiscreteLoad]
+ """Smaller of the two loads distinguished at instance creation.
+ Can be None if upper bound is the min load."""
+ initial_upper_load: Optional[DiscreteLoad]
+ """Larger of the two loads distinguished at instance creation.
+ Can be None if lower bound is the max load."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the limit handler instance."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+
+ @abstractmethod
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate a load candidate if the conditions activate this strategy.
+
+        A complete candidate refers also to the nominating selector.
+        To prevent circular dependence (the selector refers to the nominating strategy),
+        this function returns only the load and the width.
+
+        Width should only be non-None if the global current width should be updated
+        when the candidate based on this nomination becomes the winner.
+        But currently all strategies return a non-None width
+        if they return a non-None load.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ return None, None
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Notify the strategy its candidate became the winner.
+
+ Most strategies have no use for this information,
+ but some strategies may need to update their private information.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ return
+
+ def not_worth(self, bounds: RelevantBounds, load: DiscreteLoad) -> bool:
+ """A check on bounds common for multiple strategies.
+
+ The load is worth measuring only if it can create or improve
+ either relevant bound.
+
+ Each strategy is designed to create a relevant bound for current target,
+ which is only needed if that (or better) bound does not exist yet.
+ Conversely, if a strategy does not nominate, it is because
+ the load it would nominate (if any) is found not worth by this method.
+
+ :param bounds: Current relevant bounds.
+ :param load: Load of a possible candidate.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ :returns: True if the load should NOT be nominated.
+ :rtype: bool
+ """
+ if bounds.clo and bounds.clo >= load:
+ return True
+ if bounds.chi and bounds.chi <= load:
+ return True
+ if bounds.clo and bounds.chi:
+ # We are not hitting min nor max load.
+ # Measuring at this load will create or improve clo or chi.
+ # The only reason not to nominate is if interval is narrow already.
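+            # Example (illustrative): with clo and chi only about 1 % apart
+            # and a target relative width of 2 %, width_in_goals() is below 1.0,
+            # so a load between the bounds is not worth nominating.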
+ wig = DiscreteInterval(
+ lower_bound=bounds.clo,
+ upper_bound=bounds.chi,
+ ).width_in_goals(self.target.discrete_width)
+ if wig <= 1.0:
+ return True
+ return False
diff --git a/resources/libraries/python/MLRsearch/strategy/bisect.py b/resources/libraries/python/MLRsearch/strategy/bisect.py
new file mode 100644
index 0000000000..894544695e
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/bisect.py
@@ -0,0 +1,193 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining BisectStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class BisectStrategy(StrategyBase):
+    """Strategy to use when both bounds relevant to current target are present.
+
+ Primarily, this strategy is there to perform internal search.
+    As powers of two are friendly to binary search,
+ this strategy relies on the splitting logic described in DiscreteInterval.
+
+ The main reason why this class is so long is that a mere existence
+ of a valid bound for the current target does not imply
+ that bound is a good approximation of the final conditional throughput.
+ The bound might become valid due to efforts of a strategy
+ focusing on an entirely different search goal.
+
+ On the other hand, initial bounds may be better approximations,
+ but they also may be bad approximations (for example
+ when SUT behavior strongly depends on trial duration).
+
+    Based on comparison of existing current bounds to initial bounds,
+    this strategy also mimics what external search would do
+    (if the one current bound was missing and the other initial bound was current).
+    In case that load value is closer to the appropriate initial bound
+    (compared to how far the simple bisect between current bounds is),
+    that load is nominated.
+
+ It turns out those "conditional" external search nominations
+ are quite different from unconditional ones,
+ at least when it comes to handling limits
+ and tracking when width expansion should be applied.
+ That is why that logic is here
+ and not in some generic external search class.
+ """
+
+ expand_on_clo: bool = False
+ """If extending up, width should be expanded when load becomes clo."""
+ expand_on_chi: bool = False
+ """If extending down, width should be expanded when load becomes chi."""
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate a load candidate between bounds or extending from them.
+
+ The external search logic is offloaded into private methods.
+ If they return a truthy load, that is returned from here as well.
+
+ Only if the actual bisect is selected,
+ the per-selector expander is limited to the (smaller) new width.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not bounds.clo or bounds.clo >= self.handler.max_load:
+ return None, None
+ if not bounds.chi or bounds.chi <= self.handler.min_load:
+ return None, None
+ interval = DiscreteInterval(bounds.clo, bounds.chi)
+ if interval.width_in_goals(self.target.discrete_width) <= 1.0:
+ return None, None
+ bisect_load = interval.middle(self.target.discrete_width)
+ load, width = self._extend_lo(bounds, bisect_load)
+ if load:
+ self.expand_on_clo, self.expand_on_chi = False, True
+ self.debug(f"Preferring to extend down: {load}")
+ return load, width
+ load, width = self._extend_hi(bounds, bisect_load)
+ if load:
+ self.expand_on_clo, self.expand_on_chi = True, False
+ self.debug(f"Preferring to extend up: {load}")
+ return load, width
+ load = bisect_load
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.expand_on_clo, self.expand_on_chi = False, False
+ self.debug(f"Preferring to bisect: {load}")
+ width_lo = DiscreteInterval(bounds.clo, load).discrete_width
+ width_hi = DiscreteInterval(load, bounds.chi).discrete_width
+ width = min(width_lo, width_hi)
+ self.expander.limit(width)
+ return load, width
+
+ def _extend_lo(
+ self, bounds: RelevantBounds, bisect_load: DiscreteLoad
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Compute load as if extending down, return it if preferred.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param bisect_load: Load when bisection is preferred.
+ :type bounds: RelevantBounds
+ :type bisect_load: DiscreteLoad
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ # TODO: Simplify all the conditions or explain them better.
+ if not self.initial_upper_load:
+ return None, None
+ if bisect_load >= self.initial_upper_load:
+ return None, None
+ width = self.expander.get_width()
+ load = bounds.chi - width
+ load = self.handler.handle(
+ load=load,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if not load:
+ return None, None
+ if load <= bisect_load:
+ return None, None
+ if load >= self.initial_upper_load:
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ raise RuntimeError(f"Load not worth: {load}")
+ return load, width
+
+ def _extend_hi(
+ self, bounds: RelevantBounds, bisect_load: DiscreteLoad
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Compute load as if extending up, return it if preferred.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param bisect_load: Load when bisection is preferred.
+ :type bounds: RelevantBounds
+ :type bisect_load: DiscreteLoad
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ # TODO: Simplify all the conditions or explain them better.
+ if not self.initial_lower_load:
+ return None, None
+ if bisect_load <= self.initial_lower_load:
+ return None, None
+ width = self.expander.get_width()
+ load = bounds.clo + width
+ load = self.handler.handle(
+ load=load,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if not load:
+ return None, None
+ if load >= bisect_load:
+ return None, None
+ if load <= self.initial_lower_load:
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ raise RuntimeError(f"Load not worth: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width when appropriate.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if self.expand_on_clo and load == bounds.clo:
+ self.expander.expand()
+ elif self.expand_on_chi and load == bounds.chi:
+ self.expander.expand()
diff --git a/resources/libraries/python/MLRsearch/strategy/extend_hi.py b/resources/libraries/python/MLRsearch/strategy/extend_hi.py
new file mode 100644
index 0000000000..79c4ad7cf2
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/extend_hi.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining ExtendHiStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class ExtendHiStrategy(StrategyBase):
+ """This strategy is applied when there is no relevant upper bound.
+
+ Typically this is needed after RefineHiStrategy turned initial upper bound
+ into a current relevant lower bound.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate current relevant lower bound plus expander width.
+
+ This performs external search in upwards direction,
+ until a valid upper bound for the current target is found,
+ or until max load is hit.
+ Limit handling is used to avoid nominating too close
+ (or above) the max rate.
+
+ Width expansion is only applied if the candidate becomes a lower bound,
+        so that is detected in the won() method.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if bounds.chi or not bounds.clo or bounds.clo >= self.handler.max_load:
+ return None, None
+ width = self.expander.get_width()
+ load = self.handler.handle(
+ load=bounds.clo + width,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"No chi, extending up: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width if the load became the new lower bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if load == bounds.clo:
+ self.expander.expand()
diff --git a/resources/libraries/python/MLRsearch/strategy/extend_lo.py b/resources/libraries/python/MLRsearch/strategy/extend_lo.py
new file mode 100644
index 0000000000..68d20b6a6a
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/extend_lo.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining ExtendLoStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class ExtendLoStrategy(StrategyBase):
+ """This strategy is applied when there is no relevant lower bound.
+
+ Typically this is needed after RefineLoStrategy turned initial lower bound
+ into a current relevant upper bound.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate current relevant upper bound minus expander width.
+
+ This performs external search in downwards direction,
+ until a valid lower bound for the current target is found,
+ or until min load is hit.
+ Limit handling is used to avoid nominating too close
+ (or below) the min rate.
+
+ Width expansion is only applied if the candidate becomes an upper bound,
+        so that is detected in the won() method.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if bounds.clo or not bounds.chi or bounds.chi <= self.handler.min_load:
+ return None, None
+ width = self.expander.get_width()
+ load = self.handler.handle(
+ load=bounds.chi - width,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"No clo, extending down: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width if the load became new upper bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if load == bounds.chi:
+ self.expander.expand()
diff --git a/resources/libraries/python/MLRsearch/strategy/halve.py b/resources/libraries/python/MLRsearch/strategy/halve.py
new file mode 100644
index 0000000000..3188a041c6
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/halve.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining HalveStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class HalveStrategy(StrategyBase):
+ """First strategy to apply for a new current target.
+
+ Pick a load between initial lower bound and initial upper bound,
+ nominate it if it is (still) worth it.
+
+ In a sense, this can be viewed as an extension of preceding target's
+ bisect strategy. But as the current target may require a different
+ trial duration, it is better to do it for the new target.
+
+    Alternatively, this is a way to save one application
+    of the subsequent refine strategy, thus reducing the risk of triggering
+    an external search (a slight time saver for highly unstable SUTs).
+    Either way, a minor time save is achieved by the preceding target
+    only needing to reach double of the current target width.
+
+ If the distance between initial bounds is already at or below
+ current target width, the middle point is not nominated.
+ The reasoning is that in this case external search is likely
+ to get triggered by the subsequent refine strategies,
+ so attaining a relevant bound here is not as likely to help.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the middle between initial lower and upper bound.
+
+ The returned width is the target width, even if initial bounds
+ happened to be closer together.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not self.initial_lower_load or not self.initial_upper_load:
+ return None, None
+ interval = DiscreteInterval(
+ lower_bound=self.initial_lower_load,
+ upper_bound=self.initial_upper_load,
+ )
+ wig = interval.width_in_goals(self.target.discrete_width)
+ if wig > 2.0:
+ # Can happen for initial target.
+ return None, None
+ if wig <= 1.0:
+ # Already was narrow enough, refinements shall be sufficient.
+ return None, None
+ load = interval.middle(self.target.discrete_width)
+ if self.not_worth(bounds, load):
+ return None, None
+ self.debug(f"Halving available: {load}")
+ # TODO: Report possibly smaller width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
diff --git a/resources/libraries/python/MLRsearch/strategy/refine_hi.py b/resources/libraries/python/MLRsearch/strategy/refine_hi.py
new file mode 100644
index 0000000000..caa8fc4a7d
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/refine_hi.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RefineHiStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class RefineHiStrategy(StrategyBase):
+ """If initial upper bound is still worth it, nominate it.
+
+ This usually happens when halving resulted in relevant lower bound,
+ or if there was no halving (and RefineLoStrategy confirmed initial
+ lower bound became a relevant lower bound for the new current target).
+
+ This either ensures a matching upper bound (target is achieved)
+ or moves the relevant lower bound higher (triggering external search).
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the initial upper bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not (load := self.initial_upper_load):
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"Upperbound refinement available: {load}")
+ # TODO: Limit to possibly smaller than target width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
diff --git a/resources/libraries/python/MLRsearch/strategy/refine_lo.py b/resources/libraries/python/MLRsearch/strategy/refine_lo.py
new file mode 100644
index 0000000000..7927798505
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/refine_lo.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RefineLoStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class RefineLoStrategy(StrategyBase):
+ """If initial lower bound is still worth it, nominate it.
+
+ This usually happens when halving resulted in relevant upper bound,
+ or if there was no halving.
+ This ensures a relevant bound (upper or lower) for the current target
+ exists.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the initial lower bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not (load := self.initial_lower_load):
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"Lowerbound refinement available: {load}")
+ # TODO: Limit to possibly smaller than target width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
diff --git a/resources/libraries/python/MLRsearch/target_scaling.py b/resources/libraries/python/MLRsearch/target_scaling.py
new file mode 100644
index 0000000000..25114c311c
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_scaling.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetScaling class."""
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .dataclass import secondary_field
+from .discrete_width import DiscreteWidth
+from .load_rounding import LoadRounding
+from .search_goal import SearchGoal
+from .search_goal_tuple import SearchGoalTuple
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TargetScaling:
+ """Encapsulate targets derived from goals.
+
+ No default values for primaries, the constructor call has to specify everything.
+ """
+
+ goals: SearchGoalTuple
+ """Set of goals to generate targets for."""
+ rounding: LoadRounding
+ """Rounding instance to use (targets have discrete width)."""
+ # Derived quantities.
+ targets: Tuple[TargetSpec] = secondary_field()
+ """The generated targets, linked into chains."""
+ goal_to_final_target: Dict[SearchGoal, TargetSpec] = secondary_field()
+ """Mapping from a goal to its corresponding final target."""
+
+ def __post_init__(self) -> None:
+ """For each goal create final, and non-final targets and link them."""
+ linked_targets = []
+ self.goal_to_final_target = {}
+ for goal in self.goals:
+ standalone_targets = []
+ # Final target.
+ width = DiscreteWidth(
+ rounding=self.rounding,
+ float_width=goal.relative_width,
+ ).rounded_down()
+ duration_sum = goal.duration_sum
+ target = TargetSpec(
+ loss_ratio=goal.loss_ratio,
+ exceed_ratio=goal.exceed_ratio,
+ discrete_width=width,
+ trial_duration=goal.final_trial_duration,
+ duration_sum=duration_sum,
+ expansion_coefficient=goal.expansion_coefficient,
+ fail_fast=goal.fail_fast,
+ preceding=None,
+ )
+ standalone_targets.append(target)
+ # Non-final targets.
+ preceding_targets = goal.preceding_targets
+ multiplier = (
+ pow(
+ goal.initial_trial_duration / duration_sum,
+ 1.0 / preceding_targets,
+ )
+ if preceding_targets
+ else 1.0
+ )
+ for count in range(preceding_targets):
+ preceding_sum = duration_sum * pow(multiplier, count + 1)
+ if count + 1 >= preceding_targets:
+ preceding_sum = goal.initial_trial_duration
+ trial_duration = min(goal.final_trial_duration, preceding_sum)
+ width *= 2
+ target = TargetSpec(
+ loss_ratio=goal.loss_ratio,
+ exceed_ratio=goal.exceed_ratio,
+ discrete_width=width,
+ trial_duration=trial_duration,
+ duration_sum=preceding_sum,
+ expansion_coefficient=goal.expansion_coefficient,
+ fail_fast=False,
+ preceding=None,
+ )
+ standalone_targets.append(target)
+ # Link preceding targets.
+ preceding_target = None
+ for target in reversed(standalone_targets):
+ linked_target = target.with_preceding(preceding_target)
+ linked_targets.append(linked_target)
+ preceding_target = linked_target
+ # Associate final target to the goal.
+ self.goal_to_final_target[goal] = linked_targets[-1]
+ # Store all targets as a tuple.
+ self.targets = tuple(linked_targets)
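
The geometric scaling above is easier to see with concrete numbers. Below is a minimal standalone sketch of the same arithmetic; the values stand in for the goal attributes and are purely illustrative, not SearchGoal defaults.

    # Assumed goal attributes (illustrative only).
    initial_trial_duration = 1.0   # [s]
    final_trial_duration = 1.0     # [s]
    duration_sum = 21.0            # [s]
    preceding_targets = 2

    multiplier = pow(initial_trial_duration / duration_sum, 1.0 / preceding_targets)
    duration_sums, trial_durations = [], []
    for count in range(preceding_targets):
        preceding_sum = duration_sum * pow(multiplier, count + 1)
        if count + 1 >= preceding_targets:
            preceding_sum = initial_trial_duration
        duration_sums.append(preceding_sum)
        trial_durations.append(min(final_trial_duration, preceding_sum))
    # duration_sums is roughly [4.58, 1.0]: each coarser (earlier) target
    # needs geometrically less measured time, while each loop iteration in
    # TargetScaling also doubles the discrete width of the target.
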
diff --git a/resources/libraries/python/MLRsearch/target_spec.py b/resources/libraries/python/MLRsearch/target_spec.py
new file mode 100644
index 0000000000..5279ba00a1
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_spec.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetSpec class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Optional
+
+from .discrete_width import DiscreteWidth
+
+
+@dataclass(frozen=True, eq=True)
+class TargetSpec:
+ """Composite object holding attributes specifying one search target.
+
+ Abstractly, this serves several related purposes.
+ With the discrete_width attribute, this specifies when a selector is done.
+ With the expansion_coefficient attribute, it tells the selector how quickly
+ it should expand the interval in external search.
+ With the "preceding" attribute, it saves the selector from having to track
+ the preceding target separately from its current target.
+ Without those three attributes this object is still sufficient
+ for LoadStats to classify loads as lower bound, upper bound, or unknown.
+ """
+
+ loss_ratio: float
+ """Target loss ratio. Equal and directly analogous to goal loss ratio,
+ but applicable also for non-final targets."""
+ exceed_ratio: float
+ """Target exceed ratio. Equal and directly analogous to goal exceed ratio,
+ but applicable also for non-final targets."""
+ discrete_width: DiscreteWidth
+ """Target relative width. Analogous to goal relative width,
+ but coarser for non-final targets."""
+ trial_duration: float
+ """Duration to use for trials for this target. Shorter trials have lesser
+ (and more complicated) impact when determining upper and lower bounds."""
+ duration_sum: float
+ """Sum of trial durations sufficient to classify a load
+ as an upper or lower bound.
+ For non-final targets, this is shorter than goal duration_sum."""
+ expansion_coefficient: int = field(repr=False)
+ """Equal and directly analogous to goal expansion coefficient,
+ but applicable also for non-final targets."""
+ fail_fast: bool = field(repr=False)
+ """Copied from goal. If true and min load is not an upper bound, raise."""
+ preceding: Optional[TargetSpec] = field(repr=False)
+ """Reference to next coarser target (if any) belonging to the same goal."""
+
+ # No conversions or validations, as this is an internal structure.
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"lr={self.loss_ratio},er={self.exceed_ratio}"
+ f",ds={self.duration_sum}"
+ )
+
+ def with_preceding(self, preceding: Optional[TargetSpec]) -> TargetSpec:
+ """Create an equivalent instance but with different preceding field.
+
+ This is useful in initialization. Create semi-initialized targets
+ starting from the final one, then add references in reversed order.
+
+ :param preceding: New value for preceding field, may be None.
+ :type preceding: Optional[TargetSpec]
+ :returns: Instance with the new value applied.
+ :rtype: TargetSpec
+ """
+ return TargetSpec(
+ loss_ratio=self.loss_ratio,
+ exceed_ratio=self.exceed_ratio,
+ discrete_width=self.discrete_width,
+ trial_duration=self.trial_duration,
+ duration_sum=self.duration_sum,
+ expansion_coefficient=self.expansion_coefficient,
+ fail_fast=self.fail_fast,
+ preceding=preceding,
+ )
diff --git a/resources/libraries/python/MLRsearch/target_stat.py b/resources/libraries/python/MLRsearch/target_stat.py
new file mode 100644
index 0000000000..18e1ff4161
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_stat.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadStat class."""
+
+from dataclasses import dataclass, field
+from typing import Dict, Tuple
+
+from .target_spec import TargetSpec
+from .discrete_result import DiscreteResult
+
+
+@dataclass
+class TargetStat:
+ """Class for aggregating trial results for a single load and target.
+
+ Reference to the target is included for convenience.
+
+ The main usage is for load classification, done in estimates method.
+ If both estimates agree, the load is classified as either a lower bound
+ or an upper bound. For additional logic for dealing with loss inversion
+ see MeasurementDatabase.
+
+ Also, data needed for conditional throughput is gathered here,
+ exposed only as a pessimistic loss ratio
+ (as the load value is not stored here).
+ """
+
+ target: TargetSpec = field(repr=False)
+ """The target for which this instance is aggregating results."""
+ good_long: float = 0.0
+ """Sum of durations of long enough trials satisfying target loss ratio."""
+ bad_long: float = 0.0
+ """Sum of durations of long trials not satisfying target loss ratio."""
+ good_short: float = 0.0
+ """Sum of durations of shorter trials satisfying target loss ratio."""
+ bad_short: float = 0.0
+ """Sum of durations of shorter trials not satisfying target loss ratio."""
+ long_losses: Dict[float, float] = field(repr=False, default_factory=dict)
+ """If a loss ratio value occured in a long trial, map it to duration sum."""
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"gl={self.good_long},bl={self.bad_long}"
+ f",gs={self.good_short},bs={self.bad_short}"
+ )
+
+ def add(self, result: DiscreteResult) -> None:
+ """Take into account one more trial result.
+
+ Use intended duration for deciding between long and short trials,
+ but use duration with overheads to increase the duration sums.
+
+ :param result: The trial result to add to the stats.
+ :type result: DiscreteResult
+ """
+ dwo = result.duration_with_overheads
+ rlr = result.loss_ratio
+ if result.intended_duration >= self.target.trial_duration:
+ if rlr not in self.long_losses:
+ self.long_losses[rlr] = 0.0
+ self.long_losses = dict(sorted(self.long_losses.items()))
+ self.long_losses[rlr] += dwo
+ if rlr > self.target.loss_ratio:
+ self.bad_long += dwo
+ else:
+ self.good_long += dwo
+ else:
+ if rlr > self.target.loss_ratio:
+ self.bad_short += dwo
+ else:
+ self.good_short += dwo
+
+ def estimates(self) -> Tuple[bool, bool]:
+ """Return whether this load can become a lower bound.
+
+ This returns two estimates, hence the weird non-verb name of this method.
+ One estimate assumes all following results will satisfy the loss ratio,
+ the other assumes all results will not satisfy the loss ratio.
+ The sum of durations of the assumed results
+ is the minimum to reach target duration sum, or zero if already reached.
+
+ If both estimates are the same, it means the load is a definite bound.
+ This may happen even when the sum of durations of already
+ measured trials is less than the target, when the missing measurements
+ cannot change the classification.
+
+ :returns: Tuple of two estimates whether the load can be a lower bound.
+ (True, False) means more trial results are needed.
+ :rtype: Tuple[bool, bool]
+ """
+ coeff = self.target.exceed_ratio
+ decrease = self.good_short * coeff / (1.0 - coeff)
+ short_excess = self.bad_short - decrease
+ effective_excess = self.bad_long + max(0.0, short_excess)
+ effective_dursum = max(
+ self.good_long + effective_excess,
+ self.target.duration_sum,
+ )
+ limit_dursum = effective_dursum * self.target.exceed_ratio
+ optimistic = effective_excess <= limit_dursum
+ pessimistic = (effective_dursum - self.good_long) <= limit_dursum
+ return optimistic, pessimistic
+
+ @property
+ def pessimistic_loss_ratio(self) -> float:
+ """Return the loss ratio for conditional throughput computation.
+
+ It adds missing dursum as full-loss trials to long_losses
+ and returns a quantile corresponding to exceed ratio.
+ In case of tie (as in median for even number of samples),
+ this returns the lower value (as being equal to goal exceed ratio
+ is allowed).
+
+ For loads classified as a lower bound, the return value
+ ends up being no larger than the target loss ratio.
+ This is because the excess short bad trials would only come
+ after the quantile in question (as would full-loss missing trials).
+ For other loads, anything can happen, but conditional throughput
+ should not be computed for those anyway.
+ Those two facts allow the logic here to be simpler than in estimates().
+
+ :returns: Effective loss ratio based on long trial results.
+ :rtype: float
+ """
+ all_long = max(self.target.duration_sum, self.good_long + self.bad_long)
+ remaining = all_long * (1.0 - self.target.exceed_ratio)
+ ret = None
+ for ratio, dursum in self.long_losses.items():
+ if ret is None or remaining > 0.0:
+ ret = ratio
+ remaining -= dursum
+ else:
+ break
+ else:
+ if remaining > 0.0:
+ ret = 1.0
+ return ret
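
The classification arithmetic in estimates() is easiest to follow on a worked instance. The following self-contained sketch mirrors the expressions above with illustrative numbers (exceed ratio 0.5, target duration sum 21 s).

    exceed_ratio, duration_sum = 0.5, 21.0
    good_long, bad_long, good_short, bad_short = 11.0, 2.0, 0.0, 0.0
    decrease = good_short * exceed_ratio / (1.0 - exceed_ratio)         # 0.0
    short_excess = bad_short - decrease                                 # 0.0
    effective_excess = bad_long + max(0.0, short_excess)                # 2.0
    effective_dursum = max(good_long + effective_excess, duration_sum)  # 21.0
    limit_dursum = effective_dursum * exceed_ratio                      # 10.5
    optimistic = effective_excess <= limit_dursum                       # True
    pessimistic = (effective_dursum - good_long) <= limit_dursum        # True
    # Both estimates are True, so this load is already a definite lower
    # bound after only 13 s of trials: even 8 s of assumed full-loss
    # results cannot push the bad share above the 0.5 exceed ratio.
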
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/__init__.py b/resources/libraries/python/MLRsearch/trial_measurement/__init__.py
new file mode 100644
index 0000000000..034ae41819
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "trial_measurement".
+"""
+
+from .abstract_measurer import AbstractMeasurer
+from .measurement_result import MeasurementResult
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py b/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py
new file mode 100644
index 0000000000..6fab79c8dc
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining AbstractMeasurer class."""
+
+from abc import ABCMeta, abstractmethod
+
+from .measurement_result import MeasurementResult as Result
+
+
+class AbstractMeasurer(metaclass=ABCMeta):
+ """Abstract class defining common API for trial measurement providers.
+
+ The original use of this class was in the realm of
+ RFC 2544 Throughput search, which explains the terminology
+ related to networks, frames, packets, offered load, forwarding rate
+ and similar.
+
+ But the same logic can be used in higher level networking scenarios
+ (e.g. https requests) or even outside networking (database transactions).
+
+ The current code uses language from packet forwarding,
+ docstrings sometimes mention transactions as an alternative view.
+ """
+
+ @abstractmethod
+ def measure(self, intended_duration: float, intended_load: float) -> Result:
+ """Perform trial measurement and return the result.
+
+ It is assumed the measurer got already configured with anything else
+ needed to perform the measurement (e.g. traffic profile
+ or transaction limit).
+
+ Duration and load are the only values expected to vary
+ during the search.
+
+ :param intended_duration: Intended trial duration [s].
+ :param intended_load: Intended rate of transactions (packets) [tps].
+ It is a per-port rate, e.g. uni-directional for SUTs
+ with two ports.
+ :type intended_duration: float
+ :type intended_load: float
+ :returns: Structure detailing the result of the measurement.
+ :rtype: measurement_result.MeasurementResult
+ """
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py b/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py
new file mode 100644
index 0000000000..9dc1ccf5f1
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MeasurementResult class."""
+
+from dataclasses import dataclass
+
+
+@dataclass
+class MeasurementResult:
+ """Structure defining the result of a single trial measurement.
+
+ There are a few primary (required) quantities. Various secondary (derived)
+ quantities are calculated and can be queried.
+
+ The constructor allows broader argument types,
+ the post init function converts to the stricter types.
+
+ Integer quantities (counts) are preferred, as float values
+ can suffer from rounding errors, and sometimes they are measured
+ at unknown (possibly very limited) precision and accuracy.
+
+ There are relations between the counts (e.g. offered count
+ should be equal to a sum of forwarding count and loss count).
+ This implementation does not perform consistency checks, but uses them
+ for computing quantities the caller left unspecified.
+
+ In some cases, the units of intended load are different from units
+ of loss count (e.g. load in transactions but loss in packets).
+ Quantities with relative_ prefix can be used to get load candidates
+ from forwarding results.
+
+ Sometimes, the measurement provider is unable to reach the intended load,
+ and it can react by spending longer than intended duration
+ to reach its intended count. To signal irregular situations like this,
+ several optional fields can be given, and various secondary quantities
+ are populated, so the measurement consumer can query the quantity
+ it wants to rely on in these irregular situations.
+
+ The current implementation intentionally limits the secondary quantities
+ to the few that proved useful in practice.
+ """
+
+ # Required primary quantities.
+ intended_duration: float
+ """Intended trial measurement duration [s]."""
+ intended_load: float
+ """Intended load [tps]. If bidirectional (or multi-port) traffic is used,
+ most users will put unidirectional (single-port) value here,
+ as bandwidth and pps limits are usually per-port."""
+ # Two of the next three primary quantities are required.
+ offered_count: int = None
+ """Number of packets actually transmitted (transactions attempted).
+ This should be the aggregate (bidirectional, multi-port) value,
+ so that asymmetric traffic profiles are supported."""
+ loss_count: int = None
+ """Number of packets transmitted but not received (transactions failed)."""
+ forwarding_count: int = None
+ """Number of packets successfully forwarded (transactions succeeded)."""
+ # Optional primary quantities.
+ offered_duration: float = None
+ """Estimate of the time [s] the trial was actually transmitting traffic."""
+ duration_with_overheads: float = None
+ """Estimate of the time [s] it took to get the trial result
+ since the measurement started."""
+ intended_count: int = None
+ """Expected number of packets to transmit. If not known,
+ the value of offered_count is used."""
+
+ def __post_init__(self) -> None:
+ """Convert types, compute missing values.
+
+ Current caveats:
+ A failing assumption looks like a conversion error.
+ Negative counts are allowed, which can lead to errors later.
+ """
+ self.intended_duration = float(self.intended_duration)
+ if self.offered_duration is None:
+ self.offered_duration = self.intended_duration
+ else:
+ self.offered_duration = float(self.offered_duration)
+ if self.duration_with_overheads is None:
+ self.duration_with_overheads = self.offered_duration
+ else:
+ self.duration_with_overheads = float(self.duration_with_overheads)
+ self.intended_load = float(self.intended_load)
+ if self.forwarding_count is None:
+ self.forwarding_count = int(self.offered_count) - int(
+ self.loss_count
+ )
+ else:
+ self.forwarding_count = int(self.forwarding_count)
+ if self.offered_count is None:
+ self.offered_count = self.forwarding_count + int(self.loss_count)
+ else:
+ self.offered_count = int(self.offered_count)
+ if self.loss_count is None:
+ self.loss_count = self.offered_count - self.forwarding_count
+ else:
+ self.loss_count = int(self.loss_count)
+ if self.intended_count is None:
+ self.intended_count = self.offered_count
+ else:
+ self.intended_count = int(self.intended_count)
+ # TODO: Handle (somehow) situations where offered > intended?
+
+ @property
+ def unsent_count(self) -> int:
+ """How many packets were not transmitted (transactions not started).
+
+ :return: Intended count minus offered count.
+ :rtype: int
+ """
+ return self.intended_count - self.offered_count
+
+ @property
+ def loss_ratio(self) -> float:
+ """Bad count divided by overall count, zero if the latter is zero.
+
+ The bad count includes not only loss count, but also unsent count.
+ If unsent count is negative, its absolute value is used.
+ The overall count is intended count or offered count,
+ whichever is bigger.
+
+ Together, the resulting formula tends to increase loss ratio
+ (but not above 100%) in irregular situations,
+ thus guiding search algorithms towards lower loads
+ where there should be less irregularities.
+ The zero default is there to prevent search algorithms from
+ getting stuck on an intended load that is too low.
+
+ :returns: Bad count divided by overall count.
+ :rtype: float
+ """
+ overall = max(self.offered_count, self.intended_count)
+ bad = abs(self.loss_count) + abs(self.unsent_count)
+ return bad / overall if overall else 0.0
+
+ @property
+ def relative_forwarding_rate(self) -> float:
+ """Forwarding rate in load units as if duration and load was intended.
+
+ The result is based purely on intended load and loss ratio.
+ While the resulting value may be far from what really happened,
+ it has nice behavior with respect to common assumptions
+ of search algorithms.
+
+ :returns: Forwarding rate in load units estimated from loss ratio.
+ :rtype: float
+ """
+ return self.intended_load * (1.0 - self.loss_ratio)
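
A short illustration of how the quantities above combine in an irregular trial; the numbers are invented for the example.

    from resources.libraries.python.MLRsearch.trial_measurement import MeasurementResult

    result = MeasurementResult(
        intended_duration=1.0,
        intended_load=1_000_000.0,
        offered_count=990_000,      # the generator fell short of the intent
        loss_count=5_000,
        intended_count=1_000_000,
    )
    assert result.forwarding_count == 985_000
    assert result.unsent_count == 10_000
    # loss_ratio is (5_000 + 10_000) / 1_000_000 = 0.015, so the
    # relative_forwarding_rate is 1_000_000 * (1 - 0.015) = 985_000 tps.
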
diff --git a/resources/libraries/python/MLRsearch/trimmed_stat.py b/resources/libraries/python/MLRsearch/trimmed_stat.py
new file mode 100644
index 0000000000..74918d78b0
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trimmed_stat.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TrimmedStat class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .load_stats import LoadStats
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TrimmedStat(LoadStats):
+ """Load stats trimmed to a single target.
+
+ Useful mainly for reporting the overall results.
+ """
+
+ def __post_init__(self) -> None:
+ """Initialize load value and check there is one target to track."""
+ super().__post_init__()
+ if len(self.target_to_stat) != 1:
+ raise ValueError(f"No single target: {self.target_to_stat!r}")
+
+ @staticmethod
+ def for_target(stats: LoadStats, target: TargetSpec) -> TrimmedStat:
+ """Return new instance with only one target in the mapping.
+
+ :param stats: The load stats instance to trim.
+ :param target: The one target which should remain in the mapping.
+ :type stats: LoadStats
+ :type target: TargetSpec
+ :return: Newly created instance.
+ :rtype: TrimmedStat
+ """
+ return TrimmedStat(
+ rounding=stats.rounding,
+ int_load=stats.int_load,
+ target_to_stat={target: stats.target_to_stat[target]},
+ )
diff --git a/resources/libraries/python/Memif.py b/resources/libraries/python/Memif.py
index 2128d30428..32096d0ca5 100644
--- a/resources/libraries/python/Memif.py
+++ b/resources/libraries/python/Memif.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -74,7 +74,7 @@ class Memif:
includes only retval.
:rtype: dict
"""
- cmd = u"memif_socket_filename_add_del"
+ cmd = u"memif_socket_filename_add_del_v2"
err_msg = f"Failed to create memif socket on host {node[u'host']}"
args = dict(
is_add=is_add,
@@ -85,7 +85,7 @@ class Memif:
return papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def _memif_create(node, mid, sid, rxq=1, txq=1, role=1):
+ def _memif_create(node, mid, sid, rxq=1, txq=1, role=1, use_dma=False):
"""Create Memif interface on the given node, return its sw_if_index.
:param node: Given node to create Memif interface on.
@@ -94,16 +94,18 @@ class Memif:
:param rxq: Number of RX queues; 0 means do not set.
:param txq: Number of TX queues; 0 means do not set.
:param role: Memif interface role [master=0|slave=1]. Default is slave.
+ :param use_dma: Use DMA acceleration. Requires hardware support.
:type node: dict
:type mid: str
:type sid: str
:type rxq: int
:type txq: int
:type role: int
+ :type use_dma: bool
:returns: sw_if_index
:rtype: int
"""
- cmd = u"memif_create"
+ cmd = u"memif_create_v2"
err_msg = f"Failed to create memif interface on host {node[u'host']}"
args = dict(
role=role,
@@ -111,7 +113,8 @@ class Memif:
tx_queues=int(txq),
socket_id=int(sid),
id=int(mid),
- secret=u""
+ secret=u"",
+ use_dma=use_dma,
)
with PapiSocketExecutor(node) as papi_exec:
@@ -119,7 +122,8 @@ class Memif:
@staticmethod
def create_memif_interface(
- node, filename, mid, sid, rxq=1, txq=1, role=u"SLAVE"):
+ node, filename, mid, sid, rxq=1, txq=1, role=u"SLAVE", use_dma=False
+ ):
"""Create Memif interface on the given node.
:param node: Given node to create Memif interface on.
@@ -129,6 +133,7 @@ class Memif:
:param rxq: Number of RX queues; 0 means do not set.
:param txq: Number of TX queues; 0 means do not set.
:param role: Memif interface role [master=0|slave=1]. Default is master.
+ :param use_dma: Use DMA acceleration. Requires hardware support.
:type node: dict
:type filename: str
:type mid: str
@@ -136,6 +141,7 @@ class Memif:
:type rxq: int
:type txq: int
:type role: str
+ :type use_dma: bool
:returns: SW interface index.
:rtype: int
:raises ValueError: If command 'create memif' fails.
@@ -147,7 +153,7 @@ class Memif:
# Create memif
sw_if_index = Memif._memif_create(
- node, mid, sid, rxq=rxq, txq=txq, role=role
+ node, mid, sid, rxq=rxq, txq=txq, role=role, use_dma=use_dma
)
# Update Topology
diff --git a/resources/libraries/python/NATUtil.py b/resources/libraries/python/NATUtil.py
index 0f8e746663..e5f530ab46 100644
--- a/resources/libraries/python/NATUtil.py
+++ b/resources/libraries/python/NATUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -62,23 +62,17 @@ class NATUtil:
pass
@staticmethod
- def enable_nat44_plugin(
- node, inside_vrf=0, outside_vrf=0, users=0, user_memory=0,
- sessions=0, session_memory=0, user_sessions=0, mode=u""):
+ def enable_nat44_ed_plugin(
+ node, inside_vrf=0, outside_vrf=0, sessions=0, session_memory=0,
+ mode=u""):
"""Enable NAT44 plugin.
:param node: DUT node.
:param inside_vrf: Inside VRF ID.
:param outside_vrf: Outside VRF ID.
- :param users: Maximum number of users. Used only in endpoint-independent
- mode.
- :param user_memory: User memory size - overwrite auto calculated hash
- allocation parameter if non-zero.
:param sessions: Maximum number of sessions.
:param session_memory: Session memory size - overwrite auto calculated
hash allocation parameter if non-zero.
- :param user_sessions: Maximum number of sessions per user. Used only in
- endpoint-independent mode.
:param mode: NAT44 mode. Valid values:
- endpoint-independent
- endpoint-dependent
@@ -88,24 +82,18 @@ class NATUtil:
:type node: dict
:type inside_vrf: str or int
:type outside_vrf: str or int
- :type users: str or int
- :type user_memory: str or int
:type sessions: str or int
:type session_memory: str or int
- :type user_sessions: str or int
:type mode: str
"""
- cmd = u"nat44_plugin_enable_disable"
+ cmd = u"nat44_ed_plugin_enable_disable"
err_msg = f"Failed to enable NAT44 plugin on the host {node[u'host']}!"
args_in = dict(
enable=True,
inside_vrf=int(inside_vrf),
outside_vrf=int(outside_vrf),
- users=int(users),
- user_memory=int(user_memory),
sessions=int(sessions),
session_memory=int(session_memory),
- user_sessions=int(user_sessions),
flags=getattr(
Nat44ConfigFlags,
f"NAT44_IS_{mode.replace(u'-', u'_').upper()}"
@@ -194,10 +182,9 @@ class NATUtil:
"""Delete and re-add the NAT range setting."""
with PapiSocketExecutor(node) as papi_exec:
args_in[u"is_add"] = False
- papi_exec.add(cmd, **args_in)
+ papi_exec.add(cmd, **args_in).get_reply(err_msg)
args_in[u"is_add"] = True
- papi_exec.add(cmd, **args_in)
- papi_exec.get_replies(err_msg)
+ papi_exec.add(cmd, **args_in).get_reply(err_msg)
return resetter
@@ -211,14 +198,8 @@ class NATUtil:
cmd = u"nat44_show_running_config"
err_msg = f"Failed to get NAT44 configuration on host {node[u'host']}"
- try:
- with PapiSocketExecutor(node) as papi_exec:
- reply = papi_exec.add(cmd).get_reply(err_msg)
- except AssertionError:
- # Perhaps VPP is an older version
- old_cmd = u"nat_show_config"
- with PapiSocketExecutor(node) as papi_exec:
- reply = papi_exec.add(old_cmd).get_reply(err_msg)
+ with PapiSocketExecutor(node) as papi_exec:
+ reply = papi_exec.add(cmd).get_reply(err_msg)
logger.debug(f"NAT44 Configuration:\n{pformat(reply)}")
@@ -288,34 +269,77 @@ class NATUtil:
:rtype: int
"""
# vpp-device tests have not dedicated physical core so
- # ${thr_count_int} == 0 but we need to use one thread
+ # ${dp_count_int} == 0 but we need to use one thread
threads = 1 if not int(threads) else int(threads)
rest, mult = modf(log2(sessions/(10*threads)))
return 2 ** (int(mult) + (1 if rest else 0)) * 10
@staticmethod
def get_nat44_sessions_number(node, proto):
- """Get number of established NAT44 sessions from actual NAT44 mapping
- data.
+ """Get number of expected NAT44 sessions from NAT44 mapping data.
+
+ This keyword uses output from a CLI command,
+ so it can start failing when VPP changes the output format.
+ TODO: Switch to API (or stat segment) when available.
+
+ The current implementation supports both 2202 and post-2202 format.
+ (The Gerrit number changing the output format is 34877.)
+
+ For TCP proto, the expected state after rampup is
+ some number of sessions in transitory state (VPP has seen the FINs),
+ and some number of sessions in established state (meaning
+ some FINs were lost in the last trial).
+ While the two states may need a slightly different number of cycles
+ to process the next packet, the current implementation considers
+ both of them the "fast path", so they are both counted as expected.
+
+ As the tests should fail if a session is timed out,
+ the logic subtracts timed-out sessions from the returned value
+ (only available for the post-2202 format).
+
+ TODO: Investigate whether it is worth inserting additional rampup
+ trials in TPUT tests to ensure all sessions are transitory before
+ the next measurement.
:param node: DUT node.
:param proto: Required protocol - TCP/UDP/ICMP.
:type node: dict
:type proto: str
- :returns: Number of established NAT44 sessions.
+ :returns: Number of active established NAT44 sessions.
:rtype: int
:raises ValueError: If not supported protocol.
+ :raises RuntimeError: If output is not formatted as expected.
"""
- nat44_data = dict()
- if proto in [u"UDP", u"TCP", u"ICMP"]:
- for line in NATUtil.show_nat44_summary(node).splitlines():
- sum_k, sum_v = line.split(u":") if u":" in line \
- else (line, None)
- nat44_data[sum_k] = sum_v.strip() if isinstance(sum_v, str) \
- else sum_v
- else:
+ proto_l = proto.strip().lower()
+ if proto_l not in [u"udp", u"tcp", u"icmp"]:
raise ValueError(f"Unsupported protocol: {proto}!")
- return nat44_data.get(f"total {proto.lower()} sessions", 0)
+ summary_text = NATUtil.show_nat44_summary(node)
+ summary_lines = summary_text.splitlines()
+ # Output from VPP v22.02 and before, delete when no longer needed.
+ pattern_2202 = f"total {proto_l} sessions:"
+ if pattern_2202 in summary_text:
+ for line in summary_lines:
+ if pattern_2202 not in line:
+ continue
+ return int(line.split(u":", 1)[1].strip())
+ # Post-2202, the proto info and session info are not on the same line.
+ found = False
+ for line in summary_lines:
+ if not found:
+ if f"{proto_l} sessions:" in line:
+ found = True
+ continue
+ # Proto is found, find the line we are interested in.
+ if u"total" not in line:
+ raise RuntimeError(f"show nat summary: no {proto} total.")
+ # We have the line with relevant numbers.
+ total_part, timed_out_part = line.split(u"(", 1)
+ timed_out_part = timed_out_part.split(u")", 1)[0]
+ total_count = int(total_part.split(u":", 1)[1].strip())
+ timed_out_count = int(timed_out_part.split(u":", 1)[1].strip())
+ active_count = total_count - timed_out_count
+ return active_count
+ raise RuntimeError(u"Unknown format of show nat44 summary")
# DET44 PAPI calls
# DET44 means deterministic mode of NAT44
@@ -402,10 +426,9 @@ class NATUtil:
"""Delete and re-add the deterministic NAT mapping."""
with PapiSocketExecutor(node) as papi_exec:
args_in[u"is_add"] = False
- papi_exec.add(cmd, **args_in)
+ papi_exec.add(cmd, **args_in).get_reply(err_msg)
args_in[u"is_add"] = True
- papi_exec.add(cmd, **args_in)
- papi_exec.get_replies(err_msg)
+ papi_exec.add(cmd, **args_in).get_reply(err_msg)
return resetter
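
The post-2202 branch of get_nat44_sessions_number splits the totals line on the opening parenthesis. A tiny standalone illustration of that string handling follows; the sample line only mimics the shape the parser expects and is not verbatim VPP output.

    line = "    total sessions: 1024 (timed out: 2)"
    total_part, timed_out_part = line.split("(", 1)
    timed_out_part = timed_out_part.split(")", 1)[0]
    total_count = int(total_part.split(":", 1)[1].strip())          # 1024
    timed_out_count = int(timed_out_part.split(":", 1)[1].strip())  # 2
    active_count = total_count - timed_out_count                    # 1022
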
diff --git a/resources/libraries/python/NGINX/NGINXTools.py b/resources/libraries/python/NGINX/NGINXTools.py
new file mode 100644
index 0000000000..941fe733e7
--- /dev/null
+++ b/resources/libraries/python/NGINX/NGINXTools.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2022 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""This module implements initialization and cleanup of NGINX framework."""
+
+from robot.api import logger
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.ssh import exec_cmd_no_error, exec_cmd
+from resources.libraries.python.topology import NodeType
+from resources.libraries.python.NginxUtil import NginxUtil
+
+
+class NGINXTools:
+ """This class implements:
+ - Initialization of NGINX environment,
+ - Cleanup of NGINX environment.
+ """
+
+ @staticmethod
+ def cleanup_nginx_framework(node, nginx_ins_path):
+ """
+ Clean up the NGINX framework on the DUT node.
+
+ :param node: Node to clean up NGINX on.
+ :param nginx_ins_path: NGINX install path.
+ :type node: dict
+ :type nginx_ins_path: str
+ :raises RuntimeError: If it fails to clean up NGINX.
+ """
+ check_path_cmd = NginxUtil.get_cmd_options(path=nginx_ins_path)
+ exec_cmd_no_error(node, check_path_cmd, timeout=180,
+ message=u"Check NGINX install path failed!")
+ command = f"rm -rf {nginx_ins_path}"
+ message = u"Cleanup the NGINX failed!"
+ exec_cmd_no_error(node, command, timeout=180, message=message)
+
+ @staticmethod
+ def cleanup_nginx_framework_on_all_duts(nodes, nginx_ins_path):
+ """
+ Clean up the NGINX framework on all DUT nodes.
+
+ :param nodes: Nodes to clean up NGINX on.
+ :param nginx_ins_path: NGINX install path.
+ :type nodes: dict
+ :type nginx_ins_path: str
+ :raises RuntimeError: If it fails to clean up NGINX.
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ NGINXTools.cleanup_nginx_framework(node, nginx_ins_path)
+
+ @staticmethod
+ def install_original_nginx_framework(node, pkg_dir, nginx_version):
+ """
+ Prepare the NGINX framework on the DUT node.
+
+ :param node: Node from topology file.
+ :param pkg_dir: LDP NGINX install directory.
+ :param nginx_version: NGINX Version.
+ :type node: dict
+ :type pkg_dir: str
+ :type nginx_version: str
+ :raises RuntimeError: If command returns nonzero return code.
+ """
+ cmd = f"test -f {pkg_dir}/nginx-{nginx_version}/sbin/nginx"
+ ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
+ if ret_code == 0:
+ return
+ command = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}" \
+ f"/entry/install_nginx.sh nginx-{nginx_version}"
+ message = u"Install the NGINX failed!"
+ exec_cmd_no_error(node, command, sudo=True, timeout=600,
+ message=message)
+
+ @staticmethod
+ def install_vsap_nginx_on_dut(node, pkg_dir):
+ """
+ Prepare the VSAP NGINX framework on the DUT node.
+
+ :param node: Node from topology file.
+ :param pkg_dir: Path to directory where packages are stored.
+ :type node: dict
+ :type pkg_dir: str
+ :raises RuntimeError: If command returns nonzero return code.
+ """
+ command = u". /etc/lsb-release; echo \"${DISTRIB_ID}\""
+ stdout, _ = exec_cmd_no_error(node, command)
+
+ if stdout.strip() == u"Ubuntu":
+ logger.console(u"NGINX install on DUT... ")
+ exec_cmd_no_error(
+ node, u"apt-get purge -y 'vsap*' || true", timeout=120,
+ sudo=True
+ )
+ exec_cmd_no_error(
+ node, f"dpkg -i --force-all {pkg_dir}vsap-nginx*.deb",
+ timeout=120, sudo=True,
+ message=u"Installation of vsap-nginx failed!"
+ )
+
+ exec_cmd_no_error(node, u"dpkg -l | grep vsap*",
+ sudo=True)
+
+ logger.console(u"Completed!\n")
+ else:
+ logger.console(u"Ubuntu need!\n")
+
+ @staticmethod
+ def install_nginx_framework_on_all_duts(nodes, pkg_dir, nginx_version=None):
+ """
+ Prepare the NGINX framework on all DUTs.
+
+ :param nodes: Nodes from topology file.
+ :param pkg_dir: Path to directory where packages are stored.
+ :param nginx_version: NGINX version.
+ :type nodes: dict
+ :type pkg_dir: str
+ :type nginx_version: str
+ """
+
+ for node in list(nodes.values()):
+ if node[u"type"] == NodeType.DUT:
+ if nginx_version:
+ NGINXTools.install_original_nginx_framework(node, pkg_dir,
+ nginx_version)
+ else:
+ NGINXTools.install_vsap_nginx_on_dut(node, pkg_dir)
diff --git a/resources/libraries/python/parsers/__init__.py b/resources/libraries/python/NGINX/__init__.py
index 1b58a3cf17..d828cbe7cb 100644
--- a/resources/libraries/python/parsers/__init__.py
+++ b/resources/libraries/python/NGINX/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 Cisco and/or its affiliates.
+# Copyright (c) 2021 Intel and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -12,5 +12,5 @@
# limitations under the License.
"""
-__init__ file for resources/libraries/python/parsers
+__init__ file for directory resources/libraries/python/NGINX
"""
diff --git a/resources/libraries/python/Namespaces.py b/resources/libraries/python/Namespaces.py
index 4bea8b5575..d78d2f6d44 100644
--- a/resources/libraries/python/Namespaces.py
+++ b/resources/libraries/python/Namespaces.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/NginxConfigGenerator.py b/resources/libraries/python/NginxConfigGenerator.py
new file mode 100644
index 0000000000..1a0f5f077a
--- /dev/null
+++ b/resources/libraries/python/NginxConfigGenerator.py
@@ -0,0 +1,244 @@
+# Copyright (c) 2021 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Nginx Configuration File Generator library.
+"""
+
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.topology import NodeType
+from resources.libraries.python.NginxUtil import NginxUtil
+
+__all__ = [u"NginxConfigGenerator"]
+
+
+class NginxConfigGenerator:
+ """NGINX Configuration File Generator."""
+
+ def __init__(self):
+ """Initialize library."""
+ # VPP Node to apply configuration on
+ self._node = u""
+ # NGINX Startup config location
+ self._nginx_path = u"/usr/local/nginx/"
+ # Serialized NGINX configuration
+ self._nginx_config = u""
+ # VPP Configuration
+ self._nodeconfig = dict()
+
+ def set_node(self, node):
+ """Set DUT node.
+
+ :param node: Node to store configuration on.
+ :type node: dict
+ :raises RuntimeError: If Node type is not DUT.
+ """
+ if node[u"type"] != NodeType.DUT:
+ raise RuntimeError(
+ u"Startup config can only be applied to DUTnode."
+ )
+ self._node = node
+
+ def set_nginx_path(self, packages_dir, nginx_version):
+ """Set NGINX Conf Name.
+
+ :param packages_dir: NGINX install path.
+ :param nginx_version: Test NGINX version.
+ :type packages_dir: str
+ :type nginx_version: str
+ :raises RuntimeError: If Node type is not DUT.
+ """
+ if nginx_version:
+ self._nginx_path = f"{packages_dir}/nginx-{nginx_version}"
+
+ def add_http_server_listen(self, value):
+ """Add Http Server listen port configuration."""
+ path = [u"http", u"server", u"listen"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_http_server_root(self, value=u"html"):
+ """Add Http Server root configuration."""
+ path = [u"http", u"server", u"root"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_http_server_index(self, value=u"index.html index.htm"):
+ """Add Http Server index configuration."""
+ path = [u"http", u"server", u"index"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_config_item(self, config, value, path):
+ """Add NGINX configuration item.
+
+ :param config: Startup configuration of node.
+ :param value: Value to insert.
+ :param path: Path where to insert item.
+ :type config: dict
+ :type value: str
+ :type path: list
+ """
+ if len(path) == 1:
+ config[path[0]] = value
+ return
+ if path[0] not in config:
+ config[path[0]] = dict()
+ elif isinstance(config[path[0]], str):
+ config[path[0]] = dict() if config[path[0]] == u"" \
+ else {config[path[0]]: u""}
+ self.add_config_item(config[path[0]], value, path[1:])
+
+ def dump_config(self, obj, level=-1):
+ """Dump the startup configuration in NGINX config format.
+
+ :param obj: Python Object to print.
+ :param level: Nested level for indentation.
+ :type obj: Obj
+ :type level: int
+ :returns: nothing
+ """
+ indent = u" "
+ if level >= 0:
+ self._nginx_config += f"{level * indent}{{\n"
+ if isinstance(obj, dict):
+ for key, val in obj.items():
+ if hasattr(val, u"__iter__") and not isinstance(val, str):
+ self._nginx_config += f"{(level + 1) * indent}{key}\n"
+ self.dump_config(val, level + 1)
+ else:
+ self._nginx_config += f"{(level + 1) * indent}" \
+ f"{key} {val};\n"
+ else:
+ for val in obj:
+ self._nginx_config += f"{(level + 1) * indent}{val};\n"
+ if level >= 0:
+ self._nginx_config += f"{level * indent}}}\n"
+
+ def write_config(self, filename=None):
+ """Generate and write NGINX startup configuration to file.
+
+ :param filename: NGINX configuration file name.
+ :type filename: str
+ """
+ if filename is None:
+ filename = f"{self._nginx_path}/conf/nginx.conf"
+ self.dump_config(self._nodeconfig)
+ cmd = f"echo \"{self._nginx_config}\" | sudo tee {filename}"
+ exec_cmd_no_error(
+ self._node, cmd, message=u"Writing config file failed!"
+ )
+
+ def add_http_server_location(self, size):
+ """Add Http Server location configuration.
+
+ :param size: File size.
+ :type size: int
+ """
+ if size == 0:
+ files = u"return"
+ elif size >= 1024:
+ files = f"{int(size / 1024)}KB.json"
+ else:
+ files = f"{size}B.json"
+ key = f"{files}"
+ size_str = size * u"x"
+ value = "200 '%s'" % size_str
+ path = [u"http", u"server", f"location /{key}", u"return"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_http_access_log(self, value=u"off"):
+ """Add Http access_log configuration."""
+ path = [u"http", u"access_log"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_http_include(self, value=u"mime.types"):
+ """Add Http include configuration."""
+ path = [u"http", u"include"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_http_default_type(self, value=u"application/octet-stream"):
+ """Add Http default_type configuration."""
+ path = [u"http", u"default_type"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_http_sendfile(self, value=u"on"):
+ """Add Http sendfile configuration."""
+ path = [u"http", u"sendfile"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_http_keepalive_timeout(self, value):
+ """Add Http keepalive alive timeout configuration."""
+ path = [u"http", u"keepalive_timeout"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_http_keepalive_requests(self, value):
+ """Add Http keepalive alive requests configuration."""
+ path = [u"http", u"keepalive_requests"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_events_use(self, value=u"epoll"):
+ """Add Events use configuration."""
+ path = [u"events", u"use"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_events_worker_connections(self, value=10240):
+ """Add Events worker connections configuration."""
+ path = [u"events", u"worker_connections"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_events_accept_mutex(self, value=u"off"):
+ """Add Events accept mutex configuration."""
+ path = [u"events", u"accept_mutex"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_events_multi_accept(self, value=u"off"):
+ """Add Events multi accept configuration."""
+ path = [u"events", u"multi_accept"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_worker_rlimit_nofile(self, value=10240):
+ """Add Events worker rlimit nofile configuration."""
+ path = [u"worker_rlimit_nofile"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_master_process(self, value=u"on"):
+ """Add master process configuration."""
+ path = [u"master_process"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_daemon(self, value=u"off"):
+ """Add daemon configuration."""
+ path = [u"daemon"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_worker_processes(self, value, smt_used):
+ """Add worker processes configuration."""
+ # NGINX workers : VPP physical-core workers = 2:1
+ if smt_used:
+ value = value * 4
+ else:
+ value = value * 2
+ path = [u"worker_processes"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def apply_config(self, filename=None, verify_nginx=True):
+ """Generate and write NGINX configuration to file and
+ verify configuration.
+
+ :param filename: NGINX configuration file name.
+ :param verify_nginx: Verify NGINX configuration.
+ :type filename: str
+ :type verify_nginx: bool
+ """
+ self.write_config(filename=filename)
+
+ app_path = f"{self._nginx_path}/sbin/nginx"
+ if verify_nginx:
+ NginxUtil.nginx_config_verify(self._node, app_path)
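
A typical call sequence for the generator, sketched under assumptions: dut_node is a DUT entry from the topology dictionary, and the package directory and version strings are illustrative.

    from resources.libraries.python.NginxConfigGenerator import NginxConfigGenerator

    nginx_conf = NginxConfigGenerator()
    nginx_conf.set_node(dut_node)
    nginx_conf.set_nginx_path("/tmp/nginx_packages", "1.21.5")
    nginx_conf.add_daemon("off")
    nginx_conf.add_events_use("epoll")
    nginx_conf.add_http_server_listen("8080")
    nginx_conf.add_http_server_location(1024)  # adds a "location /1KB.json" block
    nginx_conf.apply_config()  # writes conf/nginx.conf, then verifies it with the nginx binary
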
diff --git a/resources/libraries/python/NginxUtil.py b/resources/libraries/python/NginxUtil.py
new file mode 100644
index 0000000000..a19ac37291
--- /dev/null
+++ b/resources/libraries/python/NginxUtil.py
@@ -0,0 +1,124 @@
+# Copyright (c) 2021 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""NGINX Utilities Library."""
+
+from resources.libraries.python.OptionString import OptionString
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.topology import NodeType
+from resources.libraries.python.DUTSetup import DUTSetup
+
+
+class NginxUtil:
+ """Utilities for NGINX."""
+
+ @staticmethod
+ def get_cmd_options(**kwargs):
+ """Create parameters options.
+
+ :param kwargs: Dict of cmd parameters.
+ :type kwargs: dict
+ :returns: cmd parameters.
+ :rtype: OptionString
+ """
+ cmd_options = OptionString()
+ nginx_path = kwargs.get(u"path", u"/usr/local/nginx")
+ cmd_options.add(nginx_path)
+ options = OptionString(prefix=u"-")
+ # Show Nginx Version
+ options.add(u"v")
+ # Verify Configuration
+ options.add(u"t")
+ # Send signal to a master process: stop, quit, reopen.
+ options.add_with_value_from_dict(
+ u"s", u"signal", kwargs
+ )
+ # Set prefix path (default: /usr/local/nginx/).
+ options.add_with_value_from_dict(
+ u"p", u"prefix", kwargs
+ )
+ # Set configuration file (default: conf/nginx.conf).
+ options.add_with_value_from_dict(
+ u"c", u"filename", kwargs
+ )
+ # Set global directives out of configuration file
+ options.add_with_value_from_dict(
+ u"g", u"directives", kwargs
+ )
+ cmd_options.extend(options)
+ return cmd_options
+
+ @staticmethod
+ def nginx_cmd_stop(node, path):
+ """Stop NGINX cmd app on node.
+ :param node: Topology node.
+ :param path: Nginx install path.
+ :type node: dict
+ :type path: str
+ :returns: nothing
+ """
+ cmd_options = NginxUtil.get_cmd_options(path=path, signal=u"stop")
+
+ exec_cmd_no_error(node, cmd_options, sudo=True, disconnect=True,
+ message=u"Nginx stop failed!")
+
+ @staticmethod
+ def nginx_cmd_start(node, path, filename):
+ """Start NGINX cmd app on node.
+ :param node: Topology node.
+ :param path: Nginx install path.
+ :param filename: Nginx conf name.
+ :type node: dict
+ :type path: str
+ :type filename: str
+
+ :returns: nothing
+ """
+ cmd_options = NginxUtil.get_cmd_options(path=path,
+ filename=filename)
+
+ exec_cmd_no_error(node, cmd_options, sudo=True, disconnect=True,
+ message=u"Nginx start failed!")
+
+ @staticmethod
+ def nginx_config_verify(node, path):
+ """Start NGINX cmd app on node.
+ :param node: Topology node.
+ :param path: Nginx install path.
+ :type node: dict
+ :type path: str
+ :returns: nothing
+ """
+ cmd_options = NginxUtil.get_cmd_options(path=path)
+ exec_cmd_no_error(node, cmd_options, sudo=True, disconnect=True,
+ message=u"Nginx Config failed!")
+
+ @staticmethod
+ def taskset_nginx_pid_to_idle_cores(node, cpu_idle_list):
+ """Set idle cpus to NGINX pid on node.
+
+ :param node: Topology node.
+ :param cpu_idle_list: Idle Cpus.
+ :type node: dict
+ :type cpu_idle_list: list
+ :returns: nothing
+ """
+ if node[u"type"] != NodeType.DUT:
+ raise RuntimeError(u'Node type is not a DUT!')
+ pids = DUTSetup.get_pid(node, u"nginx")
+ for index, pid in enumerate(pids):
+ cmd = f"taskset -pc {cpu_idle_list[index]} {pid}"
+ exec_cmd_no_error(
+ node, cmd, sudo=True, timeout=180,
+ message=u"taskset cores to nginx pid failed!"
+ )
diff --git a/resources/libraries/python/NodePath.py b/resources/libraries/python/NodePath.py
index 7f24b0e4fc..5b445bc593 100644
--- a/resources/libraries/python/NodePath.py
+++ b/resources/libraries/python/NodePath.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -92,7 +92,7 @@ class NodePath:
self._path = []
self._path_iter = []
- def compute_path(self, always_same_link=True):
+ def compute_path(self, always_same_link=True, topo_has_dut=True):
"""Compute path for added nodes.
.. note:: First add at least two nodes to the topology.
@@ -100,19 +100,26 @@ class NodePath:
:param always_same_link: If True use always same link between two nodes
in path. If False use different link (if available)
between two nodes if one link was used before.
+ :param topo_has_dut: If False, compute a TG back-to-back path (no DUT).
:type always_same_link: bool
+ :type topo_has_dut: bool
:raises RuntimeError: If not enough nodes for path.
"""
nodes = self._nodes
- if len(nodes) < 2:
+ if len(nodes) < 2 and topo_has_dut:
raise RuntimeError(u"Not enough nodes to compute path")
for idx in range(0, len(nodes) - 1):
topo = Topology()
node1 = nodes[idx]
- node2 = nodes[idx + 1]
n1_list = self._nodes_filter[idx]
- n2_list = self._nodes_filter[idx + 1]
+ if topo_has_dut:
+ node2 = nodes[idx + 1]
+ n2_list = self._nodes_filter[idx + 1]
+ else:
+ node2 = node1
+ n2_list = n1_list
+
links = topo.get_active_connecting_links(
node1, node2, filter_list_node1=n1_list,
filter_list_node2=n2_list
@@ -139,8 +146,11 @@ class NodePath:
link = l_set[0]
self._links.append(link)
+
+ use_subsequent = not topo_has_dut
interface1 = topo.get_interface_by_link_name(node1, link)
- interface2 = topo.get_interface_by_link_name(node2, link)
+ interface2 = topo.get_interface_by_link_name(node2, link,
+ use_subsequent)
self._path.append((interface1, node1))
self._path.append((interface2, node2))
@@ -207,8 +217,9 @@ class NodePath:
raise RuntimeError(u"No path for topology")
return self._path[-2]
- def compute_circular_topology(self, nodes, filter_list=None, nic_pfs=1,
- always_same_link=False, topo_has_tg=True):
+ def compute_circular_topology(
+ self, nodes, filter_list=None, nic_pfs=1,
+ always_same_link=False, topo_has_tg=True, topo_has_dut=True):
"""Return computed circular path.
:param nodes: Nodes to append to the path.
@@ -219,29 +230,44 @@ class NodePath:
between two nodes if one link was used before.
:param topo_has_tg: If True, the topology has a TG node. If False,
the topology consists entirely of DUT nodes.
+ :param topo_has_dut: If True, the topology has DUT node(s). If False,
+ the topology consists entirely of TG nodes.
:type nodes: dict
:type filter_list: list of strings
:type nic_pfs: int
:type always_same_link: bool
:type topo_has_tg: bool
+ :type topo_has_dut: bool
:returns: Topology information dictionary.
:rtype: dict
:raises RuntimeError: If unsupported combination of parameters.
"""
t_dict = dict()
- duts = [key for key in nodes if u"DUT" in key]
- t_dict[u"duts"] = duts
- t_dict[u"duts_count"] = len(duts)
- t_dict[u"int"] = u"pf"
+ t_dict[u"hosts"] = set()
+ if topo_has_dut:
+ duts = [key for key in nodes if u"DUT" in key]
+ for host in [nodes[dut][u"host"] for dut in duts]:
+ t_dict[u"hosts"].add(host)
+ t_dict[u"duts"] = duts
+ t_dict[u"duts_count"] = len(duts)
+ t_dict[u"int"] = u"pf"
for _ in range(0, nic_pfs // 2):
if topo_has_tg:
- self.append_node(nodes[u"TG"])
- for dut in duts:
- self.append_node(nodes[dut], filter_list=filter_list)
+ if topo_has_dut:
+ self.append_node(nodes[u"TG"])
+ else:
+ self.append_node(nodes[u"TG"], filter_list=filter_list)
+ if topo_has_dut:
+ for dut in duts:
+ self.append_node(nodes[dut], filter_list=filter_list)
if topo_has_tg:
- self.append_node(nodes[u"TG"])
- self.compute_path(always_same_link)
+ t_dict[u"hosts"].add(nodes[u"TG"][u"host"])
+ if topo_has_dut:
+ self.append_node(nodes[u"TG"])
+ else:
+ self.append_node(nodes[u"TG"], filter_list=filter_list)
+ self.compute_path(always_same_link, topo_has_dut)
n_idx = 0 # node index
t_idx = 1 # TG interface index
@@ -257,7 +283,7 @@ class NodePath:
i_pfx = f"if{t_idx}" # [backwards compatible] interface prefix
n_idx = 0
t_idx = t_idx + 1
- elif topo_has_tg:
+ elif topo_has_tg and topo_has_dut:
# Each node has 2 interfaces, starting with 1
# Calculate prefixes appropriately for current
# path topology nomenclature:
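As a hedged illustration of the new topo_has_dut flag (the node source and NIC filter below are hypothetical), a TG-only back-to-back topology could be computed roughly like this:

    from resources.libraries.python.NodePath import NodePath

    path = NodePath()
    # "nodes" would come from the parsed topology file; with
    # topo_has_dut=False only the TG entry is actually used.
    info = path.compute_circular_topology(
        nodes, filter_list=["Intel-X710"], nic_pfs=2,
        always_same_link=False, topo_has_tg=True, topo_has_dut=False,
    )
    print(info["hosts"])  # contains only the TG host in this case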
diff --git a/resources/libraries/python/NsimUtil.py b/resources/libraries/python/NsimUtil.py
index 85e41a4553..757da067cb 100644
--- a/resources/libraries/python/NsimUtil.py
+++ b/resources/libraries/python/NsimUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -51,15 +51,8 @@ class NsimUtil():
packets_per_reorder=vpp_nsim_attr.get(u"packets_per_reorder", 0)
)
err_msg = f"Failed to configure NSIM on host {host}"
- try:
- with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_reply(err_msg)
- except AssertionError:
- # Perhaps VPP is an older version
- old_cmd = u"nsim_configure"
- args.pop(u"packets_per_reorder")
- with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(old_cmd, **args).get_reply(err_msg)
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
if vpp_nsim_attr[u"output_nsim_enable"]:
cmd = u"nsim_output_feature_enable_disable"
diff --git a/resources/libraries/python/OptionString.py b/resources/libraries/python/OptionString.py
index bdb5ee2b4c..35988c4b71 100644
--- a/resources/libraries/python/OptionString.py
+++ b/resources/libraries/python/OptionString.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/PLRsearch/Integrator.py b/resources/libraries/python/PLRsearch/Integrator.py
index 331bd8475b..cc8f838fe6 100644
--- a/resources/libraries/python/PLRsearch/Integrator.py
+++ b/resources/libraries/python/PLRsearch/Integrator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -189,12 +189,15 @@ def estimate_nd(communication_pipe, scale_coeff=8.0, trace_enabled=False):
:raises numpy.linalg.LinAlgError: If the focus shape gets singular
(due to rounding errors). Try changing scale_coeff.
"""
- debug_list = list()
- trace_list = list()
+ debug_list = []
+ trace_list = []
# Block until input object appears.
- dimension, dilled_function, param_focus_tracker, max_samples = (
- communication_pipe.recv()
- )
+ (
+ dimension,
+ dilled_function,
+ param_focus_tracker,
+ max_samples,
+ ) = communication_pipe.recv()
debug_list.append(
f"Called with param_focus_tracker {param_focus_tracker!r}"
)
@@ -237,39 +240,47 @@ def estimate_nd(communication_pipe, scale_coeff=8.0, trace_enabled=False):
if max_samples and samples >= max_samples:
break
sample_point = generate_sample(
- param_focus_tracker.averages, param_focus_tracker.covariance_matrix,
- dimension, scale_coeff
+ param_focus_tracker.averages,
+ param_focus_tracker.covariance_matrix,
+ dimension,
+ scale_coeff,
)
- trace(u"sample_point", sample_point)
+ trace("sample_point", sample_point)
samples += 1
- trace(u"samples", samples)
+ trace("samples", samples)
value, log_weight = value_logweight_function(trace, *sample_point)
- trace(u"value", value)
- trace(u"log_weight", log_weight)
- trace(u"focus tracker before adding", param_focus_tracker)
+ trace("value", value)
+ trace("log_weight", log_weight)
+ trace("focus tracker before adding", param_focus_tracker)
# Update focus related statistics.
param_distance = param_focus_tracker.add_without_dominance_get_distance(
sample_point, log_weight
)
# The code above looked at weight (not importance).
# The code below looks at importance (not weight).
- log_rarity = param_distance / 2.0
- trace(u"log_rarity", log_rarity)
+ log_rarity = param_distance / 2.0 / scale_coeff
+ trace("log_rarity", log_rarity)
log_importance = log_weight + log_rarity
- trace(u"log_importance", log_importance)
+ trace("log_importance", log_importance)
value_tracker.add(value, log_importance)
# Update sampled statistics.
param_sampled_tracker.add_get_shift(sample_point, log_importance)
debug_list.append(f"integrator used {samples!s} samples")
debug_list.append(
- u" ".join([
- u"value_avg", str(value_tracker.average),
- u"param_sampled_avg", repr(param_sampled_tracker.averages),
- u"param_sampled_cov", repr(param_sampled_tracker.covariance_matrix),
- u"value_log_variance", str(value_tracker.log_variance),
- u"value_log_secondary_variance",
- str(value_tracker.secondary.log_variance)
- ])
+ " ".join(
+ [
+ "value_avg",
+ str(value_tracker.average),
+ "param_sampled_avg",
+ repr(param_sampled_tracker.averages),
+ "param_sampled_cov",
+ repr(param_sampled_tracker.covariance_matrix),
+ "value_log_variance",
+ str(value_tracker.log_variance),
+ "value_log_secondary_variance",
+ str(value_tracker.secondary.log_variance),
+ ]
+ )
)
communication_pipe.send(
(value_tracker, param_focus_tracker, debug_list, trace_list, samples)
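The rescaled rarity term is the substantive change in this hunk; a minimal stand-alone sketch of the importance weighting it implements (the function name and sample values are illustrative only):

    import math

    def combine_log_importance(log_weight, param_distance, scale_coeff=8.0):
        # As in estimate_nd above: the focus-tracker distance, halved and
        # scaled down by scale_coeff, is added to the log weight, so rarer
        # (more distant) samples count for more when tracking the value.
        log_rarity = param_distance / 2.0 / scale_coeff
        return log_weight + log_rarity

    print(combine_log_importance(log_weight=-3.0, param_distance=4.0))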
diff --git a/resources/libraries/python/PLRsearch/PLRsearch.py b/resources/libraries/python/PLRsearch/PLRsearch.py
index 226b482d76..326aa2e2d2 100644
--- a/resources/libraries/python/PLRsearch/PLRsearch.py
+++ b/resources/libraries/python/PLRsearch/PLRsearch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -53,8 +53,14 @@ class PLRsearch:
log_xerfcx_10 = math.log(xerfcx_limit - math.exp(10) * erfcx(math.exp(10)))
def __init__(
- self, measurer, trial_duration_per_trial, packet_loss_ratio_target,
- trial_number_offset=0, timeout=7200.0, trace_enabled=False):
+ self,
+ measurer,
+ trial_duration_per_trial,
+ packet_loss_ratio_target,
+ trial_number_offset=0,
+ timeout=7200.0,
+ trace_enabled=False,
+ ):
"""Store rate measurer and additional parameters.
The measurer must never report negative loss count.
@@ -176,7 +182,7 @@ class PLRsearch:
f"Started search with min_rate {min_rate!r}, "
f"max_rate {max_rate!r}"
)
- trial_result_list = list()
+ trial_result_list = []
trial_number = self.trial_number_offset
focus_trackers = (None, None)
transmit_rate = (min_rate + max_rate) / 2.0
@@ -186,34 +192,54 @@ class PLRsearch:
trial_number += 1
logging.info(f"Trial {trial_number!r}")
results = self.measure_and_compute(
- self.trial_duration_per_trial * trial_number, transmit_rate,
- trial_result_list, min_rate, max_rate, focus_trackers
+ self.trial_duration_per_trial * trial_number,
+ transmit_rate,
+ trial_result_list,
+ min_rate,
+ max_rate,
+ focus_trackers,
)
measurement, average, stdev, avg1, avg2, focus_trackers = results
+ # Workaround for unsent packets and other anomalies.
+ measurement.plr_loss_count = min(
+ measurement.intended_count,
+ int(measurement.intended_count * measurement.loss_ratio + 0.9),
+ )
+ logging.debug(
+ f"loss ratio {measurement.plr_loss_count}"
+ f" / {measurement.intended_count}"
+ )
zeros += 1
# TODO: Ratio of fill rate to drain rate seems to have
# exponential impact. Make it configurable, or is 4:3 good enough?
- if measurement.loss_fraction >= self.packet_loss_ratio_target:
+ if measurement.plr_loss_count >= (
+ measurement.intended_count * self.packet_loss_ratio_target
+ ):
for _ in range(4 * zeros):
- lossy_loads.append(measurement.target_tr)
- if measurement.loss_count > 0:
+ lossy_loads.append(measurement.intended_load)
+ lossy_loads.sort()
zeros = 0
- lossy_loads.sort()
+ logging.debug("High enough loss, lossy loads added.")
+ else:
+ logging.debug(
+ f"Not a high loss, zero counter bumped to {zeros}."
+ )
if stop_time <= time.time():
return average, stdev
trial_result_list.append(measurement)
if (trial_number - self.trial_number_offset) <= 1:
next_load = max_rate
elif (trial_number - self.trial_number_offset) <= 3:
- next_load = (measurement.relative_receive_rate / (
- 1.0 - self.packet_loss_ratio_target))
+ next_load = measurement.relative_forwarding_rate / (
+ 1.0 - self.packet_loss_ratio_target
+ )
else:
next_load = (avg1 + avg2) / 2.0
if zeros > 0:
if lossy_loads[0] > next_load:
diminisher = math.pow(2.0, 1 - zeros)
next_load = lossy_loads[0] + diminisher * next_load
- next_load /= (1.0 + diminisher)
+ next_load /= 1.0 + diminisher
# On zero measurement, we need to drain obsoleted low losses
# even if we did not use them to increase next_load,
# in order to get to usable losses at higher loads.
@@ -263,22 +289,22 @@ class PLRsearch:
# TODO: chi is from https://en.wikipedia.org/wiki/Nondimensionalization
chi = (load - mrr) / spread
chi0 = -mrr / spread
- trace(u"stretch: load", load)
- trace(u"mrr", mrr)
- trace(u"spread", spread)
- trace(u"chi", chi)
- trace(u"chi0", chi0)
+ trace("stretch: load", load)
+ trace("mrr", mrr)
+ trace("spread", spread)
+ trace("chi", chi)
+ trace("chi0", chi0)
if chi > 0:
log_lps = math.log(
load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread
)
- trace(u"big loss direct log_lps", log_lps)
+ trace("big loss direct log_lps", log_lps)
else:
two_positive = log_plus(chi, 2 * chi0 - log_2)
two_negative = log_plus(chi0, 2 * chi - log_2)
if two_positive <= two_negative:
log_lps = log_minus(chi, chi0) + log_spread
- trace(u"small loss crude log_lps", log_lps)
+ trace("small loss crude log_lps", log_lps)
return log_lps
two = log_minus(two_positive, two_negative)
three_positive = log_plus(two_positive, 3 * chi - log_3)
@@ -286,11 +312,11 @@ class PLRsearch:
three = log_minus(three_positive, three_negative)
if two == three:
log_lps = two + log_spread
- trace(u"small loss approx log_lps", log_lps)
+ trace("small loss approx log_lps", log_lps)
else:
log_lps = math.log(log_plus(0, chi) - log_plus(0, chi0))
log_lps += log_spread
- trace(u"small loss direct log_lps", log_lps)
+ trace("small loss direct log_lps", log_lps)
return log_lps
@staticmethod
@@ -329,26 +355,26 @@ class PLRsearch:
# TODO: The stretch sign is just to have less minuses. Worth changing?
chi = (mrr - load) / spread
chi0 = mrr / spread
- trace(u"Erf: load", load)
- trace(u"mrr", mrr)
- trace(u"spread", spread)
- trace(u"chi", chi)
- trace(u"chi0", chi0)
+ trace("Erf: load", load)
+ trace("mrr", mrr)
+ trace("spread", spread)
+ trace("chi", chi)
+ trace("chi0", chi0)
if chi >= -1.0:
- trace(u"positive, b roughly bigger than m", None)
+ trace("positive, b roughly bigger than m", None)
if chi > math.exp(10):
first = PLRsearch.log_xerfcx_10 + 2 * (math.log(chi) - 10)
- trace(u"approximated first", first)
+ trace("approximated first", first)
else:
first = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi))
- trace(u"exact first", first)
+ trace("exact first", first)
first -= chi * chi
second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
second -= chi0 * chi0
intermediate = log_minus(first, second)
- trace(u"first", first)
+ trace("first", first)
else:
- trace(u"negative, b roughly smaller than m", None)
+ trace("negative, b roughly smaller than m", None)
exp_first = PLRsearch.xerfcx_limit + chi * erfcx(-chi)
exp_first *= math.exp(-chi * chi)
exp_first -= 2 * chi
@@ -359,17 +385,17 @@ class PLRsearch:
second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
second -= chi0 * chi0
intermediate = math.log(exp_first - math.exp(second))
- trace(u"exp_first", exp_first)
- trace(u"second", second)
- trace(u"intermediate", intermediate)
+ trace("exp_first", exp_first)
+ trace("second", second)
+ trace("intermediate", intermediate)
result = intermediate + math.log(spread) - math.log(erfc(-chi0))
- trace(u"result", result)
+ trace("result", result)
return result
@staticmethod
def find_critical_rate(
- trace, lfit_func, min_rate, max_rate, loss_ratio_target,
- mrr, spread):
+ trace, lfit_func, min_rate, max_rate, loss_ratio_target, mrr, spread
+ ):
"""Given ratio target and parameters, return the achieving offered load.
This is basically an inverse function to lfit_func
@@ -411,12 +437,12 @@ class PLRsearch:
loss_rate = math.exp(lfit_func(trace, rate, mrr, spread))
loss_ratio = loss_rate / rate
if loss_ratio > loss_ratio_target:
- trace(u"halving down", rate)
+ trace("halving down", rate)
rate_hi = rate
elif loss_ratio < loss_ratio_target:
- trace(u"halving up", rate)
+ trace("halving up", rate)
rate_lo = rate
- trace(u"found", rate)
+ trace("found", rate)
return rate
@staticmethod
@@ -426,14 +452,22 @@ class PLRsearch:
Integrator assumes uniform distribution, but over different parameters.
Weight and likelihood are used interchangeably here anyway.
- Each trial has an offered load, a duration and a loss count.
- Fitting function is used to compute the average loss per second.
- Poisson distribution (with average loss per trial) is used
+ Each trial has an intended load, a sent count and a loss count
+ (probably counting unsent packets as loss, as they signal
+ the load is too high for the traffic generator).
+ The fitting function is used to compute the average loss rate.
+ Geometric distribution (with average loss per trial) is used
to get likelihood of one trial result, the overall likelihood
is a product of all trial likelihoods.
As likelihoods can be extremely small, logarithms are tracked instead.
- TODO: Copy ReceiveRateMeasurement from MLRsearch.
+ The current implementation does not use direct loss rate
+ from the fitting function, as the input and output units may not match
+ (e.g. intended load in TCP transactions, loss in packets).
+ Instead, the expected average loss is scaled according to the number
+ of packets actually sent.
+
+ TODO: Copy MeasurementResult from MLRsearch.
:param trace: A multiprocessing-friendly logging function (closure).
:param lfit_func: Fitting function, typically lfit_spread or lfit_erf.
@@ -442,40 +476,47 @@ class PLRsearch:
:param spread: The spread parameter for the fitting function.
:type trace: function (str, object) -> None
:type lfit_func: Function from 3 floats to float.
- :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
+ :type trial_result_list: list of MLRsearch.MeasurementResult
:type mrr: float
:type spread: float
:returns: Logarithm of result weight for given function and parameters.
:rtype: float
"""
log_likelihood = 0.0
- trace(u"log_weight for mrr", mrr)
- trace(u"spread", spread)
+ trace("log_weight for mrr", mrr)
+ trace("spread", spread)
for result in trial_result_list:
- trace(u"for tr", result.target_tr)
- trace(u"lc", result.loss_count)
- trace(u"d", result.duration)
- # _rel_ values use units of target_tr (transactions per second).
+ trace("for tr", result.intended_load)
+ trace("plc", result.plr_loss_count)
+ trace("d", result.intended_duration)
+ # _rel_ values use units of intended_load (transactions per second).
log_avg_rel_loss_per_second = lfit_func(
- trace, result.target_tr, mrr, spread
+ trace, result.intended_load, mrr, spread
)
# _abs_ values use units of loss count (maybe packets).
# There can be multiple packets per transaction.
log_avg_abs_loss_per_trial = log_avg_rel_loss_per_second + math.log(
- result.transmit_count / result.target_tr
+ result.offered_count / result.intended_load
)
# Geometric probability computation for logarithms.
log_trial_likelihood = log_plus(0.0, -log_avg_abs_loss_per_trial)
- log_trial_likelihood *= -result.loss_count
+ log_trial_likelihood *= -result.plr_loss_count
log_trial_likelihood -= log_plus(0.0, +log_avg_abs_loss_per_trial)
log_likelihood += log_trial_likelihood
- trace(u"avg_loss_per_trial", math.exp(log_avg_abs_loss_per_trial))
- trace(u"log_trial_likelihood", log_trial_likelihood)
+ trace("avg_loss_per_trial", math.exp(log_avg_abs_loss_per_trial))
+ trace("log_trial_likelihood", log_trial_likelihood)
return log_likelihood
def measure_and_compute(
- self, trial_duration, transmit_rate, trial_result_list,
- min_rate, max_rate, focus_trackers=(None, None), max_samples=None):
+ self,
+ trial_duration,
+ transmit_rate,
+ trial_result_list,
+ min_rate,
+ max_rate,
+ focus_trackers=(None, None),
+ max_samples=None,
+ ):
"""Perform both measurement and computation at once.
High level steps: Prepare and launch computation worker processes,
@@ -516,7 +557,7 @@ class PLRsearch:
:param max_samples: Limit for integrator samples, for debugging.
:type trial_duration: float
:type transmit_rate: float
- :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
+ :type trial_result_list: list of MLRsearch.MeasurementResult
:type min_rate: float
:type max_rate: float
:type focus_trackers: 2-tuple of None or stat_trackers.VectorStatTracker
@@ -556,6 +597,20 @@ class PLRsearch:
:rtype: multiprocessing.Connection
"""
+ boss_pipe_end, worker_pipe_end = multiprocessing.Pipe()
+ # Starting the worker first. Contrary to documentation
+ # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.connection.Connection
+ # sending of large object without active listener on the other side
+ # results in a deadlock, not in a ValueError.
+ # See https://stackoverflow.com/questions/15137292/large-objects-and-multiprocessing-pipes-and-send
+ worker = multiprocessing.Process(
+ target=Integrator.try_estimate_nd,
+ args=(worker_pipe_end, 5.0, self.trace_enabled),
+ )
+ worker.daemon = True
+ worker.start()
+
+ # Only now it is safe to send the function to compute with.
def value_logweight_func(trace, x_mrr, x_spread):
"""Return log of critical rate and log of likelihood.
@@ -594,22 +649,18 @@ class PLRsearch:
)
value = math.log(
self.find_critical_rate(
- trace, fitting_function, min_rate, max_rate,
- self.packet_loss_ratio_target, mrr, spread
+ trace,
+ fitting_function,
+ min_rate,
+ max_rate,
+ self.packet_loss_ratio_target,
+ mrr,
+ spread,
)
)
return value, logweight
dilled_function = dill.dumps(value_logweight_func)
- boss_pipe_end, worker_pipe_end = multiprocessing.Pipe()
- # Do not send yet, run the worker first to avoid a deadlock.
- # See https://stackoverflow.com/a/15716500
- worker = multiprocessing.Process(
- target=Integrator.try_estimate_nd,
- args=(worker_pipe_end, 10.0, self.trace_enabled)
- )
- worker.daemon = True
- worker.start()
boss_pipe_end.send(
(dimension, dilled_function, focus_tracker, max_samples)
)
@@ -651,14 +702,18 @@ class PLRsearch:
raise RuntimeError(f"Worker {name} did not finish!")
result_or_traceback = pipe.recv()
try:
- value_tracker, focus_tracker, debug_list, trace_list, sampls = (
- result_or_traceback
- )
- except ValueError:
+ (
+ value_tracker,
+ focus_tracker,
+ debug_list,
+ trace_list,
+ sampls,
+ ) = result_or_traceback
+ except ValueError as exc:
raise RuntimeError(
f"Worker {name} failed with the following traceback:\n"
f"{result_or_traceback}"
- )
+ ) from exc
logging.info(f"Logs from worker {name!r}:")
for message in debug_list:
logging.info(message)
@@ -669,8 +724,8 @@ class PLRsearch:
)
return _PartialResult(value_tracker, focus_tracker, sampls)
- stretch_result = stop_computing(u"stretch", stretch_pipe)
- erf_result = stop_computing(u"erf", erf_pipe)
+ stretch_result = stop_computing("stretch", stretch_pipe)
+ erf_result = stop_computing("erf", erf_pipe)
result = PLRsearch._get_result(measurement, stretch_result, erf_result)
logging.info(
f"measure_and_compute finished with trial result "
@@ -692,7 +747,7 @@ class PLRsearch:
:param measurement: The trial measurement obtained during computation.
:param stretch_result: Computation output for stretch fitting function.
:param erf_result: Computation output for erf fitting function.
- :type measurement: ReceiveRateMeasurement
+ :type measurement: MeasurementResult
:type stretch_result: _PartialResult
:type erf_result: _PartialResult
:returns: Combined results.
@@ -717,7 +772,7 @@ class PLRsearch:
# Named tuples, for multiple local variables to be passed as return value.
_PartialResult = namedtuple(
- u"_PartialResult", u"value_tracker focus_tracker samples"
+ "_PartialResult", "value_tracker focus_tracker samples"
)
"""Two stat trackers and sample counter.
@@ -730,8 +785,8 @@ _PartialResult = namedtuple(
"""
_ComputeResult = namedtuple(
- u"_ComputeResult",
- u"measurement avg stdev stretch_exp_avg erf_exp_avg trackers"
+ "_ComputeResult",
+ "measurement avg stdev stretch_exp_avg erf_exp_avg trackers",
)
"""Measurement, 4 computation result values, pair of trackers.
@@ -741,7 +796,7 @@ _ComputeResult = namedtuple(
:param stretch_exp_avg: Stretch fitting function estimate average exponentiated.
:param erf_exp_avg: Erf fitting function estimate average, exponentiated.
:param trackers: Pair of focus trackers to start next iteration with.
-:type measurement: ReceiveRateMeasurement
+:type measurement: MeasurementResult
:type avg: float
:type stdev: float
:type stretch_exp_avg: float
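The geometric likelihood the log_weight docstring describes can be sketched stand-alone as follows (the log_plus helper here is a simplified stand-in for the one in PLRsearch.log_plus, and the numbers are made up):

    import math

    def log_plus(first, second):
        # Simplified stand-in: stable log(exp(first) + exp(second)).
        big, small = max(first, second), min(first, second)
        return big + math.log1p(math.exp(small - big))

    def log_trial_likelihood(log_avg_abs_loss_per_trial, loss_count):
        # Geometric distribution in log space, as in log_weight above:
        # log(m**k / (1 + m)**(k + 1)) where m is the expected average
        # loss per trial and k is the observed loss count.
        log_lik = log_plus(0.0, -log_avg_abs_loss_per_trial)
        log_lik *= -loss_count
        log_lik -= log_plus(0.0, +log_avg_abs_loss_per_trial)
        return log_lik

    # Example: 10 losses expected per trial on average, 7 observed.
    print(log_trial_likelihood(math.log(10.0), 7))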
diff --git a/resources/libraries/python/PLRsearch/__init__.py b/resources/libraries/python/PLRsearch/__init__.py
index 6d1559d5f0..22360cb182 100644
--- a/resources/libraries/python/PLRsearch/__init__.py
+++ b/resources/libraries/python/PLRsearch/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/PLRsearch/log_plus.py b/resources/libraries/python/PLRsearch/log_plus.py
index 62378f6f2c..aabefdb5be 100644
--- a/resources/libraries/python/PLRsearch/log_plus.py
+++ b/resources/libraries/python/PLRsearch/log_plus.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -76,14 +76,14 @@ def log_minus(first, second):
:raises RuntimeError: If the difference would be non-positive.
"""
if first is None:
- raise RuntimeError(u"log_minus: does not support None first")
+ raise RuntimeError("log_minus: does not support None first")
if second is None:
return first
if second >= first:
- raise RuntimeError(u"log_minus: first has to be bigger than second")
+ raise RuntimeError("log_minus: first has to be bigger than second")
factor = -math.expm1(second - first)
if factor <= 0.0:
- msg = u"log_minus: non-positive number to log"
+ msg = "log_minus: non-positive number to log"
else:
return first + math.log(factor)
raise RuntimeError(msg)
diff --git a/resources/libraries/python/PLRsearch/stat_trackers.py b/resources/libraries/python/PLRsearch/stat_trackers.py
index 2a7a05cae6..e598fd840e 100644
--- a/resources/libraries/python/PLRsearch/stat_trackers.py
+++ b/resources/libraries/python/PLRsearch/stat_trackers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -64,8 +64,10 @@ class ScalarStatTracker:
:returns: Expression constructing an equivalent instance.
:rtype: str
"""
- return f"ScalarStatTracker(log_sum_weight={self.log_sum_weight!r}," \
+ return (
+ f"ScalarStatTracker(log_sum_weight={self.log_sum_weight!r},"
f"average={self.average!r},log_variance={self.log_variance!r})"
+ )
def copy(self):
"""Return new ScalarStatTracker instance with the same state as self.
@@ -110,7 +112,8 @@ class ScalarStatTracker:
if absolute_shift > 0.0:
log_square_shift = 2 * math.log(absolute_shift)
log_variance = log_plus(
- log_variance, log_square_shift + log_sample_ratio)
+ log_variance, log_square_shift + log_sample_ratio
+ )
if log_variance is not None:
log_variance += old_log_sum_weight - new_log_sum_weight
self.log_sum_weight = new_log_sum_weight
@@ -133,10 +136,17 @@ class ScalarDualStatTracker(ScalarStatTracker):
One typical use is for Monte Carlo integrator to decide whether
the partial sums so far are reliable enough.
"""
+
def __init__(
- self, log_sum_weight=None, average=0.0, log_variance=None,
- log_sum_secondary_weight=None, secondary_average=0.0,
- log_secondary_variance=None, max_log_weight=None):
+ self,
+ log_sum_weight=None,
+ average=0.0,
+ log_variance=None,
+ log_sum_secondary_weight=None,
+ secondary_average=0.0,
+ log_secondary_variance=None,
+ max_log_weight=None,
+ ):
"""Initialize new tracker instance, empty by default.
:param log_sum_weight: Natural logarithm of sum of weights
@@ -177,12 +187,14 @@ class ScalarDualStatTracker(ScalarStatTracker):
:rtype: str
"""
sec = self.secondary
- return f"ScalarDualStatTracker(log_sum_weight={self.log_sum_weight!r},"\
- f"average={self.average!r},log_variance={self.log_variance!r}," \
- f"log_sum_secondary_weight={sec.log_sum_weight!r}," \
- f"secondary_average={sec.average!r}," \
- f"log_secondary_variance={sec.log_variance!r}," \
+ return (
+ f"ScalarDualStatTracker(log_sum_weight={self.log_sum_weight!r},"
+ f"average={self.average!r},log_variance={self.log_variance!r},"
+ f"log_sum_secondary_weight={sec.log_sum_weight!r},"
+ f"secondary_average={sec.average!r},"
+ f"log_secondary_variance={sec.log_variance!r},"
f"max_log_weight={self.max_log_weight!r})"
+ )
def add(self, scalar_value, log_weight=0.0):
"""Return updated both stats after addition of another sample.
@@ -197,7 +209,7 @@ class ScalarDualStatTracker(ScalarStatTracker):
"""
# Using super() as copy() and add() are not expected to change
# signature, so this way diamond inheritance will be supported.
- primary = super(ScalarDualStatTracker, self)
+ primary = super()
if self.max_log_weight is None or log_weight >= self.max_log_weight:
self.max_log_weight = log_weight
self.secondary = primary.copy()
@@ -242,8 +254,12 @@ class VectorStatTracker:
"""
def __init__(
- self, dimension=2, log_sum_weight=None, averages=None,
- covariance_matrix=None):
+ self,
+ dimension=2,
+ log_sum_weight=None,
+ averages=None,
+ covariance_matrix=None,
+ ):
"""Initialize new tracker instance, two-dimensional empty by default.
If any of latter two arguments is None, it means
@@ -272,10 +288,12 @@ class VectorStatTracker:
:returns: Expression constructing an equivalent instance.
:rtype: str
"""
- return f"VectorStatTracker(dimension={self.dimension!r}," \
- f"log_sum_weight={self.log_sum_weight!r}," \
- f"averages={self.averages!r}," \
+ return (
+ f"VectorStatTracker(dimension={self.dimension!r},"
+ f"log_sum_weight={self.log_sum_weight!r},"
+ f"averages={self.averages!r},"
f"covariance_matrix={self.covariance_matrix!r})"
+ )
def copy(self):
"""Return new instance with the same state as self.
@@ -287,8 +305,10 @@ class VectorStatTracker:
:rtype: VectorStatTracker
"""
return VectorStatTracker(
- self.dimension, self.log_sum_weight, self.averages[:],
- copy.deepcopy(self.covariance_matrix)
+ self.dimension,
+ self.log_sum_weight,
+ self.averages[:],
+ copy.deepcopy(self.covariance_matrix),
)
def reset(self):
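A hedged sketch of the repr round-trip these trackers aim for (the import path and the base tracker's add() signature are assumed to mirror the ScalarDualStatTracker.add() shown above):

    from resources.libraries.python.PLRsearch.stat_trackers import (
        ScalarStatTracker,
    )

    tracker = ScalarStatTracker()
    tracker.add(2.0, log_weight=0.0)
    tracker.add(4.0, log_weight=0.0)
    # repr() returns an expression constructing an equivalent instance,
    # so the tracker state can be reconstructed from the logs.
    clone = eval(repr(tracker))
    print(clone.average)  # roughly 3.0 for equal weights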
diff --git a/resources/libraries/python/PapiExecutor.py b/resources/libraries/python/PapiExecutor.py
index 6b21680526..a55638ab7c 100644
--- a/resources/libraries/python/PapiExecutor.py
+++ b/resources/libraries/python/PapiExecutor.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -12,17 +12,21 @@
# limitations under the License.
"""Python API executor library.
+
+TODO: Document sync and async handling properly.
"""
import copy
import glob
import json
+import logging
import shutil
import struct # vpp-papi can raise struct.error
import subprocess
import sys
import tempfile
import time
+from collections import deque, UserDict
from pprint import pformat
from robot.api import logger
@@ -32,15 +36,19 @@ from resources.libraries.python.LocalExecution import run
from resources.libraries.python.FilteredLogger import FilteredLogger
from resources.libraries.python.PapiHistory import PapiHistory
from resources.libraries.python.ssh import (
- SSH, SSHTimeout, exec_cmd_no_error, scp_node)
+ SSH,
+ SSHTimeout,
+ exec_cmd_no_error,
+ scp_node,
+)
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppApiCrc import VppApiCrcChecker
__all__ = [
- u"PapiExecutor",
- u"PapiSocketExecutor",
- u"Disconnector",
+ "PapiExecutor",
+ "PapiSocketExecutor",
+ "Disconnector",
]
@@ -63,47 +71,50 @@ def dictize(obj):
:param obj: Arbitrary object to dictize.
:type obj: object
:returns: Dictized object.
- :rtype: same as obj type or collections.OrderedDict
+ :rtype: same as obj type or collections.UserDict
"""
- if not hasattr(obj, u"_asdict"):
+ if not hasattr(obj, "_asdict"):
return obj
- ret = obj._asdict()
- old_get = ret.__getitem__
- new_get = lambda self, key: dictize(old_get(self, key))
- ret.__getitem__ = new_get
- return ret
+ overriden = UserDict(obj._asdict())
+ old_get = overriden.__getitem__
+ overriden.__getitem__ = lambda self, key: dictize(old_get(self, key))
+ return overriden
-class PapiSocketExecutor:
- """Methods for executing VPP Python API commands on forwarded socket.
+def dictize_and_check_retval(obj, err_msg):
+ """Make namedtuple-like object accessible as dict, check retval if exists.
- Previously, we used an implementation with single client instance
- and connection being handled by a resource manager.
- On "with" statement, the instance connected, and disconnected
- on exit from the "with" block.
- This was limiting (no nested with blocks) and mainly it was slow:
- 0.7 seconds per disconnect cycle on Skylake, more than 3 second on Taishan.
+ If the object contains "retval" field, raise when the value is non-zero.
- The currently used implementation caches the connected client instances,
- providing speedup and making "with" blocks unnecessary.
- But with many call sites, "with" blocks are still the main usage pattern.
- Documentation still lists that as the intended pattern.
+ See dictize() for what it means to dictize.
- As a downside, clients need to be explicitly told to disconnect
- before VPP restart.
- There is some amount of retries and disconnects on disconnect
- (so unresponsive VPPs do not breach test much more than needed),
- but it is hard to verify all that works correctly.
- Especially, if Robot crashes, files and ssh processes may leak.
+ :param obj: Arbitrary object to dictize.
+ :param err_msg: The (additional) text for the raised exception.
+ :type obj: object
+ :type err_msg: str
+ :returns: Dictized object.
+ :rtype: same as obj type or collections.UserDict
+ :raises AssertionError: If retval field is present with nonzero value.
+ """
+ ret = dictize(obj)
+ # *_details messages do not contain retval.
+ retval = ret.get("retval", 0)
+ if retval != 0:
+ raise AssertionError(f"{err_msg}\nRetval nonzero in object {ret!r}")
+ return ret
- Delay for accepting socket connection is 10s.
- TODO: Decrease 10s to value that is long enough for creating connection
- and short enough to not affect performance.
+
+class PapiSocketExecutor:
+ """Methods for executing VPP Python API commands on forwarded socket.
The current implementation downloads and parses .api.json files only once
and caches client instances for reuse.
Cleanup metadata is added as additional attributes
- directly to client instances.
+ directly to the client instances.
+
+ The current implementation caches the connected client instances.
+ As a downside, clients need to be explicitly told to disconnect
+ before VPP restart.
The current implementation seems to run into read error occasionally.
Not sure if the error is in Python code on Robot side, ssh forwarding,
@@ -111,14 +122,16 @@ class PapiSocketExecutor:
seems to help, hoping repeated command execution does not lead to surprises.
The reconnection is logged at WARN level, so it is prominently shown
in log.html, so we can see how frequently it happens.
+ There are similar retries cleanups in other places
+ (so unresponsive VPPs do not break the test much more than needed),
+ but it is hard to verify all that works correctly.
+ Especially, if Robot crashes, files and ssh processes may leak.
- TODO: Support handling of retval!=0 without try/except in caller.
-
- Note: Use only with "with" statement, e.g.:
+ TODO: Decrease current timeout value when creating connections
+ so broken VPP does not prolong job duration too much
+ while good VPP (almost) never fails to connect.
- cmd = 'show_version'
- with PapiSocketExecutor(node) as papi_exec:
- reply = papi_exec.add(cmd).get_reply(err_msg)
+ TODO: Support handling of retval!=0 without try/except in caller.
This class processes two classes of VPP PAPI methods:
1. Simple request / reply: method='request'.
@@ -128,27 +141,37 @@ class PapiSocketExecutor:
The recommended ways of use are (examples):
- 1. Simple request / reply
-
- a. One request with no arguments:
+ 1. Simple request / reply. Example with no arguments:
- cmd = 'show_version'
+ cmd = "show_version"
with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd).get_reply(err_msg)
- b. Three requests with arguments, the second and the third ones are the same
- but with different arguments.
+ 2. Dump functions:
+ cmd = "sw_interface_rx_placement_dump"
with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, sw_if_index=ifc["vpp_sw_index"])
+ details = papi_exec.get_details(err_msg)
+
+ 3. Multiple requests with one reply each.
+ In this example, there are three requests with arguments,
+ the second and the third ones are the same but with different arguments.
+ This example also showcases method chaining.
+
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
replies = papi_exec.add(cmd1, **args1).add(cmd2, **args2).\
add(cmd2, **args3).get_replies(err_msg)
- 2. Dump functions
+ The "is_async=True" part in the last example enables "async handling mode",
+ which imposes limitations but gains speed and saves memory.
+ This is different from the async mode of VPP PAPI, as the default handling mode
+ also uses async PAPI connections.
- cmd = 'sw_interface_rx_placement_dump'
- with PapiSocketExecutor(node) as papi_exec:
- details = papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index']).\
- get_details(err_msg)
+ The implementation contains more hidden details, such as
+ support for old VPP PAPI async mode behavior, API CRC checking
+ conditional usage of control ping, and possible susceptibility to VPP-2033.
+ See docstring of methods for more detailed info.
"""
# Class cache for reuse between instances.
@@ -172,16 +195,21 @@ class PapiSocketExecutor:
conn_cache = dict()
"""Mapping from node key to connected client instance."""
- def __init__(self, node, remote_vpp_socket=Constants.SOCKSVR_PATH):
+ def __init__(
+ self, node, remote_vpp_socket=Constants.SOCKSVR_PATH, is_async=False
+ ):
"""Store the given arguments, declare managed variables.
:param node: Node to connect to and forward unix domain socket from.
:param remote_vpp_socket: Path to remote socket to tunnel to.
+ :param is_async: Whether to use async handling.
:type node: dict
:type remote_vpp_socket: str
+ :type is_async: bool
"""
self._node = node
self._remote_vpp_socket = remote_vpp_socket
+ self._is_async = is_async
# The list of PAPI commands to be executed on the node.
self._api_command_list = list()
@@ -196,32 +224,40 @@ class PapiSocketExecutor:
cls = self.__class__
if cls.api_package_path:
return
- cls.api_root_dir = tempfile.TemporaryDirectory(dir=u"/tmp")
+ # Pylint suggests to use a "with" statement, which we cannot do,
+ # as the dir should stay for multiple ensure_vpp_instance calls.
+ cls.api_root_dir = tempfile.TemporaryDirectory(dir="/tmp")
root_path = cls.api_root_dir.name
# Pack, copy and unpack Python part of VPP installation from _node.
# TODO: Use rsync or recursive version of ssh.scp_node instead?
node = self._node
- exec_cmd_no_error(node, [u"rm", u"-rf", u"/tmp/papi.txz"])
+ exec_cmd_no_error(node, ["rm", "-rf", "/tmp/papi.txz"])
# Papi python version depends on OS (and time).
- # Python 2.7 or 3.4, site-packages or dist-packages.
- installed_papi_glob = u"/usr/lib/python3*/*-packages/vpp_papi"
+ # Python 3.4 or higher, site-packages or dist-packages.
+ installed_papi_glob = "/usr/lib/python3*/*-packages/vpp_papi"
# We need to wrap this command in bash, in order to expand globs,
# and as ssh does join, the inner command has to be quoted.
- inner_cmd = u" ".join([
- u"tar", u"cJf", u"/tmp/papi.txz", u"--exclude=*.pyc",
- installed_papi_glob, u"/usr/share/vpp/api"
- ])
- exec_cmd_no_error(node, [u"bash", u"-c", u"'" + inner_cmd + u"'"])
- scp_node(node, root_path + u"/papi.txz", u"/tmp/papi.txz", get=True)
- run([u"tar", u"xf", root_path + u"/papi.txz", u"-C", root_path])
- cls.api_json_path = root_path + u"/usr/share/vpp/api"
+ inner_cmd = " ".join(
+ [
+ "tar",
+ "cJf",
+ "/tmp/papi.txz",
+ "--exclude=*.pyc",
+ installed_papi_glob,
+ "/usr/share/vpp/api",
+ ]
+ )
+ exec_cmd_no_error(node, ["bash", "-c", f"'{inner_cmd}'"])
+ scp_node(node, root_path + "/papi.txz", "/tmp/papi.txz", get=True)
+ run(["tar", "xf", root_path + "/papi.txz", "-C", root_path])
+ cls.api_json_path = root_path + "/usr/share/vpp/api"
# Perform initial checks before .api.json files are gone,
# by creating the checker instance.
cls.crc_checker = VppApiCrcChecker(cls.api_json_path)
# When present locally, we finally can find the installation path.
cls.api_package_path = glob.glob(root_path + installed_papi_glob)[0]
# Package path has to be one level above the vpp_papi directory.
- cls.api_package_path = cls.api_package_path.rsplit(u"/", 1)[0]
+ cls.api_package_path = cls.api_package_path.rsplit("/", 1)[0]
def ensure_vpp_instance(self):
"""Create or reuse a closed client instance, return it.
@@ -249,14 +285,39 @@ class PapiSocketExecutor:
# It is right, we should refactor the code and move initialization
# of package outside.
from vpp_papi.vpp_papi import VPPApiClient as vpp_class
- vpp_class.apidir = cls.api_json_path
- # We need to create instance before removing from sys.path.
- vpp_instance = vpp_class(
- use_socket=True, server_address=u"TBD", async_thread=False,
- read_timeout=14, logger=FilteredLogger(logger, u"INFO")
- )
- # Cannot use loglevel parameter, robot.api.logger lacks support.
- # TODO: Stop overriding read_timeout when VPP-1722 is fixed.
+ try:
+ # The old way. Deduplicate when pre-2402 support is not needed.
+
+ vpp_class.apidir = cls.api_json_path
+ # We need to create instance before removing from sys.path.
+ # Cannot use loglevel parameter, robot.api.logger lacks the support.
+ vpp_instance = vpp_class(
+ use_socket=True,
+ server_address="TBD",
+ async_thread=False,
+ # Large read timeout was originally there for VPP-1722,
+ # it may still be helping against AVF device creation failures.
+ read_timeout=14,
+ logger=FilteredLogger(logger, "INFO"),
+ )
+ except vpp_class.VPPRuntimeError:
+ # The 39871 way.
+
+ # We need to create instance before removing from sys.path.
+ # Cannot use loglevel parameter, robot.api.logger lacks the support.
+ vpp_instance = vpp_class(
+ apidir=cls.api_json_path,
+ use_socket=True,
+ server_address="TBD",
+ async_thread=False,
+ # Large read timeout was originally there for VPP-1722,
+ # it may still be helping against AVF device creation failures.
+ read_timeout=14,
+ logger=FilteredLogger(logger, "INFO"),
+ )
+ # The following is needed to prevent union (e.g. Ip4) debug logging
+ # of VPP part of PAPI from spamming robot logs.
+ logging.getLogger("vpp_papi.serializer").setLevel(logging.INFO)
finally:
if sys.path[-1] == cls.api_package_path:
sys.path.pop()
@@ -282,8 +343,8 @@ class PapiSocketExecutor:
:rtype: tuple of str
"""
return (
- node[u"host"],
- node[u"port"],
+ node["host"],
+ node["port"],
remote_socket,
# TODO: Do we support sockets paths such as "~/vpp/api.socket"?
# If yes, add also:
@@ -300,7 +361,8 @@ class PapiSocketExecutor:
:rtype: tuple of str
"""
return self.__class__.key_for_node_and_socket(
- self._node, self._remote_vpp_socket,
+ self._node,
+ self._remote_vpp_socket,
)
def set_connected_client(self, client):
@@ -327,10 +389,11 @@ class PapiSocketExecutor:
If check_connected, RuntimeError is raised when the client is
not in cache. None is returned if client is not in cache
(and the check is disabled).
+ Successful retrieval from cache is logged only when check_connected.
This hides details of what the node key is.
- :param check_connected: Whether cache miss raises.
+ :param check_connected: Whether cache miss raises (and success logs).
:type check_connected: bool
:returns: Connected client instance, or None if uncached and no check.
:rtype: Optional[vpp_papi.VPPApiClient]
@@ -338,11 +401,9 @@ class PapiSocketExecutor:
"""
key = self.key_for_self()
ret = self.__class__.conn_cache.get(key, None)
-
- if ret is None:
- if check_connected:
+ if check_connected:
+ if ret is None:
raise RuntimeError(f"Client not cached for key: {key}")
- else:
# When reading logs, it is good to see which VPP is accessed.
logger.debug(f"Activated cached PAPI client for key: {key}")
return ret
@@ -364,6 +425,8 @@ class PapiSocketExecutor:
- This socket controls the local ssh process doing the forwarding.
csit_local_vpp_socket
- This is the forwarded socket to talk with remote VPP.
+ csit_deque
+ - Queue for responses.
The attribute names do not start with underscore,
so pylint does not complain about accessing private attribute.
@@ -378,7 +441,7 @@ class PapiSocketExecutor:
if vpp_instance is not None:
return self
# No luck, create and connect a new instance.
- time_enter = time.time()
+ time_enter = time.monotonic()
node = self._node
# Parsing takes longer than connecting, prepare instance before tunnel.
vpp_instance = self.ensure_vpp_instance()
@@ -386,44 +449,55 @@ class PapiSocketExecutor:
# If connection fails, it is better to attempt disconnect anyway.
self.set_connected_client(vpp_instance)
# Set additional attributes.
- vpp_instance.csit_temp_dir = tempfile.TemporaryDirectory(dir=u"/tmp")
+ vpp_instance.csit_temp_dir = tempfile.TemporaryDirectory(dir="/tmp")
temp_path = vpp_instance.csit_temp_dir.name
- api_socket = temp_path + u"/vpp-api.sock"
+ api_socket = temp_path + "/vpp-api.sock"
vpp_instance.csit_local_vpp_socket = api_socket
- ssh_socket = temp_path + u"/ssh.sock"
+ ssh_socket = temp_path + "/ssh.sock"
vpp_instance.csit_control_socket = ssh_socket
# Cleanup possibilities.
- ret_code, _ = run([u"ls", ssh_socket], check=False)
+ ret_code, _ = run(["ls", ssh_socket], check=False)
if ret_code != 2:
# This branch never seems to be hit in CI,
# but may be useful when testing manually.
run(
- [u"ssh", u"-S", ssh_socket, u"-O", u"exit", u"0.0.0.0"],
- check=False, log=True
+ ["ssh", "-S", ssh_socket, "-O", "exit", "0.0.0.0"],
+ check=False,
+ log=True,
)
# TODO: Is any sleep necessary? How to prove if not?
- run([u"sleep", u"0.1"])
- run([u"rm", u"-vrf", ssh_socket])
+ run(["sleep", "0.1"])
+ run(["rm", "-vrf", ssh_socket])
# Even if ssh can perhaps reuse this file,
# we need to remove it for readiness detection to work correctly.
- run([u"rm", u"-rvf", api_socket])
+ run(["rm", "-rvf", api_socket])
# We use sleep command. The ssh command will exit in 30 second,
# unless a local socket connection is established,
# in which case the ssh command will exit only when
# the ssh connection is closed again (via control socket).
# The log level is to suppress "Warning: Permanently added" messages.
ssh_cmd = [
- u"ssh", u"-S", ssh_socket, u"-M", u"-L",
- api_socket + u":" + self._remote_vpp_socket,
- u"-p", str(node[u"port"]),
- u"-o", u"LogLevel=ERROR",
- u"-o", u"UserKnownHostsFile=/dev/null",
- u"-o", u"StrictHostKeyChecking=no",
- u"-o", u"ExitOnForwardFailure=yes",
- node[u"username"] + u"@" + node[u"host"],
- u"sleep", u"30"
+ "ssh",
+ "-S",
+ ssh_socket,
+ "-M",
+ "-L",
+ f"{api_socket}:{self._remote_vpp_socket}",
+ "-p",
+ str(node["port"]),
+ "-o",
+ "LogLevel=ERROR",
+ "-o",
+ "UserKnownHostsFile=/dev/null",
+ "-o",
+ "StrictHostKeyChecking=no",
+ "-o",
+ "ExitOnForwardFailure=yes",
+ f"{node['username']}@{node['host']}",
+ "sleep",
+ "30",
]
- priv_key = node.get(u"priv_key")
+ priv_key = node.get("priv_key")
if priv_key:
# This is tricky. We need a file to pass the value to ssh command.
# And we need ssh command, because paramiko does not support sockets
@@ -432,48 +506,52 @@ class PapiSocketExecutor:
key_file.write(priv_key)
# Make sure the content is written, but do not close yet.
key_file.flush()
- ssh_cmd[1:1] = [u"-i", key_file.name]
- password = node.get(u"password")
+ ssh_cmd[1:1] = ["-i", key_file.name]
+ password = node.get("password")
if password:
# Prepend sshpass command to set password.
- ssh_cmd[:0] = [u"sshpass", u"-p", password]
- time_stop = time.time() + 10.0
+ ssh_cmd[:0] = ["sshpass", "-p", password]
+ time_stop = time.monotonic() + 10.0
# subprocess.Popen seems to be the best way to run commands
# on background. Other ways (shell=True with "&" and ssh with -f)
# seem to be too dependent on shell behavior.
# In particular, -f does NOT return values for run().
subprocess.Popen(ssh_cmd)
# Check socket presence on local side.
- while time.time() < time_stop:
+ while time.monotonic() < time_stop:
# It can take a moment for ssh to create the socket file.
- ret_code, _ = run(
- [u"ls", u"-l", api_socket], check=False
- )
+ ret_code, _ = run(["ls", "-l", api_socket], check=False)
if not ret_code:
break
- time.sleep(0.1)
+ time.sleep(0.01)
else:
- raise RuntimeError(u"Local side socket has not appeared.")
+ raise RuntimeError("Local side socket has not appeared.")
if priv_key:
# Socket up means the key has been read. Delete file by closing it.
key_file.close()
# Everything is ready, set the local socket address and connect.
vpp_instance.transport.server_address = api_socket
# It seems we can get read error even if every preceding check passed.
- # Single retry seems to help.
+ # Single retry seems to help. TODO: Confirm this is still needed.
for _ in range(2):
try:
- vpp_instance.connect_sync(u"csit_socket")
+ vpp_instance.connect("csit_socket", do_async=True)
except (IOError, struct.error) as err:
logger.warn(f"Got initial connect error {err!r}")
vpp_instance.disconnect()
else:
break
else:
- raise RuntimeError(u"Failed to connect to VPP over a socket.")
- logger.trace(
- f"Establishing socket connection took {time.time()-time_enter}s"
- )
+ raise RuntimeError("Failed to connect to VPP over a socket.")
+ # Only after rls2302 all relevant VPP builds should have do_async.
+ if hasattr(vpp_instance.transport, "do_async"):
+ deq = deque()
+ vpp_instance.csit_deque = deq
+ vpp_instance.register_event_callback(lambda x, y: deq.append(y))
+ else:
+ vpp_instance.csit_deque = None
+ duration_conn = time.monotonic() - time_enter
+ logger.trace(f"Establishing socket connection took {duration_conn}s.")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
@@ -498,10 +576,17 @@ class PapiSocketExecutor:
return
logger.debug(f"Disconnecting by key: {key}")
client_instance.disconnect()
- run([
- u"ssh", u"-S", client_instance.csit_control_socket, u"-O",
- u"exit", u"0.0.0.0"
- ], check=False)
+ run(
+ [
+ "ssh",
+ "-S",
+ client_instance.csit_control_socket,
+ "-O",
+ "exit",
+ "0.0.0.0",
+ ],
+ check=False,
+ )
# Temp dir has autoclean, but deleting explicitly
# as an error can happen.
try:
@@ -517,8 +602,8 @@ class PapiSocketExecutor:
@classmethod
def disconnect_by_node_and_socket(
- cls, node, remote_socket=Constants.SOCKSVR_PATH
- ):
+ cls, node, remote_socket=Constants.SOCKSVR_PATH
+ ):
"""Disconnect a connected client instance, noop it not connected.
Also remove the local sockets by deleting the temporary directory.
@@ -576,10 +661,8 @@ class PapiSocketExecutor:
"""Add next command to internal command list; return self.
Unless disabled, new entry to papi history is also added at this point.
- The argument name 'csit_papi_command' must be unique enough as it cannot
- be repeated in kwargs.
- The kwargs dict is deep-copied, so it is safe to use the original
- with partial modifications for subsequent commands.
+ The kwargs dict is serialized or deep-copied, so it is safe to use
+ the original with partial modifications for subsequent calls.
Any pending conflicts from .api.json processing are raised.
Then the command name is checked for known CRCs.
@@ -589,6 +672,16 @@ class PapiSocketExecutor:
Each CRC issue is raised only once, so subsequent tests
can raise other issues.
+ With async handling mode, this method also serializes and sends
+ the command, skips CRC check to gain speed, and saves memory
+ by putting a sentinel (instead of deepcopy) to api command list.
+
+ For scale tests, the call sites are responsible to set history values
+ in a way that hints what is done without overwhelming the papi history.
+
+ Note to contributors: Do not rename "csit_papi_command"
+ to anything VPP could possibly use as an API field name.
+
:param csit_papi_command: VPP API command.
:param history: Enable/disable adding command to PAPI command history.
:param kwargs: Optional key-value arguments.
@@ -601,23 +694,39 @@ class PapiSocketExecutor:
"""
self.crc_checker.report_initial_conflicts()
if history:
+ # No need for deepcopy yet, serialization isolates from edits.
PapiHistory.add_to_papi_history(
self._node, csit_papi_command, **kwargs
)
self.crc_checker.check_api_name(csit_papi_command)
- self._api_command_list.append(
- dict(
- api_name=csit_papi_command,
- api_args=copy.deepcopy(kwargs)
+ if self._is_async:
+ # Save memory but still count the number of expected replies.
+ self._api_command_list.append(0)
+ api_object = self.get_connected_client(check_connected=False).api
+ func = getattr(api_object, csit_papi_command)
+ # No need for deepcopy yet, serialization isolates from edits.
+ func(**kwargs)
+ else:
+ # No serialization, so deepcopy is needed here.
+ self._api_command_list.append(
+ dict(api_name=csit_papi_command, api_args=copy.deepcopy(kwargs))
)
- )
return self
def get_replies(self, err_msg="Failed to get replies."):
- """Get replies from VPP Python API.
+ """Get reply for each command from VPP Python API.
+
+ This method expects one reply per command,
+ and gains performance by reading replies only after
+ sending all commands.
The replies are parsed into dict-like objects,
- "retval" field is guaranteed to be zero on success.
+ "retval" field (if present) is guaranteed to be zero on success.
+
+ Do not use this for messages with variable number of replies,
+ use get_details instead.
+ Do not use for commands triggering VPP-2033,
+ use a series of get_reply calls instead.
:param err_msg: The message used if the PAPI command(s) execution fails.
:type err_msg: str
@@ -625,15 +734,18 @@ class PapiSocketExecutor:
:rtype: list of dict
:raises RuntimeError: If retval is nonzero, parsing or ssh error.
"""
- return self._execute(err_msg=err_msg)
+ if not self._is_async:
+ raise RuntimeError("Sync handling does not suport get_replies.")
+ return self._execute(err_msg=err_msg, do_async=True)
- def get_reply(self, err_msg=u"Failed to get reply."):
- """Get reply from VPP Python API.
+ def get_reply(self, err_msg="Failed to get reply."):
+ """Get reply to single command from VPP Python API.
- The reply is parsed into dict-like object,
- "retval" field is guaranteed to be zero on success.
+ This method waits for a single reply (no control ping),
+ thus avoiding bugs like VPP-2033.
- TODO: Discuss exception types to raise, unify with inner methods.
+ The reply is parsed into a dict-like object,
+ "retval" field (if present) is guaranteed to be zero on success.
:param err_msg: The message used if the PAPI command(s) execution fails.
:type err_msg: str
@@ -641,18 +753,19 @@ class PapiSocketExecutor:
:rtype: dict
:raises AssertionError: If retval is nonzero, parsing or ssh error.
"""
- replies = self.get_replies(err_msg=err_msg)
+ if self._is_async:
+ raise RuntimeError("Async handling does not suport get_reply.")
+ replies = self._execute(err_msg=err_msg, do_async=False)
if len(replies) != 1:
raise RuntimeError(f"Expected single reply, got {replies!r}")
return replies[0]
- def get_sw_if_index(self, err_msg=u"Failed to get reply."):
+ def get_sw_if_index(self, err_msg="Failed to get reply."):
"""Get sw_if_index from reply from VPP Python API.
Frequently, the caller is only interested in sw_if_index field
- of the reply, this wrapper makes such call sites shorter.
-
- TODO: Discuss exception types to raise, unify with inner methods.
+ of the reply, this wrapper around get_reply (thus safe against VPP-2033)
+ makes such call sites shorter.
:param err_msg: The message used if the PAPI command(s) execution fails.
:type err_msg: str
@@ -660,12 +773,13 @@ class PapiSocketExecutor:
:rtype: int
:raises AssertionError: If retval is nonzero, parsing or ssh error.
"""
+ if self._is_async:
+ raise RuntimeError("Async handling does not suport get_sw_if_index")
reply = self.get_reply(err_msg=err_msg)
- logger.trace(f"Getting index from {reply!r}")
- return reply[u"sw_if_index"]
+ return reply["sw_if_index"]
def get_details(self, err_msg="Failed to get dump details."):
- """Get dump details from VPP Python API.
+ """Get details (for possibly multiple dumps) from VPP Python API.
The details are parsed into dict-like objects.
The number of details per single dump command can vary,
@@ -674,19 +788,27 @@ class PapiSocketExecutor:
logging everything at once for debugging purposes),
it is recommended to call get_details for each dump (type) separately.
+ This method uses control ping to detect end of replies,
+ so it is not suitable for commands which trigger VPP-2033
+ (but arguably no dump currently triggers it).
+
:param err_msg: The message used if the PAPI command(s) execution fails.
:type err_msg: str
:returns: Details, dict objects with fields due to API without "retval".
:rtype: list of dict
"""
- return self._execute(err_msg)
+ if self._is_async:
+ raise RuntimeError("Async handling does not suport get_details.")
+ return self._execute(err_msg, do_async=False, single_reply=False)
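As a sketch of the dump path (same assumption about node as above): a single .add() may yield any number of detail replies, which is why control ping is used to find the end of the stream.

    with PapiSocketExecutor(node) as papi_exec:
        if_details = papi_exec.add("sw_interface_dump").get_details()
    for detail in if_details:
        logger.debug(detail["interface_name"])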
@staticmethod
def run_cli_cmd(
- node, cli_cmd, log=True, remote_vpp_socket=Constants.SOCKSVR_PATH):
+ node, cli_cmd, log=True, remote_vpp_socket=Constants.SOCKSVR_PATH
+ ):
"""Run a CLI command as cli_inband, return the "reply" field of reply.
Optionally, log the field value.
+ This is a convenience wrapper around get_reply.
:param node: Node to run command on.
:param cli_cmd: The CLI command to be run on the node.
@@ -699,18 +821,18 @@ class PapiSocketExecutor:
:returns: CLI output.
:rtype: str
"""
- cmd = u"cli_inband"
- args = dict(
- cmd=cli_cmd
+ cmd = "cli_inband"
+ args = dict(cmd=cli_cmd)
+ err_msg = (
+ f"Failed to run 'cli_inband {cli_cmd}' PAPI command"
+ f" on host {node['host']}"
)
- err_msg = f"Failed to run 'cli_inband {cli_cmd}' PAPI command " \
- f"on host {node[u'host']}"
with PapiSocketExecutor(node, remote_vpp_socket) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)["reply"]
if log:
logger.info(
- f"{cli_cmd} ({node[u'host']} - {remote_vpp_socket}):\n"
+ f"{cli_cmd} ({node['host']} - {remote_vpp_socket}):\n"
f"{reply.strip()}"
)
return reply
@@ -719,6 +841,8 @@ class PapiSocketExecutor:
def run_cli_cmd_on_all_sockets(node, cli_cmd, log=True):
"""Run a CLI command as cli_inband, on all sockets in topology file.
+ Just a run_cli_cmd, looping over sockets.
+
:param node: Node to run command on.
:param cli_cmd: The CLI command to be run on the node.
:param log: If True, the response is logged.
@@ -737,6 +861,8 @@ class PapiSocketExecutor:
def dump_and_log(node, cmds):
"""Dump and log requested information, return None.
+ Just a get_details (with logging), looping over commands.
+
:param node: DUT node.
:param cmds: Dump commands to be executed.
:type node: dict
@@ -747,64 +873,231 @@ class PapiSocketExecutor:
dump = papi_exec.add(cmd).get_details()
logger.debug(f"{cmd}:\n{pformat(dump)}")
- def _execute(self, err_msg=u"Undefined error message", exp_rv=0):
+ @staticmethod
+ def _read_internal(vpp_instance, timeout=None):
+ """Blockingly read within timeout.
+
+ This covers behaviors both before and after 37758.
+ One read attempt is guaranteed even with zero timeout.
+
+ TODO: Simplify after 2302 RCA is done.
+
+ :param vpp_instance: Client instance to read from.
+ :param timeout: How long to wait for reply (or transport default).
+ :type vpp_instance: vpp_papi.VPPApiClient
+ :type timeout: Optional[float]
+ :returns: Message read or None if nothing got read.
+ :rtype: Optional[namedtuple]
+ """
+ timeout = vpp_instance.read_timeout if timeout is None else timeout
+ if vpp_instance.csit_deque is None:
+ return vpp_instance.read_blocking(timeout=timeout)
+ time_stop = time.monotonic() + timeout
+ while 1:
+ try:
+ return vpp_instance.csit_deque.popleft()
+ except IndexError:
+ # We could busy-wait but that seems to starve the reader thread.
+ time.sleep(0.01)
+ if time.monotonic() > time_stop:
+ return None
+
+ @staticmethod
+ def _read(vpp_instance, tries=3):
+ """Blockingly read within timeout, retry on early None.
+
+ For (sometimes) unknown reasons, the VPP client in async mode
+ occasionally returns None before the timeout elapses.
+ This function retries in that case.
+
+ Most of the time, an early None means VPP crashed (see VPP-2033),
+ but it is better to give VPP more chances to respond without failure.
+
+ TODO: Perhaps CSIT now never triggers VPP-2033,
+ so investigate and remove this layer if even more speed is needed.
+
+ :param vpp_instance: Client instance to read from.
+ :param tries: Maximum number of tries to attempt.
+ :type vpp_instance: vpp_papi.VPPApiClient
+ :type tries: int
+ :returns: Message read or None if nothing got read even with retries.
+ :rtype: Optional[namedtuple]
+ """
+ timeout = vpp_instance.read_timeout
+ for _ in range(tries):
+ time_stop = time.monotonic() + 0.9 * timeout
+ reply = PapiSocketExecutor._read_internal(vpp_instance)
+ if reply is None and time.monotonic() < time_stop:
+ logger.trace("Early None. Retry?")
+ continue
+ return reply
+ logger.trace(f"Got {tries} early Nones, probably a real None.")
+ return None
+
+ @staticmethod
+ def _drain(vpp_instance, err_msg, timeout=30.0):
+ """Keep reading with until None or timeout.
+
+ This is needed to mitigate the risk of a state with unread responses
+ (e.g. after non-zero retval in the middle of get_replies)
+ causing failures in everything subsequent (until disconnect).
+
+ The reads are done without any waiting.
+
+ It is possible some responses have not arrived yet,
+ but that is unlikely as Python is usually slower than VPP.
+
+ :param vpp_instance: Client instance to read from.
+ :param err_msg: Error message to use when overstepping timeout.
+ :param timeout: How long to try before giving up.
+ :type vpp_instance: vpp_papi.VPPApiClient
+ :type err_msg: str
+ :type timeout: float
+ :raises RuntimeError: If reads keep returning messages (non-None) after the timeout.
+ """
+ time_stop = time.monotonic() + timeout
+ while time.monotonic() < time_stop:
+ if PapiSocketExecutor._read_internal(vpp_instance, 0.0) is None:
+ return
+ raise RuntimeError(f"{err_msg}\nTimed out while draining.")
+
+ def _execute(self, err_msg, do_async, single_reply=True):
"""Turn internal command list into data and execute; return replies.
This method also clears the internal command list.
- IMPORTANT!
- Do not use this method in L1 keywords. Use:
- - get_replies()
- - get_reply()
- - get_sw_if_index()
- - get_details()
-
:param err_msg: The message used if the PAPI command(s) execution fails.
+ :param do_async: If True, assume one reply per command and do not wait
+ for each reply before sending the next request.
+ Dump commands (and calls triggering VPP-2033) need False.
+ :param single_reply: Only used in sync mode (must not be False
+ when do_async is True). When False, use control ping to detect
+ the end of replies; when True, wait for a single reply.
:type err_msg: str
- :returns: Papi responses parsed into a dict-like object,
+ :type do_async: bool
+ :type single_reply: bool
+ :returns: Papi replies parsed into a dict-like object,
with fields due to API (possibly including retval).
- :rtype: list of dict
+ :rtype: NoneType or list of dict
:raises RuntimeError: If the replies are not all correct.
"""
- vpp_instance = self.get_connected_client()
local_list = self._api_command_list
# Clear first as execution may fail.
self._api_command_list = list()
- replies = list()
+ if do_async:
+ if not single_reply:
+ raise RuntimeError("Async papi needs one reply per request.")
+ return self._execute_async(local_list, err_msg=err_msg)
+ return self._execute_sync(
+ local_list, err_msg=err_msg, single_reply=single_reply
+ )
+
+ def _execute_sync(self, local_list, err_msg, single_reply):
+ """Execute commands waiting for replies one by one; return replies.
+
+ This implementation either expects a single response per request,
+ or uses control ping to emulate sync PAPI calls.
+ Reliable, but slow. Required for dumps. Needed for calls
+ which trigger VPP-2033.
+
+ CRC checking is done for the replies (requests are checked in .add).
+
+ :param local_list: The list of PAPI commands to be executed on the node.
+ :param err_msg: The message used if the PAPI command(s) execution fails.
+ :param single_reply: When False, use control ping to detect
+ the end of replies; when True, wait for a single reply.
+ :type local_list: list of dict
+ :type err_msg: str
+ :type single_reply: bool
+ :returns: Papi replies parsed into a dict-like object,
+ with fields due to API (possibly including retval).
+ :rtype: List[UserDict]
+ :raises AttributeError: If VPP does not know the command.
+ :raises RuntimeError: If the replies are not all correct.
+ """
+ vpp_instance = self.get_connected_client()
+ control_ping_fn = getattr(vpp_instance.api, "control_ping")
+ ret_list = list()
for command in local_list:
- api_name = command[u"api_name"]
+ api_name = command["api_name"]
papi_fn = getattr(vpp_instance.api, api_name)
+ replies = list()
try:
- try:
- reply = papi_fn(**command[u"api_args"])
- except (IOError, struct.error) as err:
- # Occasionally an error happens, try reconnect.
- logger.warn(f"Reconnect after error: {err!r}")
- vpp_instance.disconnect()
- # Testing shows immediate reconnect fails.
- time.sleep(1)
- vpp_instance.connect_sync(u"csit_socket")
- logger.trace(u"Reconnected.")
- reply = papi_fn(**command[u"api_args"])
+ # Send the command maybe followed by control ping.
+ main_context = papi_fn(**command["api_args"])
+ if single_reply:
+ replies.append(PapiSocketExecutor._read(vpp_instance))
+ else:
+ ping_context = control_ping_fn()
+ # Receive the replies.
+ while 1:
+ reply = PapiSocketExecutor._read(vpp_instance)
+ if reply is None:
+ raise RuntimeError(
+ f"{err_msg}\nSync PAPI timed out."
+ )
+ if reply.context == ping_context:
+ break
+ if reply.context != main_context:
+ raise RuntimeError(
+ f"{err_msg}\nUnexpected context: {reply!r}"
+ )
+ replies.append(reply)
except (AttributeError, IOError, struct.error) as err:
- raise AssertionError(err_msg) from err
- # *_dump commands return list of objects, convert, ordinary reply.
- if not isinstance(reply, list):
- reply = [reply]
- for item in reply:
- self.crc_checker.check_api_name(item.__class__.__name__)
- dict_item = dictize(item)
- if u"retval" in dict_item.keys():
- # *_details messages do not contain retval.
- retval = dict_item[u"retval"]
- if retval != exp_rv:
- # TODO: What exactly to log and raise here?
- raise AssertionError(
- f"Retval {retval!r} does not match expected "
- f"retval {exp_rv!r}"
- )
- replies.append(dict_item)
- return replies
+ # TODO: Add retry if it is still needed.
+ raise AssertionError(f"{err_msg}") from err
+ finally:
+ # Discard any unprocessed replies to avoid secondary failures.
+ PapiSocketExecutor._drain(vpp_instance, err_msg)
+ # Process replies for this command.
+ for reply in replies:
+ self.crc_checker.check_api_name(reply.__class__.__name__)
+ dictized_reply = dictize_and_check_retval(reply, err_msg)
+ ret_list.append(dictized_reply)
+ return ret_list
+
+ def _execute_async(self, local_list, err_msg):
+ """Read, process and return replies.
+
+ The messages were already sent by .add() in this mode,
+ local_list is used just so we know how many replies to read.
+
+ Beware: It is not clear what to do when socket read fails
+ in the middle of async processing.
+
+ The implementation assumes each command results in exactly one reply,
+ that there is no reordering of either commands or replies,
+ and that context numbers increase one by one (and match for replies).
+
+ To speed processing up, reply CRC values are not checked.
+
+ The current implementation does not limit the number of messages
+ in flight; we rely on the VPP PAPI background thread to move replies
+ from the socket to the queue fast enough.
+
+ :param local_list: The list of PAPI commands to get replies for.
+ :param err_msg: The message used if the PAPI command(s) execution fails.
+ :type local_list: list
+ :type err_msg: str
+ :returns: Papi replies parsed into a dict-like object, with fields
+ according to API (possibly including retval).
+ :rtype: List[UserDict]
+ :raises RuntimeError: If the replies are not all correct.
+ """
+ vpp_instance = self.get_connected_client()
+ ret_list = list()
+ try:
+ for index, _ in enumerate(local_list):
+ # Blocks up to timeout.
+ reply = PapiSocketExecutor._read(vpp_instance)
+ if reply is None:
+ time_msg = f"PAPI async timeout: idx {index}"
+ raise RuntimeError(f"{err_msg}\n{time_msg}")
+ ret_list.append(dictize_and_check_retval(reply, err_msg))
+ finally:
+ # Discard any unprocessed replies to avoid secondary failures.
+ PapiSocketExecutor._drain(vpp_instance, err_msg)
+ return ret_list
class Disconnector:
@@ -829,8 +1122,7 @@ class Disconnector:
"""
cls = PapiSocketExecutor
# Iterate over copy of entries so deletions do not mess with iterator.
- keys_copy = list(cls.conn_cache.keys())
- for key in keys_copy:
+ for key in list(cls.conn_cache.keys()):
cls.disconnect_by_key(key)
@@ -870,6 +1162,8 @@ class PapiExecutor:
is "stats".
- the second parameter must be 'path' as it is used by PapiExecutor
method 'add'.
+ - even if the parameter contains multiple paths, there is only one
+ reply item (for each .add).
"""
def __init__(self, node):
@@ -889,17 +1183,15 @@ class PapiExecutor:
def __enter__(self):
try:
self._ssh.connect(self._node)
- except IOError:
- raise RuntimeError(
- f"Cannot open SSH connection to host {self._node[u'host']} "
- f"to execute PAPI command(s)"
- )
+ except IOError as err:
+ msg = f"PAPI: Cannot open SSH connection to {self._node['host']}"
+ raise RuntimeError(msg) from err
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._ssh.disconnect(self._node)
- def add(self, csit_papi_command=u"vpp-stats", history=True, **kwargs):
+ def add(self, csit_papi_command="vpp-stats", history=True, **kwargs):
"""Add next command to internal command list; return self.
The argument name 'csit_papi_command' must be unique enough as it cannot
@@ -921,15 +1213,16 @@ class PapiExecutor:
self._node, csit_papi_command, **kwargs
)
self._api_command_list.append(
- dict(
- api_name=csit_papi_command, api_args=copy.deepcopy(kwargs)
- )
+ dict(api_name=csit_papi_command, api_args=copy.deepcopy(kwargs))
)
return self
def get_stats(
- self, err_msg=u"Failed to get statistics.", timeout=120,
- socket=Constants.SOCKSTAT_PATH):
+ self,
+ err_msg="Failed to get statistics.",
+ timeout=120,
+ socket=Constants.SOCKSTAT_PATH,
+ ):
"""Get VPP Stats from VPP Python API.
:param err_msg: The message used if the PAPI command(s) execution fails.
@@ -941,12 +1234,15 @@ class PapiExecutor:
:returns: Requested VPP statistics.
:rtype: list of dict
"""
- paths = [cmd[u"api_args"][u"path"] for cmd in self._api_command_list]
+ paths = [cmd["api_args"]["path"] for cmd in self._api_command_list]
self._api_command_list = list()
stdout = self._execute_papi(
- paths, method=u"stats", err_msg=err_msg, timeout=timeout,
- socket=socket
+ paths,
+ method="stats",
+ err_msg=err_msg,
+ timeout=timeout,
+ socket=socket,
)
return json.loads(stdout)
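A hedged usage sketch for the stats path (the path expressions follow the vpp-stats example shown in PapiHistory; the exact counters available depend on the VPP build):

    with PapiExecutor(node) as papi_exec:
        stats = papi_exec.add(
            "vpp-stats", path=["^/if", "/err/ip4-input"]
        ).get_stats()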
@@ -986,19 +1282,16 @@ class PapiExecutor:
api_data_processed = list()
for api in api_d:
api_args_processed = dict()
- for a_k, a_v in api[u"api_args"].items():
+ for a_k, a_v in api["api_args"].items():
api_args_processed[str(a_k)] = process_value(a_v)
api_data_processed.append(
- dict(
- api_name=api[u"api_name"],
- api_args=api_args_processed
- )
+ dict(api_name=api["api_name"], api_args=api_args_processed)
)
return api_data_processed
def _execute_papi(
- self, api_data, method=u"request", err_msg=u"", timeout=120,
- socket=None):
+ self, api_data, method="request", err_msg="", timeout=120, socket=None
+ ):
"""Execute PAPI command(s) on remote node and store the result.
:param api_data: List of APIs with their arguments.
@@ -1017,15 +1310,19 @@ class PapiExecutor:
:raises AssertionError: If PAPI command(s) execution has failed.
"""
if not api_data:
- raise RuntimeError(u"No API data provided.")
+ raise RuntimeError("No API data provided.")
- json_data = json.dumps(api_data) \
- if method in (u"stats", u"stats_request") \
+ json_data = (
+ json.dumps(api_data)
+ if method in ("stats", "stats_request")
else json.dumps(self._process_api_data(api_data))
+ )
- sock = f" --socket {socket}" if socket else u""
- cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_PAPI_PROVIDER}" \
+ sock = f" --socket {socket}" if socket else ""
+ cmd = (
+ f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_PAPI_PROVIDER}"
f" --method {method} --data '{json_data}'{sock}"
+ )
try:
ret_code, stdout, _ = self._ssh.exec_command_sudo(
cmd=cmd, timeout=timeout, log_stdout_err=False
@@ -1033,14 +1330,14 @@ class PapiExecutor:
# TODO: Fail on non-empty stderr?
except SSHTimeout:
logger.error(
- f"PAPI command(s) execution timeout on host "
- f"{self._node[u'host']}:\n{api_data}"
+ f"PAPI command(s) execution timeout on host"
+ f" {self._node['host']}:\n{api_data}"
)
raise
except Exception as exc:
raise RuntimeError(
- f"PAPI command(s) execution on host {self._node[u'host']} "
- f"failed: {api_data}"
+ f"PAPI command(s) execution on host {self._node['host']}"
+ f" failed: {api_data}"
) from exc
if ret_code != 0:
raise AssertionError(err_msg)
diff --git a/resources/libraries/python/PapiHistory.py b/resources/libraries/python/PapiHistory.py
index cacfbd6b19..18b2774908 100644
--- a/resources/libraries/python/PapiHistory.py
+++ b/resources/libraries/python/PapiHistory.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -48,7 +48,7 @@ class PapiHistory:
PapiHistory.reset_papi_history(node)
@staticmethod
- def add_to_papi_history(node, csit_papi_command, papi=True, **kwargs):
+ def add_to_papi_history(node, csit_papi_command, **kwargs):
"""Add command to PAPI command history on DUT node.
Repr strings are used for argument values.
@@ -70,29 +70,17 @@ class PapiHistory:
VPP Stats:
vpp-stats(path=['^/if', '/err/ip4-input', '/sys/node/ip4-input'])
- VAT:
- sw_interface_set_flags sw_if_index 3 admin-up link-up
-
:param node: DUT node to add command to PAPI command history for.
:param csit_papi_command: Command to be added to PAPI command history.
- :param papi: Says if the command to store is PAPi or VAT. Remove when
- VAT executor is completely removed.
:param kwargs: Optional key-value arguments.
:type node: dict
:type csit_papi_command: str
- :type papi: bool
:type kwargs: dict
"""
- if papi:
- args = list()
- for key, val in kwargs.items():
- args.append(f"{key}={val!r}")
- item = f"{csit_papi_command}({u','.join(args)})"
- else:
- # This else part is here to store VAT commands.
- # VAT history is not used.
- # TODO: Remove when VatExecutor is completely removed.
- item = f"{csit_papi_command}"
+ args = list()
+ for key, val in kwargs.items():
+ args.append(f"{key}={val!r}")
+ item = f"{csit_papi_command}({u','.join(args)})"
DICT__DUTS_PAPI_HISTORY[node[u"host"]].append(item)
@staticmethod
@@ -120,4 +108,9 @@ class PapiHistory:
PapiHistory.show_papi_history(node)
-PapiHistory.reset_papi_history_on_all_duts(DICT__nodes)
+# This module can be imported outside usual Robot test context,
+# e.g. in pylint or by tools generating docs from docstrings.
+# For the tools to work, we need to avoid processing
+# when DICT__nodes value is not usable.
+if DICT__nodes:
+ PapiHistory.reset_papi_history_on_all_duts(DICT__nodes)
diff --git a/resources/libraries/python/PerfUtil.py b/resources/libraries/python/PerfUtil.py
deleted file mode 100644
index 6444cc595f..0000000000
--- a/resources/libraries/python/PerfUtil.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Linux perf utility."""
-
-from resources.libraries.python.Constants import Constants
-from resources.libraries.python.OptionString import OptionString
-from resources.libraries.python.ssh import exec_cmd
-from resources.libraries.python.topology import NodeType
-
-__all__ = [u"PerfUtil"]
-
-
-class PerfUtil:
- """Class contains methods for perf utility."""
-
- @staticmethod
- def perf_stat(node, cpu_list=None, duration=1):
- """Get perf stat read for duration.
-
- :param node: Node in the topology.
- :param cpu_list: CPU List as a string separated by comma.
- :param duration: Measure time in seconds.
- :type node: dict
- :type cpu_list: str
- :type duration: int
- """
- if cpu_list:
- cpu_list = list(dict.fromkeys(cpu_list.split(u",")))
- cpu_list = ",".join(str(cpu) for cpu in cpu_list)
-
- cmd_opts = OptionString(prefix=u"--")
- cmd_opts.add(u"no-aggr")
- cmd_opts.add_with_value_if(
- u"cpu", cpu_list, cpu_list
- )
- cmd_opts.add_if(
- u"all-cpus", not(cpu_list)
- )
- cmd_opts.add_with_value_if(
- u"event", f"'{{{Constants.PERF_STAT_EVENTS}}}'",
- Constants.PERF_STAT_EVENTS
- )
- cmd_opts.add_with_value(
- u"interval-print", 1000
- )
- cmd_opts.add_with_value(
- u"field-separator", u"';'"
- )
-
- cmd_base = OptionString()
- cmd_base.add(f"perf stat")
- cmd_base.extend(cmd_opts)
- cmd_base.add(u"--")
- cmd_base.add_with_value(u"sleep", int(duration))
-
- exec_cmd(node, cmd_base, sudo=True)
-
- @staticmethod
- def perf_stat_on_all_duts(nodes, cpu_list=None, duration=1):
- """Get perf stat read for duration on all DUTs.
-
- :param nodes: Nodes in the topology.
- :param cpu_list: CPU List.
- :param duration: Measure time in seconds.
- :type nodes: dict
- :type cpu_list: str
- :type duration: int
- """
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
- PerfUtil.perf_stat(node, cpu_list=cpu_list, duration=duration)
diff --git a/resources/libraries/python/Policer.py b/resources/libraries/python/Policer.py
index 37b1c7f745..28ed0b0aa9 100644
--- a/resources/libraries/python/Policer.py
+++ b/resources/libraries/python/Policer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -72,7 +72,7 @@ class Policer:
def policer_set_configuration(
node, policer_name, cir, eir, cbs, ebs, rate_type, round_type,
policer_type, conform_action_type, exceed_action_type,
- violate_action_type, color_aware, is_add=True, conform_dscp=None,
+ violate_action_type, color_aware, conform_dscp=None,
exceed_dscp=None, violate_dscp=None):
"""Configure policer on VPP node.
@@ -89,7 +89,6 @@ class Policer:
:param exceed_action_type: Exceed action type.
:param violate_action_type: Violate action type.
:param color_aware: Color-blind (cb) or color-aware (ca).
- :param is_add: Add policer if True, else delete.
:param conform_dscp: DSCP for conform mark_and_transmit action.
:param exceed_dscp: DSCP for exceed mark_and_transmit action.
:param violate_dscp: DSCP for violate mark_and_transmit action.
@@ -106,7 +105,6 @@ class Policer:
:type exceed_action_type: str
:type violate_action_type: str
:type color_aware: str
- :type is_add: bool
:type conform_dscp: str
:type exceed_dscp: str
:type violate_dscp: str
@@ -130,10 +128,8 @@ class Policer:
else 0
)
- cmd = u"policer_add_del"
- args = dict(
- is_add=is_add,
- name=str(policer_name),
+ cmd = u"policer_add"
+ infos = dict(
cir=int(cir),
eir=int(eir),
cb=int(cbs),
@@ -148,6 +144,10 @@ class Policer:
violate_action=violate_action,
color_aware=bool(color_aware == u"'ca'")
)
+ args = dict(
+ name=str(policer_name),
+ infos=infos,
+ )
err_msg = f"Failed to configure policer {policer_name} " \
f"on host {node['host']}"
diff --git a/resources/libraries/python/QATUtil.py b/resources/libraries/python/QATUtil.py
new file mode 100644
index 0000000000..e16221fb30
--- /dev/null
+++ b/resources/libraries/python/QATUtil.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""QAT util library."""
+
+from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.VPPUtil import VPPUtil
+from resources.libraries.python.ssh import exec_cmd_no_error
+
+
+class QATUtil:
+ """Contains methods for setting up QATs."""
+
+ @staticmethod
+ def crypto_device_verify_on_all_duts(nodes):
+ """Verify if Crypto QAT device and its virtual functions are initialized
+ on all DUTs.
+
+ :param nodes: Nodes in the topology.
+ :type nodes: dict
+ """
+ VPPUtil.stop_vpp_service_on_all_duts(nodes)
+
+ for node in nodes.values():
+ if node["type"] == NodeType.DUT:
+ cryptodevs = Topology.get_cryptodev(node)
+ if not cryptodevs:
+ return
+ for device in cryptodevs.values():
+ QATUtil.crypto_device_init(node, device)
+
+ @staticmethod
+ def crypto_device_init(node, device):
+ """Init Crypto QAT device virtual functions on DUT.
+
+ :param node: DUT node.
+ :param device: Crypto device entry from topology file.
+ :type node: dict
+ :type device: dict
+ """
+ DUTSetup.verify_kernel_module(node, device["module"], force_load=True)
+
+ current_driver = DUTSetup.get_pci_dev_driver(
+ node, device["pci_address"].replace(":", r"\:")
+ )
+ if current_driver is not None:
+ DUTSetup.pci_driver_unbind(node, device["pci_address"])
+ # Bind to kernel driver.
+ DUTSetup.pci_driver_bind(node, device["pci_address"], device["driver"])
+
+ cmd = f"adf_ctl status | grep {device['pci_address']} | "
+ cmd += "awk '{print $1}'"
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, sudo=True, message="Failed to check crypto device!"
+ )
+ if stdout.strip():
+ qat_dev = stdout.split("_")[-1]
+ conf_file = f"/etc/{device['driver']}_{qat_dev.strip()}.conf"
+ exec_cmd_no_error(
+ node, f"adf_ctl --config {conf_file} {stdout.strip()} restart",
+ sudo=True, message="Failed to restart crypto device!"
+ )
+ else:
+ raise ValueError("Crypto device error")
+
+ # Initialize QAT VFs.
+ if int(device["numvfs"]) > 0:
+ path = f"drivers/{device['driver']}"
+ DUTSetup.set_sriov_numvfs(
+ node, device["pci_address"], path=path,
+ numvfs=device["numvfs"]
+ )
+
+ if device["driver"] not in ["c4xxx"]:
+ for cvf in range(int(device["numvfs"])):
+ DUTSetup.pci_vf_driver_unbind(
+ node, device["pci_address"], cvf
+ )
+ DUTSetup.pci_vf_driver_bind(
+ node, device["pci_address"], cvf, "vfio-pci"
+ )
diff --git a/resources/libraries/python/QemuManager.py b/resources/libraries/python/QemuManager.py
index 766372ad9c..259b4c6981 100644
--- a/resources/libraries/python/QemuManager.py
+++ b/resources/libraries/python/QemuManager.py
@@ -154,7 +154,8 @@ class QemuManager:
smp=len(self.machines_affinity[name]),
mem=4096,
vnf=kwargs[u"vnf"],
- img=Constants.QEMU_VM_KERNEL
+ img=Constants.QEMU_VM_KERNEL,
+ page_size=kwargs[u"page_size"]
)
self.machines[name].add_default_params()
self.machines[name].add_kernelvm_params()
diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index 51fba6105e..2df89ee87c 100644
--- a/resources/libraries/python/QemuUtils.py
+++ b/resources/libraries/python/QemuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022-2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -42,7 +42,7 @@ class QemuUtils:
def __init__(
self, node, qemu_id=1, smp=1, mem=512, vnf=None,
- img=Constants.QEMU_VM_IMAGE):
+ img=Constants.QEMU_VM_IMAGE, page_size=u""):
"""Initialize QemuUtil class.
:param node: Node to run QEMU on.
@@ -51,12 +51,14 @@ class QemuUtils:
:param mem: Amount of memory.
:param vnf: Network function workload.
:param img: QEMU disk image or kernel image path.
+ :param page_size: Hugepage Size.
:type node: dict
:type qemu_id: int
:type smp: int
:type mem: int
:type vnf: str
:type img: str
+ :type page_size: str
"""
self._nic_id = 0
self._node = node
@@ -65,16 +67,13 @@ class QemuUtils:
# Architecture specific options
if self._arch == u"aarch64":
- dpdk_target = u"arm64-armv8a"
self._opt[u"machine_args"] = \
u"virt,accel=kvm,usb=off,mem-merge=off,gic-version=3"
self._opt[u"console"] = u"ttyAMA0"
else:
- dpdk_target = u"x86_64-native"
self._opt[u"machine_args"] = u"pc,accel=kvm,usb=off,mem-merge=off"
self._opt[u"console"] = u"ttyS0"
- self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/" \
- f"{dpdk_target}-linux-gcc/app"
+ self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/build/app"
self._vm_info = {
u"host": node[u"host"],
u"type": NodeType.VM,
@@ -94,6 +93,8 @@ class QemuUtils:
self._opt[u"smp"] = int(smp)
self._opt[u"img"] = img
self._opt[u"vnf"] = vnf
+ self._opt[u"page_size"] = page_size
+
# Temporary files.
self._temp = dict()
self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log"
@@ -118,6 +119,9 @@ class QemuUtils:
def add_default_params(self):
"""Set default QEMU command line parameters."""
+ mem_path = f"/dev/hugepages1G" \
+ if self._opt[u"page_size"] == u"1G" else u"/dev/hugepages"
+
self._params.add(u"daemonize")
self._params.add(u"nodefaults")
self._params.add_with_value(
@@ -136,11 +140,11 @@ class QemuUtils:
)
self._params.add_with_value(
u"object", f"memory-backend-file,id=mem,"
- f"size={self._opt.get(u'mem')}M,mem-path=/dev/hugepages,share=on"
+ f"size={self._opt.get(u'mem')}M,"
+ f"mem-path={mem_path},share=on"
)
self._params.add_with_value(u"m", f"{self._opt.get(u'mem')}M")
self._params.add_with_value(u"numa", u"node,memdev=mem")
- self._params.add_with_value(u"balloon", u"none")
def add_net_user(self, net="10.0.2.0/24"):
"""Set managment port forwarding."""
@@ -206,6 +210,8 @@ class QemuUtils:
def add_kernelvm_params(self):
"""Set KernelVM QEMU parameters."""
+ hugepages = 3 if self._opt[u"page_size"] == u"1G" else 512
+
self._params.add_with_value(
u"serial", f"file:{self._temp.get(u'log')}"
)
@@ -224,7 +230,8 @@ class QemuUtils:
self._params.add_with_value(
u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
f"root=virtioroot console={self._opt.get(u'console')} "
- f"tsc=reliable hugepages=512 "
+ f"tsc=reliable hugepages={hugepages} "
+ f"hugepagesz={self._opt.get(u'page_size')} "
f"init={self._temp.get(u'ini')} fastboot'"
)
@@ -247,9 +254,11 @@ class QemuUtils:
:type virtio_feature_mask: int
"""
self._nic_id += 1
+ if jumbo_frames:
+ logger.debug(u"Jumbo frames temporarily disabled!")
self._params.add_with_value(
u"chardev", f"socket,id=char{self._nic_id},"
- f"path={socket}{u',server' if server is True else u''}"
+ f"path={socket}{u',server=on' if server is True else u''}"
)
self._params.add_with_value(
u"netdev", f"vhost-user,id=vhost{self._nic_id},"
@@ -315,9 +324,10 @@ class QemuUtils:
vpp_config.add_unix_exec(running)
vpp_config.add_socksvr()
vpp_config.add_main_heap_size(u"512M")
- vpp_config.add_main_heap_page_size(u"2M")
+ vpp_config.add_main_heap_page_size(self._opt[u"page_size"])
+ vpp_config.add_default_hugepage_size(self._opt[u"page_size"])
vpp_config.add_statseg_size(u"512M")
- vpp_config.add_statseg_page_size(u"2M")
+ vpp_config.add_statseg_page_size(self._opt[u"page_size"])
vpp_config.add_statseg_per_node_counters(u"on")
vpp_config.add_buffers_per_numa(107520)
vpp_config.add_cpu_main_core(u"0")
@@ -368,7 +378,7 @@ class QemuUtils:
eal_pci_whitelist0=u"0000:00:06.0",
eal_pci_whitelist1=u"0000:00:07.0",
eal_in_memory=True,
- pmd_num_mbufs=16384,
+ pmd_num_mbufs=32768,
pmd_fwd_mode=u"io",
pmd_nb_ports=u"2",
pmd_portmask=u"0x3",
@@ -395,7 +405,7 @@ class QemuUtils:
eal_pci_whitelist0=u"0000:00:06.0",
eal_pci_whitelist1=u"0000:00:07.0",
eal_in_memory=True,
- pmd_num_mbufs=16384,
+ pmd_num_mbufs=32768,
pmd_fwd_mode=u"mac",
pmd_nb_ports=u"2",
pmd_portmask=u"0x3",
@@ -595,7 +605,7 @@ class QemuUtils:
except AttributeError:
self._wait_default()
- def _wait_default(self, retries=60):
+ def _wait_default(self, retries=120):
"""Wait until QEMU with VPP is booted.
:param retries: Number of retries.
@@ -722,7 +732,9 @@ class QemuUtils:
message = f"QEMU: Start failed on {self._node[u'host']}!"
try:
DUTSetup.check_huge_page(
- self._node, u"/dev/hugepages", int(self._opt.get(u"mem")))
+ self._node, self._opt.get(u"mem-path"),
+ int(self._opt.get(u"mem"))
+ )
exec_cmd_no_error(
self._node, cmd_opts, timeout=300, sudo=True, message=message
diff --git a/resources/libraries/python/SRv6.py b/resources/libraries/python/SRv6.py
index 4ff8866bda..0170df5ef6 100644
--- a/resources/libraries/python/SRv6.py
+++ b/resources/libraries/python/SRv6.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -222,13 +222,14 @@ class SRv6:
:type sid_list: list
:type mode: str
"""
- cmd = u"sr_policy_add"
+ cmd = u"sr_policy_add_v2"
args = dict(
bsid_addr=IPv6Address(bsid).packed,
weight=1,
is_encap=bool(mode == u"encap"),
- is_spray=False,
- sids=SRv6.create_srv6_sid_list(sid_list)
+ type=0, # Neither SPRAY nor TEF are needed yet.
+ sids=SRv6.create_srv6_sid_list(sid_list),
+ # encap_src is optional, do not set yet.
)
err_msg = f"Failed to add SR policy for BindingSID {bsid} " \
f"on host {node[u'host']}"
@@ -243,7 +244,7 @@ class SRv6:
:param node: Given node to show SRv6 policies on.
:type node: dict
"""
- cmd = u"sr_policies_dump"
+ cmd = u"sr_policies_v2_dump"
PapiSocketExecutor.dump_and_log(node, (cmd,))
@staticmethod
diff --git a/resources/libraries/python/SchedUtils.py b/resources/libraries/python/SchedUtils.py
index bb14c29de8..cfc75344f9 100644
--- a/resources/libraries/python/SchedUtils.py
+++ b/resources/libraries/python/SchedUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/SetupFramework.py b/resources/libraries/python/SetupFramework.py
index 45447e923b..95ca8a7d51 100644
--- a/resources/libraries/python/SetupFramework.py
+++ b/resources/libraries/python/SetupFramework.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -19,6 +19,7 @@ supposed to end up here.
from os import environ, remove
from tempfile import NamedTemporaryFile
import threading
+import traceback
from robot.api import logger
@@ -55,7 +56,8 @@ def pack_framework_dir():
run(
[
u"tar", u"--sparse", u"--exclude-vcs", u"--exclude=output*.xml",
- u"--exclude=./tmp", u"-zcf", file_name, u"."
+ u"--exclude=./tmp", u"--exclude=./env", u"--exclude=./.git",
+ u"-zcf", file_name, u"."
], msg=u"Could not pack testing framework"
)
@@ -104,7 +106,7 @@ def extract_tarball_at_node(tarball, node):
node, cmd,
message=f"Failed to extract {tarball} at node {node[u'type']} "
f"host {node[u'host']}, port {node[u'port']}",
- timeout=30, include_reason=True
+ timeout=600, include_reason=True
)
logger.console(
f"Extracting tarball to {con.REMOTE_FW_DIR} on {node[u'type']} "
@@ -115,9 +117,13 @@ def extract_tarball_at_node(tarball, node):
def create_env_directory_at_node(node):
"""Create fresh virtualenv to a directory, install pip requirements.
+ Return stdout and stderr of the command,
+ so we see which installs are behaving weird (e.g. attempting download).
+
:param node: Node to create virtualenv on.
:type node: dict
- :returns: nothing
+ :returns: Stdout and stderr.
+ :rtype: str, str
:raises RuntimeError: When failed to setup virtualenv.
"""
logger.console(
@@ -126,9 +132,10 @@ def create_env_directory_at_node(node):
)
cmd = f"cd {con.REMOTE_FW_DIR} && rm -rf env && virtualenv " \
f"-p $(which python3) --system-site-packages --never-download env " \
- f"&& source env/bin/activate && pip3 install -r requirements.txt"
- exec_cmd_no_error(
- node, cmd, timeout=100, include_reason=True,
+ f"&& source env/bin/activate && ANSIBLE_SKIP_CONFLICT_CHECK=1 " \
+ f"pip3 install -r requirements.txt"
+ stdout, stderr = exec_cmd_no_error(
+ node, cmd, timeout=300, include_reason=True,
message=f"Failed install at node {node[u'type']} host {node[u'host']}, "
f"port {node[u'port']}"
)
@@ -136,19 +143,22 @@ def create_env_directory_at_node(node):
f"Virtualenv setup on {node[u'type']} host {node[u'host']}, "
f"port {node[u'port']} done."
)
+ return stdout, stderr
-def setup_node(node, tarball, remote_tarball, results=None):
+def setup_node(node, tarball, remote_tarball, results=None, logs=None):
"""Copy a tarball to a node and extract it.
:param node: A node where the tarball will be copied and extracted.
:param tarball: Local path of tarball to be copied.
:param remote_tarball: Remote path of the tarball.
:param results: A list where to store the result of node setup, optional.
+ :param logs: A list where to store anything that should be logged.
:type node: dict
:type tarball: str
:type remote_tarball: str
:type results: list
+ :type logs: list
:returns: True - success, False - error
:rtype: bool
"""
@@ -156,12 +166,18 @@ def setup_node(node, tarball, remote_tarball, results=None):
copy_tarball_to_node(tarball, node)
extract_tarball_at_node(remote_tarball, node)
if node[u"type"] == NodeType.TG:
- create_env_directory_at_node(node)
- except RuntimeError as exc:
- logger.console(
- f"Node {node[u'type']} host {node[u'host']}, port {node[u'port']} "
- f"setup failed, error: {exc!r}"
- )
+ stdout, stderr = create_env_directory_at_node(node)
+ if isinstance(logs, list):
+ logs.append(f"{node[u'host']} Env stdout: {stdout}")
+ logs.append(f"{node[u'host']} Env stderr: {stderr}")
+ except Exception:
+ # Any exception must result in result = False,
+ # since this runs in a thread and cannot be caught anywhere else.
+ err_msg = f"Node {node[u'type']} host {node[u'host']}, " \
+ f"port {node[u'port']} setup failed."
+ logger.console(err_msg)
+ if isinstance(logs, list):
+ logs.append(f"{err_msg} Exception: {traceback.format_exc()}")
result = False
else:
logger.console(
@@ -199,7 +215,7 @@ def delete_framework_dir(node):
node, f"sudo rm -rf {con.REMOTE_FW_DIR}",
message=f"Framework delete failed at node {node[u'type']} "
f"host {node[u'host']}, port {node[u'port']}",
- timeout=100, include_reason=True
+ timeout=100, include_reason=True,
)
logger.console(
f"Deleting framework directory on {node[u'type']} host {node[u'host']},"
@@ -207,23 +223,26 @@ def delete_framework_dir(node):
)
-def cleanup_node(node, results=None):
+def cleanup_node(node, results=None, logs=None):
"""Delete a tarball from a node.
:param node: A node where the tarball will be deleted.
:param results: A list where to store the result of node cleanup, optional.
+ :param logs: A list where to store anything that should be logged.
:type node: dict
:type results: list
+ :type logs: list
:returns: True - success, False - error
:rtype: bool
"""
try:
delete_framework_dir(node)
- except RuntimeError:
- logger.error(
- f"Cleanup of node {node[u'type']} host {node[u'host']}, "
- f"port {node[u'port']} failed."
- )
+ except Exception:
+ err_msg = f"Cleanup of node {node[u'type']} host {node[u'host']}, " \
+ f"port {node[u'port']} failed."
+ logger.console(err_msg)
+ if isinstance(logs, list):
+ logs.append(f"{err_msg} Exception: {traceback.format_exc()}")
result = False
else:
logger.console(
@@ -240,9 +259,9 @@ def cleanup_node(node, results=None):
class SetupFramework:
"""Setup suite run on topology nodes.
- Many VAT/CLI based tests need the scripts at remote hosts before executing
- them. This class packs the whole testing directory and copies it over
- to all nodes in topology under /tmp/
+ Some tests need the scripts at remote hosts before executing them.
+ This class packs the whole testing directory and copies it over
+ to all nodes in topology under /tmp/.
"""
@staticmethod
@@ -261,16 +280,17 @@ class SetupFramework:
remote_tarball = f"{tarball}"
results = list()
+ logs = list()
threads = list()
for node in nodes.values():
- args = node, tarball, remote_tarball, results
+ args = node, tarball, remote_tarball, results, logs
thread = threading.Thread(target=setup_node, args=args)
thread.start()
threads.append(thread)
logger.info(
- f"Executing node setups in parallel, waiting for threads to end"
+ u"Executing node setups in parallel, waiting for threads to end."
)
for thread in threads:
@@ -278,6 +298,9 @@ class SetupFramework:
logger.info(f"Results: {results}")
+ for log in logs:
+ logger.trace(log)
+
delete_local_tarball(tarball)
if all(results):
logger.console(u"All nodes are ready.")
@@ -303,10 +326,12 @@ class CleanupFramework:
"""
results = list()
+ logs = list()
threads = list()
for node in nodes.values():
- thread = threading.Thread(target=cleanup_node, args=(node, results))
+ thread = threading.Thread(target=cleanup_node,
+ args=(node, results, logs))
thread.start()
threads.append(thread)
@@ -319,6 +344,9 @@ class CleanupFramework:
logger.info(f"Results: {results}")
+ for log in logs:
+ logger.trace(log)
+
if all(results):
logger.console(u"All nodes cleaned up.")
else:
diff --git a/resources/libraries/python/SysctlUtil.py b/resources/libraries/python/SysctlUtil.py
index f8c169a833..6c5e9e2081 100644
--- a/resources/libraries/python/SysctlUtil.py
+++ b/resources/libraries/python/SysctlUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/TGSetup.py b/resources/libraries/python/TGSetup.py
index e105921e23..7b3cee4098 100644
--- a/resources/libraries/python/TGSetup.py
+++ b/resources/libraries/python/TGSetup.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/TRexConfigGenerator.py b/resources/libraries/python/TRexConfigGenerator.py
new file mode 100644
index 0000000000..c50b42610c
--- /dev/null
+++ b/resources/libraries/python/TRexConfigGenerator.py
@@ -0,0 +1,301 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""TRex Configuration File Generator library."""
+
+import re
+import yaml
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.topology import NodeType, NodeSubTypeTG
+from resources.libraries.python.topology import Topology
+
+
+__all__ = ["TrexConfigGenerator", "TrexConfig"]
+
+def pci_dev_check(pci_dev):
+ """Check if provided PCI address is in correct format.
+
+ :param pci_dev: PCI address (expected format: xxxx:xx:xx.x).
+ :type pci_dev: str
+ :returns: True if PCI address is in correct format.
+ :rtype: bool
+ :raises ValueError: If PCI address is in incorrect format.
+ """
+ pattern = re.compile(
+ r"^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:[0-9A-Fa-f]{2}\.[0-9A-Fa-f]$"
+ )
+ if not re.match(pattern, pci_dev):
+ raise ValueError(
+ f"PCI address {pci_dev} is not in valid format xxxx:xx:xx.x"
+ )
+ return True
+
+
+class TrexConfigGenerator:
+ """TRex Startup Configuration File Generator."""
+
+ def __init__(self):
+ """Initialize library.
+ """
+ self._node = ""
+ self._node_key = ""
+ self._node_config = dict()
+ self._node_serialized_config = ""
+ self._startup_configuration_path = "/etc/trex_cfg.yaml"
+
+ def set_node(self, node, node_key=None):
+ """Set topology node.
+
+ :param node: Node to store configuration on.
+ :param node_key: Topology node key.
+ :type node: dict
+ :type node_key: str
+ :raises RuntimeError: If Node type is not TG and subtype is not TREX.
+ """
+ if node.get("type") is None:
+ msg = "Node type is not defined!"
+ elif node["type"] != NodeType.TG:
+ msg = f"Node type is {node['type']!r}, not a TG!"
+ elif node.get("subtype") is None:
+ msg = "TG subtype is not defined"
+ elif node["subtype"] != NodeSubTypeTG.TREX:
+ msg = f"TG subtype {node['subtype']!r} is not supported"
+ else:
+ self._node = node
+ self._node_key = node_key
+ return
+ raise RuntimeError(msg)
+
+ def get_serialized_config(self):
+ """Get serialized startup configuration in YAML format.
+
+ :returns: Startup configuration in YAML format.
+ :rtype: str
+ """
+ self.serialize_config(self._node_config)
+ return self._node_serialized_config
+
+ def serialize_config(self, obj):
+ """Serialize the startup configuration in YAML format.
+
+ :param obj: Python Object to print.
+ :type obj: Obj
+ """
+ self._node_serialized_config = yaml.dump([obj], default_style=None)
+
+ def add_config_item(self, config, value, path):
+ """Add startup configuration item.
+
+ :param config: Startup configuration of node.
+ :param value: Value to insert.
+ :param path: Path where to insert item.
+ :type config: dict
+ :type value: str
+ :type path: list
+ """
+ if len(path) == 1:
+ config[path[0]] = value
+ return
+ if path[0] not in config:
+ config[path[0]] = dict()
+ elif isinstance(config[path[0]], str):
+ config[path[0]] = dict() if config[path[0]] == "" \
+ else {config[path[0]]: ""}
+ self.add_config_item(config[path[0]], value, path[1:])
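A small sketch of how the recursive path insertion builds the nested TRex configuration (the values and paths here are illustrative only):

    generator = TrexConfigGenerator()
    config = dict()
    generator.add_config_item(config, 0, ["platform", "master_thread_id"])
    generator.add_config_item(config, 1, ["platform", "latency_thread_id"])
    # config == {"platform": {"master_thread_id": 0, "latency_thread_id": 1}}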
+
+ def add_version(self, value=2):
+ """Add config file version.
+
+ :param value: Version of configuration file.
+ :type value: int
+ """
+ path = ["version"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_c(self, value):
+ """Add core count.
+
+ :param value: Core count.
+ :type value: int
+ """
+ path = ["c"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_limit_memory(self, value):
+ """Add memory limit.
+
+ :param value: Memory limit.
+ :type value: str
+ """
+ path = ["limit_memory"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_interfaces(self, devices):
+ """Add PCI device configuration.
+
+ :param devices: PCI device(s) (format xxxx:xx:xx.x).
+ :type devices: list(str)
+ """
+ for device in devices:
+ pci_dev_check(device)
+
+ path = ["interfaces"]
+ self.add_config_item(self._node_config, devices, path)
+
+ def add_rx_desc(self, value):
+ """Add RX descriptors.
+
+ :param value: RX descriptors count.
+ :type value: int
+ """
+ path = ["rx_desc"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_tx_desc(self, value):
+ """Add TX descriptors.
+
+ :param value: TX descriptors count.
+ :type value: int
+ """
+ path = ["tx_desc"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_port_info(self, value):
+ """Add port information configuration.
+
+ :param value: Port information configuration.
+ :type value: list(dict)
+ """
+ path = ["port_info"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_platform_master_thread_id(self, value):
+ """Add platform master thread ID.
+
+ :param value: Master thread ID.
+ :type value: int
+ """
+ path = ["platform", "master_thread_id"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_platform_latency_thread_id(self, value):
+ """Add platform latency thread ID.
+
+ :param value: Latency thread ID.
+ :type value: int
+ """
+ path = ["platform", "latency_thread_id"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_platform_dual_if(self, value):
+ """Add platform dual interface configuration.
+
+ :param value: Dual interface configuration.
+ :type value: list(dict)
+ """
+ path = ["platform", "dual_if"]
+ self.add_config_item(self._node_config, value, path)
+
+ def write_config(self, path=None):
+ """Generate and write TRex startup configuration to file.
+
+ :param path: Override startup configuration path.
+ :type path: str
+ """
+ self.serialize_config(self._node_config)
+
+ if path is None:
+ path = self._startup_configuration_path
+
+ command = f"echo \"{self._node_serialized_config}\" | sudo tee {path}"
+ message = "Writing TRex startup configuration failed!"
+ exec_cmd_no_error(self._node, command, message=message)
+
+
+class TrexConfig:
+ """TRex Configuration Class.
+ """
+ @staticmethod
+ def add_startup_configuration(node, tg_topology):
+ """Apply TRex startup configuration.
+
+ :param node: TRex node in the topology.
+ :param tg_topology: Ordered TRex links.
+ :type node: dict
+ :type tg_topology: list(dict)
+ """
+ pci_addresses = list()
+ dual_if = list()
+ port_info = list()
+ master_thread_id = None
+ latency_thread_id = None
+ cores = None
+ sockets = list()
+
+ for idx, link in enumerate(tg_topology):
+ pci_addresses.append(
+ Topology().get_interface_pci_addr(node, link["interface"])
+ )
+ if len(tg_topology) > 2:
+ # Multiple dual_ifs must not share the cores.
+ tg_dtc = Constants.TREX_CORE_COUNT_MULTI
+ tg_dtc_offset = Constants.TREX_CORE_COUNT_MULTI * (idx // 2)
+ else:
+ # Single dual_if can share cores.
+ tg_dtc = Constants.TREX_CORE_COUNT
+ tg_dtc_offset = 0
+ master_thread_id, latency_thread_id, socket, threads = \
+ CpuUtils.get_affinity_trex(
+ node, link["interface"], tg_dtc=tg_dtc,
+ tg_dtc_offset=tg_dtc_offset
+ )
+ dual_if.append(dict(socket=socket, threads=threads))
+ cores = len(threads)
+ port_info.append(
+ dict(
+ src_mac=Topology().get_interface_mac(
+ node, link["interface"]
+ ),
+ dest_mac=link["dst_mac"]
+ )
+ )
+ sockets.append(socket)
+
+ limit_memory = f"{Constants.TREX_LIMIT_MEMORY}"
+ if len(tg_topology) <= 2 and 0 in sockets and 1 in sockets:
+ limit_memory = (
+ f"{Constants.TREX_LIMIT_MEMORY},{Constants.TREX_LIMIT_MEMORY}"
+ )
+ if len(tg_topology) > 2:
+ limit_memory = (
+ f"{Constants.TREX_LIMIT_MEMORY_MULTI}"
+ )
+
+ trex_config = TrexConfigGenerator()
+ trex_config.set_node(node)
+ trex_config.add_version()
+ trex_config.add_interfaces(pci_addresses)
+ trex_config.add_c(cores)
+ trex_config.add_limit_memory(limit_memory)
+ trex_config.add_port_info(port_info)
+ if Constants.TREX_RX_DESCRIPTORS_COUNT != 0:
+ trex_config.add_rx_desc(Constants.TREX_RX_DESCRIPTORS_COUNT)
+ if Constants.TREX_TX_DESCRIPTORS_COUNT != 0:
+ trex_config.add_tx_desc(Constants.TREX_TX_DESCRIPTORS_COUNT)
+ trex_config.add_platform_master_thread_id(int(master_thread_id))
+ trex_config.add_platform_latency_thread_id(int(latency_thread_id))
+ trex_config.add_platform_dual_if(dual_if)
+ trex_config.write_config()
diff --git a/resources/libraries/python/Tap.py b/resources/libraries/python/Tap.py
index c729d602b1..7380344b72 100644
--- a/resources/libraries/python/Tap.py
+++ b/resources/libraries/python/Tap.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -64,7 +64,7 @@ class Tap:
:returns: Returns a interface index.
:rtype: int
"""
- cmd = u"tap_create_v2"
+ cmd = u"tap_create_v3"
args = dict(
id=Constants.BITWISE_NON_ZERO,
use_random_mac=bool(mac is None),
@@ -210,7 +210,7 @@ class TapFeatureMask:
@staticmethod
def is_feature_enabled(tap_feature_mask, tap_feature_flag):
"""Checks if concrete tap feature is enabled within
- tap_feature_mask
+ tap_feature_mask
:param tap_feature_mask: Mask of enabled tap features
:param tap_feature_flag: Checked tap feature
:type tap_feature_mask: int
diff --git a/resources/libraries/python/TelemetryUtil.py b/resources/libraries/python/TelemetryUtil.py
new file mode 100644
index 0000000000..63d0bf60a7
--- /dev/null
+++ b/resources/libraries/python/TelemetryUtil.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2022 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Telemetry utility."""
+
+from resources.libraries.python.model.ExportResult import append_telemetry
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.topology import NodeType
+
+__all__ = ["TelemetryUtil"]
+
+
+class TelemetryUtil:
+ """Class contains methods for telemetry utility."""
+
+ @staticmethod
+ def _run_telemetry(
+ node, profile, sid=None, spath=None, rate="", export=False):
+ """Get telemetry read on node.
+
+ :param node: Node in the topology.
+ :param profile: Telemetry configuration profile.
+ :param sid: Socket ID used to describe recipient side of socket.
+ :param spath: Socket path.
+ :param rate: Telemetry load, unique within the test (optional).
+ :param export: If false, do not attempt JSON export (default false).
+ :type node: dict
+ :type profile: str
+ :type sid: str
+ :type spath: str
+ :type rate: str
+ :type export: bool
+ """
+ config = ""
+ config += f"{Constants.REMOTE_FW_DIR}/"
+ config += f"{Constants.RESOURCES_TPL_TELEMETRY}/"
+ config += f"{profile}"
+
+ cd_cmd = ""
+ cd_cmd += f"sh -c \"cd {Constants.REMOTE_FW_DIR}/"
+ cd_cmd += f"{Constants.RESOURCES_TOOLS}"
+
+ if spath:
+ bin_cmd = f"python3 -m telemetry --config {config} --hook {spath}\""
+ else:
+ bin_cmd = f"python3 -m telemetry --config {config}\""
+ exec_cmd_no_error(node, f"{cd_cmd} && {bin_cmd}", sudo=True)
+
+ if not export:
+ return
+
+ hostname = exec_cmd_no_error(node, "hostname")[0].strip()
+ stdout, _ = exec_cmd_no_error(
+ node, "cat /tmp/metric.prom", sudo=True, log_stdout_err=False
+ )
+ prefix = "{"
+ prefix += f"hostname=\"{hostname}\","
+ if sid:
+ prefix += f"hook=\"{sid}\","
+ prefix += f"rate=\"{rate}\","
+ for line in stdout.splitlines():
+ if line and not line.startswith("#"):
+ append_telemetry(
+ prefix.join(line.rsplit("{", 1)).replace("\"", "'")
+ )
+
+ def run_telemetry_on_all_duts(self, nodes, profile, rate="", export=False):
+ """Get telemetry read on all DUTs.
+
+ :param nodes: Nodes in the topology.
+ :param profile: Telemetry configuration profile.
+ :param rate: Telemetry load, unique within the test (optional).
+ :param export: If false, do not attempt JSON export (default false).
+ :type nodes: dict
+ :type profile: str
+ :type rate: str
+ :type export: bool
+ """
+ for node in nodes.values():
+ if node["type"] == NodeType.DUT:
+ try:
+ for sid, spath in node["sockets"]["CLI"].items():
+ self._run_telemetry(
+ node, profile=profile, sid=sid, spath=spath,
+ rate=rate, export=export
+ )
+ except IndexError:
+ pass
diff --git a/resources/libraries/python/TestConfig.py b/resources/libraries/python/TestConfig.py
index 9e104e2098..eb093a4651 100644
--- a/resources/libraries/python/TestConfig.py
+++ b/resources/libraries/python/TestConfig.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -23,7 +23,6 @@ from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.IPUtil import IPUtil
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import Topology
-from resources.libraries.python.VatExecutor import VatExecutor
class TestConfig:
@@ -117,38 +116,6 @@ class TestConfig:
src_ip_start = ip_address(src_ip_start)
dst_ip_start = ip_address(dst_ip_start)
- if vxlan_count > 10:
- commands = list()
- for i in range(0, vxlan_count):
- try:
- src_ip = src_ip_start + i * ip_step
- dst_ip = dst_ip_start + i * ip_step
- except AddressValueError:
- logger.warn(
- u"Can't do more iterations - IP address limit "
- u"has been reached."
- )
- vxlan_count = i
- break
- commands.append(
- f"sw_interface_add_del_address sw_if_index "
- f"{Topology.get_interface_sw_index(node, node_vxlan_if)} "
- f"{src_ip}/{128 if src_ip.version == 6 else 32}\n"
- )
- commands.append(
- f"vxlan_add_del_tunnel src {src_ip} dst {dst_ip} "
- f"vni {vni_start + i}\n"
- )
- commands.append(
- f"create_vlan_subif sw_if_index "
- f"{Topology.get_interface_sw_index(node, node_vlan_if)} "
- f"vlan {i + 1}\n"
- )
- VatExecutor().write_and_execute_script(
- node, u"/tmp/create_vxlan_interfaces.config", commands
- )
- return vxlan_count
-
cmd1 = u"sw_interface_add_del_address"
args1 = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, node_vxlan_if),
@@ -156,7 +123,7 @@ class TestConfig:
del_all=False,
prefix=None
)
- cmd2 = u"vxlan_add_del_tunnel"
+ cmd2 = u"vxlan_add_del_tunnel_v3"
args2 = dict(
is_add=True,
instance=Constants.BITWISE_NON_ZERO,
@@ -174,7 +141,7 @@ class TestConfig:
vlan_id=None
)
- with PapiSocketExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(0, vxlan_count):
try:
src_ip = src_ip_start + i * ip_step
@@ -198,9 +165,9 @@ class TestConfig:
args2[u"vni"] = int(vni_start) + i
args3[u"vlan_id"] = i + 1
history = bool(not 1 < i < vxlan_count - 1)
- papi_exec.add(cmd1, history=history, **args1).\
- add(cmd2, history=history, **args2).\
- add(cmd3, history=history, **args3)
+ papi_exec.add(cmd1, history=history, **args1)
+ papi_exec.add(cmd2, history=history, **args2)
+ papi_exec.add(cmd3, history=history, **args3)
papi_exec.get_replies()
return vxlan_count
@@ -220,50 +187,6 @@ class TestConfig:
:type node_vlan_if: str
"""
if_data = InterfaceUtil.vpp_get_interface_data(node)
- if vxlan_count > 10:
- commands = list()
- for i in range(0, vxlan_count):
- vxlan_subif_key = Topology.add_new_port(node, u"vxlan_tunnel")
- vxlan_subif_name = f"vxlan_tunnel{i}"
- founds = dict(vxlan=False, vlan=False)
- vxlan_subif_idx = None
- vlan_subif_key = Topology.add_new_port(node, u"vlan_subif")
- vlan_subif_name = \
- f"{Topology.get_interface_name(node, node_vlan_if)}.{i + 1}"
- vlan_idx = None
- for data in if_data:
- if_name = data[u"interface_name"]
- if not founds[u"vxlan"] and if_name == vxlan_subif_name:
- vxlan_subif_idx = data[u"sw_if_index"]
- founds[u"vxlan"] = True
- elif not founds[u"vlan"] and if_name == vlan_subif_name:
- vlan_idx = data[u"sw_if_index"]
- founds[u"vlan"] = True
- if founds[u"vxlan"] and founds[u"vlan"]:
- break
- Topology.update_interface_sw_if_index(
- node, vxlan_subif_key, vxlan_subif_idx)
- Topology.update_interface_name(
- node, vxlan_subif_key, vxlan_subif_name)
- commands.append(
- f"sw_interface_set_flags sw_if_index {vxlan_subif_idx} "
- f"admin-up link-up\n"
- )
- Topology.update_interface_sw_if_index(
- node, vlan_subif_key, vlan_idx
- )
- Topology.update_interface_name(
- node, vlan_subif_key, vlan_subif_name
- )
- commands.append(
- f"sw_interface_set_flags sw_if_index {vlan_idx} admin-up "
- f"link-up\n"
- )
- VatExecutor().write_and_execute_script(
- node, u"/tmp/put_subinterfaces_up.config", commands
- )
- return
-
cmd = u"sw_interface_set_flags"
args1 = dict(
sw_if_index=None,
@@ -274,7 +197,7 @@ class TestConfig:
flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
)
- with PapiSocketExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(0, vxlan_count):
vxlan_subif_key = Topology.add_new_port(node, u"vxlan_tunnel")
vxlan_subif_name = f"vxlan_tunnel{i}"
@@ -310,9 +233,8 @@ class TestConfig:
)
args2[u"sw_if_index"] = vlan_idx
history = bool(not 1 < i < vxlan_count - 1)
- papi_exec.add(cmd, history=history, **args1). \
- add(cmd, history=history, **args2)
- papi_exec.add(cmd, **args1).add(cmd, **args2)
+ papi_exec.add(cmd, history=history, **args1)
+ papi_exec.add(cmd, history=history, **args2)
papi_exec.get_replies()
@staticmethod
@@ -344,43 +266,6 @@ class TestConfig:
"""
dst_ip_start = ip_address(dst_ip_start)
- if vxlan_count > 1:
- idx_vxlan_if = Topology.get_interface_sw_index(node, node_vxlan_if)
- commands = list()
- for i in range(0, vxlan_count):
- dst_ip = dst_ip_start + i * ip_step
- commands.append(
- f"exec ip neighbor "
- f"{Topology.get_interface_name(node, node_vxlan_if)} "
- f"{dst_ip} "
- f"{Topology.get_interface_mac(op_node, op_node_if)} static "
- f"\n"
- )
- commands.append(
- f"ip_route_add_del "
- f"{dst_ip}/{128 if dst_ip.version == 6 else 32} count 1 "
- f"via {dst_ip} sw_if_index {idx_vxlan_if}\n"
- )
- sw_idx_vxlan = Topology.get_interface_sw_index(
- node, f"vxlan_tunnel{i + 1}"
- )
- commands.append(
- f"sw_interface_set_l2_bridge sw_if_index {sw_idx_vxlan} "
- f"bd_id {bd_id_start + i} shg 0 enable\n"
- )
- sw_idx_vlan = Topology.get_interface_sw_index(
- node, f"vlan_subif{i + 1}"
- )
- commands.append(
- f"sw_interface_set_l2_bridge sw_if_index {sw_idx_vlan} "
- f"bd_id {bd_id_start + i} shg 0 enable\n"
- )
- VatExecutor().write_and_execute_script(
- node, u"/tmp/configure_routes_and_bridge_domains.config",
- commands
- )
- return
-
cmd1 = u"ip_neighbor_add_del"
neighbor = dict(
sw_if_index=Topology.get_interface_sw_index(node, node_vxlan_if),
@@ -422,7 +307,7 @@ class TestConfig:
enable=1
)
- with PapiSocketExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(0, vxlan_count):
args1[u"neighbor"][u"ip_address"] = \
str(dst_ip_start + i * ip_step)
@@ -439,8 +324,9 @@ class TestConfig:
)
args4[u"bd_id"] = int(bd_id_start+i)
history = bool(not 1 < i < vxlan_count - 1)
- papi_exec.add(cmd1, history=history, **args1). \
- add(cmd2, history=history, **args2). \
- add(cmd3, history=history, **args3). \
- add(cmd3, history=history, **args4)
+ papi_exec.add(cmd1, history=history, **args1)
+ papi_exec.add(cmd2, history=history, **args2)
+ papi_exec.add(cmd3, history=history, **args3)
+ # Yes, args4 goes with cmd3, there is no cmd4.
+ papi_exec.add(cmd3, history=history, **args4)
papi_exec.get_replies()
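The hunks above replace chained .add() calls with separate adds inside an asynchronous PapiSocketExecutor. A minimal sketch of that pattern, assuming node, vxlan_count, sw_if_indices and args are prepared as in the code above (they are placeholders here):

    with PapiSocketExecutor(node, is_async=True) as papi_exec:
        for i in range(vxlan_count):
            # Record only the first two and the last iteration in PAPI history.
            history = bool(not 1 < i < vxlan_count - 1)
            args[u"sw_if_index"] = sw_if_indices[i]
            papi_exec.add(u"sw_interface_set_flags", history=history, **args)
        # All queued commands are sent in one batch, replies collected together.
        papi_exec.get_replies()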
diff --git a/resources/libraries/python/Trace.py b/resources/libraries/python/Trace.py
index 9c02286b0f..f82ab95f2e 100644
--- a/resources/libraries/python/Trace.py
+++ b/resources/libraries/python/Trace.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -37,12 +37,21 @@ class Trace:
@staticmethod
def clear_packet_trace_on_all_duts(nodes):
- """Clear VPP packet trace.
+ """Clear VPP packet trace on all duts.
:param nodes: Nodes where the packet trace will be cleared.
:type nodes: dict
"""
for node in nodes.values():
if node[u"type"] == NodeType.DUT:
- PapiSocketExecutor.run_cli_cmd_on_all_sockets(
- node, u"clear trace")
+ Trace.clear_packet_trace_on_dut(node)
+
+ @staticmethod
+ def clear_packet_trace_on_dut(node):
+ """Clear VPP packet trace on dut.
+
+ :param node: Node where the packet trace will be cleared.
+ :type node: dict
+ """
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+ node, u"clear trace")
diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py
index 23337b2848..936cb3a06d 100644
--- a/resources/libraries/python/TrafficGenerator.py
+++ b/resources/libraries/python/TrafficGenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,23 +13,28 @@
"""Performance testing traffic generator library."""
+import math
import time
+from typing import Callable, List, Optional, Union
+
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from .Constants import Constants
-from .CpuUtils import CpuUtils
from .DropRateSearch import DropRateSearch
-from .MLRsearch.AbstractMeasurer import AbstractMeasurer
-from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
-from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
+from .MLRsearch import (
+ AbstractMeasurer, Config, GoalResult, MeasurementResult,
+ MultipleLossRatioSearch, SearchGoal,
+)
from .PLRsearch.PLRsearch import PLRsearch
from .OptionString import OptionString
from .ssh import exec_cmd_no_error, exec_cmd
from .topology import NodeType
from .topology import NodeSubTypeTG
from .topology import Topology
+from .TRexConfigGenerator import TrexConfig
+from .DUTSetup import DUTSetup as DS
__all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
@@ -127,22 +132,18 @@ class TrexMode:
STL = u"STL"
-# TODO: Pylint says too-many-instance-attributes.
class TrafficGenerator(AbstractMeasurer):
"""Traffic Generator."""
- # TODO: Remove "trex" from lines which could work with other TGs.
-
# Use one instance of TrafficGenerator for all tests in test suite
ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
def __init__(self):
- # TODO: Separate into few dataclasses/dicts.
- # Pylint dislikes large unstructured state, and it is right.
self._node = None
self._mode = None
# TG interface order mapping
self._ifaces_reordered = False
+ self._ifaces = []
# Result holding fields, to be removed.
self._result = None
self._loss = None
@@ -171,9 +172,13 @@ class TrafficGenerator(AbstractMeasurer):
self.sleep_till_duration = None
self.transaction_type = None
self.duration_limit = None
+ self.ramp_up_start = None
+ self.ramp_up_stop = None
+ self.ramp_up_rate = None
+ self.ramp_up_duration = None
+ self.state_timeout = None
# Transient data needed for async measurements.
- self._xstats = (None, None)
- # TODO: Rename "xstats" to something opaque, so T-Rex is not privileged?
+ self._xstats = []
@property
def node(self):
@@ -246,101 +251,111 @@ class TrafficGenerator(AbstractMeasurer):
f"{self._node[u'subtype']} not running in {expected_mode} mode!"
)
- # TODO: pylint says disable=too-many-locals.
- def initialize_traffic_generator(
- self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
- tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None,
- tg_if2_dst_mac=None):
- """TG initialization.
+ @staticmethod
+ def get_tg_type(tg_node):
+ """Log and return the installed traffic generator type.
- TODO: Document why do we need (and how do we use) _ifaces_reordered.
+ :param tg_node: Node from topology file.
+ :type tg_node: dict
+ :returns: Traffic generator type string.
+ :rtype: str
+ :raises RuntimeError: If command returns nonzero return code.
+ """
+ return str(check_subtype(tg_node))
- :param tg_node: Traffic generator node.
- :param tg_if1: TG - name of first interface.
- :param tg_if2: TG - name of second interface.
- :param tg_if1_adj_node: TG if1 adjecent node.
- :param tg_if1_adj_if: TG if1 adjecent interface.
- :param tg_if2_adj_node: TG if2 adjecent node.
- :param tg_if2_adj_if: TG if2 adjecent interface.
- :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
- :param tg_if1_dst_mac: Interface 1 destination MAC address.
- :param tg_if2_dst_mac: Interface 2 destination MAC address.
+ @staticmethod
+ def get_tg_version(tg_node):
+ """Log and return the installed traffic generator version.
+
+ :param tg_node: Node from topology file.
:type tg_node: dict
- :type tg_if1: str
- :type tg_if2: str
- :type tg_if1_adj_node: dict
- :type tg_if1_adj_if: str
- :type tg_if2_adj_node: dict
- :type tg_if2_adj_if: str
- :type osi_layer: str
- :type tg_if1_dst_mac: str
- :type tg_if2_dst_mac: str
- :returns: nothing
- :raises RuntimeError: In case of issue during initialization.
+ :returns: Traffic generator version string.
+ :rtype: str
+ :raises RuntimeError: If command returns nonzero return code.
"""
subtype = check_subtype(tg_node)
if subtype == NodeSubTypeTG.TREX:
- self._node = tg_node
- self._mode = TrexMode.ASTF if osi_layer == u"L7" else TrexMode.STL
- if1 = dict()
- if2 = dict()
- if1[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if1)
- if2[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if2)
- if1[u"addr"] = Topology().get_interface_mac(self._node, tg_if1)
- if2[u"addr"] = Topology().get_interface_mac(self._node, tg_if2)
-
- if osi_layer == u"L2":
- if1[u"adj_addr"] = if2[u"addr"]
- if2[u"adj_addr"] = if1[u"addr"]
- elif osi_layer in (u"L3", u"L7"):
- if1[u"adj_addr"] = Topology().get_interface_mac(
- tg_if1_adj_node, tg_if1_adj_if
- )
- if2[u"adj_addr"] = Topology().get_interface_mac(
- tg_if2_adj_node, tg_if2_adj_if
- )
- else:
- raise ValueError(u"Unknown OSI layer!")
+ command = f"cat {Constants.TREX_INSTALL_DIR}/VERSION"
+ message = u"Get T-Rex version failed!"
+ stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
+ return stdout.strip()
+ return "none"
- # in case of switched environment we can override MAC addresses
- if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
- if1[u"adj_addr"] = tg_if1_dst_mac
- if2[u"adj_addr"] = tg_if2_dst_mac
+ def initialize_traffic_generator(self, osi_layer, pfs=2):
+ """TG initialization.
- if min(if1[u"pci"], if2[u"pci"]) != if1[u"pci"]:
- if1, if2 = if2, if1
- self._ifaces_reordered = True
+ :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
+ :param pfs: Number of physical interfaces to configure.
+ :type osi_layer: str
+ :type pfs: int
+ :raises ValueError: If OSI layer is unknown.
+ """
+ if osi_layer not in ("L2", "L3", "L7"):
+ raise ValueError("Unknown OSI layer!")
- master_thread_id, latency_thread_id, socket, threads = \
- CpuUtils.get_affinity_trex(
- self._node, tg_if1, tg_if2,
- tg_dtc=Constants.TREX_CORE_COUNT)
+ topology = BuiltIn().get_variable_value("&{topology_info}")
+ self._node = topology["TG"]
+ subtype = check_subtype(self._node)
- if osi_layer in (u"L2", u"L3", u"L7"):
- exec_cmd_no_error(
- self._node,
- f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
- f"- version: 2\n"
- f" c: {len(threads)}\n"
- f" limit_memory: {Constants.TREX_LIMIT_MEMORY}\n"
- f" interfaces: [\"{if1[u'pci']}\",\"{if2[u'pci']}\"]\n"
- f" port_info:\n"
- f" - dest_mac: \'{if1[u'adj_addr']}\'\n"
- f" src_mac: \'{if1[u'addr']}\'\n"
- f" - dest_mac: \'{if2[u'adj_addr']}\'\n"
- f" src_mac: \'{if2[u'addr']}\'\n"
- f" platform :\n"
- f" master_thread_id: {master_thread_id}\n"
- f" latency_thread_id: {latency_thread_id}\n"
- f" dual_if:\n"
- f" - socket: {socket}\n"
- f" threads: {threads}\n"
- f"EOF'",
- sudo=True, message=u"T-Rex config generation!"
- )
- else:
- raise ValueError(u"Unknown OSI layer!")
+ if subtype == NodeSubTypeTG.TREX:
+ trex_topology = list()
+ self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
+
+ for link in range(1, pfs, 2):
+ tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
+ tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
+ if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
+ ifl = BuiltIn().get_variable_value("${int}")
+ last = topology["duts_count"]
+ tg_if1_adj_addr = Topology().get_interface_mac(
+ topology["DUT1"],
+ BuiltIn().get_variable_value(
+ f"${{DUT1_{ifl}{link}}}[0]"
+ )
+ )
+ tg_if2_adj_addr = Topology().get_interface_mac(
+ topology[f"DUT{last}"],
+ BuiltIn().get_variable_value(
+ f"${{DUT{last}_{ifl}{link+1}}}[0]"
+ )
+ )
+
+ if1_pci = topology[f"TG_pf{link}_pci"][0]
+ if2_pci = topology[f"TG_pf{link+1}_pci"][0]
+ if min(if1_pci, if2_pci) != if1_pci:
+ self._ifaces.append(str(link))
+ self._ifaces.append(str(link-1))
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link+1}"][0],
+ dst_mac=tg_if2_adj_addr
+ )
+ )
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link}"][0],
+ dst_mac=tg_if1_adj_addr
+ )
+ )
+ else:
+ self._ifaces.append(str(link-1))
+ self._ifaces.append(str(link))
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link}"][0],
+ dst_mac=tg_if1_adj_addr
+ )
+ )
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link+1}"][0],
+ dst_mac=tg_if2_adj_addr
+ )
+ )
+ TrexConfig.add_startup_configuration(
+ self._node, trex_topology
+ )
TrafficGenerator.startup_trex(
self._node, osi_layer, subtype=subtype
)
@@ -363,32 +378,41 @@ class TrafficGenerator(AbstractMeasurer):
if subtype == NodeSubTypeTG.TREX:
for _ in range(0, 3):
# Kill TRex only if it is already running.
- cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
+ cmd = "sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
exec_cmd_no_error(
- tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
+ tg_node, cmd, sudo=True, message="Kill TRex failed!"
)
- # Configure TRex.
- ports = ''
- for port in tg_node[u"interfaces"].values():
- if u'Mellanox' not in port.get(u'model'):
- ports += f" {port.get(u'pci_address')}"
-
- cmd = f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && " \
- f"./dpdk_nic_bind.py -u {ports} || true\""
- exec_cmd_no_error(
- tg_node, cmd, sudo=True,
- message=u"Unbind PCI ports from driver failed!"
- )
+ # Prepare interfaces for TRex.
+ tg_port_drv = Constants.TREX_PORT_DRIVER
+ mlx_driver = ""
+ for port in tg_node["interfaces"].values():
+ if "Mellanox" in port.get("model"):
+ mlx_driver = port.get("driver")
+ pci_addr = port.get("pci_address")
+ cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+ if cur_driver == mlx_driver:
+ pass
+ elif not cur_driver:
+ DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+ else:
+ DS.pci_driver_unbind(tg_node, pci_addr)
+ DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+ else:
+ pci_addr = port.get("pci_address")
+ cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+ if cur_driver:
+ DS.pci_driver_unbind(tg_node, pci_addr)
+ DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)
# Start TRex.
cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
- trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
- trex_cmd.add(u"-i")
- trex_cmd.add(u"--prefix $(hostname)")
- trex_cmd.add(u"--hdrh")
- trex_cmd.add(u"--no-scapy-server")
- trex_cmd.add_if(u"--astf", osi_layer == u"L7")
+ trex_cmd = OptionString(["nohup", "./t-rex-64"])
+ trex_cmd.add("-i")
+ trex_cmd.add("--prefix $(hostname)")
+ trex_cmd.add("--hdrh")
+ trex_cmd.add("--no-scapy-server")
+ trex_cmd.add_if("--astf", osi_layer == "L7")
# OptionString does not create double space if extra is empty.
trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
@@ -396,34 +420,33 @@ class TrafficGenerator(AbstractMeasurer):
try:
exec_cmd_no_error(tg_node, cmd, sudo=True)
except RuntimeError:
- cmd = u"sh -c \"cat /tmp/trex.log\""
+ cmd = "sh -c \"cat /tmp/trex.log\""
exec_cmd_no_error(
tg_node, cmd, sudo=True,
- message=u"Get TRex logs failed!"
+ message="Get TRex logs failed!"
)
- raise RuntimeError(u"Start TRex failed!")
+ raise RuntimeError("Start TRex failed!")
# Test T-Rex API responsiveness.
- cmd = u"python3"
- cmd += f" {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
- if osi_layer in (u"L2", u"L3"):
- cmd += f"trex_stl_assert.py"
- elif osi_layer == u"L7":
- cmd += f"trex_astf_assert.py"
+ cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
+ if osi_layer in ("L2", "L3"):
+ cmd += "trex_stl_assert.py"
+ elif osi_layer == "L7":
+ cmd += "trex_astf_assert.py"
else:
- raise ValueError(u"Unknown OSI layer!")
+ raise ValueError("Unknown OSI layer!")
try:
exec_cmd_no_error(
tg_node, cmd, sudo=True,
- message=u"T-Rex API is not responding!", retries=20
+ message="T-Rex API is not responding!", retries=20
)
except RuntimeError:
continue
return
# After max retries TRex is still not responding to API critical
# error occurred.
- exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
- raise RuntimeError(u"Start T-Rex failed after multiple retries!")
+ exec_cmd(tg_node, "cat /tmp/trex.log", sudo=True)
+ raise RuntimeError("Start T-Rex failed after multiple retries!")
@staticmethod
def is_trex_running(node):
@@ -434,7 +457,7 @@ class TrafficGenerator(AbstractMeasurer):
:returns: True if T-Rex is running otherwise False.
:rtype: bool
"""
- ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
+ ret, _, _ = exec_cmd(node, "pgrep t-rex", sudo=True)
return bool(int(ret) == 0)
@staticmethod
@@ -467,17 +490,17 @@ class TrafficGenerator(AbstractMeasurer):
:type node: dict
:raises RuntimeError: If stop traffic script fails.
"""
- command_line = OptionString().add(u"python3")
+ command_line = OptionString().add("python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
command_line.add(f"'{dirname}/trex_astf_stop.py'")
- command_line.change_prefix(u"--")
- for index, value in enumerate(self._xstats):
+ command_line.add("--xstat")
+ for value in self._xstats:
if value is not None:
- value = value.replace(u"'", u"\"")
- command_line.add_equals(f"xstat{index}", f"'{value}'")
+ value = value.replace("'", "\"")
+ command_line.add(f"'{value}'")
stdout, _ = exec_cmd_no_error(
node, command_line,
- message=u"T-Rex ASTF runtime error!"
+ message="T-Rex ASTF runtime error!"
)
self._parse_traffic_results(stdout)
@@ -491,17 +514,17 @@ class TrafficGenerator(AbstractMeasurer):
:type node: dict
:raises RuntimeError: If stop traffic script fails.
"""
- command_line = OptionString().add(u"python3")
+ command_line = OptionString().add("python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
command_line.add(f"'{dirname}/trex_stl_stop.py'")
- command_line.change_prefix(u"--")
- for index, value in enumerate(self._xstats):
+ command_line.add("--xstat")
+ for value in self._xstats:
if value is not None:
- value = value.replace(u"'", u"\"")
- command_line.add_equals(f"xstat{index}", f"'{value}'")
+ value = value.replace("'", "\"")
+ command_line.add(f"'{value}'")
stdout, _ = exec_cmd_no_error(
node, command_line,
- message=u"T-Rex STL runtime error!"
+ message="T-Rex STL runtime error!"
)
self._parse_traffic_results(stdout)
@@ -509,7 +532,7 @@ class TrafficGenerator(AbstractMeasurer):
"""Stop all traffic on TG.
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
:raises ValueError: If TG traffic profile is not supported.
"""
subtype = check_subtype(self._node)
@@ -523,7 +546,42 @@ class TrafficGenerator(AbstractMeasurer):
raise ValueError(u"Unsupported T-Rex traffic profile!")
self._stop_time = time.monotonic()
- return self.get_measurement_result()
+ return self._get_measurement_result()
+
+ def _compute_duration(self, duration, multiplier):
+ """Compute duration for profile driver.
+
+ The final result is influenced by transaction scale and duration limit.
+ It is assumed a higher level function has already set those on self.
+ The duration argument is the target value from search point of view,
+ before the overrides are applied here.
+
+ Minus one (signalling async traffic start) is kept.
+
+ Completeness flag is also included. Duration limited or async trials
+ are not considered complete for ramp-up purposes.
+
+ :param duration: Time expressed in seconds for how long to send traffic.
+ :param multiplier: Traffic rate in transactions per second.
+ :type duration: float
+ :type multiplier: float
+ :returns: New duration and whether it was a complete ramp-up candidate.
+ :rtype: float, bool
+ """
+ if duration < 0.0:
+ # Keep the async -1.
+ return duration, False
+ computed_duration = duration
+ if self.transaction_scale:
+ computed_duration = self.transaction_scale / multiplier
+ # Log the computed duration,
+ # so we can compare with what telemetry suggests
+ # the real duration was.
+ logger.debug(f"Expected duration {computed_duration}")
+ if not self.duration_limit:
+ return computed_duration, True
+ limited_duration = min(computed_duration, self.duration_limit)
+ return limited_duration, (limited_duration == computed_duration)
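# To follow the new duration logic, here is a standalone mirror of
# _compute_duration with invented transaction_scale and duration_limit values
# (a sketch, not part of the patch):
def compute_duration(duration, multiplier, transaction_scale=10000,
                     duration_limit=5.0):
    if duration < 0.0:
        return duration, False            # keep the async start marker (-1)
    computed = duration
    if transaction_scale:
        computed = transaction_scale / multiplier
    if not duration_limit:
        return computed, True
    limited = min(computed, duration_limit)
    return limited, limited == computed

print(compute_duration(30.0, 4000.0))     # (2.5, True)   - under the limit
print(compute_duration(30.0, 1000.0))     # (5.0, False)  - capped, not complete
print(compute_duration(-1.0, 4000.0))     # (-1.0, False) - async trial kept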
def trex_astf_start_remote_exec(
self, duration, multiplier, async_call=False):
@@ -571,19 +629,7 @@ class TrafficGenerator(AbstractMeasurer):
if not isinstance(duration, (float, int)):
duration = float(duration)
- # Duration logic.
- computed_duration = duration
- if duration > 0.0:
- if self.transaction_scale:
- computed_duration = self.transaction_scale / multiplier
- # Log the computed duration,
- # so we can compare with what telemetry suggests
- # the real duration was.
- logger.debug(f"Expected duration {computed_duration}")
- computed_duration += 0.1115
- # Else keep -1.
- if self.duration_limit:
- computed_duration = min(computed_duration, self.duration_limit)
+ computed_duration, _ = self._compute_duration(duration, multiplier)
command_line = OptionString().add(u"python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
@@ -595,6 +641,9 @@ class TrafficGenerator(AbstractMeasurer):
)
command_line.add_with_value(u"duration", f"{computed_duration!r}")
command_line.add_with_value(u"frame_size", self.frame_size)
+ command_line.add_with_value(
+ u"n_data_frames", Constants.ASTF_N_DATA_FRAMES
+ )
command_line.add_with_value(u"multiplier", multiplier)
command_line.add_with_value(u"port_0", p_0)
command_line.add_with_value(u"port_1", p_1)
@@ -604,6 +653,9 @@ class TrafficGenerator(AbstractMeasurer):
command_line.add_if(u"async_start", async_call)
command_line.add_if(u"latency", self.use_latency)
command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
+ command_line.add_with_value(
+ u"delay", Constants.PERF_TRIAL_ASTF_DELAY
+ )
self._start_time = time.monotonic()
self._rate = multiplier
@@ -620,7 +672,7 @@ class TrafficGenerator(AbstractMeasurer):
self._sent = None
self._loss = None
self._latency = None
- xstats = [None, None]
+ xstats = []
self._l7_data = dict()
self._l7_data[u"client"] = dict()
self._l7_data[u"client"][u"active_flows"] = None
@@ -653,10 +705,8 @@ class TrafficGenerator(AbstractMeasurer):
index = 0
for line in stdout.splitlines():
if f"Xstats snapshot {index}: " in line:
- xstats[index] = line[19:]
+ xstats.append(line[19:])
index += 1
- if index == 2:
- break
self._xstats = tuple(xstats)
else:
self._target_duration = duration
@@ -684,38 +734,36 @@ class TrafficGenerator(AbstractMeasurer):
:raises RuntimeError: In case of T-Rex driver issue.
"""
self.check_mode(TrexMode.STL)
- p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
if not isinstance(duration, (float, int)):
duration = float(duration)
- if self.duration_limit:
- duration = min(duration, self.duration_limit)
- command_line = OptionString().add(u"python3")
+ duration, _ = self._compute_duration(duration=duration, multiplier=rate)
+
+ command_line = OptionString().add("python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
command_line.add(f"'{dirname}/trex_stl_profile.py'")
- command_line.change_prefix(u"--")
+ command_line.change_prefix("--")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
command_line.add_with_value(
- u"profile", f"'{dirname}/{self.traffic_profile}.py'"
+ "profile", f"'{dirname}/{self.traffic_profile}.py'"
)
- command_line.add_with_value(u"duration", f"{duration!r}")
- command_line.add_with_value(u"frame_size", self.frame_size)
- command_line.add_with_value(u"rate", f"{rate!r}")
- command_line.add_with_value(u"port_0", p_0)
- command_line.add_with_value(u"port_1", p_1)
+ command_line.add_with_value("duration", f"{duration!r}")
+ command_line.add_with_value("frame_size", self.frame_size)
+ command_line.add_with_value("rate", f"{rate!r}")
+ command_line.add_with_value("ports", " ".join(self._ifaces))
command_line.add_with_value(
- u"traffic_directions", self.traffic_directions
+ "traffic_directions", self.traffic_directions
)
- command_line.add_if(u"async_start", async_call)
- command_line.add_if(u"latency", self.use_latency)
- command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
+ command_line.add_if("async_start", async_call)
+ command_line.add_if("latency", self.use_latency)
+ command_line.add_if("force", Constants.TREX_SEND_FORCE)
+ command_line.add_with_value("delay", Constants.PERF_TRIAL_STL_DELAY)
- # TODO: This is ugly. Handle parsing better.
self._start_time = time.monotonic()
- self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
+ self._rate = float(rate[:-3]) if "pps" in rate else float(rate)
stdout, _ = exec_cmd_no_error(
self._node, command_line, timeout=int(duration) + 60,
- message=u"T-Rex STL runtime error"
+ message="T-Rex STL runtime error"
)
if async_call:
@@ -727,14 +775,12 @@ class TrafficGenerator(AbstractMeasurer):
self._loss = None
self._latency = None
- xstats = [None, None]
+ xstats = []
index = 0
for line in stdout.splitlines():
if f"Xstats snapshot {index}: " in line:
- xstats[index] = line[19:]
+ xstats.append(line[19:])
index += 1
- if index == 2:
- break
self._xstats = tuple(xstats)
else:
self._target_duration = duration
@@ -755,6 +801,10 @@ class TrafficGenerator(AbstractMeasurer):
transaction_type=u"packet",
duration_limit=0.0,
use_latency=False,
+ ramp_up_rate=None,
+ ramp_up_duration=None,
+ state_timeout=240.0,
+ ramp_up_only=False,
):
"""Send traffic from all configured interfaces on TG.
@@ -775,6 +825,8 @@ class TrafficGenerator(AbstractMeasurer):
Bidirectional STL profiles are treated as transactions with two packets.
+ The return value is None for async.
+
:param duration: Duration of test traffic generation in seconds.
:param rate: Traffic rate in transactions per second.
:param frame_size: Frame size (L2) in Bytes.
@@ -797,6 +849,10 @@ class TrafficGenerator(AbstractMeasurer):
duration.
:param use_latency: Whether to measure latency during the trial.
Default: False.
+ :param ramp_up_rate: Rate to use in ramp-up trials [pps].
+ :param ramp_up_duration: Duration of ramp-up trials [s].
+ :param state_timeout: Time of life of DUT state [s].
+ :param ramp_up_only: If true, do not perform main trial measurement.
:type duration: float
:type rate: float
:type frame_size: str
@@ -809,8 +865,12 @@ class TrafficGenerator(AbstractMeasurer):
:type transaction_type: str
:type duration_limit: float
:type use_latency: bool
+ :type ramp_up_rate: float
+ :type ramp_up_duration: float
+ :type state_timeout: float
+ :type ramp_up_only: bool
:returns: TG results.
- :rtype: str
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
self.set_rate_provider_defaults(
@@ -823,10 +883,19 @@ class TrafficGenerator(AbstractMeasurer):
transaction_type=transaction_type,
duration_limit=duration_limit,
use_latency=use_latency,
+ ramp_up_rate=ramp_up_rate,
+ ramp_up_duration=ramp_up_duration,
+ state_timeout=state_timeout,
+ )
+ return self._send_traffic_on_tg_with_ramp_up(
+ duration=duration,
+ rate=rate,
+ async_call=async_call,
+ ramp_up_only=ramp_up_only,
)
- self._send_traffic_on_tg_internal(duration, rate, async_call)
- def _send_traffic_on_tg_internal(self, duration, rate, async_call=False):
+ def _send_traffic_on_tg_internal(
+ self, duration, rate, async_call=False):
"""Send traffic from all configured interfaces on TG.
This is an internal function, it assumes set_rate_provider_defaults
@@ -838,6 +907,9 @@ class TrafficGenerator(AbstractMeasurer):
need to specify their own values, and we do not want the measure call
to overwrite them with defaults.
+ This function is used both for automated ramp-up trials
+ and for explicitly called trials.
+
:param duration: Duration of test traffic generation in seconds.
:param rate: Traffic rate in transactions per second.
:param async_call: Async mode.
@@ -845,7 +917,7 @@ class TrafficGenerator(AbstractMeasurer):
:type rate: float
:type async_call: bool
:returns: TG results.
- :rtype: str
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
subtype = check_subtype(self._node)
@@ -856,14 +928,108 @@ class TrafficGenerator(AbstractMeasurer):
)
elif u"trex-stl" in self.traffic_profile:
unit_rate_str = str(rate) + u"pps"
- # TODO: Suport transaction_scale et al?
self.trex_stl_start_remote_exec(
duration, unit_rate_str, async_call
)
else:
raise ValueError(u"Unsupported T-Rex traffic profile!")
- return self._result
+ return None if async_call else self._get_measurement_result()
+
+ def _send_traffic_on_tg_with_ramp_up(
+ self, duration, rate, async_call=False, ramp_up_only=False):
+ """Send traffic from all interfaces on TG, maybe after ramp-up.
+
+ This is an internal function, it assumes set_rate_provider_defaults
+ has been called to remember most values.
+ The reason why need to remember various values is that
+ the traffic can be asynchronous, and parsing needs those values.
+ The reason why this is a separate function from the one
+ which calls set_rate_provider_defaults is that some search algorithms
+ need to specify their own values, and we do not want the measure call
+ to overwrite them with defaults.
+
+ If ramp-up tracking is detected, a computation is performed,
+ and if the state timeout is near, a trial at the ramp-up rate and duration
+ is inserted before the main trial measurement.
+
+ The ramp_up_only parameter forces a ramp-up without immediate
+ trial measurement, which is useful in case self remembers
+ a previous ramp-up trial that belongs to a different test (phase).
+
+ Return None if trial is async or ramp-up only.
+
+ :param duration: Duration of test traffic generation in seconds.
+ :param rate: Traffic rate in transactions per second.
+ :param async_call: Async mode.
+ :param ramp_up_only: If true, do not perform main trial measurement.
+ :type duration: float
+ :type rate: float
+ :type async_call: bool
+ :type ramp_up_only: bool
+ :returns: TG results.
+ :rtype: MeasurementResult or None
+ :raises ValueError: If TG traffic profile is not supported.
+ """
+ complete = False
+ if self.ramp_up_rate:
+ # Figure out whether we need to insert a ramp-up trial.
+ if ramp_up_only or self.ramp_up_start is None:
+ # We never ramped up yet (at least not in this test case).
+ ramp_up_needed = True
+ else:
+ # We ramped up before, but maybe it was too long ago.
+ # Adding a constant overhead to be safe.
+ time_now = time.monotonic() + 1.0
+ computed_duration, complete = self._compute_duration(
+ duration=duration,
+ multiplier=rate,
+ )
+ # There are two conditions for inserting ramp-up.
+ # If early sessions are expiring already,
+ # or if late sessions are to expire before measurement is over.
+ ramp_up_start_delay = time_now - self.ramp_up_start
+ ramp_up_stop_delay = time_now - self.ramp_up_stop
+ ramp_up_stop_delay += computed_duration
+ bigger_delay = max(ramp_up_start_delay, ramp_up_stop_delay)
+ # Final boolean decision.
+ ramp_up_needed = (bigger_delay >= self.state_timeout)
+ if ramp_up_needed:
+ logger.debug(
+ u"State may time out during next real trial, "
+ u"inserting a ramp-up trial."
+ )
+ self.ramp_up_start = time.monotonic()
+ self._send_traffic_on_tg_internal(
+ duration=self.ramp_up_duration,
+ rate=self.ramp_up_rate,
+ async_call=async_call,
+ )
+ self.ramp_up_stop = time.monotonic()
+ logger.debug(u"Ramp-up done.")
+ else:
+ logger.debug(
+ u"State will probably not time out during next real trial, "
+ u"no ramp-up trial needed just yet."
+ )
+ if ramp_up_only:
+ return None
+ trial_start = time.monotonic()
+ result = self._send_traffic_on_tg_internal(
+ duration=duration,
+ rate=rate,
+ async_call=async_call,
+ )
+ trial_end = time.monotonic()
+ if self.ramp_up_rate:
+ # Optimization: No loss acts as a good ramp-up, if it was complete.
+ if complete and result is not None and result.loss_ratio == 0.0:
+ logger.debug(u"Good trial acts as a ramp-up")
+ self.ramp_up_start = trial_start
+ self.ramp_up_stop = trial_end
+ else:
+ logger.debug(u"Loss or incomplete, does not act as a ramp-up.")
+ return result
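# A hypothetical run of the ramp-up decision above, with invented monotonic
# timestamps (only the arithmetic mirrors the method):
state_timeout = 240.0
ramp_up_start, ramp_up_stop = 100.0, 110.0       # stamps of the last ramp-up trial
time_now = 330.0 + 1.0                           # now, plus the safety constant
computed_duration = 30.0                         # length of the next real trial
ramp_up_start_delay = time_now - ramp_up_start                      # 231.0
ramp_up_stop_delay = time_now - ramp_up_stop + computed_duration    # 251.0
print(max(ramp_up_start_delay, ramp_up_stop_delay) >= state_timeout)
# True: late sessions would expire before the trial ends, so ramp up again.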
def no_traffic_loss_occurred(self):
"""Fail if loss occurred in traffic run.
@@ -879,14 +1045,12 @@ class TrafficGenerator(AbstractMeasurer):
def fail_if_no_traffic_forwarded(self):
"""Fail if no traffic forwarded.
- TODO: Check number of passed transactions instead.
-
:returns: nothing
:raises Exception: If no traffic forwarded.
"""
if self._received is None:
raise RuntimeError(u"The traffic generation has not been issued")
- if self._received == u"0":
+ if self._received == 0:
raise RuntimeError(u"No traffic forwarded")
def partial_traffic_loss_accepted(
@@ -1030,22 +1194,21 @@ class TrafficGenerator(AbstractMeasurer):
self._l7_data[u"server"][u"tcp"][u"rx_bytes"] = \
int(self._result.get(u"server_tcp_rx_bytes", 0))
- def get_measurement_result(self):
- """Return the result of last measurement as ReceiveRateMeasurement.
+ def _get_measurement_result(self):
+ """Return the result of last measurement as MeasurementResult.
Separate function, as measurements can end either by time
or by explicit call, this is the common block at the end.
- The target_tr field of ReceiveRateMeasurement is in
+ The intended_load field of MeasurementResult is in
transactions per second. Transmit count and loss count units
depend on the transaction type. Usually they are in transactions
- per second, or aggregate packets per second.
-
- TODO: Fail on running or already reported measurement.
+ per second, or aggregated packets per second.
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
"""
+ duration_with_overheads = time.monotonic() - self._start_time
try:
# Client duration seems to include a setup period
# where TRex does not send any packets yet.
@@ -1072,18 +1235,29 @@ class TrafficGenerator(AbstractMeasurer):
if not target_duration:
target_duration = approximated_duration
transmit_rate = self._rate
+ unsent = 0
if self.transaction_type == u"packet":
partial_attempt_count = self._sent
- expected_attempt_count = self._sent
- fail_count = self._loss
+ packet_rate = transmit_rate * self.ppta
+ # We have a float. TRex way of rounding it is not obvious.
+ # The biggest source of mismatch is Inter Stream Gap.
+ # So the code tolerates 10 usec of missing packets.
+ expected_attempt_count = (target_duration - 1e-5) * packet_rate
+ expected_attempt_count = math.ceil(expected_attempt_count)
+ # TRex can send more.
+ expected_attempt_count = max(expected_attempt_count, self._sent)
+ unsent = expected_attempt_count - self._sent
+ pass_count = self._received
+ loss_count = self._loss
elif self.transaction_type == u"udp_cps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit udp_cps.")
partial_attempt_count = self._l7_data[u"client"][u"sent"]
# We do not care whether TG is slow, it should have attempted all.
expected_attempt_count = self.transaction_scale
+ unsent = expected_attempt_count - partial_attempt_count
pass_count = self._l7_data[u"client"][u"received"]
- fail_count = expected_attempt_count - pass_count
+ loss_count = partial_attempt_count - pass_count
elif self.transaction_type == u"tcp_cps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit tcp_cps.")
@@ -1091,17 +1265,19 @@ class TrafficGenerator(AbstractMeasurer):
partial_attempt_count = ctca
# We do not care whether TG is slow, it should have attempted all.
expected_attempt_count = self.transaction_scale
+ unsent = expected_attempt_count - partial_attempt_count
# From TCP point of view, server/connects counts full connections,
# but we are testing NAT session so client/connects counts that
# (half connections from TCP point of view).
pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
- fail_count = expected_attempt_count - pass_count
+ loss_count = partial_attempt_count - pass_count
elif self.transaction_type == u"udp_pps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit udp_pps.")
partial_attempt_count = self._sent
expected_attempt_count = self.transaction_scale * self.ppta
- fail_count = self._loss + (expected_attempt_count - self._sent)
+ unsent = expected_attempt_count - self._sent
+ loss_count = self._loss
elif self.transaction_type == u"tcp_pps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit tcp_pps.")
@@ -1114,26 +1290,31 @@ class TrafficGenerator(AbstractMeasurer):
# A simple workaround is to add absolute difference.
# Probability of retransmissions exactly cancelling
# packets unsent due to duration stretching is quite low.
- fail_count = self._loss + abs(expected_attempt_count - self._sent)
+ unsent = abs(expected_attempt_count - self._sent)
+ loss_count = self._loss
else:
raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
- if fail_count < 0 and not self.negative_loss:
- fail_count = 0
- measurement = ReceiveRateMeasurement(
- duration=target_duration,
- target_tr=transmit_rate,
- transmit_count=expected_attempt_count,
- loss_count=fail_count,
- approximated_duration=approximated_duration,
- partial_transmit_count=partial_attempt_count,
+ if unsent and isinstance(self._approximated_duration, float):
+ # Do not report unsent for "manual".
+ logger.debug(f"Unsent packets/transactions: {unsent}")
+ if loss_count < 0 and not self.negative_loss:
+ loss_count = 0
+ measurement = MeasurementResult(
+ intended_duration=target_duration,
+ intended_load=transmit_rate,
+ offered_count=partial_attempt_count,
+ loss_count=loss_count,
+ offered_duration=approximated_duration,
+ duration_with_overheads=duration_with_overheads,
+ intended_count=expected_attempt_count,
)
measurement.latency = self.get_latency_int()
return measurement
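# A worked example of the "packet" branch above, with invented trial numbers:
import math

target_duration, ppta, transmit_rate = 1.0, 2, 1_000_000.0  # 1 Mtps, 2 packets per transaction
sent = 1_999_970                                             # as reported by TRex
packet_rate = transmit_rate * ppta                           # 2 Mpps aggregated
expected = math.ceil((target_duration - 1e-5) * packet_rate) # 1999980, tolerates 10 usec of gap
expected = max(expected, sent)                               # TRex can send more than expected
print(expected, expected - sent)                             # 1999980 10 -> 10 packets unsent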
- def measure(self, duration, transmit_rate):
+ def measure(self, intended_duration, intended_load):
"""Run trial measurement, parse and return results.
- The input rate is for transactions. Stateles bidirectional traffic
+ The intended load is for transactions. Stateless bidirectional traffic
is understood as sequence of (asynchronous) transactions,
two packets each.
@@ -1141,36 +1322,32 @@ class TrafficGenerator(AbstractMeasurer):
the count either transactions or packets (aggregated over directions).
Optionally, this method sleeps if measurement finished before
- the time specified as duration.
+ the time specified as intended_duration (PLRsearch needs time for math).
- :param duration: Trial duration [s].
- :param transmit_rate: Target rate in transactions per second.
- :type duration: float
- :type transmit_rate: float
+ :param intended_duration: Trial duration [s].
+ :param intended_load: Target rate in transactions per second.
+ :type intended_duration: float
+ :type intended_load: float
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
:raises RuntimeError: If TG is not set or if node is not TG
or if subtype is not specified.
:raises NotImplementedError: If TG is not supported.
"""
- duration = float(duration)
+ intended_duration = float(intended_duration)
time_start = time.monotonic()
- time_stop = time_start + duration
+ time_stop = time_start + intended_duration
if self.resetter:
self.resetter()
- self._send_traffic_on_tg_internal(
- duration=duration,
- rate=transmit_rate,
+ result = self._send_traffic_on_tg_with_ramp_up(
+ duration=intended_duration,
+ rate=intended_load,
async_call=False,
)
- result = self.get_measurement_result()
logger.debug(f"trial measurement result: {result!r}")
# In PLRsearch, computation needs the specified time to complete.
if self.sleep_till_duration:
- sleeptime = time_stop - time.monotonic()
- if sleeptime > 0.0:
- # TODO: Sometimes we have time to do additional trials here,
- # adapt PLRsearch to accept all the results.
+ while (sleeptime := time_stop - time.monotonic()) > 0.0:
time.sleep(sleeptime)
return result
@@ -1188,6 +1365,9 @@ class TrafficGenerator(AbstractMeasurer):
negative_loss=True,
sleep_till_duration=False,
use_latency=False,
+ ramp_up_rate=None,
+ ramp_up_duration=None,
+ state_timeout=240.0,
):
"""Store values accessed by measure().
@@ -1208,7 +1388,6 @@ class TrafficGenerator(AbstractMeasurer):
:param transaction_type: An identifier specifying which counters
and formulas to use when computing attempted and failed
transactions. Default: "packet".
- TODO: Does this also specify parsing for the measured duration?
:param duration_limit: Zero or maximum limit for computed (or given)
duration.
:param negative_loss: If false, negative loss is reported as zero loss.
@@ -1216,6 +1395,9 @@ class TrafficGenerator(AbstractMeasurer):
sleep until it matches duration. Needed for PLRsearch.
:param use_latency: Whether to measure latency during the trial.
Default: False.
+ :param ramp_up_rate: Rate to use in ramp-up trials [pps].
+ :param ramp_up_duration: Duration of ramp-up trials [s].
+ :param state_timeout: Time of life of DUT state [s].
:type frame_size: str or int
:type traffic_profile: str
:type ppta: int
@@ -1228,11 +1410,14 @@ class TrafficGenerator(AbstractMeasurer):
:type negative_loss: bool
:type sleep_till_duration: bool
:type use_latency: bool
+ :type ramp_up_rate: float
+ :type ramp_up_duration: float
+ :type state_timeout: float
"""
self.frame_size = frame_size
self.traffic_profile = str(traffic_profile)
self.resetter = resetter
- self.ppta = ppta
+ self.ppta = int(ppta)
self.traffic_directions = int(traffic_directions)
self.transaction_duration = float(transaction_duration)
self.transaction_scale = int(transaction_scale)
@@ -1241,62 +1426,68 @@ class TrafficGenerator(AbstractMeasurer):
self.negative_loss = bool(negative_loss)
self.sleep_till_duration = bool(sleep_till_duration)
self.use_latency = bool(use_latency)
+ self.ramp_up_rate = float(ramp_up_rate)
+ self.ramp_up_duration = float(ramp_up_duration)
+ self.state_timeout = float(state_timeout)
class OptimizedSearch:
"""Class to be imported as Robot Library, containing search keywords.
Aside of setting up measurer and forwarding arguments,
- the main business is to translate min/max rate from unidir to aggregate.
+ the main business is to translate min/max rate from unidir to aggregated.
"""
@staticmethod
- def perform_optimized_ndrpdr_search(
- frame_size,
- traffic_profile,
- minimum_transmit_rate,
- maximum_transmit_rate,
- packet_loss_ratio=0.005,
- final_relative_width=0.005,
- final_trial_duration=30.0,
- initial_trial_duration=1.0,
- number_of_intermediate_phases=2,
- timeout=720.0,
- doublings=1,
- ppta=1,
- resetter=None,
- traffic_directions=2,
- transaction_duration=0.0,
- transaction_scale=0,
- transaction_type=u"packet",
- use_latency=False,
- ):
+ def perform_mlr_search(
+ frame_size: Union[int, str],
+ traffic_profile: str,
+ min_load: float,
+ max_load: float,
+ loss_ratio: float = 0.005,
+ relative_width: float = 0.005,
+ initial_trial_duration: float = 1.0,
+ final_trial_duration: float = 1.0,
+ duration_sum: float = 21.0,
+ expansion_coefficient: int = 2,
+ preceding_targets: int = 2,
+ search_duration_max: float = 1200.0,
+ ppta: int = 1,
+ resetter: Optional[Callable[[], None]] = None,
+ traffic_directions: int = 2,
+ transaction_duration: float = 0.0,
+ transaction_scale: int = 0,
+ transaction_type: str = "packet",
+ use_latency: bool = False,
+ ramp_up_rate: float = 0.0,
+ ramp_up_duration: float = 0.0,
+ state_timeout: float = 240.0,
+ ) -> List[GoalResult]:
"""Setup initialized TG, perform optimized search, return intervals.
- If transaction_scale is nonzero, all non-init trial durations
- are set to 2.0 (as they do not affect the real trial duration)
+ If transaction_scale is nonzero, all init and non-init trial durations
+ are set to 1.0 (as they do not affect the real trial duration)
and zero intermediate phases are used.
- The initial phase still uses 1.0 seconds, to force remeasurement.
- That makes initial phase act as a warmup.
+ This way no re-measurement happens.
+ Warmup has to be handled via resetter or ramp-up mechanisms.
:param frame_size: Frame size identifier or value [B].
:param traffic_profile: Module name as a traffic profile identifier.
See GPL/traffic_profiles/trex for implemented modules.
- :param minimum_transmit_rate: Minimal load in transactions per second.
- :param maximum_transmit_rate: Maximal load in transactions per second.
- :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
- :param final_relative_width: Final lower bound transmit rate
+ :param min_load: Minimal load in transactions per second.
+ :param max_load: Maximal load in transactions per second.
+ :param loss_ratio: Ratio of packets lost, for PDR [1].
+ :param relative_width: Final lower bound intended load
cannot be more distant than this multiple of upper bound [1].
- :param final_trial_duration: Trial duration for the final phase [s].
:param initial_trial_duration: Trial duration for the initial phase
and also for the first intermediate phase [s].
- :param number_of_intermediate_phases: Number of intermediate phases
+ :param final_trial_duration: Trial duration for the final phase [s].
+ :param duration_sum: Max sum of duration for deciding [s].
+ :param expansion_coefficient: In external search multiply width by this.
+ :param preceding_targets: Number of intermediate phases
to perform before the final phase [1].
- :param timeout: The search will fail itself when not finished
- before this overall time [s].
- :param doublings: How many doublings to do in external search step.
- Default 1 is suitable for fairly stable tests,
- less stable tests might get better overal duration with 2 or more.
+ :param search_duration_max: The search will fail itself
+ when not finished before this overall time [s].
:param ppta: Packets per transaction, aggregated over directions.
Needed for udp_pps which does not have a good transaction counter,
so we need to compute expected number of packets.
@@ -1312,17 +1503,21 @@ class OptimizedSearch:
transactions. Default: "packet".
:param use_latency: Whether to measure latency during the trial.
Default: False.
+ :param ramp_up_rate: Rate to use in ramp-up trials [pps].
+ :param ramp_up_duration: Duration of ramp-up trials [s].
+ :param state_timeout: Time of life of DUT state [s].
:type frame_size: str or int
:type traffic_profile: str
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
- :type packet_loss_ratio: float
- :type final_relative_width: float
- :type final_trial_duration: float
+ :type min_load: float
+ :type max_load: float
+ :type loss_ratio: float
+ :type relative_width: float
:type initial_trial_duration: float
- :type number_of_intermediate_phases: int
- :type timeout: float
- :type doublings: int
+ :type final_trial_duration: float
+ :type duration_sum: float
+ :type expansion_coefficient: int
+ :type preceding_targets: int
+ :type search_duration_max: float
:type ppta: int
:type resetter: Optional[Callable[[], None]]
:type traffic_directions: int
@@ -1330,10 +1525,15 @@ class OptimizedSearch:
:type transaction_scale: int
:type transaction_type: str
:type use_latency: bool
- :returns: Structure containing narrowed down NDR and PDR intervals
- and their measurements.
- :rtype: NdrPdrResult
- :raises RuntimeError: If total duration is larger than timeout.
+ :type ramp_up_rate: float
+ :type ramp_up_duration: float
+ :type state_timeout: float
+ :returns: Goal result (based on unidirectional tps) for each goal.
+ The result contains both the offered load for stat trial,
+ and the conditional throughput for display.
+ :rtype: List[GoalResult]
+ :raises RuntimeError: If search duration exceeds search_duration_max
+ or if min load becomes an upper bound for any search goal.
"""
# we need instance of TrafficGenerator instantiated by Robot Framework
# to be able to use trex_stl-*()
@@ -1341,13 +1541,12 @@ class OptimizedSearch:
u"resources.libraries.python.TrafficGenerator"
)
# Overrides for fixed transaction amount.
- # TODO: Move to robot code? We have two call sites, so this saves space,
- # even though this is surprising for log readers.
if transaction_scale:
initial_trial_duration = 1.0
- final_trial_duration = 2.0
- number_of_intermediate_phases = 0
- timeout = 3600.0
+ final_trial_duration = 1.0
+ preceding_targets = 1
+ # TODO: Move the value to Constants.py?
+ search_duration_max += transaction_scale * 3e-4
tg_instance.set_rate_provider_defaults(
frame_size=frame_size,
traffic_profile=traffic_profile,
@@ -1359,29 +1558,47 @@ class OptimizedSearch:
transaction_scale=transaction_scale,
transaction_type=transaction_type,
use_latency=use_latency,
+ ramp_up_rate=ramp_up_rate,
+ ramp_up_duration=ramp_up_duration,
+ state_timeout=state_timeout,
)
- algorithm = MultipleLossRatioSearch(
- measurer=tg_instance,
- final_trial_duration=final_trial_duration,
- final_relative_width=final_relative_width,
- number_of_intermediate_phases=number_of_intermediate_phases,
- initial_trial_duration=initial_trial_duration,
- timeout=timeout,
- doublings=doublings,
- )
- result = algorithm.narrow_down_ndr_and_pdr(
- min_rate=minimum_transmit_rate,
- max_rate=maximum_transmit_rate,
- packet_loss_ratio=packet_loss_ratio,
- )
- return result
+ if loss_ratio:
+ loss_ratios = [0.0, loss_ratio]
+ exceed_ratio = 0.5
+ else:
+ # Happens in reconf tests.
+ loss_ratios = [0.0]
+ exceed_ratio = 0.0
+ goals = [
+ SearchGoal(
+ loss_ratio=loss_ratio,
+ exceed_ratio=exceed_ratio,
+ relative_width=relative_width,
+ initial_trial_duration=initial_trial_duration,
+ final_trial_duration=final_trial_duration,
+ duration_sum=duration_sum,
+ preceding_targets=preceding_targets,
+ expansion_coefficient=expansion_coefficient,
+ fail_fast=True,
+ )
+ for loss_ratio in loss_ratios
+ ]
+ config = Config()
+ config.goals = goals
+ config.min_load = min_load
+ config.max_load = max_load
+ config.search_duration_max = search_duration_max
+ config.warmup_duration = 1.0
+ algorithm = MultipleLossRatioSearch(config)
+ results = algorithm.search(measurer=tg_instance, debug=logger.debug)
+ return [results[goal] for goal in goals]
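# For a concrete picture of what the goal construction above produces with the
# default loss_ratio=0.005 (load bounds are placeholder tps values; the classes
# are the ones imported from .MLRsearch in this file):
from resources.libraries.python.MLRsearch import Config, SearchGoal

goals = [
    SearchGoal(
        loss_ratio=lr, exceed_ratio=0.5, relative_width=0.005,
        initial_trial_duration=1.0, final_trial_duration=1.0,
        duration_sum=21.0, preceding_targets=2,
        expansion_coefficient=2, fail_fast=True,
    )
    for lr in (0.0, 0.005)  # NDR goal first, then PDR goal.
]
config = Config()
config.goals = goals
config.min_load, config.max_load = 9_000.0, 18_000_000.0
config.search_duration_max = 1200.0
config.warmup_duration = 1.0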
@staticmethod
def perform_soak_search(
frame_size,
traffic_profile,
- minimum_transmit_rate,
- maximum_transmit_rate,
+ min_load,
+ max_load,
plr_target=1e-7,
tdpt=0.1,
initial_count=50,
@@ -1394,15 +1611,18 @@ class OptimizedSearch:
transaction_scale=0,
transaction_type=u"packet",
use_latency=False,
+ ramp_up_rate=None,
+ ramp_up_duration=None,
+ state_timeout=240.0,
):
"""Setup initialized TG, perform soak search, return avg and stdev.
:param frame_size: Frame size identifier or value [B].
:param traffic_profile: Module name as a traffic profile identifier.
See GPL/traffic_profiles/trex for implemented modules.
- :param minimum_transmit_rate: Minimal load in transactions per second.
- :param maximum_transmit_rate: Maximal load in transactions per second.
- :param plr_target: Fraction of packets lost to achieve [1].
+ :param min_load: Minimal load in transactions per second.
+ :param max_load: Maximal load in transactions per second.
+ :param plr_target: Ratio of packets lost to achieve [1].
:param tdpt: Trial duration per trial.
The algorithm linearly increases trial duration with trial number,
this is the increment between successive trials, in seconds.
@@ -1430,10 +1650,13 @@ class OptimizedSearch:
transactions. Default: "packet".
:param use_latency: Whether to measure latency during the trial.
Default: False.
+ :param ramp_up_rate: Rate to use in ramp-up trials [pps].
+ :param ramp_up_duration: Duration of ramp-up trials [s].
+ :param state_timeout: Time of life of DUT state [s].
:type frame_size: str or int
:type traffic_profile: str
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
+ :type min_load: float
+ :type max_load: float
:type plr_target: float
:type initial_count: int
:type timeout: float
@@ -1445,15 +1668,16 @@ class OptimizedSearch:
:type transaction_scale: int
:type transaction_type: str
:type use_latency: bool
- :returns: Average and stdev of estimated aggregate rate giving PLR.
+ :type ramp_up_rate: float
+ :type ramp_up_duration: float
+ :type state_timeout: float
+ :returns: Average and stdev of estimated aggregated rate giving PLR.
:rtype: 2-tuple of float
"""
tg_instance = BuiltIn().get_library_instance(
u"resources.libraries.python.TrafficGenerator"
)
# Overrides for fixed transaction amount.
- # TODO: Move to robot code? We have a single call site
- # but MLRsearch has two and we want the two to be used similarly.
if transaction_scale:
timeout = 7200.0
tg_instance.set_rate_provider_defaults(
@@ -1468,6 +1692,9 @@ class OptimizedSearch:
transaction_scale=transaction_scale,
transaction_type=transaction_type,
use_latency=use_latency,
+ ramp_up_rate=ramp_up_rate,
+ ramp_up_duration=ramp_up_duration,
+ state_timeout=state_timeout,
)
algorithm = PLRsearch(
measurer=tg_instance,
@@ -1478,7 +1705,7 @@ class OptimizedSearch:
trace_enabled=trace_enabled,
)
result = algorithm.search(
- min_rate=minimum_transmit_rate,
- max_rate=maximum_transmit_rate,
+ min_rate=min_load,
+ max_rate=max_load,
)
return result
diff --git a/resources/libraries/python/TrafficScriptExecutor.py b/resources/libraries/python/TrafficScriptExecutor.py
index 8faa084ca1..0a45a0b22f 100644
--- a/resources/libraries/python/TrafficScriptExecutor.py
+++ b/resources/libraries/python/TrafficScriptExecutor.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/VPPUtil.py b/resources/libraries/python/VPPUtil.py
index e343d38d2f..1ede76cdd4 100644
--- a/resources/libraries/python/VPPUtil.py
+++ b/resources/libraries/python/VPPUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -18,6 +18,9 @@ from robot.api import logger
from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
+from resources.libraries.python.model.ExportResult import (
+ export_dut_type_and_version
+)
from resources.libraries.python.ssh import exec_cmd_no_error, exec_cmd
from resources.libraries.python.topology import Topology, SocketType, NodeType
@@ -26,35 +29,6 @@ class VPPUtil:
"""General class for any VPP related methods/functions."""
@staticmethod
- def show_vpp_settings(node, *additional_cmds):
- """Print default VPP settings. In case others are needed, can be
- accepted as next parameters (each setting one parameter), preferably
- in form of a string.
-
- :param node: VPP node.
- :param additional_cmds: Additional commands that the vpp should print
- settings for.
- :type node: dict
- :type additional_cmds: tuple
- """
- def_setting_tb_displayed = {
- u"IPv6 FIB": u"ip6 fib",
- u"IPv4 FIB": u"ip fib",
- u"Interface IP": u"int addr",
- u"Interfaces": u"int",
- u"ARP": u"ip arp",
- u"Errors": u"err"
- }
-
- if additional_cmds:
- for cmd in additional_cmds:
- def_setting_tb_displayed[f"Custom Setting: {cmd}"] = cmd
-
- for _, cmd in def_setting_tb_displayed.items():
- command = f"vppctl sh {cmd}"
- exec_cmd_no_error(node, command, timeout=30, sudo=True)
-
- @staticmethod
def restart_vpp_service(node, node_key=None):
"""Restart VPP service on the specified topology node.
@@ -67,9 +41,18 @@ class VPPUtil:
"""
# Containers have a separate lifecycle, but better be safe.
PapiSocketExecutor.disconnect_all_sockets_by_node(node)
- DUTSetup.restart_service(node, Constants.VPP_UNIT)
+
+ VPPUtil.stop_vpp_service(node)
+ command = "/usr/bin/vpp -c /etc/vpp/startup.conf"
+ message = f"Node {node[u'host']} failed to start VPP!"
+ exec_cmd_no_error(
+ node, command, timeout=180, sudo=True, message=message
+ )
+
if node_key:
Topology.add_new_socket(
+ node, SocketType.CLI, node_key, Constants.SOCKCLI_PATH)
+ Topology.add_new_socket(
node, SocketType.PAPI, node_key, Constants.SOCKSVR_PATH)
Topology.add_new_socket(
node, SocketType.STATS, node_key, Constants.SOCKSTAT_PATH)
@@ -96,12 +79,19 @@ class VPPUtil:
:type node: dict
:type node_key: str
"""
- # Containers have a separate lifecycle, but better be safe.
PapiSocketExecutor.disconnect_all_sockets_by_node(node)
- DUTSetup.stop_service(node, Constants.VPP_UNIT)
+ command = "pkill -9 vpp; sleep 1"
+ exec_cmd(node, command, timeout=180, sudo=True)
+ command = (
+ "/bin/rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api"
+ )
+ exec_cmd(node, command, timeout=180, sudo=True)
+
if node_key:
- Topology.del_node_socket_id(node, SocketType.PAPI, node_key)
- Topology.del_node_socket_id(node, SocketType.STATS, node_key)
+ if Topology.get_node_sockets(node, socket_type=SocketType.PAPI):
+ Topology.del_node_socket_id(node, SocketType.PAPI, node_key)
+ if Topology.get_node_sockets(node, socket_type=SocketType.STATS):
+ Topology.del_node_socket_id(node, SocketType.STATS, node_key)
@staticmethod
def stop_vpp_service_on_all_duts(nodes):
@@ -115,6 +105,39 @@ class VPPUtil:
VPPUtil.stop_vpp_service(node, node_key)
@staticmethod
+ def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
+ """Install VPP on all DUT nodes.
+
+ :param nodes: Nodes in the topology.
+ :param vpp_pkg_dir: Path to directory where VPP packages are stored.
+ :type nodes: dict
+ :type vpp_pkg_dir: str
+ """
+ VPPUtil.stop_vpp_service_on_all_duts(nodes)
+ for node in nodes.values():
+ message = f"Failed to install VPP on host {node['host']}!"
+ if node["type"] == NodeType.DUT:
+ command = "mkdir -p /var/log/vpp/"
+ exec_cmd(node, command, sudo=True)
+
+ command = "ln -s /dev/null /etc/systemd/system/vpp.service"
+ exec_cmd(node, command, sudo=True)
+
+ command = "ln -s /dev/null /etc/sysctl.d/80-vpp.conf"
+ exec_cmd(node, command, sudo=True)
+
+ command = "apt-get purge -y '*vpp*' || true"
+ exec_cmd_no_error(node, command, timeout=120, sudo=True)
+
+ command = f"dpkg -i --force-all {vpp_pkg_dir}*.deb"
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
+
+ command = "dpkg -l | grep vpp"
+ exec_cmd_no_error(node, command, sudo=True)
+
+ @staticmethod
def verify_vpp_installed(node):
"""Verify that VPP is installed on the specified topology node.
@@ -197,6 +220,7 @@ class VPPUtil:
"""Run "show_version" PAPI command.
Socket is configurable, so VPP inside container can be accessed.
+ The result is exported to JSON UTI output as "dut-version".
:param node: Node to run command on.
:param remote_vpp_socket: Path to remote socket to target VPP.
@@ -214,7 +238,9 @@ class VPPUtil:
reply = papi_exec.add(cmd).get_reply()
if log:
logger.info(f"VPP version: {reply[u'version']}\n")
- return f"{reply[u'version']}"
+ version = f"{reply[u'version']}"
+ export_dut_type_and_version(u"VPP", version)
+ return version
@staticmethod
def show_vpp_version_on_all_duts(nodes):
@@ -402,3 +428,20 @@ class VPPUtil:
reply = papi_exec.add(cmd, **args).get_reply()
return reply[u"next_index"]
+
+ @staticmethod
+ def vpp_set_neighbor_limit_on_all_duts(nodes, count):
+ """VPP set neighbor count limit on all DUTs in the given topology.
+
+ :param nodes: Nodes in the topology.
+ :param count: Neighbor count limit to set.
+ :type nodes: dict
+ :type count: int
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ cmd = f"set ip neighbor-config ip4 limit {count}"
+ PapiSocketExecutor.run_cli_cmd(node, cmd)
+
+ cmd = f"set ip neighbor-config ip6 limit {count}"
+ PapiSocketExecutor.run_cli_cmd(node, cmd)
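Taken together, the reworked VPPUtil methods replace the systemd unit handling with direct process control. A rough bring-up sequence using only the methods shown above; the package directory path and the nodes dict are assumptions:

    # Hypothetical DUT bring-up flow (NodeType comes from
    # resources.libraries.python.topology, as imported above).
    vpp_pkg_dir = "/tmp/vpp_pkgs/"                        # assumed download path
    VPPUtil.install_vpp_on_all_duts(nodes, vpp_pkg_dir)   # purge old debs, dpkg -i new ones
    for node_key, node in nodes.items():
        if node["type"] == NodeType.DUT:
            # Kills any running vpp, removes stale shm files, starts the binary,
            # then registers CLI/PAPI/STATS sockets in the topology.
            VPPUtil.restart_vpp_service(node, node_key)
            VPPUtil.verify_vpp_installed(node)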
diff --git a/resources/libraries/python/VatExecutor.py b/resources/libraries/python/VatExecutor.py
deleted file mode 100644
index 26d4b75781..0000000000
--- a/resources/libraries/python/VatExecutor.py
+++ /dev/null
@@ -1,397 +0,0 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""VAT executor library."""
-
-import json
-
-from os import remove
-
-from paramiko.ssh_exception import SSHException
-from robot.api import logger
-
-import resources.libraries.python.DUTSetup as PidLib
-
-from resources.libraries.python.Constants import Constants
-from resources.libraries.python.PapiHistory import PapiHistory
-from resources.libraries.python.ssh import SSH, SSHTimeout
-
-__all__ = [u"VatExecutor"]
-
-
-def cleanup_vat_json_output(json_output, vat_name=None):
- """Return VAT JSON output cleaned from VAT clutter.
-
- Clean up VAT JSON output from clutter like vat# prompts and such.
-
- :param json_output: Cluttered JSON output.
- :param vat_name: Name of the VAT script.
- :type json_output: JSON
- :type vat_name: str
- :returns: Cleaned up output JSON string.
- :rtype: JSON
- """
-
- retval = json_output
- clutter = [u"vat#", u"dump_interface_table error: Misc"]
- if vat_name:
- remote_file_path = f"{Constants.REMOTE_FW_DIR}/" \
- f"{Constants.RESOURCES_TPL_VAT}/{vat_name}"
- clutter.append(f"{remote_file_path}(2):")
- for garbage in clutter:
- retval = retval.replace(garbage, u"")
- return retval
-
-
-def get_vpp_pid(node):
- """Get PID of running VPP process.
-
- :param node: DUT node.
- :type node: dict
- :returns: PID of VPP process / List of PIDs if more VPP processes are
- running on the DUT node.
- :rtype: int or list
- """
- pid = PidLib.DUTSetup.get_pid(node, u"vpp")
- return pid
-
-
-class VatExecutor:
- """Contains methods for executing VAT commands on DUTs."""
- def __init__(self):
- self._stdout = None
- self._stderr = None
- self._ret_code = None
- self._script_name = None
-
- def execute_script(
- self, vat_name, node, timeout=120, json_out=True,
- copy_on_execute=False, history=True):
- """Execute VAT script on remote node, and store the result. There is an
- option to copy script from local host to remote host before execution.
- Path is defined automatically.
-
- :param vat_name: Name of the vat script file. Only the file name of
- the script is required, the resources path is prepended
- automatically.
- :param node: Node to execute the VAT script on.
- :param timeout: Seconds to allow the script to run.
- :param json_out: Require JSON output.
- :param copy_on_execute: If true, copy the file from local host to remote
- before executing.
- :param history: If true, add command to history.
- :type vat_name: str
- :type node: dict
- :type timeout: int
- :type json_out: bool
- :type copy_on_execute: bool
- :type history: bool
- :raises SSHException: If cannot open connection for VAT.
- :raises SSHTimeout: If VAT execution is timed out.
- :raises RuntimeError: If VAT script execution fails.
- """
- ssh = SSH()
- try:
- ssh.connect(node)
- except:
- raise SSHException(
- f"Cannot open SSH connection to execute VAT command(s) "
- f"from vat script {vat_name}"
- )
-
- if copy_on_execute:
- ssh.scp(vat_name, vat_name)
- remote_file_path = vat_name
- if history:
- with open(vat_name, u"rt") as vat_file:
- for line in vat_file:
- PapiHistory.add_to_papi_history(
- node, line.replace(u"\n", u""), papi=False
- )
- else:
- remote_file_path = f"{Constants.REMOTE_FW_DIR}/" \
- f"{Constants.RESOURCES_TPL_VAT}/{vat_name}"
-
- cmd = f"{Constants.VAT_BIN_NAME}" \
- f"{u' json' if json_out is True else u''} " \
- f"in {remote_file_path} script"
- try:
- ret_code, stdout, stderr = ssh.exec_command_sudo(
- cmd=cmd, timeout=timeout
- )
- except SSHTimeout:
- logger.error(f"VAT script execution timeout: {cmd}")
- raise
- except Exception:
- raise RuntimeError(f"VAT script execution failed: {cmd}")
-
- self._ret_code = ret_code
- self._stdout = stdout
- self._stderr = stderr
- self._script_name = vat_name
-
- def write_and_execute_script(
- self, node, tmp_fn, commands, timeout=300, json_out=False):
- """Write VAT commands to the script, copy it to node and execute it.
-
- :param node: VPP node.
- :param tmp_fn: Path to temporary file script.
- :param commands: VAT command list.
- :param timeout: Seconds to allow the script to run.
- :param json_out: Require JSON output.
- :type node: dict
- :type tmp_fn: str
- :type commands: list
- :type timeout: int
- :type json_out: bool
- """
- with open(tmp_fn, u"wt") as tmp_f:
- tmp_f.writelines(commands)
-
- self.execute_script(
- tmp_fn, node, timeout=timeout, json_out=json_out,
- copy_on_execute=True
- )
- remove(tmp_fn)
-
- def execute_script_json_out(self, vat_name, node, timeout=120):
- """Pass all arguments to 'execute_script' method, then cleanup returned
- json output.
-
- :param vat_name: Name of the vat script file. Only the file name of
- the script is required, the resources path is prepended
- automatically.
- :param node: Node to execute the VAT script on.
- :param timeout: Seconds to allow the script to run.
- :type vat_name: str
- :type node: dict
- :type timeout: int
- """
- self.execute_script(vat_name, node, timeout, json_out=True)
- self._stdout = cleanup_vat_json_output(self._stdout, vat_name=vat_name)
-
- def script_should_have_failed(self):
- """Read return code from last executed script and raise exception if the
- script didn't fail."""
- if self._ret_code is None:
- raise Exception(u"First execute the script!")
- if self._ret_code == 0:
- raise AssertionError(
- f"VAT Script execution passed, but failure was expected: "
- f"{self._script_name}"
- )
-
- def script_should_have_passed(self):
- """Read return code from last executed script and raise exception if the
- script failed."""
- if self._ret_code is None:
- raise Exception(u"First execute the script!")
- if self._ret_code != 0:
- raise AssertionError(
- f"VAT Script execution failed, but success was expected: "
- f"{self._script_name}"
- )
-
- def get_script_stdout(self):
- """Returns value of stdout from last executed script."""
- return self._stdout
-
- def get_script_stderr(self):
- """Returns value of stderr from last executed script."""
- return self._stderr
-
- @staticmethod
- def cmd_from_template(node, vat_template_file, json_param=True, **vat_args):
- """Execute VAT script on specified node. This method supports
- script templates with parameters.
-
- :param node: Node in topology on which the script is executed.
- :param vat_template_file: Template file of VAT script.
- :param json_param: Require JSON mode.
- :param vat_args: Arguments to the template file.
- :returns: List of JSON objects returned by VAT.
- """
- with VatTerminal(node, json_param=json_param) as vat:
- return vat.vat_terminal_exec_cmd_from_template(
- vat_template_file, **vat_args
- )
-
-
-class VatTerminal:
- """VAT interactive terminal.
-
- :param node: Node to open VAT terminal on.
- :param json_param: Defines if outputs from VAT are in JSON format.
- Default is True.
- :type node: dict
- :type json_param: bool
-
- """
-
- __VAT_PROMPT = (u"vat# ", )
- __LINUX_PROMPT = (u":~# ", u":~$ ", u"~]$ ", u"~]# ")
-
- def __init__(self, node, json_param=True):
- json_text = u" json" if json_param else u""
- self.json = json_param
- self._node = node
- self._ssh = SSH()
- self._ssh.connect(self._node)
- try:
- self._tty = self._ssh.interactive_terminal_open()
- except Exception:
- raise RuntimeError(
- f"Cannot open interactive terminal on node "
- f"{self._node[u'host']}"
- )
-
- for _ in range(3):
- try:
- self._ssh.interactive_terminal_exec_command(
- self._tty, f"sudo -S {Constants.VAT_BIN_NAME}{json_text}",
- self.__VAT_PROMPT
- )
- except Exception:
- continue
- else:
- break
- else:
- vpp_pid = get_vpp_pid(self._node)
- if vpp_pid:
- if isinstance(vpp_pid, int):
- logger.trace(f"VPP running on node {self._node[u'host']}")
- else:
- logger.error(
- f"More instances of VPP running "
- f"on node {self._node[u'host']}."
- )
- else:
- logger.error(f"VPP not running on node {self._node[u'host']}.")
- raise RuntimeError(
- f"Failed to open VAT console on node {self._node[u'host']}"
- )
-
- self._exec_failure = False
- self.vat_stdout = None
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.vat_terminal_close()
-
- def vat_terminal_exec_cmd(self, cmd):
- """Execute command on the opened VAT terminal.
-
- :param cmd: Command to be executed.
-
- :returns: Command output in python representation of JSON format or
- None if not in JSON mode.
- """
- PapiHistory.add_to_papi_history(self._node, cmd, papi=False)
- logger.debug(f"Executing command in VAT terminal: {cmd}")
- try:
- out = self._ssh.interactive_terminal_exec_command(
- self._tty, cmd, self.__VAT_PROMPT
- )
- self.vat_stdout = out
- except Exception:
- self._exec_failure = True
- vpp_pid = get_vpp_pid(self._node)
- if vpp_pid:
- if isinstance(vpp_pid, int):
- msg = f"VPP running on node {self._node[u'host']} " \
- f"but VAT command {cmd} execution failed."
- else:
- msg = f"More instances of VPP running on node " \
- f"{self._node[u'host']}. VAT command {cmd} " \
- f"execution failed."
- else:
- msg = f"VPP not running on node {self._node[u'host']}. " \
- f"VAT command {cmd} execution failed."
- raise RuntimeError(msg)
-
- logger.debug(f"VAT output: {out}")
- if self.json:
- obj_start = out.find(u"{")
- obj_end = out.rfind(u"}")
- array_start = out.find(u"[")
- array_end = out.rfind(u"]")
-
- if obj_start == -1 and array_start == -1:
- raise RuntimeError(f"VAT command {cmd}: no JSON data.")
-
- if obj_start < array_start or array_start == -1:
- start = obj_start
- end = obj_end + 1
- else:
- start = array_start
- end = array_end + 1
- out = out[start:end]
- json_out = json.loads(out)
- return json_out
-
- return None
-
- def vat_terminal_close(self):
- """Close VAT terminal."""
- # interactive terminal is dead, we only need to close session
- if not self._exec_failure:
- try:
- self._ssh.interactive_terminal_exec_command(
- self._tty, u"quit", self.__LINUX_PROMPT
- )
- except Exception:
- vpp_pid = get_vpp_pid(self._node)
- if vpp_pid:
- if isinstance(vpp_pid, int):
- logger.trace(
- f"VPP running on node {self._node[u'host']}."
- )
- else:
- logger.error(
- f"More instances of VPP running "
- f"on node {self._node[u'host']}."
- )
- else:
- logger.error(
- f"VPP not running on node {self._node[u'host']}."
- )
- raise RuntimeError(
- f"Failed to close VAT console "
- f"on node {self._node[u'host']}"
- )
- try:
- self._ssh.interactive_terminal_close(self._tty)
- except Exception:
- raise RuntimeError(
- f"Cannot close interactive terminal "
- f"on node {self._node[u'host']}"
- )
-
- def vat_terminal_exec_cmd_from_template(self, vat_template_file, **args):
- """Execute VAT script from a file.
-
- :param vat_template_file: Template file name of a VAT script.
- :param args: Dictionary of parameters for VAT script.
- :returns: List of JSON objects returned by VAT.
- """
- file_path = f"{Constants.RESOURCES_TPL_VAT}/{vat_template_file}"
-
- with open(file_path, u"rt") as template_file:
- cmd_template = template_file.readlines()
- ret = list()
- for line_tmpl in cmd_template:
- vat_cmd = line_tmpl.format(**args)
- ret.append(self.vat_terminal_exec_cmd(vat_cmd.replace(u"\n", u"")))
- return ret
diff --git a/resources/libraries/python/VatJsonUtil.py b/resources/libraries/python/VatJsonUtil.py
deleted file mode 100644
index 594d3e2f39..0000000000
--- a/resources/libraries/python/VatJsonUtil.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utilities to work with JSON data format from VAT."""
-
-from robot.api import logger
-
-from resources.libraries.python.parsers.JsonParser import JsonParser
-
-
-class VatJsonUtil:
- """Utilities to work with JSON data format from VAT."""
-
- @staticmethod
- def _convert_mac_to_number_list(mac_address):
- """Convert MAC address string to list of decimal numbers.
-
- Converts a ":" separated MAC address to decimal number list as used
- in JSON interface dump.
-
- :param mac_address: MAC address.
- :type mac_address: str
- :returns: List representation of MAC address.
- :rtype: list
- """
- list_mac = list()
- for num in mac_address.split(u":"):
- list_mac.append(int(num, 16))
- return list_mac
-
- @staticmethod
- def get_vpp_interface_by_mac(interfaces_list, mac_address):
- """Return interface dictionary from interface_list by MAC address.
-
- Extracts interface dictionary from all of the interfaces in interfaces
- list parsed from JSON according to mac_address of the interface.
-
- :param interfaces_list: Interfaces parsed from JSON.
- :param mac_address: MAC address of interface we are looking for.
- :type interfaces_list: dict
- :type mac_address: str
- :returns: Interface from JSON.
- :rtype: dict
- """
- interface_dict = dict()
- list_mac_address = VatJsonUtil._convert_mac_to_number_list(mac_address)
- logger.trace(
- f"MAC address {mac_address} converted to list {list_mac_address}."
- )
- for interface in interfaces_list:
- # TODO: create vat json integrity checking and move there
- if u"l2_address" not in interface:
- raise KeyError(
- u"key l2_address not found in interface dict."
- u"Probably input list is not parsed from correct VAT "
- u"json output."
- )
- if u"l2_address_length" not in interface:
- raise KeyError(
- u"key l2_address_length not found in interface "
- u"dict. Probably input list is not parsed from correct "
- u"VAT json output."
- )
- mac_from_json = interface[u"l2_address"][:6]
- if mac_from_json == list_mac_address:
- if interface[u"l2_address_length"] != 6:
- raise ValueError(u"l2_address_length value is not 6.")
- interface_dict = interface
- break
- return interface_dict
-
- @staticmethod
- def update_vpp_interface_data_from_json(node, interface_dump_json):
- """Update vpp node data in node__DICT from JSON interface dump.
-
- This method updates vpp interface names and sw if indexes according to
- interface MAC addresses found in interface_dump_json.
-
- :param node: Node dictionary.
- :param interface_dump_json: JSON output from dump_interface_list VAT
- command.
- :type node: dict
- :type interface_dump_json: str
- """
- interface_list = JsonParser().parse_data(interface_dump_json)
- for ifc in node[u"interfaces"].values():
- if_mac = ifc[u"mac_address"]
- interface_dict = VatJsonUtil.get_vpp_interface_by_mac(
- interface_list, if_mac
- )
- if not interface_dict:
- logger.trace(f"Interface {ifc} not found by MAC {if_mac}")
- ifc[u"vpp_sw_index"] = None
- continue
- ifc[u"name"] = interface_dict[u"interface_name"]
- ifc[u"vpp_sw_index"] = interface_dict[u"sw_if_index"]
- ifc[u"mtu"] = interface_dict[u"mtu"]
-
- @staticmethod
- def get_interface_sw_index_from_json(interface_dump_json, interface_name):
- """Get sw_if_index from given JSON output by interface name.
-
- :param interface_dump_json: JSON output from dump_interface_list VAT
- command.
- :param interface_name: Interface name.
- :type interface_dump_json: str
- :type interface_name: str
- :returns: SW interface index.
- :rtype: int
- :raises ValueError: If interface not found in interface_dump_json.
- """
- logger.trace(interface_dump_json)
- interface_list = JsonParser().parse_data(interface_dump_json)
- for interface in interface_list:
- try:
- if interface[u"interface_name"] == interface_name:
- index = interface[u"sw_if_index"]
- logger.debug(
- f"Interface with name {interface_name} "
- f"has sw_if_index {index}."
- )
- return index
- except KeyError:
- pass
- raise ValueError(f"Interface with name {interface_name} not found.")
-
- @staticmethod
- def get_interface_name_from_json(interface_dump_json, sw_if_index):
- """Get interface name from given JSON output by sw_if_index.
-
- :param interface_dump_json: JSON output from dump_interface_list VAT
- command.
- :param sw_if_index: SW interface index.
- :type interface_dump_json: str
- :type sw_if_index: int
- :returns: Interface name.
- :rtype: str
- :raises ValueError: If interface not found in interface_dump_json.
- """
- logger.trace(interface_dump_json)
- interface_list = JsonParser().parse_data(interface_dump_json)
- for interface in interface_list:
- try:
- if interface[u"sw_if_index"] == sw_if_index:
- interface_name = interface[u"interface_name"]
- logger.debug(
- f"Interface with sw_if_index {sw_if_index} "
- f"has name {interface_name}."
- )
- return interface_name
- except KeyError:
- pass
- raise ValueError(f"Interface with sw_if_index {sw_if_index} not found.")
-
- @staticmethod
- def get_interface_mac_from_json(interface_dump_json, sw_if_index):
- """Get interface MAC address from given JSON output by sw_if_index.
-
- :param interface_dump_json: JSON output from dump_interface_list VAT
- command.
- :param sw_if_index: SW interface index.
- :type interface_dump_json: str
- :type sw_if_index: int
- :returns: Interface MAC address.
- :rtype: str
- :raises ValueError: If interface not found in interface_dump_json.
- """
- logger.trace(interface_dump_json)
- interface_list = JsonParser().parse_data(interface_dump_json)
- for interface in interface_list:
- try:
- if interface[u"sw_if_index"] == sw_if_index:
- mac_from_json = interface[u"l2_address"][:6] \
- if u"l2_address" in list(interface.keys()) else u""
- mac_address = u":".join(
- f"{item:02x}" for item in mac_from_json
- )
- logger.debug(
- f"Interface with sw_if_index {sw_if_index} "
- f"has MAC address {mac_address}."
- )
- return mac_address
- except KeyError:
- pass
- raise ValueError(f"Interface with sw_if_index {sw_if_index} not found.")
-
- @staticmethod
- def verify_vat_retval(vat_out, exp_retval=0, err_msg=u"VAT cmd failed"):
- """Verify return value of VAT command.
-
- VAT command JSON output should be object (dict in python) or array. We
- are looking for something like this: { "retval": 0 }. Verification is
- skipped if VAT output does not contain return value element or root
- element is an array.
-
- :param vat_out: VAT command output in python representation of JSON.
- :param exp_retval: Expected return value (default 0).
- :err_msg: Message to be displayed in case of error (optional).
- :type vat_out: dict or list
- :type exp_retval: int
- :type err_msg: str
- :raises RuntimeError: If VAT command return value is incorrect.
- """
- if isinstance(vat_out, dict):
- retval = vat_out.get(u"retval")
- if retval is not None:
- if retval != exp_retval:
- raise RuntimeError(err_msg)
diff --git a/resources/libraries/python/VhostUser.py b/resources/libraries/python/VhostUser.py
index c6b9185e14..b36edbf95a 100644
--- a/resources/libraries/python/VhostUser.py
+++ b/resources/libraries/python/VhostUser.py
@@ -62,7 +62,7 @@ class VhostUser:
:returns: SW interface index.
:rtype: int
"""
- cmd = u"create_vhost_user_if"
+ cmd = u"create_vhost_user_if_v2"
err_msg = f"Failed to create Vhost-user interface " \
f"on host {node[u'host']}"
if virtio_feature_mask is None:
@@ -203,7 +203,7 @@ class VirtioFeatureMask:
@staticmethod
def is_feature_enabled(virtio_feature_mask, virtio_feature_flag):
"""Checks if concrete virtio feature is enabled within
- virtio_feature_mask
+ virtio_feature_mask
:param virtio_feature_mask: Mask of enabled virtio features
:param virtio_feature_flag: Checked virtio feature
:type virtio_feature_mask: int
diff --git a/resources/libraries/python/VppApiCrc.py b/resources/libraries/python/VppApiCrc.py
index 693dac064a..a8947a18cb 100644
--- a/resources/libraries/python/VppApiCrc.py
+++ b/resources/libraries/python/VppApiCrc.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -75,8 +75,9 @@ class VppApiCrcChecker:
Starts the same as _expected, but each time an encountered api,crc pair
fits the expectation, the pair is removed from all collections
- within this mapping. Ideally, the active mappings will become empty.
- If not, it is an error, VPP removed or renamed a message CSIT needs."""
+ within this mapping. It is fine if an api is missing
+ from some collections, as long as it is not missing from all collections
+ that remained in _expected."""
self._found = dict()
"""Mapping from API name to CRC string.
@@ -84,6 +85,12 @@ class VppApiCrcChecker:
This gets populated with CRCs found in .api.json,
to serve as a hint when reporting errors."""
+ self._options = dict()
+ """Mapping from API name to options dictionary.
+
+ This gets populated with options found in .api.json,
+ to serve as a hint when reporting errors."""
+
self._reported = dict()
"""Mapping from API name to CRC string.
@@ -171,7 +178,34 @@ class VppApiCrcChecker:
return _str(crc)
raise RuntimeError(f"No CRC found for message: {msg_obj!r}")
- def _process_crc(self, api_name, crc):
+ @staticmethod
+ def _get_options(msg_obj, version):
+ """Utility function to extract API options from an intermediate json.
+
+ Empty dict is returned if options are not found,
+ so old VPP builds can be tested without spamming.
+ If version starts with "0.", add a fake option,
+ as the message is treated as "in-progress" by the API upgrade process.
+
+ :param msg_obj: Loaded json object, item of "messages" list.
+ :param version: Version string from the .api.json file.
+ :type msg_obj: list of various types
+ :type version: Optional[str]
+ :returns: Object found as value for "options" key.
+ :rtype: dict
+ """
+ options = dict()
+ for item in reversed(msg_obj):
+ if not isinstance(item, dict):
+ continue
+ options = item.get(u"options", dict())
+ if not options:
+ break
+ if version is None or version.startswith(u"0."):
+ options[u"version"] = version
+ return options
+
+ def _process_crc(self, api_name, crc, options):
"""Compare API to verified collections, update class state.
Here, API stands for (message name, CRC) pair.
@@ -195,16 +229,21 @@ class VppApiCrcChecker:
Attempts to overwrite value in _found or _reported should not happen,
so the code does not check for that, simply overwriting.
+ Options are stored, to be examined later.
+
The intended usage is to call this method multiple times,
and then raise exception listing all _reported.
:param api_name: API name to check.
:param crc: Discovered CRC to check for the name.
+ :param options: Empty dict, or the options value found in .api.json.
:type api_name: str
:type crc: str
+ :type options: dict
"""
# Regardless of the result, remember as found.
self._found[api_name] = crc
+ self._options[api_name] = options
old_expected = self._expected
new_expected = old_expected.copy()
for collection_name, name_to_crc_mapping in old_expected.items():
@@ -244,11 +283,13 @@ class VppApiCrcChecker:
continue
with open(f"{root}/{filename}", u"rt") as file_in:
json_obj = json.load(file_in)
+ version = json_obj[u"options"].get(u"version", None)
msgs = json_obj[u"messages"]
for msg_obj in msgs:
msg_name = self._get_name(msg_obj)
msg_crc = self._get_crc(msg_obj)
- self._process_crc(msg_name, msg_crc)
+ msg_options = self._get_options(msg_obj, version)
+ self._process_crc(msg_name, msg_crc, msg_options)
logger.debug(f"Surviving collections: {self._expected.keys()!r}")
def report_initial_conflicts(self, report_missing=False):
@@ -285,16 +326,21 @@ class VppApiCrcChecker:
if not report_missing:
return
missing = {name: mapp for name, mapp in self._missing.items() if mapp}
- if missing:
- missing_indented = json.dumps(
- missing, indent=1, sort_keys=True, separators=[u",", u":"])
- self.log_and_raise(
- f"API CRCs missing from .api.json:\n{missing_indented}"
- )
+ if set(missing.keys()) < set(self._expected.keys()):
+ # There is a collection where nothing is missing.
+ return
+ missing_indented = json.dumps(
+ missing, indent=1, sort_keys=True, separators=[u",", u":"]
+ )
+ self.log_and_raise(
+ f"API CRCs missing from .api.json:\n{missing_indented}"
+ )
def check_api_name(self, api_name):
"""Fail if the api_name has no, or different from known CRC associated.
+ Print warning if options contain anything more than vat_help.
+
Do not fail if this particular failure has been already reported.
Intended use: Call during test (not in initialization),
@@ -328,9 +374,30 @@ class VppApiCrcChecker:
if name_to_crc_mapping[api_name] == crc:
matching = True
break
- if matching:
+ if not matching:
+ self._reported[api_name] = crc
+ self.log_and_raise(
+ f"No active collection has API {api_name!r} with CRC {crc!r}"
+ )
+ options = self._options.get(api_name, None)
+ if not options:
+ # None means CSIT is attempting a new API on an old VPP build.
+ # If that is an issue, the API has been reported as missing already.
return
- self._reported[api_name] = crc
- self.log_and_raise(
- f"No active collection contains API {api_name!r} with CRC {crc!r}"
- )
+ options.pop(u"vat_help", None)
+ if options:
+ self._reported[api_name] = crc
+ logger.console(f"{api_name} used but has options {options}")
+
+ def print_warnings(self):
+ """Call check_api_name for API names in surviving collections.
+
+ Useful for VPP CRC checking job.
+ The API name is only checked when it appears
+ in all surviving collections.
+ """
+ api_name_to_crc_maps = self._expected.values()
+ api_name_sets = (set(n2c.keys()) for n2c in api_name_to_crc_maps)
+ api_names = set.intersection(*api_name_sets)
+ for api_name in sorted(api_names):
+ self.check_api_name(api_name)
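The new options handling is easiest to see on a single message object. A standalone illustration with a made-up .api.json entry (message name, fields and CRC are invented for the example):

    # Hypothetical message object as loaded from a .api.json "messages" list.
    msg_obj = [
        "sw_interface_dummy_dump",                     # assumed message name
        ["u16", "_vl_msg_id"],
        ["u32", "sw_if_index"],
        {"crc": "0xdeadbeef", "options": {"vat_help": "sw_if_index <n>"}},
    ]
    options = VppApiCrcChecker._get_options(msg_obj, version="0.9.0")
    # options == {"vat_help": "sw_if_index <n>", "version": "0.9.0"};
    # the fake "version" entry is added because a version starting with "0."
    # marks the message as in-progress, so check_api_name() would warn about
    # it even after popping "vat_help".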
diff --git a/resources/libraries/python/VppConfigGenerator.py b/resources/libraries/python/VppConfigGenerator.py
index 437386d81d..4191c0eed2 100644
--- a/resources/libraries/python/VppConfigGenerator.py
+++ b/resources/libraries/python/VppConfigGenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -11,12 +11,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""VPP Configuration File Generator library.
-
-TODO: Support initialization with default values,
-so that we do not need to have block of 6 "Add Unix" commands
-in 7 various places of CSIT code.
-"""
+"""VPP Configuration File Generator library."""
import re
@@ -26,7 +21,7 @@ from resources.libraries.python.topology import NodeType
from resources.libraries.python.topology import Topology
from resources.libraries.python.VPPUtil import VPPUtil
-__all__ = [u"VppConfigGenerator"]
+__all__ = ["VppConfigGenerator", "VppInitConfig"]
def pci_dev_check(pci_dev):
@@ -54,21 +49,17 @@ class VppConfigGenerator:
def __init__(self):
"""Initialize library."""
# VPP Node to apply configuration on
- self._node = u""
+ self._node = ""
# Topology node key
- self._node_key = u""
+ self._node_key = ""
# VPP Configuration
self._nodeconfig = dict()
# Serialized VPP Configuration
- self._vpp_config = u""
+ self._vpp_config = ""
# VPP Service name
- self._vpp_service_name = u"vpp"
- # VPP Logfile location
- self._vpp_logfile = u"/tmp/vpe.log"
+ self._vpp_service_name = "vpp"
# VPP Startup config location
- self._vpp_startup_conf = u"/etc/vpp/startup.conf"
- # VPP Startup config backup location
- self._vpp_startup_conf_backup = None
+ self._vpp_startup_conf = "/etc/vpp/startup.conf"
def set_node(self, node, node_key=None):
"""Set DUT node.
@@ -79,29 +70,13 @@ class VppConfigGenerator:
:type node_key: str
:raises RuntimeError: If Node type is not DUT.
"""
- if node[u"type"] != NodeType.DUT:
+ if node["type"] != NodeType.DUT:
raise RuntimeError(
- u"Startup config can only be applied to DUTnode."
+ "Startup config can only be applied to DUTnode."
)
self._node = node
self._node_key = node_key
- def set_vpp_logfile(self, logfile):
- """Set VPP logfile location.
-
- :param logfile: VPP logfile location.
- :type logfile: str
- """
- self._vpp_logfile = logfile
-
- def set_vpp_startup_conf_backup(self, backup=u"/etc/vpp/startup.backup"):
- """Set VPP startup configuration backup.
-
- :param backup: VPP logfile location.
- :type backup: str
- """
- self._vpp_startup_conf_backup = backup
-
def get_config_str(self):
"""Get dumped startup configuration in VPP config format.
@@ -127,8 +102,8 @@ class VppConfigGenerator:
if path[0] not in config:
config[path[0]] = dict()
elif isinstance(config[path[0]], str):
- config[path[0]] = dict() if config[path[0]] == u"" \
- else {config[path[0]]: u""}
+ config[path[0]] = dict() if config[path[0]] == "" \
+ else {config[path[0]]: ""}
self.add_config_item(config[path[0]], value, path[1:])
def dump_config(self, obj, level=-1):
@@ -140,7 +115,7 @@ class VppConfigGenerator:
:type level: int
:returns: nothing
"""
- indent = u" "
+ indent = " "
if level >= 0:
self._vpp_config += f"{level * indent}{{\n"
if isinstance(obj, dict):
@@ -156,53 +131,56 @@ class VppConfigGenerator:
if level >= 0:
self._vpp_config += f"{level * indent}}}\n"
- def add_unix_log(self, value=None):
+ def add_unix_log(self, value="/var/log/vpp/vpp.log"):
"""Add UNIX log configuration.
:param value: Log file.
:type value: str
"""
- path = [u"unix", u"log"]
- if value is None:
- value = self._vpp_logfile
+ path = ["unix", "log"]
self.add_config_item(self._nodeconfig, value, path)
- def add_unix_cli_listen(self, value=u"/run/vpp/cli.sock"):
+ def add_unix_cli_listen(self, value="/run/vpp/cli.sock"):
"""Add UNIX cli-listen configuration.
:param value: CLI listen address and port or path to CLI socket.
:type value: str
"""
- path = [u"unix", u"cli-listen"]
+ path = ["unix", "cli-listen"]
self.add_config_item(self._nodeconfig, value, path)
- def add_unix_gid(self, value=u"vpp"):
+ def add_unix_cli_no_pager(self):
+ """Add UNIX cli-no-pager configuration."""
+ path = ["unix", "cli-no-pager"]
+ self.add_config_item(self._nodeconfig, "", path)
+
+ def add_unix_gid(self, value="vpp"):
"""Add UNIX gid configuration.
:param value: Gid.
:type value: str
"""
- path = [u"unix", u"gid"]
+ path = ["unix", "gid"]
self.add_config_item(self._nodeconfig, value, path)
def add_unix_nodaemon(self):
"""Add UNIX nodaemon configuration."""
- path = [u"unix", u"nodaemon"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["unix", "nodaemon"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_unix_coredump(self):
"""Add UNIX full-coredump configuration."""
- path = [u"unix", u"full-coredump"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["unix", "full-coredump"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_unix_exec(self, value):
"""Add UNIX exec configuration."""
- path = [u"unix", u"exec"]
+ path = ["unix", "exec"]
self.add_config_item(self._nodeconfig, value, path)
def add_socksvr(self, socket=Constants.SOCKSVR_PATH):
"""Add socksvr configuration."""
- path = [u"socksvr", u"socket-name"]
+ path = ["socksvr", "socket-name"]
self.add_config_item(self._nodeconfig, socket, path)
def add_graph_node_variant(self, variant=Constants.GRAPH_NODE_VARIANT):
@@ -211,39 +189,48 @@ class VppConfigGenerator:
:param variant: Graph node variant name.
:type variant: str
"""
- if variant == u"":
+ if variant == "":
return
- variant_list = [u"hsw", u"skx", u"icl"]
+ variant_list = ["hsw", "skx", "icl"]
if variant not in variant_list:
raise ValueError("Invalid graph node variant value")
- path = [u"node", u"default", u"variant"]
+ path = ["node", "default", "variant"]
self.add_config_item(self._nodeconfig, variant, path)
- def add_api_segment_gid(self, value=u"vpp"):
- """Add API-SEGMENT gid configuration.
+ def add_api_segment_gid(self, value="vpp"):
+ """Add api-segment gid configuration.
:param value: Gid.
:type value: str
"""
- path = [u"api-segment", u"gid"]
+ path = ["api-segment", "gid"]
self.add_config_item(self._nodeconfig, value, path)
def add_api_segment_global_size(self, value):
- """Add API-SEGMENT global-size configuration.
+ """Add api-segment global-size configuration.
:param value: Global size.
:type value: str
"""
- path = [u"api-segment", u"global-size"]
+ path = ["api-segment", "global-size"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_api_segment_prefix(self, value="vpp"):
+ """Add api-segment prefix configuration.
+
+ :param value: Prefix.
+ :type value: str
+ """
+ path = ["api-segment", "prefix"]
self.add_config_item(self._nodeconfig, value, path)
def add_api_segment_api_size(self, value):
- """Add API-SEGMENT api-size configuration.
+ """Add api-segment api-size configuration.
:param value: API size.
:type value: str
"""
- path = [u"api-segment", u"api-size"]
+ path = ["api-segment", "api-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_buffers_per_numa(self, value):
@@ -252,7 +239,7 @@ class VppConfigGenerator:
:param value: Number of buffers allocated.
:type value: int
"""
- path = [u"buffers", u"buffers-per-numa"]
+ path = ["buffers", "buffers-per-numa"]
self.add_config_item(self._nodeconfig, value, path)
def add_buffers_default_data_size(self, value):
@@ -261,7 +248,7 @@ class VppConfigGenerator:
:param value: Buffers data-size allocated.
:type value: int
"""
- path = [u"buffers", u"default data-size"]
+ path = ["buffers", "default data-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_dev(self, *devices):
@@ -272,35 +259,26 @@ class VppConfigGenerator:
"""
for device in devices:
if pci_dev_check(device):
- path = [u"dpdk", f"dev {device}"]
- self.add_config_item(self._nodeconfig, u"", path)
-
- def add_dpdk_dev_parameter(self, device, parameter, value):
- """Add parameter for DPDK device.
+ path = ["dpdk", f"dev {device}"]
+ self.add_config_item(self._nodeconfig, "", path)
- :param device: PCI device (format xxxx:xx:xx.x).
- :param parameter: Parameter name.
- :param value: Parameter value.
- :type device: str
- :type parameter: str
- :type value: str
- """
- if pci_dev_check(device):
- path = [u"dpdk", f"dev {device}", parameter]
- self.add_config_item(self._nodeconfig, value, path)
-
- def add_dpdk_cryptodev(self, count):
+ def add_dpdk_cryptodev(self, count, num_rx_queues=1):
"""Add DPDK Crypto PCI device configuration.
:param count: Number of HW crypto devices to add.
+ :param num_rx_queues: Number of RX queues per QAT interface.
:type count: int
- """
- cryptodev = Topology.get_cryptodev(self._node)
- for i in range(count):
- cryptodev_config = re.sub(r"\d.\d$", f"1.{str(i)}", cryptodev)
- path = [u"dpdk", f"dev {cryptodev_config}"]
- self.add_config_item(self._nodeconfig, u"", path)
- self.add_dpdk_uio_driver(u"vfio-pci")
+ :type num_rx_queues: int
+ """
+ cryptodevs = Topology.get_cryptodev(self._node)
+ for device in cryptodevs.values():
+ for i in range(int(count/len(cryptodevs))):
+ numvfs = device["numvfs"]
+ computed = f"{(i+1)//numvfs}.{(i+1)%numvfs}"
+ addr = re.sub(r"\d.\d$", computed, device["pci_address"])
+ path = ["dpdk", f"dev {addr}", "num-rx-queues"]
+ self.add_config_item(self._nodeconfig, num_rx_queues, path)
+ self.add_dpdk_uio_driver("vfio-pci")
def add_dpdk_sw_cryptodev(self, sw_pmd_type, socket_id, count):
"""Add DPDK SW Crypto device configuration.
@@ -315,8 +293,8 @@ class VppConfigGenerator:
for _ in range(count):
cryptodev_config = f"vdev cryptodev_{sw_pmd_type}_pmd," \
f"socket_id={str(socket_id)}"
- path = [u"dpdk", cryptodev_config]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["dpdk", cryptodev_config]
+ self.add_config_item(self._nodeconfig, "", path)
def add_dpdk_dev_default_rxq(self, value):
"""Add DPDK dev default rxq configuration.
@@ -324,7 +302,7 @@ class VppConfigGenerator:
:param value: Default number of rxqs.
:type value: str
"""
- path = [u"dpdk", u"dev default", u"num-rx-queues"]
+ path = ["dpdk", "dev default", "num-rx-queues"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_dev_default_txq(self, value):
@@ -333,7 +311,7 @@ class VppConfigGenerator:
:param value: Default number of txqs.
:type value: str
"""
- path = [u"dpdk", u"dev default", u"num-tx-queues"]
+ path = ["dpdk", "dev default", "num-tx-queues"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_dev_default_rxd(self, value):
@@ -342,7 +320,7 @@ class VppConfigGenerator:
:param value: Default number of rxds.
:type value: str
"""
- path = [u"dpdk", u"dev default", u"num-rx-desc"]
+ path = ["dpdk", "dev default", "num-rx-desc"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_dev_default_txd(self, value):
@@ -351,22 +329,27 @@ class VppConfigGenerator:
:param value: Default number of txds.
:type value: str
"""
- path = [u"dpdk", u"dev default", u"num-tx-desc"]
+ path = ["dpdk", "dev default", "num-tx-desc"]
self.add_config_item(self._nodeconfig, value, path)
+ def add_dpdk_dev_default_tso(self):
+ """Add DPDK dev default tso configuration."""
+ path = [u"dpdk", u"dev default", u"tso"]
+ self.add_config_item(self._nodeconfig, "on", path)
+
def add_dpdk_log_level(self, value):
"""Add DPDK log-level configuration.
:param value: Log level.
:type value: str
"""
- path = [u"dpdk", u"log-level"]
+ path = ["dpdk", "log-level"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_no_pci(self):
"""Add DPDK no-pci."""
- path = [u"dpdk", u"no-pci"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["dpdk", "no-pci"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_dpdk_uio_driver(self, value=None):
"""Add DPDK uio-driver configuration.
@@ -378,16 +361,37 @@ class VppConfigGenerator:
"""
if value is None:
value = Topology.get_uio_driver(self._node)
- path = [u"dpdk", u"uio-driver"]
+ path = ["dpdk", "uio-driver"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_max_simd_bitwidth(self, variant=Constants.GRAPH_NODE_VARIANT):
+ """Add DPDK max-simd-bitwidth configuration.
+
+ :param variant: Graph node variant name.
+ :type variant: str
+ """
+ if variant == "icl":
+ value = 512
+ elif variant in ["skx", "hsw"]:
+ value = 256
+ else:
+ return
+
+ path = ["dpdk", "max-simd-bitwidth"]
self.add_config_item(self._nodeconfig, value, path)
+ def add_dpdk_enable_tcp_udp_checksum(self):
+ """Add DPDK enable-tcp-udp-checksum configuration."""
+ path = [u"dpdk", u"enable-tcp-udp-checksum"]
+ self.add_config_item(self._nodeconfig, u"", path)
+
def add_cpu_main_core(self, value):
"""Add CPU main core configuration.
:param value: Main core option.
:type value: str
"""
- path = [u"cpu", u"main-core"]
+ path = ["cpu", "main-core"]
self.add_config_item(self._nodeconfig, value, path)
def add_cpu_corelist_workers(self, value):
@@ -396,7 +400,7 @@ class VppConfigGenerator:
:param value: Corelist-workers option.
:type value: str
"""
- path = [u"cpu", u"corelist-workers"]
+ path = ["cpu", "corelist-workers"]
self.add_config_item(self._nodeconfig, value, path)
def add_main_heap_size(self, value):
@@ -405,7 +409,7 @@ class VppConfigGenerator:
:param value: Amount of heap.
:type value: str
"""
- path = [u"memory", u"main-heap-size"]
+ path = ["memory", "main-heap-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_main_heap_page_size(self, value):
@@ -414,13 +418,22 @@ class VppConfigGenerator:
:param value: Heap page size.
:type value: str
"""
- path = [u"memory", u"main-heap-page-size"]
+ path = ["memory", "main-heap-page-size"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_default_hugepage_size(self, value=Constants.DEFAULT_HUGEPAGE_SIZE):
+ """Add Default Hugepage Size configuration.
+
+ :param value: Hugepage size.
+ :type value: str
+ """
+ path = ["memory", "default-hugepage-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_api_trace(self):
"""Add API trace configuration."""
- path = [u"api-trace", u"on"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["api-trace", "on"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_ip6_hash_buckets(self, value):
"""Add IP6 hash buckets configuration.
@@ -428,7 +441,7 @@ class VppConfigGenerator:
:param value: Number of IP6 hash buckets.
:type value: str
"""
- path = [u"ip6", u"hash-buckets"]
+ path = ["ip6", "hash-buckets"]
self.add_config_item(self._nodeconfig, value, path)
def add_ip6_heap_size(self, value):
@@ -437,7 +450,52 @@ class VppConfigGenerator:
:param value: IP6 Heapsize amount.
:type value: str
"""
- path = [u"ip6", u"heap-size"]
+ path = ["ip6", "heap-size"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ipsec_spd_flow_cache_ipv4_inbound(self, value):
+ """Add IPsec spd flow cache for IP4 inbound.
+
+ :param value: "on" to enable spd flow cache.
+ :type value: str
+ """
+ path = ["ipsec", "ipv4-inbound-spd-flow-cache"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ipsec_spd_flow_cache_ipv4_outbound(self, value):
+ """Add IPsec spd flow cache for IP4 outbound.
+
+ :param value: "on" to enable spd flow cache.
+ :type value: str
+ """
+ path = ["ipsec", "ipv4-outbound-spd-flow-cache"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ipsec_spd_fast_path_ipv4_inbound(self, value):
+ """Add IPsec spd fast path for IP4 inbound.
+
+ :param value: "on" to enable spd fast path.
+ :type value: str
+ """
+ path = [u"ipsec", u"ipv4-inbound-spd-fast-path"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ipsec_spd_fast_path_ipv4_outbound(self, value):
+ """Add IPsec spd fast path for IP4 outbound.
+
+ :param value: "on" to enable spd fast path.
+ :type value: str
+ """
+ path = ["ipsec", "ipv4-outbound-spd-fast-path"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ipsec_spd_fast_path_num_buckets(self, value):
+ """Add num buckets for IPsec spd fast path.
+
+ :param value: Number of buckets.
+ :type value: int
+ """
+ path = ["ipsec", "spd-fast-path-num-buckets"]
self.add_config_item(self._nodeconfig, value, path)
def add_statseg_size(self, value):
@@ -446,7 +504,7 @@ class VppConfigGenerator:
:param value: Stats heapsize amount.
:type value: str
"""
- path = [u"statseg", u"size"]
+ path = ["statseg", "size"]
self.add_config_item(self._nodeconfig, value, path)
def add_statseg_page_size(self, value):
@@ -455,7 +513,7 @@ class VppConfigGenerator:
:param value: Stats heapsize amount.
:type value: str
"""
- path = [u"statseg", u"page-size"]
+ path = ["statseg", "page-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_statseg_per_node_counters(self, value):
@@ -464,7 +522,7 @@ class VppConfigGenerator:
:param value: "on" to switch the counters on.
:type value: str
"""
- path = [u"statseg", u"per-node-counters"]
+ path = ["statseg", "per-node-counters"]
self.add_config_item(self._nodeconfig, value, path)
def add_plugin(self, state, *plugins):
@@ -476,27 +534,27 @@ class VppConfigGenerator:
:type plugins: list
"""
for plugin in plugins:
- path = [u"plugins", f"plugin {plugin}", state]
- self.add_config_item(self._nodeconfig, u" ", path)
+ path = ["plugins", f"plugin {plugin}", state]
+ self.add_config_item(self._nodeconfig, " ", path)
def add_dpdk_no_multi_seg(self):
"""Add DPDK no-multi-seg configuration."""
- path = [u"dpdk", u"no-multi-seg"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["dpdk", "no-multi-seg"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_dpdk_no_tx_checksum_offload(self):
"""Add DPDK no-tx-checksum-offload configuration."""
- path = [u"dpdk", u"no-tx-checksum-offload"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["dpdk", "no-tx-checksum-offload"]
+ self.add_config_item(self._nodeconfig, "", path)
- def add_nat(self, value=u"deterministic"):
+ def add_nat(self, value="deterministic"):
"""Add NAT mode configuration.
:param value: NAT mode.
:type value: str
"""
- path = [u"nat", value]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["nat", value]
+ self.add_config_item(self._nodeconfig, "", path)
def add_nat_max_translations_per_thread(self, value):
"""Add NAT max. translations per thread number configuration.
@@ -504,21 +562,21 @@ class VppConfigGenerator:
:param value: Maximum number of translations per thread.
:type value: str
"""
- path = [u"nat", u"max translations per thread"]
+ path = ["nat", "max translations per thread"]
self.add_config_item(self._nodeconfig, value, path)
def add_nsim_poll_main_thread(self):
"""Add NSIM poll-main-thread configuration."""
- path = [u"nsim", u"poll-main-thread"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["nsim", "poll-main-thread"]
+ self.add_config_item(self._nodeconfig, "", path)
- def add_tcp_congestion_control_algorithm(self, value=u"cubic"):
+ def add_tcp_congestion_control_algorithm(self, value="cubic"):
"""Add TCP congestion control algorithm.
:param value: The congestion control algorithm to use. Example: cubic
:type value: str
"""
- path = [u"tcp", u"cc-algo"]
+ path = ["tcp", "cc-algo"]
self.add_config_item(self._nodeconfig, value, path)
def add_tcp_preallocated_connections(self, value):
@@ -527,7 +585,7 @@ class VppConfigGenerator:
:param value: The number of pre-allocated connections.
:type value: int
"""
- path = [u"tcp", u"preallocated-connections"]
+ path = ["tcp", "preallocated-connections"]
self.add_config_item(self._nodeconfig, value, path)
def add_tcp_preallocated_half_open_connections(self, value):
@@ -536,18 +594,28 @@ class VppConfigGenerator:
:param value: The number of pre-allocated half open connections.
:type value: int
"""
- path = [u"tcp", u"preallocated-half-open-connections"]
+ path = ["tcp", "preallocated-half-open-connections"]
self.add_config_item(self._nodeconfig, value, path)
+ def add_tcp_tso(self):
+ """Add TCP tso configuration."""
+ path = [u"tcp", u"tso"]
+ self.add_config_item(self._nodeconfig, u"", path)
+
def add_session_enable(self):
"""Add session enable."""
- path = [u"session", u"enable"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["session", "enable"]
+ self.add_config_item(self._nodeconfig, "", path)
+
+ def add_session_app_socket_api(self):
+ """Use session app socket api."""
+ path = ["session", "use-app-socket-api"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_session_event_queues_memfd_segment(self):
"""Add session event queue memfd segment."""
- path = [u"session", u"evt_qs_memfd_seg"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["session", "evt_qs_memfd_seg"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_session_event_queue_length(self, value):
"""Add session event queue length.
@@ -555,7 +623,7 @@ class VppConfigGenerator:
:param value: Session event queue length.
:type value: int
"""
- path = [u"session", u"event-queue-length"]
+ path = ["session", "event-queue-length"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_event_queues_segment_size(self, value):
@@ -564,7 +632,7 @@ class VppConfigGenerator:
:param value: Session event queue segment size.
:type value: str
"""
- path = [u"session", u"evt_qs_seg_size"]
+ path = ["session", "evt_qs_seg_size"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_preallocated_sessions(self, value):
@@ -573,7 +641,7 @@ class VppConfigGenerator:
:param value: Number of pre-allocated sessions.
:type value: int
"""
- path = [u"session", u"preallocated-sessions"]
+ path = ["session", "preallocated-sessions"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_v4_session_table_buckets(self, value):
@@ -582,7 +650,7 @@ class VppConfigGenerator:
:param value: Number of v4 session table buckets.
:type value: int
"""
- path = [u"session", u"v4-session-table-buckets"]
+ path = ["session", "v4-session-table-buckets"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_v4_session_table_memory(self, value):
@@ -591,7 +659,7 @@ class VppConfigGenerator:
:param value: Size of v4 session table memory.
:type value: str
"""
- path = [u"session", u"v4-session-table-memory"]
+ path = ["session", "v4-session-table-memory"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_v4_halfopen_table_buckets(self, value):
@@ -600,7 +668,7 @@ class VppConfigGenerator:
:param value: Number of v4 halfopen table buckets.
:type value: int
"""
- path = [u"session", u"v4-halfopen-table-buckets"]
+ path = ["session", "v4-halfopen-table-buckets"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_v4_halfopen_table_memory(self, value):
@@ -609,7 +677,7 @@ class VppConfigGenerator:
:param value: Size of v4 halfopen table memory.
:type value: str
"""
- path = [u"session", u"v4-halfopen-table-memory"]
+ path = ["session", "v4-halfopen-table-memory"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_local_endpoints_table_buckets(self, value):
@@ -618,7 +686,7 @@ class VppConfigGenerator:
:param value: Number of local endpoints table buckets.
:type value: int
"""
- path = [u"session", u"local-endpoints-table-buckets"]
+ path = ["session", "local-endpoints-table-buckets"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_local_endpoints_table_memory(self, value):
@@ -627,7 +695,31 @@ class VppConfigGenerator:
:param value: Size of local endpoints table memory.
:type value: str
"""
- path = [u"session", u"local-endpoints-table-memory"]
+ path = ["session", "local-endpoints-table-memory"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+    def add_session_use_dma(self):
+        """Add session use-dma configuration."""
+        path = ["session", "use-dma"]
+        self.add_config_item(self._nodeconfig, "", path)
+
+ def add_dma_dev(self, devices):
+ """Add DMA devices configuration.
+
+ :param devices: DMA devices or work queues.
+ :type devices: list
+ """
+ for device in devices:
+ path = ["dsa", f"dev {device}"]
+ self.add_config_item(self._nodeconfig, "", path)
+
+ def add_logging_default_syslog_log_level(self, value="debug"):
+ """Add default logging level for syslog.
+
+ :param value: Log level.
+ :type value: str
+ """
+ path = ["logging", "default-syslog-log-level"]
self.add_config_item(self._nodeconfig, value, path)
def write_config(self, filename=None):
@@ -644,15 +736,9 @@ class VppConfigGenerator:
if filename is None:
filename = self._vpp_startup_conf
- if self._vpp_startup_conf_backup is not None:
- cmd = f"cp {self._vpp_startup_conf} {self._vpp_startup_conf_backup}"
- exec_cmd_no_error(
- self._node, cmd, sudo=True, message=u"Copy config file failed!"
- )
-
cmd = f"echo \"{self._vpp_config}\" | sudo tee {filename}"
exec_cmd_no_error(
- self._node, cmd, message=u"Writing config file failed!"
+ self._node, cmd, message="Writing config file failed!"
)
def apply_config(self, filename=None, verify_vpp=True):
@@ -672,9 +758,39 @@ class VppConfigGenerator:
if verify_vpp:
VPPUtil.verify_vpp(self._node)
- def restore_config(self):
- """Restore VPP startup.conf from backup."""
- cmd = f"cp {self._vpp_startup_conf_backup} {self._vpp_startup_conf}"
- exec_cmd_no_error(
- self._node, cmd, sudo=True, message=u"Copy config file failed!"
- )
+
+class VppInitConfig:
+ """VPP Initial Configuration."""
+ @staticmethod
+ def init_vpp_startup_configuration_on_all_duts(nodes):
+ """Apply initial VPP startup configuration on all DUTs.
+
+ :param nodes: Nodes in the topology.
+ :type nodes: dict
+ """
+ huge_size = Constants.DEFAULT_HUGEPAGE_SIZE
+ for node in nodes.values():
+ if node["type"] == NodeType.DUT:
+ vpp_config = VppConfigGenerator()
+ vpp_config.set_node(node)
+ vpp_config.add_unix_log()
+ vpp_config.add_unix_cli_listen()
+ vpp_config.add_unix_cli_no_pager()
+ vpp_config.add_unix_gid()
+ vpp_config.add_unix_coredump()
+ vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
+ vpp_config.add_main_heap_size("2G")
+ vpp_config.add_main_heap_page_size(huge_size)
+ vpp_config.add_default_hugepage_size(huge_size)
+ vpp_config.add_statseg_size("2G")
+ vpp_config.add_statseg_page_size(huge_size)
+ vpp_config.add_statseg_per_node_counters("on")
+ vpp_config.add_plugin("disable", "default")
+ vpp_config.add_plugin("enable", "dpdk_plugin.so")
+ vpp_config.add_dpdk_dev(
+ *[node["interfaces"][interface].get("pci_address") \
+ for interface in node["interfaces"]]
+ )
+ vpp_config.add_ip6_hash_buckets(2000000)
+ vpp_config.add_ip6_heap_size("4G")
+ vpp_config.apply_config()
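
For orientation, the sketch below shows how the new session/DSA helpers above would be driven and what startup.conf stanzas they are expected to emit. It is a minimal sketch assuming a CSIT checkout on the Python path; the node dict and the DSA work-queue names are hypothetical placeholders, not values taken from this change.

from resources.libraries.python.VppConfigGenerator import VppConfigGenerator

# Hypothetical DUT node; real values come from the parsed topology file.
node = {"type": "DUT", "host": "10.30.51.50", "port": 22}

vpp_config = VppConfigGenerator()
vpp_config.set_node(node)
vpp_config.add_session_app_socket_api()
vpp_config.add_session_use_dma()
vpp_config.add_dma_dev(["wq0.0", "wq0.1"])  # hypothetical DSA work queues
vpp_config.add_logging_default_syslog_log_level("debug")

# Expected startup.conf fragments (roughly):
#   session { use-app-socket-api use-dma }
#   dsa { dev wq0.0 dev wq0.1 }
#   logging { default-syslog-log-level debug }
# vpp_config.apply_config() then writes the file on the DUT and verifies VPP
# (see write_config/apply_config above).
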
diff --git a/resources/libraries/python/VppCounters.py b/resources/libraries/python/VppCounters.py
index e9b607b4f1..6bd0aea4bf 100644
--- a/resources/libraries/python/VppCounters.py
+++ b/resources/libraries/python/VppCounters.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/WireGuardUtil.py b/resources/libraries/python/WireGuardUtil.py
new file mode 100644
index 0000000000..6e6237e7e7
--- /dev/null
+++ b/resources/libraries/python/WireGuardUtil.py
@@ -0,0 +1,298 @@
+# Copyright (c) 2022 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""WireGuard utilities library."""
+
+from ipaddress import ip_address
+from cryptography.hazmat.primitives.serialization import Encoding, \
+ PrivateFormat, PublicFormat, NoEncryption
+from cryptography.hazmat.primitives.asymmetric.x25519 import \
+ X25519PrivateKey
+
+from resources.libraries.python.InterfaceUtil import InterfaceUtil
+from resources.libraries.python.IPUtil import IPUtil
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
+
+class WireGuardUtil:
+ """This class defines the methods to set WireGuard."""
+
+ @staticmethod
+ def public_key_bytes(k):
+ """Return the public key as byte.
+
+ :param k: Generated public key.
+ :type: x25519._X25519PublicKey object
+ :returns: Public key.
+ :rtype: bytes
+ """
+ return k.public_bytes(Encoding.Raw, PublicFormat.Raw)
+
+ @staticmethod
+ def private_key_bytes(k):
+ """Return the private key as byte.
+
+ :param k: Generated private key.
+ :type: x25519._X25519PrivateKey object
+ :returns: Private key.
+ :rtype: bytes
+ """
+ return k.private_bytes(Encoding.Raw, PrivateFormat.Raw, NoEncryption())
+
+ @staticmethod
+ def generate_wireguard_privatekey_and_pubkey():
+ """Generate a pair of WireGuard Private key and Public key.
+
+        :returns: A pair of private key and public key, as raw bytes.
+        :rtype: tuple(bytes, bytes)
+ """
+ privatekey = X25519PrivateKey.generate()
+ pubkey = privatekey.public_key()
+ private_key = WireGuardUtil.private_key_bytes(privatekey)
+ public_key = WireGuardUtil.public_key_bytes(pubkey)
+ return private_key, public_key
+
+ @staticmethod
+ def vpp_wireguard_create_interface(
+ node, listen_port, wg_src, private_key):
+ """Create WireGuard interface.
+
+ :param node: VPP node to add config on.
+ :param listen_port: WireGuard interface listen port.
+ :param wg_src: WireGuard source IPv4.
+ :param private_key: WireGuard interface private key
+ :type node: dict
+ :type listen_port: int
+ :type wg_src: str
+ :type private_key: bytes
+ :returns: Wireguard interface sw_if_index.
+ :rtype: int
+ """
+ cmd = u"wireguard_interface_create"
+ err_msg = f"Failed to create wireguard interface" \
+ f"on host {node[u'host']}"
+ src_ip = ip_address(wg_src)
+ args = dict(
+ interface=dict(
+ port=int(listen_port),
+ src_ip=src_ip,
+ private_key=private_key,
+ generate_key=False
+ )
+ )
+ with PapiSocketExecutor(node) as papi_exec:
+ wg_sw_index = \
+ papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
+ return wg_sw_index
+
+ @staticmethod
+ def vpp_wireguard_add_peer(
+ node, interface, peer_pubkey, endpoint_ip,
+ allowed_ips, n_allowed_ips, dst_port, keepalive_time):
+ """Add a peer for WireGuard interface.
+
+ :param node: VPP node to add config on.
+ :param interface: WireGuard interface sw_if_index.
+ :param peer_pubkey: Public key of wireguard interface peer.
+ :param endpoint_ip: Peer source IPv4.
+ :param allowed_ips: WireGuard interface allowed ips list.
+ :param n_allowed_ips: Number of allowed ips.
+ :param dst_port: WireGuard destination port.
+        :param keepalive_time: WireGuard persistent keepalive time.
+ :type node: dict
+ :type interface: int
+ :type peer_pubkey: bytes
+ :type endpoint_ip: str
+ :type allowed_ips: list
+ :type n_allowed_ips: int
+ :type dst_port: int
+ :type keepalive_time: int
+ """
+ endpoint_ip = ip_address(endpoint_ip)
+ cmd = u"wireguard_peer_add"
+ err_msg = f"Failed to add peer of wireguard interface" \
+ f"{interface} on host {node[u'host']}"
+ args = dict(
+ peer=dict(
+ public_key=peer_pubkey,
+ port=int(dst_port),
+ endpoint=endpoint_ip,
+ sw_if_index=interface,
+ persistent_keepalive=int(keepalive_time),
+ n_allowed_ips=int(n_allowed_ips),
+ allowed_ips=allowed_ips
+ )
+ )
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def vpp_wireguard_set_async_mode(node, async_enable=1):
+ """Set wireguard async mode on or off.
+
+ :param node: VPP node to set wireguard async mode.
+ :param async_enable: Async mode on or off.
+ :type node: dict
+ :type async_enable: int
+ """
+ cmd = u"wg_set_async_mode"
+ err_msg = f"Failed to set wireguard async mode on host {node[u'host']}"
+ args = dict(
+ async_enable=async_enable
+ )
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
+ def _wireguard_create_tunnel_interface_on_dut(
+ node, if1_key, if2_mac_addr, src_ip, peer_endpoint_ip,
+ peer_allowed_ips, peer_n_allowed_ips, dut_wg_ip, port,
+ keepalive_time, dut_private_key, peer_pubkey):
+ """Create WireGuard tunnel interface on one DUT node using PAPI.
+
+ :param node: VPP node as DUT to create tunnel interface.
+ :param if1_key: VPP node as DUT interface key from topology file.
+        :param if2_mac_addr: VPP node on the other end / TG node
+ (in case of 2-node topology) interface mac address.
+ :param src_ip: WireGuard source IPv4 address.
+ :param peer_endpoint_ip: Peer source IPv4 address.
+ :param peer_allowed_ips: WireGuard peer interface allowed ip list.
+        :param peer_n_allowed_ips: Number of peer allowed ips.
+ :param dut_wg_ip: WireGuard interface ip address on DUT.
+ :param port: WireGuard interface listen port or
+ Peer interface destination port.
+ :param keepalive_time: WireGuard persistent keepalive time.
+ :param dut_private_key: WireGuard interface private key of DUT.
+ :param peer_pubkey: WireGuard Peer interface public key.
+        :type node: dict
+ :type if1_key: str
+ :type if2_mac_addr: str
+ :type src_ip: str
+ :type peer_endpoint_ip: str
+ :type peer_allowed_ips: list
+ :type peer_n_allowed_ips: int
+ :type dut_wg_ip: str
+ :type port: int
+ :type keepalive_time: int
+ :type dut_private_key: bytes
+ :type peer_pubkey: bytes
+ """
+ #Set IP address on VPP node interface
+ IPUtil.vpp_interface_set_ip_address(node, if1_key, src_ip, 24)
+ IPUtil.vpp_add_ip_neighbor(
+ node, if1_key, peer_endpoint_ip, if2_mac_addr
+ )
+ #Create Wireguard interface on DUT
+ dut_wg_sw_index = WireGuardUtil.vpp_wireguard_create_interface(
+ node, port, src_ip, dut_private_key
+ )
+ #Add wireguard peer
+ WireGuardUtil.vpp_wireguard_add_peer(
+ node, dut_wg_sw_index, peer_pubkey, peer_endpoint_ip,
+ peer_allowed_ips, peer_n_allowed_ips, port, keepalive_time
+ )
+ #Set wireguard interface up
+ InterfaceUtil.set_interface_state(node, dut_wg_sw_index, state=u'up')
+ #Set wireguard interface IP address
+ cmd = u'sw_interface_add_del_address'
+ args = dict(
+ sw_if_index=dut_wg_sw_index,
+ is_add=True,
+ del_all=False,
+ prefix=IPUtil.create_prefix_object(ip_address(dut_wg_ip), 24)
+ )
+ err_msg = f"Failed to set IP address on wg interface " \
+ f"on host {node[u'host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+ #Set route on VPP node as DUT wg interface
+ for allowed_ip in peer_allowed_ips:
+ traffic_addr = ip_address(
+ allowed_ip[u'address'][u'un'][u'ip4']
+ )
+ prefix_len = allowed_ip[u'len']
+ IPUtil.vpp_route_add(
+ node, traffic_addr, prefix_len,
+ gateway=(traffic_addr+1).compressed,
+ interface=dut_wg_sw_index
+ )
+
+ @staticmethod
+ def vpp_wireguard_create_tunnel_interfaces_on_duts(
+ nodes, if1_key, if2_key, if1_ip_addr, if2_ip_addr,
+ if1_mac_addr, if2_mac_addr, wg_if1_ip_addr, wg_if2_ip_addr,
+ n_tunnels, port, keepalive_time, raddr_ip1, raddr_ip2):
+ """Create WireGuard tunnel interfaces between two VPP nodes.
+
+ :param nodes: VPP nodes to create tunnel interfaces.
+ :param if1_key: VPP node 1 interface key from topology file.
+    :param if2_key: VPP node 2 / TG node (in case of 2-node topology)
+        interface key from topology file.
+ :param if1_ip_addr: VPP node 1 interface IPv4/IPv6 address.
+ :param if2_ip_addr: VPP node 2 / TG node
+ (in case of 2-node topology) interface IPv4/IPv6 address.
+ :param if1_mac_addr: VPP node1 interface mac address.
+ :param if2_mac_addr: VPP node2 interface mac address.
+ :param wg_if1_ip_addr: VPP node 1 WireGuard interface IPv4 address.
+ :param wg_if2_ip_addr: VPP node 2 WireGuard interface IPv4 address.
+ :param n_tunnels: Number of wireguard tunnels.
+ :param port: WireGuard interface listen port or
+ Peer interface destination port.
+ :param keepalive_time: WireGuard persistent keepalive time.
+ :param raddr_ip1: Policy selector remote IPv4/IPv6 start address
+ for the first tunnel in direction node1->node2.
+ :param raddr_ip2: Policy selector remote IPv4/IPv6 start address
+ for the first tunnel in direction node2->node1.
+ :type nodes: dict
+ :type if1_key: str
+ :type if2_key: str
+ :type if1_ip_addr: str
+ :type if2_ip_addr: str
+ :type if1_mac_addr: str
+ :type if2_mac_addr: str
+ :type wg_if1_ip_addr: str
+ :type wg_if2_ip_addr: str
+ :type n_tunnels: int
+ :type port: int
+ :type keepalive_time: int
+ :type raddr_ip1: str
+ :type raddr_ip2: str
+ """
+ for i in range(n_tunnels):
+ if1_ipaddr = str(ip_address(if1_ip_addr) + i*256)
+ if2_ipaddr = str(ip_address(if2_ip_addr) + i*256)
+ wg_if1_ipaddr = str(ip_address(wg_if1_ip_addr) + i*256)
+ wg_if2_ipaddr = str(ip_address(wg_if2_ip_addr) + i*256)
+
+ allowed_ipaddr1 = ip_address(raddr_ip1) + i*256
+ allowed_ipaddr2 = ip_address(raddr_ip2) + i*256
+ dut1_allowed_ips = \
+ [IPUtil.create_prefix_object(allowed_ipaddr2, 24),]
+ dut2_allowed_ips = \
+ [IPUtil.create_prefix_object(allowed_ipaddr1, 24),]
+
+ dut1_privatekey, dut1_pubkey = \
+ WireGuardUtil.generate_wireguard_privatekey_and_pubkey()
+ dut2_privatekey, dut2_pubkey = \
+ WireGuardUtil.generate_wireguard_privatekey_and_pubkey()
+
+ #Configure WireGuard interface on DUT1
+ WireGuardUtil._wireguard_create_tunnel_interface_on_dut(
+ nodes[u'DUT1'], if1_key, if2_mac_addr, if1_ipaddr, if2_ipaddr,
+ dut1_allowed_ips, 1, wg_if1_ipaddr, port,
+ keepalive_time, dut1_privatekey, dut2_pubkey
+ )
+ #Configure WireGuard interface on DUT2
+ WireGuardUtil._wireguard_create_tunnel_interface_on_dut(
+ nodes[u'DUT2'], if2_key, if1_mac_addr, if2_ipaddr, if1_ipaddr,
+ dut2_allowed_ips, 1, wg_if2_ipaddr, port,
+ keepalive_time, dut2_privatekey, dut1_pubkey
+ )
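
As a standalone illustration of the key handling WireGuardUtil relies on, the snippet below generates an X25519 key pair with the same cryptography calls and additionally base64-encodes the public key into the textual form familiar from wg(8) configs; the base64 step is an extra convenience, not something the library above does.

import base64

from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
from cryptography.hazmat.primitives.serialization import (
    Encoding, NoEncryption, PrivateFormat, PublicFormat
)

# Same calls as generate_wireguard_privatekey_and_pubkey above.
private = X25519PrivateKey.generate()
private_bytes = private.private_bytes(
    Encoding.Raw, PrivateFormat.Raw, NoEncryption()
)
public_bytes = private.public_key().public_bytes(Encoding.Raw, PublicFormat.Raw)

# The VPP API consumes the raw 32-byte values directly.
assert len(private_bytes) == 32 and len(public_bytes) == 32

# Human-readable form, as printed by "wg show".
print(base64.b64encode(public_bytes).decode())
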
diff --git a/resources/libraries/python/autogen/Regenerator.py b/resources/libraries/python/autogen/Regenerator.py
index dd2672bd7c..8d593fecca 100644
--- a/resources/libraries/python/autogen/Regenerator.py
+++ b/resources/libraries/python/autogen/Regenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -17,6 +17,7 @@ TODO: How can we check each suite id is unique,
when currently the suite generation is run on each directory separately?
"""
+import copy
import sys
from glob import glob
@@ -85,7 +86,7 @@ def get_iface_and_suite_ids(filename):
# It was something like "2n1l", we need one more split.
dash_split = dash_split[1].split(u"-", 1)
nic_code = dash_split[0]
- suite_id = dash_split[1].split(u".", 1)[0]
+ suite_id = dash_split[1].split(u".robot", 1)[0]
suite_tag = suite_id.rsplit(u"-", 1)[0]
for prefix in Constants.FORBIDDEN_SUITE_PREFIX_LIST:
if suite_tag.startswith(prefix):
@@ -116,50 +117,75 @@ def check_suite_tag(suite_tag, prolog):
raise ValueError(f"Suite tag found {found} times for {suite_tag}")
-def add_default_testcases(testcase, iface, suite_id, file_out, tc_kwargs_list):
+def filter_and_edit_kwargs_for_astf(suite_id, kwargs):
+ """Return possibly edited kwargs, or None if to be skipped.
+
+    This is a code block used in a few places.
+ Kwargs is (a copy of) one item from tc_kwargs_list.
+ Currently, the editable field is frame_size,
+    to be increased for tests with data (not just CPS).
+
+ :param suite_id: Suite ID.
+ :param kwargs: Key-value pairs used to construct one testcase.
+ :type suite_id: str
+    :type kwargs: dict
+ :returns: Edited kwargs.
+    :rtype: Optional[dict]
+ """
+ if u"-cps-" in suite_id:
+ # Contrary to UDP, there is no place to affect frame size
+ # in TCP CPS tests. Actual frames are close to min size.
+ # UDP uses the min value too, for fairer comparison to TCP.
+ if kwargs[u"frame_size"] not in MIN_FRAME_SIZE_VALUES:
+ return None
+ elif (u"-pps-" in suite_id or u"-tput-" in suite_id):
+ if u"imix" in str(kwargs[u"frame_size"]).lower():
+ # ASTF does not support IMIX (yet).
+ return None
+ if kwargs[u"frame_size"] in MIN_FRAME_SIZE_VALUES:
+ # Minimal (TRex) TCP data frame is 80B for IPv4.
+ # In future, we may want to have also IPv6 TCP.
+ # UDP uses the same value, for fairer comparison to TCP.
+ kwargs[u"frame_size"] = 100
+ return kwargs
+
+
+def add_default_testcases(
+ testcase, nic_code, suite_id, file_out, tc_kwargs_list):
"""Add default testcases to file.
:param testcase: Testcase class.
- :param iface: Interface.
+ :param nic_code: NIC code.
:param suite_id: Suite ID.
:param file_out: File to write testcases to.
:param tc_kwargs_list: Key-value pairs used to construct testcases.
:type testcase: Testcase
- :type iface: str
+ :type nic_code: str
:type suite_id: str
:type file_out: file
:type tc_kwargs_list: dict
"""
- for kwargs in tc_kwargs_list:
+ for kwas in tc_kwargs_list:
+ # We may edit framesize for ASTF, the copy should be local.
+ kwargs = copy.deepcopy(kwas)
# TODO: Is there a better way to disable some combinations?
emit = True
- if kwargs[u"frame_size"] == 9000:
- if u"vic1227" in iface:
- # Not supported in HW.
- emit = False
- if u"vic1385" in iface:
- # Not supported in HW.
- emit = False
- if u"-16vm2t-" in suite_id or u"-16dcr2t-" in suite_id:
- if kwargs[u"phy_cores"] > 3:
- # CSIT lab only has 28 (physical) core processors,
- # so these test would fail when attempting to assign cores.
- emit = False
- if u"-24vm1t-" in suite_id or u"-24dcr1t-" in suite_id:
- if kwargs[u"phy_cores"] > 3:
- # CSIT lab only has 28 (physical) core processors,
- # so these test would fail when attempting to assign cores.
- emit = False
+ core_scale = Constants.NIC_CODE_TO_CORESCALE[nic_code]
if u"soak" in suite_id:
# Soak test take too long, do not risk other than tc01.
if kwargs[u"phy_cores"] != 1:
emit = False
- if kwargs[u"frame_size"] not in MIN_FRAME_SIZE_VALUES:
- emit = False
- if u"-cps-" in suite_id or u"-pps-" in suite_id:
- if kwargs[u"frame_size"] not in MIN_FRAME_SIZE_VALUES:
- emit = False
- if emit:
+ if u"reassembly" in suite_id:
+ if kwargs[u"frame_size"] != 1518:
+ emit = False
+ else:
+ if kwargs[u"frame_size"] not in MIN_FRAME_SIZE_VALUES:
+ emit = False
+
+ kwargs.update({'phy_cores': kwas['phy_cores']*core_scale})
+
+ kwargs = filter_and_edit_kwargs_for_astf(suite_id, kwargs)
+ if emit and kwargs is not None:
file_out.write(testcase.generate(**kwargs))
@@ -191,6 +217,26 @@ def add_iperf3_testcases(testcase, file_out, tc_kwargs_list):
file_out.write(testcase.generate(**kwargs))
+def add_trex_testcases(testcase, suite_id, file_out, tc_kwargs_list):
+ """Add trex testcases to file.
+
+ :param testcase: Testcase class.
+ :param suite_id: Suite ID.
+ :param file_out: File to write testcases to.
+ :param tc_kwargs_list: Key-value pairs used to construct testcases.
+ :type testcase: Testcase
+ :type suite_id: str
+ :type file_out: file
+ :type tc_kwargs_list: dict
+ """
+ for kwas in tc_kwargs_list:
+ # We may edit framesize for ASTF, the copy should be local.
+ kwargs = copy.deepcopy(kwas)
+ kwargs = filter_and_edit_kwargs_for_astf(suite_id, kwargs)
+ if kwargs is not None:
+ file_out.write(testcase.generate(**kwargs))
+
+
def write_default_files(in_filename, in_prolog, kwargs_list):
"""Using given filename and prolog, write all generated suites.
@@ -203,75 +249,75 @@ def write_default_files(in_filename, in_prolog, kwargs_list):
"""
for suite_type in Constants.PERF_TYPE_TO_KEYWORD:
tmp_filename = replace_defensively(
- in_filename, u"ndrpdr", suite_type, 1,
- u"File name should contain suite type once.", in_filename
+ in_filename, "ndrpdr", suite_type, 1,
+ "File name should contain suite type once.", in_filename
)
tmp_prolog = replace_defensively(
- in_prolog, u"ndrpdr".upper(), suite_type.upper(), 1,
- u"Suite type should appear once in uppercase (as tag).",
+ in_prolog, "ndrpdr".upper(), suite_type.upper(), 1,
+ "Suite type should appear once in uppercase (as tag).",
in_filename
)
tmp_prolog = replace_defensively(
tmp_prolog,
- u"Find NDR and PDR intervals using optimized search",
+ "Find NDR and PDR intervals using optimized search",
Constants.PERF_TYPE_TO_KEYWORD[suite_type], 1,
- u"Main search keyword should appear once in suite.",
+ "Main search keyword should appear once in suite.",
in_filename
)
tmp_prolog = replace_defensively(
tmp_prolog,
- Constants.PERF_TYPE_TO_SUITE_DOC_VER[u"ndrpdr"],
+ Constants.PERF_TYPE_TO_SUITE_DOC_VER["ndrpdr"],
Constants.PERF_TYPE_TO_SUITE_DOC_VER[suite_type],
- 1, u"Exact suite type doc not found.", in_filename
+ 1, "Exact suite type doc not found.", in_filename
)
tmp_prolog = replace_defensively(
tmp_prolog,
- Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[u"ndrpdr"],
+ Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER["ndrpdr"],
Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[suite_type],
- 1, u"Exact template type doc not found.", in_filename
+ 1, "Exact template type doc not found.", in_filename
)
_, suite_id, _ = get_iface_and_suite_ids(tmp_filename)
testcase = Testcase.default(suite_id)
- for nic_name in Constants.NIC_NAME_TO_CODE:
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
tmp2_filename = replace_defensively(
- tmp_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[nic_name], 1,
- u"File name should contain NIC code once.", in_filename
+ tmp_filename, "10ge2p1x710", nic_code, 1,
+ "File name should contain NIC code once.", in_filename
)
tmp2_prolog = replace_defensively(
- tmp_prolog, u"Intel-X710", nic_name, 2,
- u"NIC name should appear twice (tag and variable).",
+ tmp_prolog, "Intel-X710", nic_name, 2,
+ "NIC name should appear twice (tag and variable).",
in_filename
)
- if tmp2_prolog.count(u"HW_") == 2:
+ if tmp2_prolog.count("HW_") == 2:
# TODO CSIT-1481: Crypto HW should be read
# from topology file instead.
if nic_name in Constants.NIC_NAME_TO_CRYPTO_HW:
tmp2_prolog = replace_defensively(
- tmp2_prolog, u"HW_DH895xcc",
+ tmp2_prolog, "HW_DH895xcc",
Constants.NIC_NAME_TO_CRYPTO_HW[nic_name], 1,
- u"HW crypto name should appear.", in_filename
+ "HW crypto name should appear.", in_filename
)
iface, old_suite_id, old_suite_tag = get_iface_and_suite_ids(
tmp2_filename
)
- if u"DPDK" in in_prolog:
+ if "DPDK" in in_prolog:
for driver in Constants.DPDK_NIC_NAME_TO_DRIVER[nic_name]:
out_filename = replace_defensively(
tmp2_filename, old_suite_id,
Constants.DPDK_NIC_DRIVER_TO_SUITE_PREFIX[driver] \
+ old_suite_id,
- 1, u"Error adding driver prefix.", in_filename
+ 1, "Error adding driver prefix.", in_filename
)
out_prolog = replace_defensively(
- tmp2_prolog, u"vfio-pci", driver, 1,
- u"Driver name should appear once.", in_filename
+ tmp2_prolog, "vfio-pci", driver, 1,
+ "Driver name should appear once.", in_filename
)
out_prolog = replace_defensively(
out_prolog,
- Constants.DPDK_NIC_DRIVER_TO_TAG[u"vfio-pci"],
+ Constants.DPDK_NIC_DRIVER_TO_TAG["vfio-pci"],
Constants.DPDK_NIC_DRIVER_TO_TAG[driver], 1,
- u"Driver tag should appear once.", in_filename
+ "Driver tag should appear once.", in_filename
)
iface, suite_id, suite_tag = get_iface_and_suite_ids(
out_filename
@@ -286,36 +332,41 @@ def write_default_files(in_filename, in_prolog, kwargs_list):
check_suite_tag(suite_tag, out_prolog)
# TODO: Reorder loops so suite_id is finalized sooner.
testcase = Testcase.default(suite_id)
- with open(out_filename, u"wt") as file_out:
+ with open(out_filename, "wt") as file_out:
file_out.write(out_prolog)
add_default_testcases(
- testcase, iface, suite_id, file_out, kwargs_list
+ testcase, nic_code, suite_id, file_out, kwargs_list
)
continue
for driver in Constants.NIC_NAME_TO_DRIVER[nic_name]:
out_filename = replace_defensively(
tmp2_filename, old_suite_id,
Constants.NIC_DRIVER_TO_SUITE_PREFIX[driver] + old_suite_id,
- 1, u"Error adding driver prefix.", in_filename
+ 1, "Error adding driver prefix.", in_filename
)
out_prolog = replace_defensively(
- tmp2_prolog, u"vfio-pci", driver, 1,
- u"Driver name should appear once.", in_filename
+ tmp2_prolog, "vfio-pci", driver, 1,
+ "Driver name should appear once.", in_filename
)
out_prolog = replace_defensively(
- out_prolog, Constants.NIC_DRIVER_TO_TAG[u"vfio-pci"],
+ out_prolog, Constants.NIC_DRIVER_TO_TAG["vfio-pci"],
Constants.NIC_DRIVER_TO_TAG[driver], 1,
- u"Driver tag should appear once.", in_filename
+ "Driver tag should appear once.", in_filename
)
out_prolog = replace_defensively(
- out_prolog, Constants.NIC_DRIVER_TO_PLUGINS[u"vfio-pci"],
+ out_prolog, Constants.NIC_DRIVER_TO_PLUGINS["vfio-pci"],
Constants.NIC_DRIVER_TO_PLUGINS[driver], 1,
- u"Driver plugin should appear once.", in_filename
+ "Driver plugin should appear once.", in_filename
)
out_prolog = replace_defensively(
- out_prolog, Constants.NIC_DRIVER_TO_VFS[u"vfio-pci"],
+ out_prolog, Constants.NIC_DRIVER_TO_VFS["vfio-pci"],
Constants.NIC_DRIVER_TO_VFS[driver], 1,
- u"NIC VFs argument should appear once.", in_filename
+ "NIC VFs argument should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_CODE_TO_PFS["10ge2p1x710"],
+ Constants.NIC_CODE_TO_PFS[nic_code], 1,
+ "NIC PFs argument should appear once.", in_filename
)
iface, suite_id, suite_tag = get_iface_and_suite_ids(
out_filename
@@ -330,10 +381,10 @@ def write_default_files(in_filename, in_prolog, kwargs_list):
check_suite_tag(suite_tag, out_prolog)
# TODO: Reorder loops so suite_id is finalized sooner.
testcase = Testcase.default(suite_id)
- with open(out_filename, u"wt") as file_out:
+ with open(out_filename, "wt") as file_out:
file_out.write(out_prolog)
add_default_testcases(
- testcase, iface, suite_id, file_out, kwargs_list
+ testcase, nic_code, suite_id, file_out, kwargs_list
)
@@ -353,10 +404,10 @@ def write_reconf_files(in_filename, in_prolog, kwargs_list):
"""
_, suite_id, _ = get_iface_and_suite_ids(in_filename)
testcase = Testcase.default(suite_id)
- for nic_name in Constants.NIC_NAME_TO_CODE:
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
tmp_filename = replace_defensively(
- in_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[nic_name], 1,
+ in_filename, u"10ge2p1x710", nic_code, 1,
u"File name should contain NIC code once.", in_filename
)
tmp_prolog = replace_defensively(
@@ -401,6 +452,11 @@ def write_reconf_files(in_filename, in_prolog, kwargs_list):
Constants.NIC_DRIVER_TO_VFS[driver], 1,
u"NIC VFs argument should appear once.", in_filename
)
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_CODE_TO_PFS["10ge2p1x710"],
+ Constants.NIC_CODE_TO_PFS[nic_code], 1,
+ "NIC PFs argument should appear once.", in_filename
+ )
iface, suite_id, suite_tag = get_iface_and_suite_ids(out_filename)
out_prolog = replace_defensively(
out_prolog, old_suite_tag, suite_tag, 1,
@@ -419,8 +475,6 @@ def write_reconf_files(in_filename, in_prolog, kwargs_list):
def write_tcp_files(in_filename, in_prolog, kwargs_list):
"""Using given filename and prolog, write all generated tcp suites.
- TODO: Suport drivers.
-
:param in_filename: Template filename to derive real filenames from.
:param in_prolog: Template content to derive real content from.
:param kwargs_list: List of kwargs for add_default_testcase.
@@ -431,10 +485,78 @@ def write_tcp_files(in_filename, in_prolog, kwargs_list):
# TODO: Generate rps from cps? There are subtle differences.
_, suite_id, suite_tag = get_iface_and_suite_ids(in_filename)
testcase = Testcase.tcp(suite_id)
- for nic_name in Constants.NIC_NAME_TO_CODE:
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
+ tmp_filename = replace_defensively(
+ in_filename, u"10ge2p1x710", nic_code, 1,
+ u"File name should contain NIC code once.", in_filename
+ )
+ tmp_prolog = replace_defensively(
+ in_prolog, u"Intel-X710", nic_name, 2,
+ u"NIC name should appear twice (tag and variable).",
+ in_filename
+ )
+ iface, old_suite_id, old_suite_tag = get_iface_and_suite_ids(
+ tmp_filename
+ )
+ for driver in Constants.NIC_NAME_TO_DRIVER[nic_name]:
+ out_filename = replace_defensively(
+ tmp_filename, old_suite_id,
+ Constants.NIC_DRIVER_TO_SUITE_PREFIX[driver] + old_suite_id,
+ 1, u"Error adding driver prefix.", in_filename
+ )
+ out_prolog = replace_defensively(
+ tmp_prolog, u"vfio-pci", driver, 1,
+ u"Driver name should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_DRIVER_TO_TAG[u"vfio-pci"],
+ Constants.NIC_DRIVER_TO_TAG[driver], 1,
+ u"Driver tag should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_DRIVER_TO_PLUGINS[u"vfio-pci"],
+ Constants.NIC_DRIVER_TO_PLUGINS[driver], 1,
+ u"Driver plugin should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_DRIVER_TO_VFS[u"vfio-pci"],
+ Constants.NIC_DRIVER_TO_VFS[driver], 1,
+ u"NIC VFs argument should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_CODE_TO_PFS["10ge2p1x710"],
+ Constants.NIC_CODE_TO_PFS[nic_code], 1,
+ "NIC PFs argument should appear once.", in_filename
+ )
+ iface, suite_id, suite_tag = get_iface_and_suite_ids(out_filename)
+ out_prolog = replace_defensively(
+ out_prolog, old_suite_tag, suite_tag, 1,
+ u"Perf suite tag should appear once.", in_filename
+ )
+ check_suite_tag(suite_tag, out_prolog)
+ testcase = Testcase.tcp(suite_id)
+ with open(out_filename, u"wt") as file_out:
+ file_out.write(out_prolog)
+ add_tcp_testcases(testcase, file_out, kwargs_list)
+
+
+def write_iperf3_files(in_filename, in_prolog, kwargs_list):
+ """Using given filename and prolog, write all generated iperf3 suites.
+
+ :param in_filename: Template filename to derive real filenames from.
+ :param in_prolog: Template content to derive real content from.
+ :param kwargs_list: List of kwargs for add_default_testcase.
+ :type in_filename: str
+ :type in_prolog: str
+ :type kwargs_list: list of dict
+ """
+ _, suite_id, suite_tag = get_iface_and_suite_ids(in_filename)
+ testcase = Testcase.iperf3(suite_id)
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
out_filename = replace_defensively(
- in_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[nic_name], 1,
+ in_filename, u"10ge2p1x710", nic_code, 1,
u"File name should contain NIC code once.", in_filename
)
out_prolog = replace_defensively(
@@ -445,11 +567,69 @@ def write_tcp_files(in_filename, in_prolog, kwargs_list):
check_suite_tag(suite_tag, out_prolog)
with open(out_filename, u"wt") as file_out:
file_out.write(out_prolog)
- add_tcp_testcases(testcase, file_out, kwargs_list)
+ add_iperf3_testcases(testcase, file_out, kwargs_list)
-def write_iperf3_files(in_filename, in_prolog, kwargs_list):
- """Using given filename and prolog, write all generated iperf3 suites.
+def write_trex_files(in_filename, in_prolog, kwargs_list):
+ """Using given filename and prolog, write all generated trex suites.
+
+ :param in_filename: Template filename to derive real filenames from.
+ :param in_prolog: Template content to derive real content from.
+ :param kwargs_list: List of kwargs for add_trex_testcase.
+ :type in_filename: str
+ :type in_prolog: str
+ :type kwargs_list: list of dict
+ """
+ for suite_type in Constants.PERF_TYPE_TO_KEYWORD:
+ tmp_filename = replace_defensively(
+ in_filename, u"ndrpdr", suite_type, 1,
+ u"File name should contain suite type once.", in_filename
+ )
+ tmp_prolog = replace_defensively(
+ in_prolog, u"ndrpdr".upper(), suite_type.upper(), 1,
+ u"Suite type should appear once in uppercase (as tag).",
+ in_filename
+ )
+ tmp_prolog = replace_defensively(
+ tmp_prolog,
+ u"Find NDR and PDR intervals using optimized search",
+ Constants.PERF_TYPE_TO_KEYWORD[suite_type], 1,
+ u"Main search keyword should appear once in suite.",
+ in_filename
+ )
+ tmp_prolog = replace_defensively(
+ tmp_prolog,
+ Constants.PERF_TYPE_TO_SUITE_DOC_VER[u"ndrpdr"],
+ Constants.PERF_TYPE_TO_SUITE_DOC_VER[suite_type],
+ 1, u"Exact suite type doc not found.", in_filename
+ )
+ tmp_prolog = replace_defensively(
+ tmp_prolog,
+ Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[u"ndrpdr"],
+ Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[suite_type],
+ 1, u"Exact template type doc not found.", in_filename
+ )
+ _, suite_id, suite_tag = get_iface_and_suite_ids(tmp_filename)
+ testcase = Testcase.trex(suite_id)
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
+ out_filename = replace_defensively(
+ tmp_filename, u"10ge2p1x710", nic_code, 1,
+ u"File name should contain NIC code once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ tmp_prolog, u"Intel-X710", nic_name, 2,
+ u"NIC name should appear twice (tag and variable).",
+ in_filename
+ )
+ check_suite_tag(suite_tag, out_prolog)
+ with open(out_filename, u"wt") as file_out:
+ file_out.write(out_prolog)
+ add_trex_testcases(testcase, suite_id, file_out, kwargs_list)
+
+
+def write_device_files(in_filename, in_prolog, kwargs_list):
+ """Using given filename and prolog, write all generated suites.
:param in_filename: Template filename to derive real filenames from.
:param in_prolog: Template content to derive real content from.
@@ -458,22 +638,68 @@ def write_iperf3_files(in_filename, in_prolog, kwargs_list):
:type in_prolog: str
:type kwargs_list: list of dict
"""
- _, suite_id, suite_tag = get_iface_and_suite_ids(in_filename)
- testcase = Testcase.iperf3(suite_id)
- out_filename = replace_defensively(
- in_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[u"Intel-X710"], 1,
- u"File name should contain NIC code once.", in_filename
- )
- out_prolog = replace_defensively(
- in_prolog, u"Intel-X710", u"Intel-X710", 2,
- u"NIC name should appear twice (tag and variable).",
- in_filename
- )
- check_suite_tag(suite_tag, out_prolog)
- with open(out_filename, u"wt") as file_out:
- file_out.write(out_prolog)
- add_iperf3_testcases(testcase, file_out, kwargs_list)
+ for suite_type in Constants.DEVICE_TYPE_TO_KEYWORD:
+ tmp_filename = replace_defensively(
+ in_filename, u"scapy", suite_type, 1,
+ u"File name should contain suite type once.", in_filename
+ )
+ _, suite_id, _ = get_iface_and_suite_ids(tmp_filename)
+ testcase = Testcase.default(suite_id)
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
+ tmp2_filename = replace_defensively(
+ tmp_filename, u"10ge2p1x710", nic_code, 1,
+ u"File name should contain NIC code once.", in_filename
+ )
+ tmp2_prolog = replace_defensively(
+ in_prolog, u"Intel-X710", nic_name, 2,
+ u"NIC name should appear twice (tag and variable).",
+ in_filename
+ )
+ iface, old_suite_id, _ = get_iface_and_suite_ids(
+ tmp2_filename
+ )
+ for driver in Constants.NIC_NAME_TO_DRIVER[nic_name]:
+ out_filename = replace_defensively(
+ tmp2_filename, old_suite_id,
+ Constants.NIC_DRIVER_TO_SUITE_PREFIX[driver] + old_suite_id,
+ 1, u"Error adding driver prefix.", in_filename
+ )
+ out_prolog = replace_defensively(
+ tmp2_prolog, u"vfio-pci", driver, 1,
+ u"Driver name should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_DRIVER_TO_TAG[u"vfio-pci"],
+ Constants.NIC_DRIVER_TO_TAG[driver], 1,
+ u"Driver tag should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_DRIVER_TO_PLUGINS[u"vfio-pci"],
+ Constants.NIC_DRIVER_TO_PLUGINS[driver], 1,
+ u"Driver plugin should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_DRIVER_TO_VFS[u"vfio-pci"],
+ Constants.NIC_DRIVER_TO_VFS[driver], 1,
+ u"NIC VFs argument should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_CODE_TO_PFS["10ge2p1x710"],
+ Constants.NIC_CODE_TO_PFS[nic_code], 1,
+ "NIC PFs argument should appear once.", in_filename
+ )
+ iface, suite_id, suite_tag = get_iface_and_suite_ids(
+ out_filename
+ )
+ check_suite_tag(suite_tag, out_prolog)
+ # TODO: Reorder loops so suite_id is finalized sooner.
+ testcase = Testcase.default(suite_id)
+ with open(out_filename, u"wt") as file_out:
+ file_out.write(out_prolog)
+ add_default_testcases(
+ testcase, iface, suite_id, file_out, kwargs_list
+ )
class Regenerator:
@@ -497,7 +723,7 @@ class Regenerator:
Log-like prints are emitted to sys.stderr.
- :param pattern: Glob pattern to select files. Example: *-ndrpdr.robot
+ :param pattern: Glob pattern to select files. Example: \*-ndrpdr.robot
:param protocol: String determining minimal frame size. Default: "ip4"
:type pattern: str
:type protocol: str
@@ -532,6 +758,44 @@ class Regenerator:
{u"frame_size": 128000, u"phy_cores": 2},
{u"frame_size": 128000, u"phy_cores": 4}
]
+ # List for tests with one dataplane core
+ # (and variable number of other cores).
+ dp1_kwargs_list = [
+ {u"frame_size": min_frame_size, u"phy_cores": 2},
+ {u"frame_size": min_frame_size, u"phy_cores": 3},
+ {u"frame_size": min_frame_size, u"phy_cores": 4},
+ {u"frame_size": 1518, u"phy_cores": 2},
+ {u"frame_size": 1518, u"phy_cores": 3},
+ {u"frame_size": 1518, u"phy_cores": 4},
+ {u"frame_size": 9000, u"phy_cores": 2},
+ {u"frame_size": 9000, u"phy_cores": 3},
+ {u"frame_size": 9000, u"phy_cores": 4},
+ {u"frame_size": u"IMIX_v4_1", u"phy_cores": 2},
+ {u"frame_size": u"IMIX_v4_1", u"phy_cores": 3},
+ {u"frame_size": u"IMIX_v4_1", u"phy_cores": 4}
+ ]
+
+ http_kwargs_list = [
+ {u"frame_size": 0, u"phy_cores": 1},
+ {u"frame_size": 0, u"phy_cores": 2},
+ {u"frame_size": 64, u"phy_cores": 1},
+ {u"frame_size": 64, u"phy_cores": 2},
+ {u"frame_size": 1024, u"phy_cores": 1},
+ {u"frame_size": 1024, u"phy_cores": 2},
+ {u"frame_size": 2048, u"phy_cores": 1},
+ {u"frame_size": 2048, u"phy_cores": 2}
+ ]
+
+ device_kwargs_list = [
+ {u"frame_size": min_frame_size, u"phy_cores": 0}
+ ]
+
+ trex_kwargs_list = [
+ {u"frame_size": min_frame_size},
+ {u"frame_size": 1518},
+ {u"frame_size": 9000},
+ {u"frame_size": u"IMIX_v4_1"}
+ ]
for in_filename in glob(pattern):
if not self.quiet:
@@ -552,17 +816,32 @@ class Regenerator:
in_prolog = u"".join(
file_in.read().partition(u"*** Test Cases ***")[:-1]
)
+ if "-tg" in in_filename:
+ write_trex_files(in_filename, in_prolog, trex_kwargs_list)
+ continue
if in_filename.endswith(u"-ndrpdr.robot"):
- write_default_files(in_filename, in_prolog, default_kwargs_list)
+ if u"scheduler" in in_filename:
+ write_default_files(
+ in_filename, in_prolog, dp1_kwargs_list
+ )
+ else:
+ write_default_files(
+ in_filename, in_prolog, default_kwargs_list
+ )
elif in_filename.endswith(u"-reconf.robot"):
write_reconf_files(in_filename, in_prolog, default_kwargs_list)
+ elif in_filename.endswith(u"-rps.robot") \
+ or in_filename.endswith(u"-cps.robot"):
+ write_tcp_files(in_filename, in_prolog, http_kwargs_list)
elif in_filename.endswith(u"-bps.robot"):
hoststack_kwargs_list = \
hs_quic_kwargs_list if u"quic" in in_filename \
else hs_bps_kwargs_list
write_tcp_files(in_filename, in_prolog, hoststack_kwargs_list)
- elif in_filename.endswith(u"-iperf3.robot"):
+ elif in_filename.endswith(u"-iperf3-mrr.robot"):
write_iperf3_files(in_filename, in_prolog, iperf3_kwargs_list)
+ elif in_filename.endswith(u"-scapy.robot"):
+ write_device_files(in_filename, in_prolog, device_kwargs_list)
else:
raise RuntimeError(
f"Error in {in_filename}: non-primary suite type found."
diff --git a/resources/libraries/python/autogen/Testcase.py b/resources/libraries/python/autogen/Testcase.py
index 173c5919af..32fc5014cc 100644
--- a/resources/libraries/python/autogen/Testcase.py
+++ b/resources/libraries/python/autogen/Testcase.py
@@ -33,16 +33,17 @@ class Testcase:
"""
self.template = Template(template_string)
- def generate(self, frame_size, phy_cores):
+ def generate(self, frame_size, phy_cores=None):
"""Return string of test case code with placeholders filled.
Fail if there are placeholders left unfilled.
It is not required for all placeholders to be present in template.
:param frame_size: Imix string or numeric frame size. Example: 74.
- :param phy_cores: Number of physical cores to use. Example: 2.
+ :param phy_cores: Number of physical cores to use. Example: 2. It can
+ be None in n2n testcases.
:type frame_size: str or int
- :type phy_cores: int or str
+ :type phy_cores: int, str or None
:returns: Filled template, usable as test case code.
:rtype: str
"""
@@ -57,6 +58,8 @@ class Testcase:
u"frame_num": str(frame_size),
u"frame_str": u"IMIX"
}
+ if phy_cores is None:
+ return self.template.substitute(subst_dict)
cores_str = str(phy_cores)
cores_num = int(cores_str)
subst_dict.update(
@@ -100,7 +103,14 @@ class Testcase:
# TODO: Choose a better frame size identifier for streamed protocols
# (TCP, QUIC, SCTP, ...) where DUT (not TG) decides frame size.
if u"tcphttp" in suite_id:
- template_string = f'''
+ if u"rps" or u"cps" in suite_id:
+ template_string = f'''
+| ${{frame_str}}-${{cores_str}}c-{suite_id}
+| | [Tags] | ${{frame_str}} | ${{cores_str}}C
+| | frame_size=${{frame_num}} | phy_cores=${{cores_num}}
+'''
+ else:
+ template_string = f'''
| IMIX-${{cores_str}}c-{suite_id}
| | [Tags] | ${{cores_str}}C
| | phy_cores=${{cores_num}}
@@ -129,3 +139,21 @@ class Testcase:
| | frame_size=${{frame_num}} | phy_cores=${{cores_num}}
'''
return cls(template_string)
+
+ @classmethod
+ def trex(cls, suite_id):
+ """Factory method for creating "trex" testcase objects.
+
+ Testcase name will contain frame size, but not core count.
+
+ :param suite_id: Part of suite name to distinguish from other suites.
+ :type suite_id: str
+ :returns: Instance for generating testcase text of this type.
+ :rtype: Testcase
+ """
+ template_string = f'''
+| ${{frame_str}}--{suite_id}
+| | [Tags] | ${{frame_str}}
+| | frame_size=${{frame_num}}
+'''
+ return cls(template_string)
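
The trex factory above boils down to a string.Template substitution; the sketch below renders the same template shape for one numeric and one IMIX frame size. The suite id and the frame_str/frame_num values shown are illustrative assumptions.

from string import Template

suite_id = "ethip4-ip4base-tg"  # hypothetical trex suite id
template = Template(f'''
| ${{frame_str}}--{suite_id}
| | [Tags] | ${{frame_str}}
| | frame_size=${{frame_num}}
''')

# Numeric frame size: both placeholders derive from the number.
print(template.substitute({"frame_str": "64B", "frame_num": "64"}))
# IMIX: frame_str stays "IMIX", frame_num carries the symbolic name.
print(template.substitute({"frame_str": "IMIX", "frame_num": "IMIX_v4_1"}))
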
diff --git a/resources/libraries/python/autogen/__init__.py b/resources/libraries/python/autogen/__init__.py
index dfb2255a09..939824903e 100644
--- a/resources/libraries/python/autogen/__init__.py
+++ b/resources/libraries/python/autogen/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/autogen/add_suite_tag.py b/resources/libraries/python/autogen/add_suite_tag.py
index 63bfa1a385..276e6b04a4 100755
--- a/resources/libraries/python/autogen/add_suite_tag.py
+++ b/resources/libraries/python/autogen/add_suite_tag.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/enum_util.py b/resources/libraries/python/enum_util.py
new file mode 100644
index 0000000000..41dfd8a459
--- /dev/null
+++ b/resources/libraries/python/enum_util.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions for handling VPP API enum values from Robot."""
+
+
+from enum import Enum, IntEnum
+from typing import Type, Union
+
+
+# The return type is enum_class, but it is hard to explain that to pylint.
+def get_enum_instance(
+ enum_class: Type[Enum], value: Union[Enum, str, int, None]
+) -> Enum:
+ """Return an enum instance matching the string name.
+
+ In Robot, it is not convenient to construct Enum instances,
+ most values defined in Robot are strings.
+
+ This helper function can be used in Python L1 keywords
+ to convert string into the corresponding Enum instance.
+ Aliases are also recognized.
+
+ As an added benefit, support various Robot-like niceties,
+ like lower case, or dash or space instead of underscore.
+
+    As a common shortcut, value is returned if it already is an instance.
+
+ Another convenience: None or empty string is processed as "NONE".
+
+ If the class is a subclass of IntEnum, int values
+    and (string) values convertible to int are also accepted as input.
+
+ :param enum_class: Class object instance of which should be returned.
+ :param value: String or any other recognized form of an enum instance.
+ :type enum_class: Type[Enum]
+ :type value: Union[enum_class, str, int, None]
+ :returns: The matching instance, if found.
+ :rtype: enum_class
+ :raises: ValueError if no matching instance is found.
+ """
+ if issubclass(enum_class, IntEnum):
+ try:
+ int_value = int(value)
+ return enum_class(int_value)
+ except (TypeError, ValueError):
+ pass
+ if isinstance(value, enum_class):
+ return value
+ if not value:
+ value = "NONE"
+ normalized_name = str(value).upper().replace("-", "_").replace(" ", "_")
+ members = enum_class.__members__ # Includes aliases, useful for NONE.
+ if normalized_name not in members:
+ msg = f"Enum class {enum_class} does not have value {normalized_name!r}"
+ raise ValueError(msg)
+ return members[normalized_name]
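
A quick usage sketch of get_enum_instance; the IntEnum below is invented for the example (real callers pass VPP API enums), but the conversions shown follow directly from the function body above.

from enum import IntEnum

from resources.libraries.python.enum_util import get_enum_instance

class ProtectMode(IntEnum):
    """Hypothetical stand-in for a VPP API enum."""
    NONE = 0
    TRANSPORT_MODE = 1
    TUNNEL_MODE = 2

assert get_enum_instance(ProtectMode, "tunnel-mode") is ProtectMode.TUNNEL_MODE
assert get_enum_instance(ProtectMode, None) is ProtectMode.NONE
assert get_enum_instance(ProtectMode, "2") is ProtectMode.TUNNEL_MODE
assert get_enum_instance(ProtectMode, ProtectMode.TRANSPORT_MODE) \
    is ProtectMode.TRANSPORT_MODE
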
diff --git a/resources/libraries/python/jumpavg/__init__.py b/resources/libraries/python/jumpavg/__init__.py
index cb8b3df43d..7f63b5ee39 100644
--- a/resources/libraries/python/jumpavg/__init__.py
+++ b/resources/libraries/python/jumpavg/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -15,8 +15,8 @@
__init__ file for "jumpavg" Python package.
"""
-from .AvgStdevStats import AvgStdevStats
-from .BitCountingStats import BitCountingStats
-from .BitCountingGroup import BitCountingGroup
-from .BitCountingGroupList import BitCountingGroupList
+from .avg_stdev_stats import AvgStdevStats
+from .bit_counting_stats import BitCountingStats
+from .bit_counting_group import BitCountingGroup
+from .bit_counting_group_list import BitCountingGroupList
from .classify import classify
diff --git a/resources/libraries/python/jumpavg/AvgStdevStats.py b/resources/libraries/python/jumpavg/avg_stdev_stats.py
index 9a8decd932..c21c50c8f8 100644
--- a/resources/libraries/python/jumpavg/AvgStdevStats.py
+++ b/resources/libraries/python/jumpavg/avg_stdev_stats.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,9 +13,12 @@
"""Module holding AvgStdevStats class."""
+import dataclasses
import math
+import typing
+@dataclasses.dataclass
class AvgStdevStats:
"""Class for statistics which include average and stdev of a group.
@@ -25,45 +28,18 @@ class AvgStdevStats:
Instances are only statistics, the data itself is stored elsewhere.
"""
- def __init__(self, size=0, avg=0.0, stdev=0.0):
- """Construct the stats object by storing the values needed.
-
- Each value has to be numeric.
- The values are not sanitized depending on size, wrong initialization
- can cause delayed math errors.
-
- :param size: Number of values participating in this group.
- :param avg: Population average of the participating sample values.
- :param stdev: Population standard deviation of the sample values.
- :type size: int
- :type avg: float
- :type stdev: float
- """
- self.size = size
- self.avg = avg
- self.stdev = stdev
-
- def __str__(self):
- """Return string with human readable description of the group.
-
- :returns: Readable description.
- :rtype: str
- """
- return f"size={self.size} avg={self.avg} stdev={self.stdev}"
-
- def __repr__(self):
- """Return string executable as Python constructor call.
-
- :returns: Executable constructor call.
- :rtype: str
- """
- return (
- f"AvgStdevStats(size={self.size!r},avg={self.avg!r}"
- f",stdev={self.stdev!r})"
- )
+ size: int = 0
+ """Number of scalar values (samples) participating in this group."""
+ avg: float = 0.0
+ """Population average of the participating sample values."""
+ stdev: float = 0.0
+ """Population standard deviation of the sample values."""
@classmethod
- def for_runs(cls, runs):
+ def for_runs(
+ cls,
+ runs: typing.Iterable[typing.Union[float, "AvgStdevStats"]],
+ ) -> "AvgStdevStats":
"""Return new stats instance describing the sequence of runs.
If you want to append data to existing stats object,
@@ -72,8 +48,8 @@ class AvgStdevStats:
Instead of a verb, "for" is used to start this method name,
to signify the result contains less information than the input data.
- Here, Run is a hypothetical abstract class, an union of float and cls.
- Defining that as a real abstract class in Python 2 is too much hassle.
+        Here, run is a hypothetical abstract class, a union of float and cls.
+ Defining that as a real abstract class in Python is too much hassle.
:param runs: Sequence of data to describe by the new metadata.
:type runs: Iterable[Union[float, cls]]
@@ -97,6 +73,8 @@ class AvgStdevStats:
run_size = run.size
run_avg = run.avg
run_stdev = run.stdev
+ if run_size < 1:
+ continue
old_total_size = total_size
delta = run_avg - total_avg
total_size += run_size
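
The for_runs update above is the usual parallel combination of population statistics; the standalone check below implements the same kind of merge for two pre-summarized groups and verifies it against the statistics module. It is a sketch of the idea, not the library code itself.

import math
import statistics

def combine(size1, avg1, stdev1, size2, avg2, stdev2):
    """Merge population stats of two groups in one step."""
    size = size1 + size2
    delta = avg2 - avg1
    avg = avg1 + delta * size2 / size
    moment2 = (
        size1 * stdev1 ** 2
        + size2 * stdev2 ** 2
        + delta ** 2 * size1 * size2 / size
    )
    return size, avg, math.sqrt(moment2 / size)

a, b = [2.0, 4.0, 4.0], [5.0, 7.0]
size, avg, stdev = combine(
    len(a), statistics.fmean(a), statistics.pstdev(a),
    len(b), statistics.fmean(b), statistics.pstdev(b),
)
# Same result as computing over the concatenated samples.
assert math.isclose(avg, statistics.fmean(a + b))
assert math.isclose(stdev, statistics.pstdev(a + b))
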
diff --git a/resources/libraries/python/jumpavg/BitCountingGroup.py b/resources/libraries/python/jumpavg/bit_counting_group.py
index 0c1aabba30..22c9337532 100644
--- a/resources/libraries/python/jumpavg/BitCountingGroup.py
+++ b/resources/libraries/python/jumpavg/bit_counting_group.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,14 +13,16 @@
"""Module holding BitCountingGroup class."""
-import copy
+import collections
+import dataclasses
+import typing
-from .AvgStdevStats import AvgStdevStats
-from .BitCountingStats import BitCountingStats
+from .avg_stdev_stats import AvgStdevStats
+from .bit_counting_stats import BitCountingStats
-class BitCountingGroup:
- # TODO: Inherit from collections.abc.Sequence in Python 3.
+@dataclasses.dataclass
+class BitCountingGroup(collections.abc.Sequence):
"""Group of runs which tracks bit count in an efficient manner.
This class contains methods that mutate the internal state,
@@ -38,74 +40,63 @@ class BitCountingGroup:
a method to add a single run in an efficient manner is provided.
"""
- def __init__(self, run_list=None, stats=None, bits=None,
- max_value=None, prev_avg=None, comment="unknown"):
- """Set the internal state and partially the stats.
-
- A "group" stands for an Iterable of runs, where "run" is either
- a float value, or a stats-like object (only size, avg and stdev
- are accessed). Run is a hypothetical abstract class,
- defining it in Python 2 is too much hassle.
-
- Only a copy of the run list argument value is stored in the instance,
- so it is not a problem if the value object is mutated afterwards.
+ run_list: typing.List[typing.Union[float, AvgStdevStats]]
+ """List of run to compose into this group.
+ The init call takes ownership of the list,
+ so the caller should clone it to avoid unexpected muations."""
+ max_value: float
+ """Maximal sample value to expect."""
+ unit: float = 1.0
+ """Typical resolution of the values"""
+ comment: str = "normal"
+ """Any string giving more info, e.g. "regression"."""
+ prev_avg: typing.Optional[float] = None
+ """Average of the previous group, if any."""
+ stats: AvgStdevStats = None
+ """Stats object used for computing bits.
+ Almost always recomputed, except when non-None in init."""
+ cached_bits: typing.Optional[float] = None
+ """Cached value of information content.
+ Noned on edit, recomputed if needed and None."""
+
+ def __post_init__(self):
+ """Recompute stats is None.
It is not verified whether the user provided values are valid,
e.g. whether the stats and bits values reflect the runs.
-
- :param run_list: List of run to compose into this group. Default: empty.
- :param stats: Stats object used for computing bits.
- :param bits: Cached value of information content.
- :param max_value: Maximal sample value to be used for computing.
- :param prev_avg: Average of the previous group, affects bits.
- :param comment: Any string giving more info, e.g. "regression".
- :type run_list: Iterable[Run]
- :type stats: Optional[AvgStdevStats]
- :type bits: Optional[float]
- :type max_value: float
- :type prev_avg: Optional[float]
- :type comment: str
"""
- self.run_list = copy.deepcopy(run_list) if run_list else list()
- self.stats = stats
- self.cached_bits = bits
- self.max_value = max_value
- self.prev_avg = prev_avg
- self.comment = comment
if self.stats is None:
- self.stats = AvgStdevStats.for_runs(self.run_list)
-
- def __str__(self):
- """Return string with human readable description of the group.
+ self.stats = AvgStdevStats.for_runs(runs=self.run_list)
- :returns: Readable description.
- :rtype: str
- """
- return f"stats={self.stats} bits={self.cached_bits}"
+ @property
+ def bits(self) -> float:
+ """Return overall bit content of the group list.
- def __repr__(self):
- """Return string executable as Python constructor call.
+ If not cached, compute from stats and cache.
- :returns: Executable constructor call.
- :rtype: str
+ :returns: The overall information content in bits.
+ :rtype: float
"""
- return (
- f"BitCountingGroup(run_list={self.run_list!r},stats={self.stats!r}"
- f",bits={self.cached_bits!r},max_value={self.max_value!r}"
- f",prev_avg={self.prev_avg!r},comment={self.comment!r})"
- )
+ if self.cached_bits is None:
+ self.cached_bits = BitCountingStats.for_runs_and_params(
+ runs=[self.stats],
+ max_value=self.max_value,
+ unit=self.unit,
+ prev_avg=self.prev_avg,
+ ).bits
+ return self.cached_bits
- def __getitem__(self, index):
+ def __getitem__(self, index: int) -> typing.Union[float, AvgStdevStats]:
"""Return the run at the index.
:param index: Index of the run to return.
:type index: int
:returns: The run at the index.
- :rtype: Run
+ :rtype: typing.Union[float, AvgStdevStats]
"""
return self.run_list[index]
- def __len__(self):
+ def __len__(self) -> int:
"""Return the number of runs in the group.
:returns: The Length of run_list.
@@ -113,39 +104,36 @@ class BitCountingGroup:
"""
return len(self.run_list)
- def copy(self):
+ def copy(self) -> "BitCountingGroup":
"""Return a new instance with copied internal state.
+ Stats are preserved to avoid re-computation.
+ As both float and AvgStdevStats are effectively immutable,
+ only a shallow copy of the runs list is performed.
+
:returns: The copied instance.
:rtype: BitCountingGroup
"""
stats = AvgStdevStats.for_runs([self.stats])
return self.__class__(
- run_list=self.run_list, stats=stats, bits=self.cached_bits,
- max_value=self.max_value, prev_avg=self.prev_avg,
- comment=self.comment)
-
- @property
- def bits(self):
- """Return overall bit content of the group list.
-
- If not cached, compute from stats and cache.
-
- :returns: The overall information content in bits.
- :rtype: float
- """
- if self.cached_bits is None:
- self.cached_bits = BitCountingStats.for_runs(
- [self.stats], self.max_value, self.prev_avg).bits
- return self.cached_bits
+ run_list=list(self.run_list),
+ stats=stats,
+ cached_bits=self.cached_bits,
+ max_value=self.max_value,
+ unit=self.unit,
+ prev_avg=self.prev_avg,
+ comment=self.comment,
+ )
- def append(self, run):
+ def append(
+ self, run: typing.Union[float, AvgStdevStats]
+ ) -> "BitCountingGroup":
"""Mutate to add the new run, return self.
Stats are updated, but old bits value is deleted from cache.
:param run: The run value to add to the group.
- :type value: Run
+        :type run: typing.Union[float, AvgStdevStats]
:returns: The updated self.
:rtype: BitCountingGroup
"""
@@ -154,7 +142,9 @@ class BitCountingGroup:
self.cached_bits = None
return self
- def extend(self, runs):
+ def extend(
+ self, runs: typing.Iterable[typing.Union[float, AvgStdevStats]]
+ ) -> "BitCountingGroup":
"""Mutate to add the new runs, return self.
This saves a small amount of computation
@@ -163,7 +153,7 @@ class BitCountingGroup:
Stats are updated, but old bits value is deleted from cache.
:param runs: The runs to add to the group.
- :type value: Iterable[Run]
+ :type runs: typing.Iterable[typing.Union[float, AvgStdevStats]]
:returns: The updated self.
:rtype: BitCountingGroup
"""
diff --git a/resources/libraries/python/jumpavg/BitCountingGroupList.py b/resources/libraries/python/jumpavg/bit_counting_group_list.py
index bcc5e43267..e4d33b53a2 100644
--- a/resources/libraries/python/jumpavg/BitCountingGroupList.py
+++ b/resources/libraries/python/jumpavg/bit_counting_group_list.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,13 +13,16 @@
"""Module holding BitCountingGroupList class."""
-import copy
+import collections
+import dataclasses
+import typing
-from .BitCountingGroup import BitCountingGroup
+from .avg_stdev_stats import AvgStdevStats # Just for type hints.
+from .bit_counting_group import BitCountingGroup
-class BitCountingGroupList:
- # TODO: Inherit from collections.abc.Sequence in Python 3.
+@dataclasses.dataclass
+class BitCountingGroupList(collections.abc.Sequence):
"""List of data groups which tracks overall bit count.
The Sequence-like access is related to the list of groups,
@@ -41,55 +44,29 @@ class BitCountingGroupList:
recalculations if the bit count is not needed.
"""
- def __init__(self, group_list=None, bits_except_last=0.0, max_value=None):
- """Set the internal state without any calculations.
-
- The group list argument is copied deeply, so it is not a problem
- if the value object is mutated afterwards.
+ max_value: float
+ """Maximal sample value to base bits computation on."""
+ unit: float = 1.0
+ """Typical resolution of the values."""
+ group_list: typing.List[BitCountingGroup] = None
+ """List of groups to compose this group list.
+ Init also accepts None standing for an empty list.
+ This class takes ownership of the list,
+ so caller of init should clone their copy to avoid unexpected mutations.
+ """
+ bits_except_last: float = 0.0
+ """Partial sum of all but one group bits."""
- A "group" stands for an Iterable of runs, where "run" is either
- a float value, or a stats-like object (only size, avg and stdev
- are accessed). Run is a hypothetical abstract class,
- defining it in Python 2 is too much hassle.
+ def __post_init__(self):
+ """Turn possible None into an empty list.
It is not verified whether the user provided values are valid,
- e.g. whether the cached bits values make sense.
-
- The max_value is required and immutable,
- it is recommended the callers find their maximum beforehand.
-
- :param group_list: List of groups to compose this group list (or empty).
- :param bits_except_last: Partial sum of all but one group bits.
- :param max_value: Maximal sample value to base bits computation on.
- :type group_list: Iterable[BitCountingGroup]
- :type bits_except_last: float
- :type max_value: float
- """
- self.group_list = copy.deepcopy(group_list) if group_list else list()
- self.bits_except_last = bits_except_last
- self.max_value = max_value
-
- def __str__(self):
- """Return string with human readable description of the group list.
-
- :returns: Readable description.
- :rtype: str
+ e.g. whether the cached bits values (and bits_except_last) make sense.
"""
- return u"group_list={self.group_list} bits={self.bits}"
-
- def __repr__(self):
- """Return string executable as Python constructor call.
+ if self.group_list is None:
+ self.group_list = []
- :returns: Executable constructor call.
- :rtype: str
- """
- return (
- f"BitCountingGroupList(group_list={self.group_list!r}"
- f",bits_except_last={self.bits_except_last!r}"
- f",max_value={self.max_value!r})"
- )
-
- def __getitem__(self, index):
+ def __getitem__(self, index: int) -> BitCountingGroup:
"""Return the group at the index.
:param index: Index of the group to return.
@@ -99,7 +76,7 @@ class BitCountingGroupList:
"""
return self.group_list[index]
- def __len__(self):
+ def __len__(self) -> int:
"""Return the length of the group list.
:returns: The Length of group_list.
@@ -107,19 +84,46 @@ class BitCountingGroupList:
"""
return len(self.group_list)
- def copy(self):
+ def copy(self) -> "BitCountingGroupList":
"""Return a new instance with copied internal state.
:returns: The copied instance.
:rtype: BitCountingGroupList
"""
return self.__class__(
- group_list=self.group_list, bits_except_last=self.bits_except_last,
- max_value=self.max_value
+ max_value=self.max_value,
+ unit=self.unit,
+ group_list=[group.copy() for group in self.group_list],
+ bits_except_last=self.bits_except_last,
+ )
+
+ def copy_fast(self) -> "BitCountingGroupList":
+ """Return a new instance with minimaly copied internal state.
+
+ The assumption here is that only the last group will ever be mutated
+ (in self, probably never in the return value),
+ so all the previous groups can be "copied by reference".
+
+ :returns: The copied instance.
+ :rtype: BitCountingGroupList
+ """
+ group_list = list(self.group_list)
+ if group_list:
+ group_list[-1] = group_list[-1].copy()
+ # Further speedup is possible by keeping the last group
+ # as a singly linked (from end) list,
+ # but for CSIT sample sizes, copy of whole Python list is faster.
+ # TODO: Implement linked list as an option
+ # for users with many samples.
+ return self.__class__(
+ max_value=self.max_value,
+ unit=self.unit,
+ group_list=group_list,
+ bits_except_last=self.bits_except_last,
)
@property
- def bits(self):
+ def bits(self) -> float:
"""Return overall bit content of the group list.
:returns: The overall information content in bits.
@@ -130,12 +134,17 @@ class BitCountingGroupList:
# TODO: Is it worth to cache the overall result?
return self.bits_except_last + self.group_list[-1].bits
- def append_group_of_runs(self, runs):
+ def append_group_of_runs(
+ self,
+ runs: typing.Union[
+ BitCountingGroup, typing.List[typing.Union[float, AvgStdevStats]]
+ ],
+ ) -> "BitCountingGroupList":
"""Mutate to add a new group based on the runs, return self.
- The argument is copied before adding to the group list,
- so further edits do not affect the grup list.
- The argument can also be a group, only runs from it are used.
+ The list argument is NOT copied before adding to the group list,
+ so further edits MAY affect the group list.
+ The list from BitCountingGroup is shallow copied though.
:param runs: Runs to form the next group to be appended to self.
:type runs: Union[Iterable[Run], BitCountingGroup]
@@ -147,16 +156,23 @@ class BitCountingGroupList:
# It is faster to avoid stats recalculation.
new_group = runs.copy()
new_group.max_value = self.max_value
+ # Unit is common.
new_group.prev_avg = prev_avg
new_group.cached_bits = None
else:
new_group = BitCountingGroup(
- run_list=runs, max_value=self.max_value, prev_avg=prev_avg)
+ run_list=runs,
+ max_value=self.max_value,
+ unit=self.unit,
+ prev_avg=prev_avg,
+ )
self.bits_except_last = self.bits
self.group_list.append(new_group)
return self
- def append_run_to_to_last_group(self, run):
+ def append_run_to_to_last_group(
+ self, run: typing.Union[float, AvgStdevStats]
+ ) -> "BitCountingGroupList":
"""Mutate to add new run at the end of the last group.
Basically a one-liner, only returning group list instead of last group.
@@ -170,7 +186,9 @@ class BitCountingGroupList:
self.group_list[-1].append(run)
return self
- def extend_runs_to_last_group(self, runs):
+ def extend_runs_to_last_group(
+ self, runs: typing.Iterable[typing.Union[float, AvgStdevStats]]
+ ) -> "BitCountingGroupList":
"""Mutate to add new runs to the end of the last group.
A faster alternative to appending runs one by one in a loop.
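A short sketch of how the group list is typically driven, illustrative only and not part of the patch; the direct module import is an assumption, as is every number used.

    from resources.libraries.python.jumpavg.bit_counting_group_list import (
        BitCountingGroupList,
    )

    glist = BitCountingGroupList(max_value=10.0, unit=0.01)
    glist.append_group_of_runs([2.0, 2.1])       # First group, no prev_avg.
    glist.append_group_of_runs([5.0, 4.9, 5.2])  # Second group, prev_avg applies.
    total = glist.bits                           # bits_except_last + last group bits.
    branch = glist.copy_fast()                   # Older groups shared, last one copied.
    branch.append_run_to_to_last_group(5.1)      # Leaves glist's last group untouched.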
diff --git a/resources/libraries/python/jumpavg/BitCountingStats.py b/resources/libraries/python/jumpavg/bit_counting_stats.py
index 0addec013b..3d1cb8aef0 100644
--- a/resources/libraries/python/jumpavg/BitCountingStats.py
+++ b/resources/libraries/python/jumpavg/bit_counting_stats.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,11 +13,14 @@
"""Module holding BitCountingStats class."""
+import dataclasses
import math
+import typing
-from .AvgStdevStats import AvgStdevStats
+from .avg_stdev_stats import AvgStdevStats
+@dataclasses.dataclass
class BitCountingStats(AvgStdevStats):
"""Class for statistics which include information content of a group.
@@ -33,11 +36,22 @@ class BitCountingStats(AvgStdevStats):
Only for_runs method calls the parent implementation, without using super().
"""
- def __init__(
- self, size=0, avg=None, stdev=0.0, max_value=None, prev_avg=None):
- """Construct the stats object by computing from the values needed.
+ max_value: float = None
+ """Maximal sample value (real or estimated).
+ Default value is there just for argument ordering reasons,
+ leaving None leads to exceptions."""
+ unit: float = 1.0
+ """Typical resolution of the values."""
+ prev_avg: typing.Optional[float] = None
+ """Population average of the previous group (if any)."""
+ bits: float = None
+ """The computed information content of the group.
+ It is formally an argument to init function, just to keep repr string
+ a valid call. But the init value is ignored and always recomputed.
+ """
- The values are not sanitized, faulty callers can cause math errors.
+ def __post_init__(self):
+ """Construct the stats object by computing from the values needed.
The None values are allowed for stats for zero size data,
but such stats can report arbitrary avg and max_value.
@@ -54,91 +68,60 @@ class BitCountingStats(AvgStdevStats):
(but not with floating point mechanic).
The hope is the difference will have
no real impact on the classification procedure.
-
- :param size: Number of values participating in this group.
- :param avg: Population average of the participating sample values.
- :param stdev: Population standard deviation of the sample values.
- :param max_value: Maximal expected value.
- TODO: This might be more optimal,
- but max-invariant algorithm will be nicer.
- :param prev_avg: Population average of the previous group.
- If None, no previous average is taken into account.
- If not None, the given previous average is used to discourage
- consecutive groups with similar averages
- (opposite triangle distribution is assumed).
- :type avg: float
- :type size: int
- :type stdev: float
- :type max_value: Union[float, NoneType]
- :type prev_avg: Union[float, NoneType]
"""
- self.avg = avg
- self.size = size
- self.stdev = stdev
- self.max_value = max_value
- self.prev_avg = prev_avg
# Zero size should in principle have non-zero bits (coding zero size),
# but zero allows users to add empty groups without affecting bits.
self.bits = 0.0
if self.size < 1:
return
- if avg is None:
- raise ValueError(f"Avg is None: {self!r}")
- if max_value is None or max_value <= 0.0:
+ if self.max_value <= 0.0:
raise ValueError(f"Invalid max value: {self!r}")
+ max_value = self.max_value / self.unit
+ avg = self.avg / self.unit
# Length of the sequence must be also counted in bits,
# otherwise the message would not be decodable.
# Model: probability of k samples is 1/k - 1/(k+1) == 1/k/(k+1)
# This is compatible with zero size leading to zero bits.
- self.bits += math.log(size * (size + 1), 2)
- if prev_avg is None:
+ self.bits += math.log(self.size * (self.size + 1), 2)
+ if self.prev_avg is None:
# Avg is considered to be uniformly distributed
# from zero to max_value.
- self.bits += math.log(max_value + 1.0, 2)
+ self.bits += math.log(max_value + 1, 2)
else:
# Opposite triangle distribution with minimum.
- self.bits += math.log(
- max_value * (max_value + 1) / (abs(avg - prev_avg) + 1), 2)
+ prev_avg = self.prev_avg / self.unit
+ norm = prev_avg * prev_avg
+ norm -= (prev_avg - 1) * max_value
+ norm += max_value * max_value / 2
+ self.bits -= math.log((abs(avg - prev_avg) + 1) / norm, 2)
if self.size < 2:
return
- # Stdev is considered to be uniformly distributed
- # from zero to max_value. That is quite a bad expectation,
- # but resilient to negative samples etc.
- self.bits += math.log(max_value + 1.0, 2)
+ stdev = self.stdev / self.unit
+ # Stdev can be anything between zero and max value.
+ # For size==2, sphere surface is 2 points regardless of radius,
+ # we need to penalize large stdev already when encoding the stdev.
+ # The simplest way is to use the same distribution as with size...
+ self.bits += math.log((stdev + 1) * (stdev + 2), 2)
+ # .. just with added normalization from the max value cut-off.
+ self.bits += math.log(1 - 1 / (max_value + 2), 2)
# Now we know the samples lie on sphere in size-1 dimensions.
# So it is (size-2)-sphere, with radius^2 == stdev^2 * size.
# https://en.wikipedia.org/wiki/N-sphere
- sphere_area_ln = math.log(2) + math.log(math.pi) * ((size - 1) / 2.0)
- sphere_area_ln -= math.lgamma((size - 1) / 2.0)
- sphere_area_ln += math.log(stdev + 1.0) * (size - 2)
- sphere_area_ln += math.log(size) * ((size - 2) / 2.0)
+ sphere_area_ln = math.log(2)
+ sphere_area_ln += math.log(math.pi) * ((self.size - 1) / 2)
+ sphere_area_ln -= math.lgamma((self.size - 1) / 2)
+ sphere_area_ln += math.log(stdev + 1) * (self.size - 2)
+ sphere_area_ln += math.log(self.size) * ((self.size - 2) / 2)
self.bits += sphere_area_ln / math.log(2)
- def __str__(self):
- """Return string with human readable description of the group.
-
- :returns: Readable description.
- :rtype: str
- """
- return (
- f"size={self.size} avg={self.avg} stdev={self.stdev}"
- f" bits={self.bits}"
- )
-
- def __repr__(self):
- """Return string executable as Python constructor call.
-
- :returns: Executable constructor call.
- :rtype: str
- """
- return (
- f"BitCountingStats(size={self.size!r},avg={self.avg!r}"
- f",stdev={self.stdev!r},max_value={self.max_value!r}"
- f",prev_avg={self.prev_avg!r})"
- )
-
@classmethod
- def for_runs(cls, runs, max_value=None, prev_avg=None):
+ def for_runs_and_params(
+ cls,
+ runs: typing.Iterable[typing.Union[float, AvgStdevStats]],
+ max_value: float,
+ unit: float = 1.0,
+ prev_avg: typing.Optional[float] = None,
+ ):
"""Return new stats instance describing the sequence of runs.
If you want to append data to existing stats object,
@@ -156,14 +139,22 @@ class BitCountingStats(AvgStdevStats):
:param runs: Sequence of data to describe by the new metadata.
:param max_value: Maximal expected value.
+ :param unit: Typical resolution of the values.
:param prev_avg: Population average of the previous group, if any.
:type runs: Iterable[Union[float, AvgStdevStats]]
:type max_value: Union[float, NoneType]
+ :type unit: float
:type prev_avg: Union[float, NoneType]
:returns: The new stats instance.
:rtype: cls
"""
asd = AvgStdevStats.for_runs(runs)
- ret_obj = cls(size=asd.size, avg=asd.avg, stdev=asd.stdev,
- max_value=max_value, prev_avg=prev_avg)
+ ret_obj = cls(
+ size=asd.size,
+ avg=asd.avg,
+ stdev=asd.stdev,
+ max_value=max_value,
+ unit=unit,
+ prev_avg=prev_avg,
+ )
return ret_obj
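As a rough worked example (not from the patch; the numbers are made up), samples rescaled together with max_value and unit encode to the same bit count, since max_value, avg, stdev and prev_avg are all divided by unit before encoding:

    from resources.libraries.python.jumpavg.bit_counting_stats import BitCountingStats

    a = BitCountingStats.for_runs_and_params(
        runs=[10.1, 10.3, 9.9], max_value=20.0, unit=0.1
    )
    b = BitCountingStats.for_runs_and_params(
        runs=[101.0, 103.0, 99.0], max_value=200.0, unit=1.0
    )
    # a.bits and b.bits should match, up to floating point rounding.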
diff --git a/resources/libraries/python/jumpavg/classify.py b/resources/libraries/python/jumpavg/classify.py
index 5f5ce6160c..cc3cdcceed 100644
--- a/resources/libraries/python/jumpavg/classify.py
+++ b/resources/libraries/python/jumpavg/classify.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,32 +13,54 @@
"""Module holding the classify function
-Classification os one of primary purposes of this package.
+Classification is one of primary purposes of this package.
Minimal message length principle is used
for grouping results into the list of groups,
assuming each group is a population of different Gaussian distribution.
"""
-from .AvgStdevStats import AvgStdevStats
-from .BitCountingGroupList import BitCountingGroupList
+from typing import Iterable, Optional, Union
+from .avg_stdev_stats import AvgStdevStats
+from .bit_counting_group_list import BitCountingGroupList
-def classify(values):
+
+def classify(
+ values: Iterable[Union[float, Iterable[float]]],
+ unit: Optional[float] = None,
+ sbps: Optional[float] = None,
+) -> BitCountingGroupList:
"""Return the values in groups of optimal bit count.
Here, a value is either a float, or an iterable of floats.
Such iterables represent an indivisible sequence of floats.
+ Int is accepted anywhere instead of float.
Internally, such sequence is replaced by AvgStdevStats
after maximal value is found.
+ If the values are smaller than expected (below one unit),
+ the underlying assumption breaks down and the classification is wrong.
+ Use the "unit" parameter to hint at what the input resolution is.
+
+ If the correct value of unit is not known beforehand,
+ the argument "sbps" (Significant Bits Per Sample) can be used
+ to set unit such that maximal sample value is this many ones in binary.
+ If neither "unit" nor "sbps" are given, "sbps" of 12 is used by default.
+
:param values: Sequence of runs to classify.
+ :param unit: Typical resolution of the values.
+ Zero and None means no unit given.
+ :param sbps: Significant Bits Per Sample. None or zero means 12.
+ If unit is not set, this is used to compute unit from max sample value.
:type values: Iterable[Union[float, Iterable[float]]]
+ :type unit: Optional[float]
+ :type sbps: Optional[float]
:returns: Classified group list.
:rtype: BitCountingGroupList
"""
- processed_values = list()
+ processed_values = []
max_value = 0.0
for value in values:
if isinstance(value, (float, int)):
@@ -50,27 +72,27 @@ def classify(values):
if subvalue > max_value:
max_value = subvalue
processed_values.append(AvgStdevStats.for_runs(value))
- open_at = list()
- closed_before = [BitCountingGroupList(max_value=max_value)]
- for index, value in enumerate(processed_values):
- newly_open = closed_before[index].copy()
- newly_open.append_group_of_runs([value])
- open_at.append(newly_open)
- record_group_list = newly_open
- for previous_index, old_open in enumerate(open_at[:index]):
- new_open = old_open.copy().append_run_to_to_last_group(value)
- open_at[previous_index] = new_open
- if new_open.bits < record_group_list.bits:
- record_group_list = new_open
- closed_before.append(record_group_list)
- partition = closed_before[-1]
- previous_average = partition[0].stats.avg
- for group in partition:
- if group.stats.avg == previous_average:
- group.comment = u"normal"
- elif group.stats.avg < previous_average:
- group.comment = u"regression"
+ if not unit:
+ if not sbps:
+ sbps = 12.0
+ max_in_units = pow(2.0, sbps + 1.0) - 1.0
+ unit = max_value / max_in_units
+ # Glist means group list (BitCountingGroupList).
+ open_glists = []
+ record_glist = BitCountingGroupList(max_value=max_value, unit=unit)
+ for value in processed_values:
+ new_open_glist = record_glist.copy_fast().append_group_of_runs([value])
+ record_glist = new_open_glist
+ for old_open_glist in open_glists:
+ old_open_glist.append_run_to_to_last_group(value)
+ if old_open_glist.bits < record_glist.bits:
+ record_glist = old_open_glist
+ open_glists.append(new_open_glist)
+ previous_average = record_glist[0].stats.avg
+ for group in record_glist:
+ if group.stats.avg < previous_average:
+ group.comment = "regression"
elif group.stats.avg > previous_average:
- group.comment = u"progression"
+ group.comment = "progression"
previous_average = group.stats.avg
- return partition
+ return record_glist
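A usage sketch for the new signature, illustrative and not part of the patch; the package-level import is an assumption, as are the input values.

    from resources.libraries.python.jumpavg import classify

    # A nested list counts as one indivisible run, replaced by AvgStdevStats.
    values = [10.1, 10.2, 9.9, 7.0, 7.1, [7.2, 6.9]]
    glist = classify(values, unit=0.1)
    for group in glist:
        print(group.comment, group.stats.avg, len(group))
    # With neither unit nor sbps given, sbps defaults to 12, so per the code
    # above unit becomes max_value / (2.0 ** 13 - 1.0).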
diff --git a/resources/libraries/python/model/ExportJson.py b/resources/libraries/python/model/ExportJson.py
new file mode 100644
index 0000000000..3f923d6d0e
--- /dev/null
+++ b/resources/libraries/python/model/ExportJson.py
@@ -0,0 +1,395 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module tracking json in-memory data and saving it to files.
+
+Each test case, suite setup (hierarchical) and teardown has its own file pair.
+
+Validation is performed for output files for which a JSON schema is available.
+Validation is performed on data deserialized from disk,
+as serialization might have introduced subtle errors.
+"""
+
+import datetime
+import os.path
+
+from binascii import b2a_base64
+from dateutil.parser import parse
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+from zlib import compress
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.jumpavg import AvgStdevStats
+from resources.libraries.python.model.ExportResult import (
+ export_dut_type_and_version, export_tg_type_and_version
+)
+from resources.libraries.python.model.MemDump import write_output
+from resources.libraries.python.model.validate import (
+ get_validators, validate
+)
+
+
+class ExportJson():
+ """Class handling the json data setting and export."""
+
+ ROBOT_LIBRARY_SCOPE = "GLOBAL"
+
+ def __init__(self):
+ """Declare required fields, cache output dir.
+
+ Also memorize schema validator instances.
+ """
+ self.output_dir = BuiltIn().get_variable_value("\\${OUTPUT_DIR}", ".")
+ self.file_path = None
+ self.data = None
+ self.validators = get_validators()
+
+ def _detect_test_type(self):
+ """Return test_type, as inferred from robot test tags.
+
+ :returns: The inferred test type value.
+ :rtype: str
+ :raises RuntimeError: If the test tags do not contain expected values.
+ """
+ tags = self.data["tags"]
+ # First 5 options are specific for VPP tests.
+ if "DEVICETEST" in tags:
+ test_type = "device"
+ elif "LDP_NGINX" in tags:
+ test_type = "hoststack"
+ elif "HOSTSTACK" in tags:
+ test_type = "hoststack"
+ elif "GSO_TRUE" in tags or "GSO_FALSE" in tags:
+ test_type = "mrr"
+ elif "RECONF" in tags:
+ test_type = "reconf"
+ # The remaining 3 options could also apply to DPDK and TRex tests.
+ elif "SOAK" in tags:
+ test_type = "soak"
+ elif "NDRPDR" in tags:
+ test_type = "ndrpdr"
+ elif "MRR" in tags:
+ test_type = "mrr"
+ else:
+ raise RuntimeError(f"Unable to infer test type from tags: {tags}")
+ return test_type
+
+ def export_pending_data(self):
+ """Write the accumulated data to disk.
+
+ Create missing directories.
+ Reset both file path and data to avoid writing multiple times.
+
+ Functions which finalize content for given file are calling this,
+ so make sure each test and non-empty suite setup or teardown
+ is calling this as their last keyword.
+
+ If no file path is set, do not write anything,
+ as that is the failsafe behavior when called from an unexpected place.
+ Also do not write anything when the EXPORT_JSON constant is false.
+
+ Regardless of whether data was written, it is cleared.
+ """
+ if not Constants.EXPORT_JSON or not self.file_path:
+ self.data = None
+ self.file_path = None
+ return
+ new_file_path = write_output(self.file_path, self.data)
+ # Data is going to be cleared (as a sign that export succeeded),
+ # so this is the last chance to detect if it was for a test case.
+ is_testcase = "result" in self.data
+ self.data = None
+ # Validation for output goes here when ready.
+ self.file_path = None
+ if is_testcase:
+ validate(new_file_path, self.validators["tc_info"])
+
+ def warn_on_bad_export(self):
+ """If bad state is detected, log a warning and clean up state."""
+ if self.file_path is not None or self.data is not None:
+ logger.warn(f"Previous export not clean, path {self.file_path}")
+ self.data = None
+ self.file_path = None
+
+ def start_suite_setup_export(self):
+ """Set new file path, initialize data for the suite setup.
+
+ This has to be called explicitly at start of suite setup,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ "%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value("\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(" ", "_")
+ suite_path_part = os.path.join(*suite_id.split("."))
+ output_dir = self.output_dir
+ self.file_path = os.path.join(
+ output_dir, suite_path_part, "setup.info.json"
+ )
+ self.data = dict()
+ self.data["version"] = Constants.MODEL_VERSION
+ self.data["start_time"] = start_time
+ self.data["suite_name"] = suite_name
+ self.data["suite_documentation"] = BuiltIn().get_variable_value(
+ "\\${SUITE_DOCUMENTATION}"
+ )
+ # "end_time" and "duration" are added on flush.
+ self.data["hosts"] = set()
+ self.data["telemetry"] = list()
+
+ def start_test_export(self):
+ """Set new file path, initialize data to minimal tree for the test case.
+
+ It is assumed Robot variables DUT_TYPE and DUT_VERSION
+ are already set (in suite setup) to correct values.
+
+ This function has to be called explicitly at the start of test setup,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite and test.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ "%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value("\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(" ", "_")
+ suite_path_part = os.path.join(*suite_id.split("."))
+ test_name = BuiltIn().get_variable_value("\\${TEST_NAME}")
+ self.file_path = os.path.join(
+ self.output_dir, suite_path_part,
+ test_name.lower().replace(" ", "_") + ".info.json"
+ )
+ self.data = dict()
+ self.data["version"] = Constants.MODEL_VERSION
+ self.data["start_time"] = start_time
+ self.data["suite_name"] = suite_name
+ self.data["test_name"] = test_name
+ test_doc = BuiltIn().get_variable_value("\\${TEST_DOCUMENTATION}", "")
+ self.data["test_documentation"] = test_doc
+ # "test_type" is added on flush.
+ # "tags" is detected and added on flush.
+ # "end_time" and "duration" is added on flush.
+ # Robot status and message are added on flush.
+ self.data["result"] = dict(type="unknown")
+ self.data["hosts"] = BuiltIn().get_variable_value("\\${hosts}")
+ self.data["telemetry"] = list()
+ export_dut_type_and_version()
+ export_tg_type_and_version()
+
+ def start_suite_teardown_export(self):
+ """Set new file path, initialize data for the suite teardown.
+
+ This has to be called explicitly at start of suite teardown,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ "%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value("\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(" ", "_")
+ suite_path_part = os.path.join(*suite_id.split("."))
+ self.file_path = os.path.join(
+ self.output_dir, suite_path_part, "teardown.info.json"
+ )
+ self.data = dict()
+ self.data["version"] = Constants.MODEL_VERSION
+ self.data["start_time"] = start_time
+ self.data["suite_name"] = suite_name
+ # "end_time" and "duration" is added on flush.
+ self.data["hosts"] = BuiltIn().get_variable_value("\\${hosts}")
+ self.data["telemetry"] = list()
+
+ def finalize_suite_setup_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of suite setup.
+ The write is done at next start (or at the end of global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+ self.data["hosts"] = BuiltIn().get_variable_value("\\${hosts}")
+ self.data["end_time"] = end_time
+ self.export_pending_data()
+
+ def finalize_test_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be at the end of test teardown, as the implementation
+ reads various Robot variables, some of them only available at teardown.
+
+ The write is done at next start (or at the end of global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+ message = BuiltIn().get_variable_value("\\${TEST_MESSAGE}")
+ test_tags = BuiltIn().get_variable_value("\\${TEST_TAGS}")
+ self.data["end_time"] = end_time
+ start_float = parse(self.data["start_time"]).timestamp()
+ end_float = parse(self.data["end_time"]).timestamp()
+ self.data["duration"] = end_float - start_float
+ self.data["tags"] = list(test_tags)
+ self.data["message"] = message
+ self.process_passed()
+ self.process_test_name()
+ self.process_results()
+ self.export_pending_data()
+
+ def finalize_suite_teardown_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of suite teardown
+ (but before the explicit write in the global suite teardown).
+ The write is done at next start (or explicitly for global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+ self.data["end_time"] = end_time
+ self.export_pending_data()
+
+ def process_test_name(self):
+ """Replace raw test name with short and long test name and set
+ test_type.
+
+ Perform in-place edits on the data dictionary.
+ Remove raw suite_name and test_name, they are not published.
+ Return early if the data is not for test case.
+ Insert test ID and long and short test name into the data.
+ Besides suite_name and test_name, also test tags are read.
+
+ Short test name is basically a suite tag, but with NIC driver prefix,
+ if the NIC driver used is not the default one (drv_vfio_pci for VPP
+ tests).
+
+ Long test name has the following form:
+ {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
+ Lookup in test tags is needed to get the threads value.
+ The threads_and_cores part may be empty, e.g. for TRex tests.
+
+ Test ID has form {suite_name}.{test_name} where the two names come from
+ Robot variables, converted to lower case and spaces replaced by
+ underscores.
+
+ Test type is set in an internal function.
+
+ :raises RuntimeError: If the data does not contain expected values.
+ """
+ suite_part = self.data.pop("suite_name").lower().replace(" ", "_")
+ if "test_name" not in self.data:
+ # There will be no test_id, provide suite_id instead.
+ self.data["suite_id"] = suite_part
+ return
+ test_part = self.data.pop("test_name").lower().replace(" ", "_")
+ self.data["test_id"] = f"{suite_part}.{test_part}"
+ tags = self.data["tags"]
+ # Test name does not contain thread count.
+ subparts = test_part.split("-")
+ if any("tg" in s for s in subparts) and subparts[1] == "":
+ # Physical core count not detected, assume it is a TRex test.
+ if "--" not in test_part:
+ raise RuntimeError(f"Invalid TG test name for: {subparts}")
+ short_name = test_part.split("--", 1)[1]
+ else:
+ short_name = "-".join(subparts[2:])
+ # Add threads to test_part.
+ core_part = subparts[1]
+ tag = list(filter(lambda t: subparts[1].upper() in t, tags))[0]
+ test_part = test_part.replace(f"-{core_part}-", f"-{tag.lower()}-")
+ # For long name we need NIC model, which is only in suite name.
+ last_suite_part = suite_part.split(".")[-1]
+ # Short name happens to be the suffix we want to ignore.
+ prefix_part = last_suite_part.split(short_name)[0]
+ # Also remove the trailing dash.
+ prefix_part = prefix_part[:-1]
+ # Throw away possible link prefix such as "1n1l-".
+ nic_code = prefix_part.split("-", 1)[-1]
+ nic_short = Constants.NIC_CODE_TO_SHORT_NAME[nic_code]
+ long_name = f"{nic_short}-{test_part}"
+ # Set test type.
+ test_type = self._detect_test_type()
+ self.data["test_type"] = test_type
+ # Remove trailing test type from names (if present).
+ short_name = short_name.split(f"-{test_type}")[0]
+ long_name = long_name.split(f"-{test_type}")[0]
+ # Store names.
+ self.data["test_name_short"] = short_name
+ self.data["test_name_long"] = long_name
+
+ def process_passed(self):
+ """Process the test status information as boolean.
+
+ Boolean is used to make post processing more efficient.
+ In case the test status is PASS, we will truncate the test message.
+ """
+ status = BuiltIn().get_variable_value("\\${TEST_STATUS}")
+ if status is not None:
+ self.data["passed"] = (status == "PASS")
+ if self.data["passed"]:
+ # Also truncate success test messages.
+ self.data["message"] = ""
+
+ def process_results(self):
+ """Process measured results.
+
+ Results are used to avoid future post processing, making it more
+ efficient to consume.
+ """
+ if self.data["telemetry"]:
+ telemetry_encode = "\n".join(self.data["telemetry"]).encode()
+ telemetry_compress = compress(telemetry_encode, level=9)
+ telemetry_base64 = b2a_base64(telemetry_compress, newline=False)
+ self.data["telemetry"] = [telemetry_base64.decode()]
+ if "result" not in self.data:
+ return
+ result_node = self.data["result"]
+ result_type = result_node["type"]
+ if result_type == "unknown":
+ # Device or something else not supported.
+ return
+
+ # Compute avg and stdev for mrr (rate and bandwidth).
+ if result_type == "mrr":
+ for node_name in ("rate", "bandwidth"):
+ node = result_node["receive_rate"].get(node_name, None)
+ if node is not None:
+ stats = AvgStdevStats.for_runs(node["values"])
+ node["avg"] = stats.avg
+ node["stdev"] = stats.stdev
+ return
+
+ # Multiple processing steps for ndrpdr.
+ if result_type != "ndrpdr":
+ return
+ # Filter out invalid latencies.
+ for which_key in ("latency_forward", "latency_reverse"):
+ if which_key not in result_node:
+ # Probably just an unidir test.
+ continue
+ for load in ("pdr_0", "pdr_10", "pdr_50", "pdr_90"):
+ if result_node[which_key][load]["max"] <= 0:
+ # One invalid number is enough to remove all loads.
+ break
+ else:
+ # No break means all numbers are ok, nothing to do here.
+ continue
+ # Break happened, something is invalid, remove all loads.
+ result_node.pop(which_key)
+ return
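For reference, a minimal sketch (not from the patch) of the mrr post-processing step above, with a hand-made result node standing in for real data:

    from resources.libraries.python.jumpavg import AvgStdevStats

    result_node = {
        "type": "mrr",
        "receive_rate": {"rate": {"unit": "pps", "values": [4.1e6, 4.0e6, 4.2e6]}},
    }
    node = result_node["receive_rate"]["rate"]
    stats = AvgStdevStats.for_runs(node["values"])
    node["avg"] = stats.avg      # Same fields process_results fills in.
    node["stdev"] = stats.stdev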
diff --git a/resources/libraries/python/model/ExportResult.py b/resources/libraries/python/model/ExportResult.py
new file mode 100644
index 0000000000..f155848913
--- /dev/null
+++ b/resources/libraries/python/model/ExportResult.py
@@ -0,0 +1,316 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module with keywords that publish parts of result structure."""
+
+from robot.libraries.BuiltIn import BuiltIn
+
+from resources.libraries.python.model.util import descend, get_export_data
+
+
+def export_dut_type_and_version(dut_type="unknown", dut_version="unknown"):
+ """Export the arguments as dut type and version.
+
+ Robot tends to convert "none" into None, hence the unusual default values.
+
+ If either argument is missing, the value from robot variable is used.
+ If argument is present, the value is also stored to robot suite variable.
+
+ :param dut_type: DUT type, e.g. VPP or DPDK.
+ :param dut_version: DUT version as determined by the caller.
+ :type dut_type: Optional[str]
+ :type dut_version: Optional[str]
+ :raises RuntimeError: If value is neither in argument nor robot variable.
+ """
+ if dut_type == "unknown":
+ dut_type = BuiltIn().get_variable_value("\\${DUT_TYPE}", "unknown")
+ if dut_type == "unknown":
+ raise RuntimeError("Dut type not provided.")
+ else:
+ # We want to set a variable in higher level suite setup
+ # to be available to test setup several levels lower.
+ BuiltIn().set_suite_variable(
+ "\\${DUT_TYPE}", dut_type, "children=True"
+ )
+ if dut_version == "unknown":
+ dut_version = BuiltIn().get_variable_value(
+ "\\${DUT_VERSION}", "unknown"
+ )
+ if dut_type == "unknown":
+ raise RuntimeError("Dut version not provided.")
+ else:
+ BuiltIn().set_suite_variable(
+ "\\${DUT_VERSION}", dut_version, "children=True"
+ )
+ data = get_export_data()
+ data["dut_type"] = dut_type.lower()
+ data["dut_version"] = dut_version
+
+
+def export_tg_type_and_version(tg_type="unknown", tg_version="unknown"):
+ """Export the arguments as tg type and version.
+
+ Robot tends to convert "none" into None, hence the unusual default values.
+
+ If either argument is missing, the value from robot variable is used.
+ If argument is present, the value is also stored to robot suite variable.
+
+ :param tg_type: TG type, e.g. TREX.
+ :param tg_version: TG version as determined by the caller.
+ :type tg_type: Optional[str]
+ :type tg_version: Optional[str]
+ :raises RuntimeError: If value is neither in argument nor robot variable.
+ """
+ if tg_type == "unknown":
+ tg_type = BuiltIn().get_variable_value("\\${TG_TYPE}", "unknown")
+ if tg_type == "unknown":
+ raise RuntimeError("TG type not provided!")
+ else:
+ # We want to set a variable in higher level suite setup
+ # to be available to test setup several levels lower.
+ BuiltIn().set_suite_variable(
+ "\\${TG_TYPE}", tg_type, "children=True"
+ )
+ if tg_version == "unknown":
+ tg_version = BuiltIn().get_variable_value(
+ "\\${TG_VERSION}", "unknown"
+ )
+ if tg_type == "unknown":
+ raise RuntimeError("TG version not provided!")
+ else:
+ BuiltIn().set_suite_variable(
+ "\\${TG_VERSION}", tg_version, "children=True"
+ )
+ data = get_export_data()
+ data["tg_type"] = tg_type.lower()
+ data["tg_version"] = tg_version
+
+
+def append_mrr_value(mrr_value, mrr_unit, bandwidth_value=None,
+ bandwidth_unit="bps"):
+ """Store mrr value to proper place so it is dumped into json.
+
+ The value is appended only when unit is not empty.
+
+ :param mrr_value: Forwarding rate from MRR trial.
+ :param mrr_unit: Unit of measurement for the rate.
+ :param bandwidth_value: The same value recomputed into L1 bits per second.
+ :type mrr_value: float
+ :type mrr_unit: str
+ :type bandwidth_value: Optional[float]
+ :type bandwidth_unit: Optional[str]
+ """
+ if not mrr_unit:
+ return
+ data = get_export_data()
+ data["result"]["type"] = "mrr"
+
+ for node_val, node_unit, node_name in ((mrr_value, mrr_unit, "rate"),
+ (bandwidth_value, bandwidth_unit, "bandwidth")):
+ if node_val is not None:
+ node = descend(descend(data["result"], "receive_rate"), node_name)
+ node["unit"] = str(node_unit)
+ values_list = descend(node, "values", list)
+ values_list.append(float(node_val))
+
+
+def export_search_bound(text, value, unit, bandwidth=None):
+ """Store bound value and unit.
+
+ This function works for both NDRPDR and SOAK, decided by text.
+
+ If a node does not exist, it is created.
+ If a previous value exists, it is overwritten silently.
+ Result type is set (overwritten) to ndrpdr (or soak).
+
+ Text is used to determine whether it is ndr or pdr, upper or lower bound,
+ as the Robot caller has the information only there.
+
+ :param text: Info from Robot caller to determine bound type.
+ :param value: The bound value in packets (or connections) per second.
+ :param unit: Rate unit the bound is measured (or estimated) in.
+ :param bandwidth: The same value recomputed into L1 bits per second.
+ :type text: str
+ :type value: float
+ :type unit: str
+ :type bandwidth: Optional[float]
+ """
+ value = float(value)
+ text = str(text).lower()
+ result_type = "soak" if "plrsearch" in text else "ndrpdr"
+ upper_or_lower = "upper" if "upper" in text else "lower"
+ ndr_or_pdr = "ndr" if "ndr" in text else "pdr"
+
+ result_node = get_export_data()["result"]
+ result_node["type"] = result_type
+ rate_item = dict(rate=dict(value=value, unit=unit))
+ if bandwidth:
+ rate_item["bandwidth"] = dict(value=float(bandwidth), unit="bps")
+ if result_type == "soak":
+ descend(result_node, "critical_rate")[upper_or_lower] = rate_item
+ return
+ descend(result_node, ndr_or_pdr)[upper_or_lower] = rate_item
+
+
+def _add_latency(result_node, percent, whichward, latency_string):
+ """Descend to a corresponding node and add values from latency string.
+
+ This is an internal block, moved out from export_ndrpdr_latency,
+ as it can be called up to 4 times.
+
+ :param result_node: UTI tree node to descend from.
+ :param percent: Percent value to use in node key (90, 50, 10, 0).
+ :param whichward: "forward" or "reverse".
+ :param latency_string: Unidir output from TRex utility, min/avg/max/hdrh.
+ :type result_node: dict
+ :type percent: int
+ :type whichward: str
+ :type latency_string: str
+ """
+ l_min, l_avg, l_max, l_hdrh = latency_string.split("/", 3)
+ whichward_node = descend(result_node, f"latency_{whichward}")
+ percent_node = descend(whichward_node, f"pdr_{percent}")
+ percent_node["min"] = int(l_min)
+ percent_node["avg"] = int(l_avg)
+ percent_node["max"] = int(l_max)
+ percent_node["hdrh"] = l_hdrh
+ percent_node["unit"] = "us"
+
+
+def export_ndrpdr_latency(text, latency):
+ """Store NDRPDR hdrh latency data.
+
+ If "latency" node does not exist, it is created.
+ If a previous value exists, it is overwritten silently.
+
+ Text is used to determine what percentage of PDR is the load,
+ as the Robot caller has the information only there.
+
+ Reverse data may be missing, we assume the test was unidirectional.
+
+ :param text: Info from Robot caller to determine load,
+ :param latency: Output from TRex utility, min/avg/max/hdrh.
+ :type text: str
+ :type latency: 1-tuple or 2-tuple of str
+ """
+ result_node = get_export_data()["result"]
+ percent = 0
+ if "90" in text:
+ percent = 90
+ elif "50" in text:
+ percent = 50
+ elif "10" in text:
+ percent = 10
+ _add_latency(result_node, percent, "forward", latency[0])
+ # Else TRex does not support latency measurement for this traffic profile.
+ if len(latency) < 2:
+ return
+ _add_latency(result_node, percent, "reverse", latency[1])
+
+
+def export_reconf_result(packet_rate, packet_loss, bandwidth):
+ """Export the RECONF type results.
+
+ Result type is set to reconf.
+
+ :param packet_rate: Aggregate offered load in packets per second.
+ :param packet_loss: How many of the packets were dropped or unsent.
+ :param bandwidth: The offered load recomputed into L1 bits per second.
+ :type packet_rate: float
+ :type packet_loss: int
+ :type bandwidth: float
+ """
+ result_node = get_export_data()["result"]
+ result_node["type"] = "reconf"
+
+ time_loss = int(packet_loss) / float(packet_rate)
+ result_node["aggregate_rate"] = dict(
+ bandwidth=dict(
+ unit="bps",
+ value=float(bandwidth)
+ ),
+ rate=dict(
+ unit="pps",
+ value=float(packet_rate)
+ )
+ )
+ result_node["loss"] = dict(
+ packet=dict(
+ unit="packets",
+ value=int(packet_loss)
+ ),
+ time=dict(
+ unit="s",
+ value=time_loss
+ )
+ )
+
+
+def export_hoststack_results(
+ bandwidth, rate=None, rate_unit=None, latency=None,
+ failed_requests=None, completed_requests=None, retransmits=None,
+ duration=None
+):
+ """Export the HOSTSTACK type results.
+
+ Result type is set to hoststack.
+
+ :param bandwidth: Measured transfer rate using bps as a unit.
+ :param rate: Resulting rate measured by the test. [Optional]
+ :param rate_unit: CPS or RPS. [Optional]
+ :param latency: Measured latency. [Optional]
+ :param failed_requests: Number of failed requests. [Optional]
+ :param completed_requests: Number of completed requests. [Optional]
+ :param retransmits: Retransmitted TCP packets. [Optional]
+ :param duration: Measurement duration. [Optional]
+ :type bandwidth: float
+ :type rate: float
+ :type rate_unit: str
+ :type latency: float
+ :type failed_requests: int
+ :type completed_requests: int
+ :type retransmits: int
+ :type duration: float
+ """
+ result_node = get_export_data()["result"]
+ result_node["type"] = "hoststack"
+
+ result_node["bandwidth"] = dict(unit="bps", value=bandwidth)
+ if rate is not None:
+ result_node["rate"] = \
+ dict(unit=rate_unit, value=rate)
+ if latency is not None:
+ result_node["latency"] = \
+ dict(unit="ms", value=latency)
+ if failed_requests is not None:
+ result_node["failed_requests"] = \
+ dict(unit="requests", value=failed_requests)
+ if completed_requests is not None:
+ result_node["completed_requests"] = \
+ dict(unit="requests", value=completed_requests)
+ if retransmits is not None:
+ result_node["retransmits"] = \
+ dict(unit="packets", value=retransmits)
+ if duration is not None:
+ result_node["duration"] = \
+ dict(unit="s", value=duration)
+
+
+def append_telemetry(telemetry_item):
+ """Append telemetry entry to proper place so it is dumped into json.
+
+ :param telemetry_item: Telemetry entry.
+ :type telemetry_item: str
+ """
+ data = get_export_data()
+ data["telemetry"].append(telemetry_item)
diff --git a/resources/libraries/python/model/MemDump.py b/resources/libraries/python/model/MemDump.py
new file mode 100644
index 0000000000..b391569286
--- /dev/null
+++ b/resources/libraries/python/model/MemDump.py
@@ -0,0 +1,194 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module for converting in-memory data into JSON output.
+
+CSIT and VPP PAPI are using custom data types that are not directly serializable
+into JSON.
+
+Thus, before writing the output onto disk, the data is recursively converted to
+equivalent serializable types, in extreme cases replaced by string
+representation.
+
+Validation is outside the scope of this module, as it should use the JSON data
+read from disk.
+"""
+
+import json
+import os
+
+from collections.abc import Iterable, Mapping, Set
+from enum import IntFlag
+from dateutil.parser import parse
+
+
+def _pre_serialize_recursive(data):
+ """Recursively sort and convert to a more serializable form.
+
+ VPP PAPI code can give data with its own MACAddress type,
+ or various other enum and flag types.
+ The default json.JSONEncoder method raises TypeError on that.
+ First point of this function is to apply str() or repr()
+ to leaf values that need it.
+
+ Also, PAPI responses are namedtuples, which confuses
+ the json.JSONEncoder method (so it does not recurse).
+ Dictization (see PapiExecutor) helps somewhat, but it turns namedtuple
+ into a UserDict, which also confuses json.JSONEncoder.
+ Therefore, we recursively convert any Mapping into an ordinary dict.
+
+ We also convert iterables to list (sorted if the iterable was a set),
+ and prevent numbers from getting converted to strings.
+
+ As we are doing such low level operations,
+ we also convert mapping keys to strings
+ and sort the mapping items by keys alphabetically,
+ except "data" field moved to the end.
+
+ :param data: Object to make serializable, dictized when applicable.
+ :type data: object
+ :returns: Serializable equivalent of the argument.
+ :rtype: object
+ :raises ValueError: If the argument does not support string conversion.
+ """
+ # Recursion ends at scalar values, first handle irregular ones.
+ if isinstance(data, IntFlag):
+ return repr(data)
+ if isinstance(data, bytes):
+ return data.hex()
+ # The regular ones are good to go.
+ if isinstance(data, (str, int, float, bool)):
+ return data
+ # Recurse over, convert and sort mappings.
+ if isinstance(data, Mapping):
+ # Convert and sort alphabetically.
+ ret = {
+ str(key): _pre_serialize_recursive(data[key])
+ for key in sorted(data.keys())
+ }
+ # If exists, move "data" field to the end.
+ if u"data" in ret:
+ data_value = ret.pop(u"data")
+ ret[u"data"] = data_value
+ # If exists, move "type" field at the start.
+ if u"type" in ret:
+ type_value = ret.pop(u"type")
+ ret_old = ret
+ ret = dict(type=type_value)
+ ret.update(ret_old)
+ return ret
+ # Recurse over and convert iterables.
+ if isinstance(data, Iterable):
+ list_data = [_pre_serialize_recursive(item) for item in data]
+ # Additionally, sets are exported as sorted.
+ if isinstance(data, Set):
+ list_data = sorted(list_data)
+ return list_data
+ # Unknown structure, attempt str().
+ return str(data)
+
+
+def _pre_serialize_root(data):
+ """Recursively convert to a more serializable form, tweak order.
+
+ See _pre_serialize_recursive for most of changes this does.
+
+ The logic here (outside the recursive function) only affects
+ field ordering in the root mapping,
+ to make it more human friendly.
+ We are moving "version" to the top,
+ followed by start time and end time.
+ and various long fields to the bottom.
+
+ Some edits are done in-place, do not trust the argument value after calling.
+
+ :param data: Root data to make serializable, dictized when applicable.
+ :type data: dict
+ :returns: Order-tweaked version of the argument.
+ :rtype: dict
+ :raises KeyError: If the data does not contain required fields.
+ :raises TypeError: If the argument is not a dict.
+ :raises ValueError: If the argument does not support string conversion.
+ """
+ if not isinstance(data, dict):
+ raise RuntimeError(f"Root data object needs to be a dict: {data!r}")
+ data = _pre_serialize_recursive(data)
+ new_data = dict(version=data.pop(u"version"))
+ new_data[u"start_time"] = data.pop(u"start_time")
+ new_data[u"end_time"] = data.pop(u"end_time")
+ new_data.update(data)
+ return new_data
+
+
+def _merge_into_suite_info_file(teardown_path):
+ """Move setup and teardown data into a singe file, remove old files.
+
+ The caller has to confirm the argument is correct, e.g. ending in
+ "/teardown.info.json".
+
+ :param teardown_path: Local filesystem path to teardown file.
+ :type teardown_path: str
+ :returns: Local filesystem path to newly created suite file.
+ :rtype: str
+ """
+ # Manual right replace: https://stackoverflow.com/a/9943875
+ setup_path = u"setup".join(teardown_path.rsplit(u"teardown", 1))
+ with open(teardown_path, u"rt", encoding="utf-8") as file_in:
+ teardown_data = json.load(file_in)
+ # Transforming setup data into suite data.
+ with open(setup_path, u"rt", encoding="utf-8") as file_in:
+ suite_data = json.load(file_in)
+
+ end_time = teardown_data[u"end_time"]
+ suite_data[u"end_time"] = end_time
+ start_float = parse(suite_data[u"start_time"]).timestamp()
+ end_float = parse(suite_data[u"end_time"]).timestamp()
+ suite_data[u"duration"] = end_float - start_float
+ setup_telemetry = suite_data.pop(u"telemetry")
+ suite_data[u"setup_telemetry"] = setup_telemetry
+ suite_data[u"teardown_telemetry"] = teardown_data[u"telemetry"]
+
+ suite_path = u"suite".join(teardown_path.rsplit(u"teardown", 1))
+ with open(suite_path, u"wt", encoding="utf-8") as file_out:
+ json.dump(suite_data, file_out, indent=1)
+ # We moved everything useful from temporary setup/teardown info files.
+ os.remove(setup_path)
+ os.remove(teardown_path)
+
+ return suite_path
+
+
+def write_output(file_path, data):
+ """Prepare data for serialization and dump into a file.
+
+ Ancestor directories are created if needed.
+
+ :param file_path: Local filesystem path, including the file name.
+ :param data: Root data to make serializable, dictized when applicable.
+ :type file_path: str
+ :type data: dict
+ """
+ data = _pre_serialize_root(data)
+
+ # Lets move Telemetry to the end.
+ telemetry = data.pop(u"telemetry")
+ data[u"telemetry"] = telemetry
+
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ with open(file_path, u"wt", encoding="utf-8") as file_out:
+ json.dump(data, file_out, indent=1)
+
+ if file_path.endswith(u"/teardown.info.json"):
+ file_path = _merge_into_suite_info_file(file_path)
+
+ return file_path
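The "manual right replace" above is the only non-obvious string operation here; a small standalone illustration with a hypothetical path:

    teardown_path = "out/tests/suite_with_teardown_in_name/teardown.info.json"
    setup_path = "setup".join(teardown_path.rsplit("teardown", 1))
    # Only the last "teardown" is replaced:
    # "out/tests/suite_with_teardown_in_name/setup.info.json"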
diff --git a/resources/libraries/python/model/__init__.py b/resources/libraries/python/model/__init__.py
new file mode 100644
index 0000000000..36e32b89c4
--- /dev/null
+++ b/resources/libraries/python/model/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for directory resources/libraries/python/model
+"""
diff --git a/resources/libraries/python/model/parse.py b/resources/libraries/python/model/parse.py
new file mode 100644
index 0000000000..1e0aebfe18
--- /dev/null
+++ b/resources/libraries/python/model/parse.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Library for parsing results from JSON back to python objects.
+
+This is useful for vpp-csit jobs like per-patch performance verify.
+Such jobs invoke robot multiple times, each time on a different build.
+Each robot invocation may execute several test cases.
+How exactly are the results compared depends on the job type,
+but extracting just the main results from jsons (file trees) is a common task,
+so it is placed into this library.
+
+As such, the code in this file does not directly interact
+with the code in other files in this directory
+(result comparison is done outside robot invocation),
+but all files share common assumptions about json structure.
+
+The function here expects a particular tree created on a filesystem by
+a bootstrap script, including test results
+exported as json files according to a current model schema.
+This script extracts the results (according to result type)
+and joins them into a mapping from test IDs to lists of floats.
+Also, the result is cached into a results.json file,
+so each tree is parsed only once.
+
+The cached result does not depend on tree placement,
+so the bootstrap script may move and copy trees around
+before or after parsing.
+"""
+
+import json
+import os
+import pathlib
+
+from typing import Dict, List
+
+
+def parse(dirpath: str, fake_value: float = 1.0) -> Dict[str, List[float]]:
+ """Look for test jsons, extract scalar results.
+
+ Files other than .json are skipped, jsons without test_id are skipped.
+ If the test failed, four fake values are used as a fake result.
+
+ Units are ignored, as both parent and current are tested
+ with the same CSIT code so the unit should be identical.
+
+ The test results are sorted by test_id,
+ as the filesystem order is not deterministic enough.
+
+ The result is also cached as results.json file.
+
+ :param dirpath: Path to the directory tree to examine.
+ :param fake_value: Fake value to use for test cases that failed.
+ :type dirpath: str
+ :type fake_value: float
+ :returns: Mapping from test IDs to list of measured values.
+ :rtype: Dict[str, List[float]]
+ :raises RuntimeError: On duplicate test ID or unknown test type.
+ """
+ if not pathlib.Path(dirpath).is_dir():
+ # This happens when per-patch runs out of iterations.
+ return {}
+ resultpath = pathlib.Path(f"{dirpath}/results.json")
+ if resultpath.is_file():
+ with open(resultpath, "rt", encoding="utf8") as file_in:
+ return json.load(file_in)
+ results = {}
+ for root, _, files in os.walk(dirpath):
+ for filename in files:
+ if not filename.endswith(".json"):
+ continue
+ filepath = os.path.join(root, filename)
+ with open(filepath, "rt", encoding="utf8") as file_in:
+ data = json.load(file_in)
+ if "test_id" not in data:
+ continue
+ name = data["test_id"]
+ if name in results:
+ raise RuntimeError(f"Duplicate: {name}")
+ if not data["passed"]:
+ results[name] = [fake_value] * 4
+ continue
+ result_object = data["result"]
+ result_type = result_object["type"]
+ if result_type == "mrr":
+ results[name] = result_object["receive_rate"]["rate"]["values"]
+ elif result_type == "ndrpdr":
+ results[name] = [result_object["pdr"]["lower"]["rate"]["value"]]
+ elif result_type == "soak":
+ results[name] = [
+ result_object["critical_rate"]["lower"]["rate"]["value"]
+ ]
+ elif result_type == "reconf":
+ results[name] = [result_object["loss"]["time"]["value"]]
+ elif result_type == "hoststack":
+ results[name] = [result_object["bandwidth"]["value"]]
+ else:
+ raise RuntimeError(f"Unknown result type: {result_type}")
+ results = {test_id: results[test_id] for test_id in sorted(results)}
+ with open(resultpath, "wt", encoding="utf8") as file_out:
+ json.dump(results, file_out, indent=1, separators=(", ", ": "))
+ return results
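
For orientation, a minimal sketch of how a per-patch comparison step might consume parse(). The csit_parent/csit_current directory names and the plain averaging are illustrative assumptions, not the job's actual comparison logic:

    # Hedged sketch: compare parent and current builds using parse().
    # Directory names are placeholders for the trees the bootstrap script
    # prepares; real jobs apply a statistical comparison instead.
    from resources.libraries.python.model.parse import parse

    parent_results = parse("csit_parent")
    current_results = parse("csit_current")
    for test_id, current_values in sorted(current_results.items()):
        parent_values = parent_results.get(test_id)
        if not parent_values:
            continue
        parent_avg = sum(parent_values) / len(parent_values)
        current_avg = sum(current_values) / len(current_values)
        print(f"{test_id}: {parent_avg} -> {current_avg}")
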
diff --git a/resources/libraries/python/model/util.py b/resources/libraries/python/model/util.py
new file mode 100644
index 0000000000..db2ef14bbb
--- /dev/null
+++ b/resources/libraries/python/model/util.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module hosting few utility functions useful when dealing with modelled data.
+
+This is for storing varied utility functions, which are too short and diverse
+to be put into more descriptive modules.
+"""
+
+
+from robot.libraries.BuiltIn import BuiltIn
+
+
+def descend(parent_node, key, default_factory=None):
+ """Return a sub-node, create and insert it when it does not exist.
+
+ Without this function:
+ child_node = parent_node.get(key, dict())
+ parent_node[key] = child_node
+
+ With this function:
+ child_node = descend(parent_node, key)
+
+ New code is shorter and avoids the need to type key and parent_node twice.
+
+ :param parent_node: Reference to inner node of a larger structure
+ we want to descend from.
+ :param key: Key of the maybe existing child node.
+ :param default_factory: If the key does not exist, call this
+ to create a new value to be inserted under the key.
+ None means dict. The other popular option is list.
+ :type parent_node: dict
+ :type key: str
+ :type default_factory: Optional[Callable[[], object]]
+ :returns: The reference to (maybe just created) child node.
+ :rtype: object
+ """
+ if key not in parent_node:
+ factory = dict if default_factory is None else default_factory
+ parent_node[key] = factory()
+ return parent_node[key]
+
+
+def get_export_data():
+ """Return data member of ExportJson library instance.
+
+ This assumes the data has been initialized already.
+ Return None if Robot is not running.
+
+ :returns: Current library instance's raw data field.
+ :rtype: Optional[dict]
+ :raises AttributeError: If library is not imported yet.
+ """
+ instance = BuiltIn().get_library_instance(
+ u"resources.libraries.python.model.ExportJson"
+ )
+ if instance is None:
+ return None
+ return instance.data
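
A short illustrative use of descend() when building a nested export structure; the keys and the appended value are arbitrary examples, not the real export layout:

    # Hedged sketch of descend(); keys and values are invented.
    from resources.libraries.python.model.util import descend

    export = dict()
    results = descend(export, "results")       # inserts and returns a new dict
    rates = descend(results, "rates", list)    # inserts and returns a new list
    rates.append(12.5)
    # export is now {"results": {"rates": [12.5]}}
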
diff --git a/resources/libraries/python/model/validate.py b/resources/libraries/python/model/validate.py
new file mode 100644
index 0000000000..85c4b993c9
--- /dev/null
+++ b/resources/libraries/python/model/validate.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module for validating JSON instances against schemas.
+
+Short module currently, as we validate only testcase info outputs.
+Structure will probably change when we start validating more file types.
+"""
+
+import json
+import jsonschema
+import yaml
+
+
+def get_validators():
+ """Return mapping from file types to validator instances.
+
+ Uses hardcoded file types and paths to schemas on disk.
+
+ :returns: Validators, currently just for tc_info_output.
+ :rtype: Mapping[str, jsonschema.validators.Validator]
+ :raises RuntimeError: If schemas are not readable or not valid.
+ """
+ relative_path = "resources/model_schema/test_case.schema.yaml"
+ # Robot is always started when CWD is CSIT_DIR.
+ with open(relative_path, "rt", encoding="utf-8") as file_in:
+ schema = json.loads(
+ json.dumps(yaml.safe_load(file_in.read()), indent=2)
+ )
+ validator_class = jsonschema.validators.validator_for(schema)
+ validator_class.check_schema(schema)
+ fmt_checker = jsonschema.FormatChecker()
+ validator = validator_class(schema, format_checker=fmt_checker)
+
+ return dict(tc_info=validator)
+
+
+def validate(file_path, validator):
+ """Load data from disk, use validator to validate it.
+
+ :param file_path: Local filesystem path including the file name to load.
+ :param validator: Validator instance to use for validation.
+ :type file_path: str
+ :type validator: jsonschema.validators.Validator
+ :raises ValidationError: If schema validation fails.
+ """
+ with open(file_path, "rt", encoding="utf-8") as file_in:
+ instance = json.load(file_in)
+ error = jsonschema.exceptions.best_match(validator.iter_errors(instance))
+ if error is not None:
+ print(json.dumps(instance, indent=4))
+ raise error
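
A hedged sketch of wiring the two helpers above together; the output file path is a made-up placeholder, and the call assumes the current working directory is CSIT_DIR (as noted above):

    # Validate one exported test case info file against the schema.
    from resources.libraries.python.model.validate import get_validators, validate

    validators = get_validators()
    # "output/tc_info.json" is a hypothetical path to an exported file.
    validate("output/tc_info.json", validators["tc_info"])
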
diff --git a/resources/libraries/python/parsers/JsonParser.py b/resources/libraries/python/parsers/JsonParser.py
deleted file mode 100644
index bebe2a2407..0000000000
--- a/resources/libraries/python/parsers/JsonParser.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Used to parse JSON files or JSON data strings to dictionaries"""
-
-import json
-
-from io import open
-
-
-class JsonParser:
- """Parses JSON data string or files containing JSON data strings"""
- def __init__(self):
- pass
-
- @staticmethod
- def parse_data(json_data):
- """Return list parsed from JSON data string.
-
- Translates JSON data into list of values/dictionaries/lists.
-
- :param json_data: Data in JSON format.
- :type json_data: str
- :returns: JSON data parsed as python list.
- :rtype: list
- """
- parsed_data = json.loads(json_data)
- return parsed_data
-
- @staticmethod
- def parse_file(json_file):
- """Return list parsed from file containing JSON string.
-
- Translates JSON data found in file into list of
- values/dictionaries/lists.
-
- :param json_file: File with JSON type data.
- :type json_file: str
- :returns: JSON data parsed as python list.
- :rtype: list
- """
- input_data = open(json_file, u"rt").read()
- parsed_data = JsonParser.parse_data(input_data)
- return parsed_data
diff --git a/resources/libraries/python/ssh.py b/resources/libraries/python/ssh.py
index 5c397eeb17..437b1ad3e6 100644
--- a/resources/libraries/python/ssh.py
+++ b/resources/libraries/python/ssh.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -17,7 +17,7 @@
import socket
from io import StringIO
-from time import time, sleep
+from time import monotonic, sleep
from paramiko import RSAKey, SSHClient, AutoAddPolicy
from paramiko.ssh_exception import SSHException, NoValidConnectionsError
@@ -82,7 +82,7 @@ class SSH:
raise IOError(f"Cannot connect to {node['host']}")
else:
try:
- start = time()
+ start = monotonic()
pkey = None
if u"priv_key" in node:
pkey = RSAKey.from_private_key(StringIO(node[u"priv_key"]))
@@ -101,7 +101,7 @@ class SSH:
SSH.__existing_connections[node_hash] = self._ssh
logger.debug(
f"New SSH to {self._ssh.get_transport().getpeername()} "
- f"took {time() - start} seconds: {self._ssh}"
+ f"took {monotonic() - start} seconds: {self._ssh}"
)
except SSHException as exc:
raise IOError(f"Cannot connect to {node[u'host']}") from exc
@@ -151,6 +151,7 @@ class SSH:
:param log_stdout_err: If True, stdout and stderr are logged. stdout
and stderr are logged also if the return code is not zero
independently of the value of log_stdout_err.
+ Needed for calls outside Robot (e.g. from reservation script).
:type cmd: str or OptionString
:type timeout: int
:type log_stdout_err: bool
@@ -174,7 +175,7 @@ class SSH:
logger.trace(f"exec_command on {peer} with timeout {timeout}: {cmd}")
- start = time()
+ start = monotonic()
chan.exec_command(cmd)
while not chan.exit_status_ready() and timeout is not None:
if chan.recv_ready():
@@ -187,7 +188,8 @@ class SSH:
stderr += s_err.decode(encoding=u'utf-8', errors=u'ignore') \
if isinstance(s_err, bytes) else s_err
- if time() - start > timeout:
+ duration = monotonic() - start
+ if duration > timeout:
raise SSHTimeout(
f"Timeout exception during execution of command: {cmd}\n"
f"Current contents of stdout buffer: "
@@ -209,8 +211,8 @@ class SSH:
stderr += s_err.decode(encoding=u'utf-8', errors=u'ignore') \
if isinstance(s_err, bytes) else s_err
- end = time()
- logger.trace(f"exec_command on {peer} took {end-start} seconds")
+ duration = monotonic() - start
+ logger.trace(f"exec_command on {peer} took {duration} seconds")
logger.trace(f"return RC {return_code}")
if log_stdout_err or int(return_code):
@@ -230,6 +232,7 @@ class SSH:
:param cmd_input: Input redirected to the command.
:param timeout: Timeout.
:param log_stdout_err: If True, stdout and stderr are logged.
+ Needed for calls outside Robot (e.g. from reservation script).
:type cmd: str
:type cmd_input: str
:type timeout: int
@@ -322,7 +325,7 @@ class SSH:
:param chan: SSH channel with opened terminal.
:param cmd: Command to be executed.
:param prompt: Command prompt, sequence of characters used to
- indicate readiness to accept commands.
+ indicate readiness to accept commands.
:returns: Command output.
.. warning:: Interruptingcow is used here, and it uses
@@ -370,9 +373,9 @@ class SSH:
connect() method has to be called first!
:param local_path: Path to local file that should be uploaded; or
- path where to save remote file.
+ path where to save remote file.
:param remote_path: Remote path where to place uploaded file; or
- path to remote file which should be downloaded.
+ path to remote file which should be downloaded.
:param get: scp operation to perform. Default is put.
:param timeout: Timeout value in seconds.
:param wildcard: If path has wildcard characters. Default is false.
@@ -400,17 +403,20 @@ class SSH:
self._ssh.get_transport(), sanitize=lambda x: x,
socket_timeout=timeout
)
- start = time()
+ start = monotonic()
if not get:
scp.put(local_path, remote_path)
else:
scp.get(remote_path, local_path)
scp.close()
- end = time()
- logger.trace(f"SCP took {end-start} seconds")
+ duration = monotonic() - start
+ logger.trace(f"SCP took {duration} seconds")
-def exec_cmd(node, cmd, timeout=600, sudo=False, disconnect=False):
+def exec_cmd(
+ node, cmd, timeout=600, sudo=False, disconnect=False,
+ log_stdout_err=True
+ ):
"""Convenience function to ssh/exec/return rc, out & err.
Returns (rc, stdout, stderr).
@@ -420,13 +426,18 @@ def exec_cmd(node, cmd, timeout=600, sudo=False, disconnect=False):
:param timeout: Timeout value in seconds. Default: 600.
:param sudo: Sudo privilege execution flag. Default: False.
:param disconnect: Close the opened SSH connection if True.
+ :param log_stdout_err: If True, stdout and stderr are logged. stdout
+ and stderr are logged also if the return code is not zero
+ independently of the value of log_stdout_err.
+ Needed for calls outside Robot (e.g. from reservation script).
:type node: dict
:type cmd: str or OptionString
:type timeout: int
:type sudo: bool
:type disconnect: bool
+ :type log_stdout_err: bool
:returns: RC, Stdout, Stderr.
- :rtype: tuple(int, str, str)
+ :rtype: Tuple[int, str, str]
"""
if node is None:
raise TypeError(u"Node parameter is None")
@@ -445,10 +456,12 @@ def exec_cmd(node, cmd, timeout=600, sudo=False, disconnect=False):
try:
if not sudo:
- ret_code, stdout, stderr = ssh.exec_command(cmd, timeout=timeout)
+ ret_code, stdout, stderr = ssh.exec_command(
+ cmd, timeout=timeout, log_stdout_err=log_stdout_err
+ )
else:
ret_code, stdout, stderr = ssh.exec_command_sudo(
- cmd, timeout=timeout
+ cmd, timeout=timeout, log_stdout_err=log_stdout_err
)
except SSHException as err:
logger.error(repr(err))
@@ -462,7 +475,8 @@ def exec_cmd(node, cmd, timeout=600, sudo=False, disconnect=False):
def exec_cmd_no_error(
node, cmd, timeout=600, sudo=False, message=None, disconnect=False,
- retries=0, include_reason=False):
+ retries=0, include_reason=False, log_stdout_err=True
+ ):
"""Convenience function to ssh/exec/return out & err.
Verifies that return code is zero.
@@ -478,6 +492,10 @@ def exec_cmd_no_error(
:param disconnect: Close the opened SSH connection if True.
:param retries: How many times to retry on failure.
:param include_reason: Whether default info should be appended to message.
+ :param log_stdout_err: If True, stdout and stderr are logged. stdout
+ and stderr are logged also if the return code is not zero
+ independently of the value of log_stdout_err.
+ Needed for calls outside Robot thread (e.g. parallel framework setup).
:type node: dict
:type cmd: str or OptionString
:type timeout: int
@@ -486,13 +504,15 @@ def exec_cmd_no_error(
:type disconnect: bool
:type retries: int
:type include_reason: bool
+ :type log_stdout_err: bool
:returns: Stdout, Stderr.
:rtype: tuple(str, str)
:raises RuntimeError: If bash return code is not 0.
"""
for _ in range(retries + 1):
ret_code, stdout, stderr = exec_cmd(
- node, cmd, timeout=timeout, sudo=sudo, disconnect=disconnect
+ node, cmd, timeout=timeout, sudo=sudo, disconnect=disconnect,
+ log_stdout_err=log_stdout_err
)
if ret_code == 0:
break
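
The new log_stdout_err flag mainly matters for callers outside the Robot logger, where large outputs would only add noise; a hedged example of suppressing the logging (the node dictionary fields are placeholders for a real topology node):

    # Sketch: run a chatty command without logging its output on success.
    from resources.libraries.python.ssh import exec_cmd_no_error

    node = {"host": "10.0.0.1", "port": 22, "username": "testuser"}  # placeholder
    stdout, _stderr = exec_cmd_no_error(
        node, "ls -la /tmp", sudo=True, log_stdout_err=False
    )
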
diff --git a/resources/libraries/python/topology.py b/resources/libraries/python/topology.py
index c39e5afabb..22ed3666c3 100644
--- a/resources/libraries/python/topology.py
+++ b/resources/libraries/python/topology.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -71,6 +71,8 @@ class SocketType:
PAPI = u"PAPI"
# VPP PAPI Stats (legacy option until stats are migrated to Socket PAPI)
STATS = u"STATS"
+ # VPP Socket CLI
+ CLI = u"CLI"
DICT__nodes = load_topo_from_yaml()
@@ -174,7 +176,8 @@ class Topology:
port_types = (
u"subinterface", u"vlan_subif", u"memif", u"tap", u"vhost",
u"loopback", u"gre_tunnel", u"vxlan_tunnel", u"eth_bond",
- u"eth_avf", u"eth_rdma", u"geneve_tunnel"
+ u"eth_avf", u"eth_rdma", u"geneve_tunnel", u"eth_af_xdp",
+ u"gtpu_tunnel"
)
for node_data in nodes.values():
@@ -376,16 +379,19 @@ class Topology:
return links
@staticmethod
- def _get_interface_by_key_value(node, key, value):
+ def _get_interface_by_key_value(node, key, value, subsequent=False):
"""Return node interface key from topology file
according to key and value.
:param node: The node dictionary.
:param key: Key by which to select the interface.
:param value: Value that should be found using the key.
+ :param subsequent: Use the second interface of the link. Useful for
+ back-to-back links. Default: False
:type node: dict
:type key: string
:type value: string
+ :type subsequent: bool
:returns: Interface key from topology file
:rtype: string
"""
@@ -395,8 +401,11 @@ class Topology:
k_val = if_val.get(key)
if k_val is not None:
if k_val == value:
- retval = if_key
- break
+ if subsequent:
+ subsequent = False
+ else:
+ retval = if_key
+ break
return retval
@staticmethod
@@ -416,7 +425,7 @@ class Topology:
return Topology._get_interface_by_key_value(node, u"name", iface_name)
@staticmethod
- def get_interface_by_link_name(node, link_name):
+ def get_interface_by_link_name(node, link_name, subsequent=False):
"""Return interface key of link on node.
This method returns the interface name associated with a given link
@@ -424,12 +433,17 @@ class Topology:
:param node: The node topology dictionary.
:param link_name: Name of the link that a interface is connected to.
+ :param subsequent: Use the second interface of the link. Useful for
+ back-to-back links. Default: False
:type node: dict
:type link_name: string
+ :type subsequent: bool
:returns: Interface key of the interface connected to the given link.
:rtype: str
"""
- return Topology._get_interface_by_key_value(node, u"link", link_name)
+ return Topology._get_interface_by_key_value(
+ node, u"link", link_name, subsequent=subsequent
+ )
def get_interfaces_by_link_names(self, node, link_names):
"""Return dictionary of dictionaries {"interfaceN", interface name}.
@@ -752,7 +766,9 @@ class Topology:
# find link
for node_data in nodes_info.values():
# skip self
- if node_data[u"host"] == node[u"host"]:
+ l_hash = node_data[u"host"] + str(node_data[u"port"])
+ r_hash = node[u"host"] + str(node[u"port"])
+ if l_hash == r_hash:
continue
for if_key, if_val \
in node_data[u"interfaces"].items():
@@ -835,13 +851,15 @@ class Topology:
return None
@staticmethod
- def _get_node_active_link_names(node, filter_list=None):
+ def _get_node_active_link_names(node, filter_list=None, topo_has_dut=True):
"""Return list of link names that are other than mgmt links.
:param node: Node topology dictionary.
:param filter_list: Link filter criteria.
+ :param topo_has_dut: Whether the topology has a DUT; if False, keep only back-to-back links.
:type node: dict
:type filter_list: list of strings
+ :type topo_has_dut: bool
:returns: List of link names occupied by the node.
:rtype: None or list of string
"""
@@ -861,6 +879,17 @@ class Topology:
link_names.append(interface[u"link"])
if not link_names:
link_names = None
+ if not topo_has_dut:
+ new_link_names = list()
+ for link_name in link_names:
+ count = 0
+ for interface in interfaces.values():
+ link = interface.get(u"link", None)
+ if link == link_name:
+ count += 1
+ if count == 2:
+ new_link_names.append(link_name)
+ link_names = new_link_names
return link_names
def get_active_connecting_links(
@@ -879,12 +908,19 @@ class Topology:
:rtype: list
"""
- node1_links = self._get_node_active_link_names(
- node1, filter_list=filter_list_node1
- )
- node2_links = self._get_node_active_link_names(
- node2, filter_list=filter_list_node2
- )
+ if node1 != node2:
+ node1_links = self._get_node_active_link_names(
+ node1, filter_list=filter_list_node1
+ )
+ node2_links = self._get_node_active_link_names(
+ node2, filter_list=filter_list_node2
+ )
+ else:
+ # Looking for back-to-back links.
+ node1_links = self._get_node_active_link_names(
+ node1, filter_list=filter_list_node1, topo_has_dut=False
+ )
+ node2_links = node1_links
connecting_links = None
if node1_links is None:
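
With this change, passing the same node as both endpoints asks for its back-to-back links, i.e. links whose both interfaces live on that one node; a short illustrative call (the "TG" key is an assumption about the loaded topology):

    from resources.libraries.python.topology import DICT__nodes, Topology

    tg_node = DICT__nodes["TG"]  # assumes the topology names its traffic generator "TG"
    b2b_link_names = Topology().get_active_connecting_links(tg_node, tg_node)
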
@@ -1051,6 +1087,19 @@ class Topology:
except KeyError:
return None
+ def get_bus(node):
+ """Return bus configuration of the node.
+
+ :param node: Node created from topology.
+ :type node: dict
+ :returns: bus configuration string.
+ :rtype: str
+ """
+ try:
+ return node[u"bus"]
+ except KeyError:
+ return None
+
@staticmethod
def get_uio_driver(node):
"""Return uio-driver configuration of the node.