Diffstat (limited to 'resources/libraries/python')
-rw-r--r--  resources/libraries/python/Constants.py | 429
-rw-r--r--  resources/libraries/python/ContainerUtils.py | 74
-rw-r--r--  resources/libraries/python/CoreDumpUtil.py | 12
-rw-r--r--  resources/libraries/python/CpuUtils.py | 178
-rw-r--r--  resources/libraries/python/DMAUtil.py | 213
-rw-r--r--  resources/libraries/python/DPDK/L3fwdTest.py | 59
-rw-r--r--  resources/libraries/python/DPDK/TestpmdTest.py | 30
-rw-r--r--  resources/libraries/python/DUTSetup.py | 232
-rw-r--r--  resources/libraries/python/FlowUtil.py | 6
-rw-r--r--  resources/libraries/python/HoststackUtil.py | 118
-rw-r--r--  resources/libraries/python/IPTopology.py | 177
-rw-r--r--  resources/libraries/python/IPUtil.py | 54
-rw-r--r--  resources/libraries/python/IPsecUtil.py | 1946
-rw-r--r--  resources/libraries/python/InterfaceUtil.py | 94
-rw-r--r--  resources/libraries/python/Iperf3.py | 6
-rw-r--r--  resources/libraries/python/L2Util.py | 20
-rw-r--r--  resources/libraries/python/LispSetup.py | 2
-rw-r--r--  resources/libraries/python/LoadBalancerUtil.py | 8
-rw-r--r--  resources/libraries/python/MLRsearch/AbstractMeasurer.py | 32
-rw-r--r--  resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py | 48
-rw-r--r--  resources/libraries/python/MLRsearch/MeasurementDatabase.py | 157
-rw-r--r--  resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py | 485
-rw-r--r--  resources/libraries/python/MLRsearch/PerDurationDatabase.py | 123
-rw-r--r--  resources/libraries/python/MLRsearch/ProgressState.py | 60
-rw-r--r--  resources/libraries/python/MLRsearch/ReceiveRateInterval.py | 74
-rw-r--r--  resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py | 125
-rw-r--r--  resources/libraries/python/MLRsearch/WidthArithmetics.py | 137
-rw-r--r--  resources/libraries/python/MLRsearch/__init__.py | 16
-rw-r--r--  resources/libraries/python/MLRsearch/candidate.py | 153
-rw-r--r--  resources/libraries/python/MLRsearch/config.py | 179
-rw-r--r--  resources/libraries/python/MLRsearch/dataclass/__init__.py (renamed from resources/libraries/python/parsers/__init__.py) | 7
-rw-r--r--  resources/libraries/python/MLRsearch/dataclass/dc_property.py | 173
-rw-r--r--  resources/libraries/python/MLRsearch/dataclass/field.py | 44
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_interval.py | 140
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_load.py | 316
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_result.py | 76
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_width.py | 197
-rw-r--r--  resources/libraries/python/MLRsearch/expander.py | 102
-rw-r--r--  resources/libraries/python/MLRsearch/global_width.py | 70
-rw-r--r--  resources/libraries/python/MLRsearch/goal_result.py | 72
-rw-r--r--  resources/libraries/python/MLRsearch/limit_handler.py | 198
-rw-r--r--  resources/libraries/python/MLRsearch/load_rounding.py | 205
-rw-r--r--  resources/libraries/python/MLRsearch/load_stats.py | 112
-rw-r--r--  resources/libraries/python/MLRsearch/measurement_database.py | 126
-rw-r--r--  resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py | 325
-rw-r--r--  resources/libraries/python/MLRsearch/pep3140/__init__.py | 24
-rw-r--r--  resources/libraries/python/MLRsearch/pep3140/classes.py | 34
-rw-r--r--  resources/libraries/python/MLRsearch/relevant_bounds.py | 56
-rw-r--r--  resources/libraries/python/MLRsearch/search_goal.py | 119
-rw-r--r--  resources/libraries/python/MLRsearch/search_goal_tuple.py | 60
-rw-r--r--  resources/libraries/python/MLRsearch/selector.py | 183
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/__init__.py | 35
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/base.py | 132
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/bisect.py | 193
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/extend_hi.py | 76
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/extend_lo.py | 76
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/halve.py | 83
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/refine_hi.py | 55
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/refine_lo.py | 53
-rw-r--r--  resources/libraries/python/MLRsearch/target_scaling.py | 103
-rw-r--r--  resources/libraries/python/MLRsearch/target_spec.py | 95
-rw-r--r--  resources/libraries/python/MLRsearch/target_stat.py | 153
-rw-r--r--  resources/libraries/python/MLRsearch/trial_measurement/__init__.py | 19
-rw-r--r--  resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py | 55
-rw-r--r--  resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py | 161
-rw-r--r--  resources/libraries/python/MLRsearch/trimmed_stat.py | 52
-rw-r--r--  resources/libraries/python/Memif.py | 20
-rw-r--r--  resources/libraries/python/NATUtil.py | 12
-rw-r--r--  resources/libraries/python/NGINX/NGINXTools.py | 14
-rw-r--r--  resources/libraries/python/NodePath.py | 4
-rw-r--r--  resources/libraries/python/PLRsearch/Integrator.py | 59
-rw-r--r--  resources/libraries/python/PLRsearch/PLRsearch.py | 186
-rw-r--r--  resources/libraries/python/PLRsearch/log_plus.py | 8
-rw-r--r--  resources/libraries/python/PLRsearch/stat_trackers.py | 58
-rw-r--r--  resources/libraries/python/PapiExecutor.py | 752
-rw-r--r--  resources/libraries/python/PapiHistory.py | 24
-rw-r--r--  resources/libraries/python/Policer.py | 16
-rw-r--r--  resources/libraries/python/QATUtil.py | 92
-rw-r--r--  resources/libraries/python/QemuUtils.py | 6
-rw-r--r--  resources/libraries/python/SRv6.py | 11
-rw-r--r--  resources/libraries/python/SetupFramework.py | 16
-rw-r--r--  resources/libraries/python/TRexConfigGenerator.py | 301
-rw-r--r--  resources/libraries/python/Tap.py | 4
-rw-r--r--  resources/libraries/python/TelemetryUtil.py | 136
-rw-r--r--  resources/libraries/python/TestConfig.py | 142
-rw-r--r--  resources/libraries/python/TrafficGenerator.py | 606
-rw-r--r--  resources/libraries/python/VPPUtil.py | 105
-rw-r--r--  resources/libraries/python/VatExecutor.py | 397
-rw-r--r--  resources/libraries/python/VatJsonUtil.py | 218
-rw-r--r--  resources/libraries/python/VppApiCrc.py | 41
-rw-r--r--  resources/libraries/python/VppConfigGenerator.py | 353
-rw-r--r--  resources/libraries/python/WireGuardUtil.py | 94
-rw-r--r--  resources/libraries/python/autogen/Regenerator.py | 164
-rw-r--r--  resources/libraries/python/jumpavg/__init__.py | 10
-rw-r--r--  resources/libraries/python/jumpavg/avg_stdev_stats.py (renamed from resources/libraries/python/jumpavg/AvgStdevStats.py) | 58
-rw-r--r--  resources/libraries/python/jumpavg/bit_counting_group.py (renamed from resources/libraries/python/jumpavg/BitCountingGroup.py) | 146
-rw-r--r--  resources/libraries/python/jumpavg/bit_counting_group_list.py (renamed from resources/libraries/python/jumpavg/BitCountingGroupList.py) | 140
-rw-r--r--  resources/libraries/python/jumpavg/bit_counting_stats.py (renamed from resources/libraries/python/jumpavg/BitCountingStats.py) | 131
-rw-r--r--  resources/libraries/python/jumpavg/classify.py | 78
-rw-r--r--  resources/libraries/python/model/ExportJson.py | 395
-rw-r--r--  resources/libraries/python/model/ExportLog.py | 148
-rw-r--r--  resources/libraries/python/model/ExportResult.py | 221
-rw-r--r--  resources/libraries/python/model/MemDump.py (renamed from resources/libraries/python/model/mem2raw.py) | 87
-rw-r--r--  resources/libraries/python/model/export_json.py | 236
-rw-r--r--  resources/libraries/python/model/parse.py | 112
-rw-r--r--  resources/libraries/python/model/raw2info.py | 294
-rw-r--r--  resources/libraries/python/model/util.py | 8
-rw-r--r--  resources/libraries/python/model/validate.py | 41
-rw-r--r--  resources/libraries/python/parsers/JsonParser.py | 54
-rw-r--r--  resources/libraries/python/ssh.py | 51
-rw-r--r--  resources/libraries/python/topology.py | 19
111 files changed, 9389 insertions, 5906 deletions
diff --git a/resources/libraries/python/Constants.py b/resources/libraries/python/Constants.py
index 566dec7421..70452e6ff4 100644
--- a/resources/libraries/python/Constants.py
+++ b/resources/libraries/python/Constants.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -39,7 +39,7 @@ def get_str_from_env(env_var_names, default_value):
:returns: The value read, or default value.
:rtype: str
"""
- prefixes = (u"FDIO_CSIT_", u"CSIT_", u"")
+ prefixes = ("FDIO_CSIT_", "CSIT_", "")
if not isinstance(env_var_names, (list, tuple)):
env_var_names = [env_var_names]
for name in env_var_names:
@@ -62,7 +62,7 @@ def get_int_from_env(env_var_names, default_value):
:returns: The value read, or default value.
:rtype: int
"""
- env_str = get_str_from_env(env_var_names, u"")
+ env_str = get_str_from_env(env_var_names, "")
try:
return int(env_str)
except ValueError:
@@ -81,7 +81,7 @@ def get_float_from_env(env_var_names, default_value):
:returns: The value read, or default value.
:rtype: float
"""
- env_str = get_str_from_env(env_var_names, u"")
+ env_str = get_str_from_env(env_var_names, "")
try:
return float(env_str)
except ValueError:
@@ -98,8 +98,8 @@ def get_pessimistic_bool_from_env(env_var_names):
:returns: The value read, or False.
:rtype: bool
"""
- env_str = get_str_from_env(env_var_names, u"").lower()
- return bool(env_str in (u"true", u"yes", u"y", u"1"))
+ env_str = get_str_from_env(env_var_names, "").lower()
+ return bool(env_str in ("true", "yes", "y", "1"))
def get_optimistic_bool_from_env(env_var_names):
@@ -112,60 +112,54 @@ def get_optimistic_bool_from_env(env_var_names):
:returns: The value read, or True.
:rtype: bool
"""
- env_str = get_str_from_env(env_var_names, u"").lower()
- return bool(env_str not in (u"false", u"no", u"n", u"0"))
+ env_str = get_str_from_env(env_var_names, "").lower()
+ return bool(env_str not in ("false", "no", "n", "0"))
class Constants:
"""Constants used in CSIT."""
# Version for CSIT data model. See docs/model/.
- MODEL_VERSION = u"1.0.1"
+ MODEL_VERSION = "1.5.1"
# Global off-switch in case JSON export is large or slow.
- EXPORT_JSON = get_optimistic_bool_from_env(u"EXPORT_JSON")
+ EXPORT_JSON = get_optimistic_bool_from_env("EXPORT_JSON")
# OpenVPP testing directory location at topology nodes
- REMOTE_FW_DIR = u"/tmp/openvpp-testing"
+ REMOTE_FW_DIR = "/tmp/openvpp-testing"
# shell scripts location
- RESOURCES_LIB_SH = u"resources/libraries/bash"
+ RESOURCES_LIB_SH = "resources/libraries/bash"
# python scripts location
- RESOURCES_LIB_PY = u"resources/libraries/python"
+ RESOURCES_LIB_PY = "resources/libraries/python"
# shell scripts location
- RESOURCES_TOOLS = u"resources/tools"
+ RESOURCES_TOOLS = "resources/tools"
# Python API provider location
- RESOURCES_PAPI_PROVIDER = u"resources/tools/papi/vpp_papi_provider.py"
+ RESOURCES_PAPI_PROVIDER = "resources/tools/papi/vpp_papi_provider.py"
# Templates location
- RESOURCES_TPL = u"resources/templates"
-
- # vat templates location
- RESOURCES_TPL_VAT = u"resources/templates/vat"
+ RESOURCES_TPL = "resources/templates"
# Kubernetes templates location
- RESOURCES_TPL_K8S = u"resources/templates/kubernetes"
+ RESOURCES_TPL_K8S = "resources/templates/kubernetes"
# Container templates location
- RESOURCES_TPL_CONTAINER = u"resources/templates/container"
+ RESOURCES_TPL_CONTAINER = "resources/templates/container"
# VPP Communications Library templates location
- RESOURCES_TPL_VCL = u"resources/templates/vcl"
+ RESOURCES_TPL_VCL = "resources/templates/vcl"
# VPP Communications Library templates location
- RESOURCES_TPL_TELEMETRY = u"resources/templates/telemetry"
+ RESOURCES_TPL_TELEMETRY = "resources/templates/telemetry"
# VPP Communications Library LD_PRELOAD library
- VCL_LDPRELOAD_LIBRARY = u"/usr/lib/x86_64-linux-gnu/libvcl_ldpreload.so"
-
- # OpenVPP VAT binary name
- VAT_BIN_NAME = u"vpp_api_test"
+ VCL_LDPRELOAD_LIBRARY = "/usr/lib/x86_64-linux-gnu/libvcl_ldpreload.so"
# VPP service unit name
- VPP_UNIT = u"vpp"
+ VPP_UNIT = "vpp"
# Number of system CPU cores.
CPU_CNT_SYSTEM = 1
@@ -174,105 +168,121 @@ class Constants:
CPU_CNT_MAIN = 1
# QEMU binary path
- QEMU_BIN_PATH = u"/usr/bin"
+ QEMU_BIN_PATH = "/usr/bin"
# QEMU VM kernel image path
- QEMU_VM_KERNEL = u"/opt/boot/vmlinuz"
+ QEMU_VM_KERNEL = "/opt/boot/vmlinuz"
# QEMU VM kernel initrd path
- QEMU_VM_KERNEL_INITRD = u"/opt/boot/initrd.img"
+ QEMU_VM_KERNEL_INITRD = "/opt/boot/initrd.img"
# QEMU VM nested image path
- QEMU_VM_IMAGE = u"/var/lib/vm/image.iso"
+ QEMU_VM_IMAGE = "/var/lib/vm/image.iso"
# QEMU VM DPDK path
- QEMU_VM_DPDK = u"/opt/dpdk-22.03"
+ QEMU_VM_DPDK = "/opt/dpdk-23.11"
# Docker container SUT image
- DOCKER_SUT_IMAGE_UBUNTU = u"csit_sut-ubuntu2004:local"
+ DOCKER_SUT_IMAGE_UBUNTU = "csit_sut-ubuntu2204:local"
# Docker container arm SUT image
- DOCKER_SUT_IMAGE_UBUNTU_ARM = u"csit_sut-ubuntu2004:local"
+ DOCKER_SUT_IMAGE_UBUNTU_ARM = "csit_sut-ubuntu2204:local"
- # TRex install directory
- TREX_INSTALL_DIR = u"/opt/trex-core-2.97"
+ # TRex install directory.
+ TREX_INSTALL_DIR = "/opt/trex-core-3.03"
- # TRex pcap files directory
+ # TRex pcap files directory.
TREX_PCAP_DIR = f"{TREX_INSTALL_DIR}/scripts/avl"
# TRex limit memory.
- TREX_LIMIT_MEMORY = get_int_from_env(u"TREX_LIMIT_MEMORY", 8192)
+ TREX_LIMIT_MEMORY = get_int_from_env("TREX_LIMIT_MEMORY", 8192)
+
+ # TRex limit memory in case of multiple dual-interface configurations.
+ TREX_LIMIT_MEMORY_MULTI = get_int_from_env("TREX_LIMIT_MEMORY_MULTI", 16384)
+
+ # TRex number of cores.
+ TREX_CORE_COUNT = get_int_from_env("TREX_CORE_COUNT", 16)
- # TRex number of cores
- TREX_CORE_COUNT = get_int_from_env(u"TREX_CORE_COUNT", 16)
+ # TRex number of cores in case of multiple dual-interface configurations.
+ TREX_CORE_COUNT_MULTI = get_int_from_env("TREX_CORE_COUNT_MULTI", 8)
- # TRex set number of RX/TX descriptors
- # Set to 0 to use default values
+ # TRex set number of RX/TX descriptors.
+ # Set to 0 to use default values.
TREX_TX_DESCRIPTORS_COUNT = get_int_from_env(
- u"TREX_TX_DESCRIPTORS_COUNT", 0
+ "TREX_TX_DESCRIPTORS_COUNT", 0
)
+
TREX_RX_DESCRIPTORS_COUNT = get_int_from_env(
- u"TREX_RX_DESCRIPTORS_COUNT", 0
+ "TREX_RX_DESCRIPTORS_COUNT", 0
)
- # Trex force start regardless ports state
- TREX_SEND_FORCE = get_pessimistic_bool_from_env(u"TREX_SEND_FORCE")
+ # Trex force start regardless ports state.
+ TREX_SEND_FORCE = get_pessimistic_bool_from_env("TREX_SEND_FORCE")
- # TRex extra commandline arguments
+ # TRex extra commandline arguments.
TREX_EXTRA_CMDLINE = get_str_from_env(
- u"TREX_EXTRA_CMDLINE", u"--mbuf-factor 32")
+ "TREX_EXTRA_CMDLINE", "--mbuf-factor 32"
+ )
+
+ # TRex port driver: defaults to vfio-pci, can be set to igb_uio.
+ TREX_PORT_DRIVER = get_str_from_env(
+ "TREX_PORT_DRIVER", "vfio-pci"
+ )
# Graph node variant value
- GRAPH_NODE_VARIANT = get_str_from_env(u"GRAPH_NODE_VARIANT", u"")
+ GRAPH_NODE_VARIANT = get_str_from_env("GRAPH_NODE_VARIANT", "")
# Default memory page size in case multiple configured in system
- DEFAULT_HUGEPAGE_SIZE = get_str_from_env(u"DEFAULT_HUGEPAGE_SIZE", u"2M")
+ DEFAULT_HUGEPAGE_SIZE = get_str_from_env("DEFAULT_HUGEPAGE_SIZE", "2M")
# Sysctl kernel.core_pattern
- KERNEL_CORE_PATTERN = u"/tmp/%p-%u-%g-%s-%t-%h-%e.core"
+ KERNEL_CORE_PATTERN = "/tmp/%p-%u-%g-%s-%t-%h-%e.core"
# Core dump directory
- CORE_DUMP_DIR = u"/tmp"
+ CORE_DUMP_DIR = "/tmp"
# Perf stat events (comma separated).
PERF_STAT_EVENTS = get_str_from_env(
- u"PERF_STAT_EVENTS",
- u"cpu-clock,context-switches,cpu-migrations,page-faults,"
- u"cycles,instructions,branches,branch-misses,L1-icache-load-misses")
+ "PERF_STAT_EVENTS",
+ "cpu-clock,context-switches,cpu-migrations,page-faults,"
+ "cycles,instructions,branches,branch-misses,L1-icache-load-misses")
# Equivalent to ~0 used in vpp code
BITWISE_NON_ZERO = 0xffffffff
# Default path to VPP API socket.
- SOCKSVR_PATH = u"/run/vpp/api.sock"
+ SOCKSVR_PATH = "/run/vpp/api.sock"
# Default path to VPP CLI socket.
- SOCKCLI_PATH = u"/run/vpp/cli.sock"
+ SOCKCLI_PATH = "/run/vpp/cli.sock"
# Default path to VPP API Stats socket.
- SOCKSTAT_PATH = u"/run/vpp/stats.sock"
+ SOCKSTAT_PATH = "/run/vpp/stats.sock"
+
+ # This MTU value is used to force VPP to fragment 1518B packet into two.
+ MTU_FOR_FRAGMENTATION = 1043
# Number of trials to execute in MRR test.
- PERF_TRIAL_MULTIPLICITY = get_int_from_env(u"PERF_TRIAL_MULTIPLICITY", 10)
+ PERF_TRIAL_MULTIPLICITY = get_int_from_env("PERF_TRIAL_MULTIPLICITY", 10)
# Duration [s] of one trial in MRR test.
- PERF_TRIAL_DURATION = get_float_from_env(u"PERF_TRIAL_DURATION", 1.0)
+ PERF_TRIAL_DURATION = get_float_from_env("PERF_TRIAL_DURATION", 1.0)
# Whether to use latency streams in main search trials.
- PERF_USE_LATENCY = get_pessimistic_bool_from_env(u"PERF_USE_LATENCY")
+ PERF_USE_LATENCY = get_pessimistic_bool_from_env("PERF_USE_LATENCY")
# Duration of one latency-specific trial in NDRPDR test.
PERF_TRIAL_LATENCY_DURATION = get_float_from_env(
- u"PERF_TRIAL_LATENCY_DURATION", 5.0)
+ "PERF_TRIAL_LATENCY_DURATION", 5.0)
# For some testbeds TG takes longer than usual to start sending traffic.
# This constant [s] allows longer wait, without affecting
# the approximate duration. For example, use 0.098 for AWS.
- PERF_TRIAL_STL_DELAY = get_float_from_env(u"PERF_TRIAL_STL_DELAY", 0.0)
+ PERF_TRIAL_STL_DELAY = get_float_from_env("PERF_TRIAL_STL_DELAY", 0.0)
# ASTF usually needs a different value for the delay.
PERF_TRIAL_ASTF_DELAY = get_float_from_env(
- u"PERF_TRIAL_ASTF_DELAY", 0.112
+ "PERF_TRIAL_ASTF_DELAY", 0.112
)
# Number of data frames in TPUT transaction, used both by TCP and UDP.
@@ -281,151 +291,248 @@ class Constants:
# it means we can send only 5 full data frames in a burst.
# https://github.com/cisco-system-traffic-generator/
# trex-core/blob/v2.88/src/44bsd/tcp_var.h#L896-L903
- ASTF_N_DATA_FRAMES = get_int_from_env(u"ASTF_N_DATA_FRAMES", 5)
+ ASTF_N_DATA_FRAMES = get_int_from_env("ASTF_N_DATA_FRAMES", 5)
# Extended debug (incl. vpp packet trace, linux perf stat, ...).
# Full list is available as suite variable (__init__.robot) or is
# override by test.
- EXTENDED_DEBUG = get_pessimistic_bool_from_env(u"EXTENDED_DEBUG")
+ EXTENDED_DEBUG = get_pessimistic_bool_from_env("EXTENDED_DEBUG")
# UUID string of DUT1 /tmp volume created outside of the
# DUT1 docker in case of vpp-device test. ${EMPTY} value means that
# /tmp directory is inside the DUT1 docker.
- DUT1_UUID = get_str_from_env(u"DUT1_UUID", u"")
+ DUT1_UUID = get_str_from_env("DUT1_UUID", "")
# Global "kill switch" for CRC checking during runtime.
FAIL_ON_CRC_MISMATCH = get_pessimistic_bool_from_env(
- u"FAIL_ON_CRC_MISMATCH"
+ "FAIL_ON_CRC_MISMATCH"
)
# Default IP4 prefix length (if not defined in topology file)
- DEFAULT_IP4_PREFIX_LENGTH = u"24"
+ DEFAULT_IP4_PREFIX_LENGTH = "24"
# Maximum number of interfaces in a data path
DATAPATH_INTERFACES_MAX = 100
# Mapping from NIC name to its bps limit.
NIC_NAME_TO_BPS_LIMIT = {
- u"Intel-X520-DA2": 10000000000,
- u"Intel-X553": 10000000000,
- u"Intel-X710": 10000000000,
- u"Intel-XL710": 24500000000,
- u"Intel-XXV710": 24500000000,
- u"Intel-E810XXV": 24500000000,
- u"Intel-E810CQ": 100000000000,
- u"Mellanox-CX556A": 100000000000,
- u"Amazon-Nitro-50G": 10000000000,
- u"virtual": 100000000,
+ "Intel-X520-DA2": 10000000000,
+ "Intel-X710": 10000000000,
+ "Intel-XL710": 24500000000,
+ "Intel-XXV710": 24500000000,
+ "Intel-E810XXV": 24500000000,
+ "Intel-E822CQ": 24500000000,
+ "Intel-E823C": 24500000000,
+ "Intel-E810CQ": 100000000000,
+ "Mellanox-CX556A": 100000000000,
+ "Mellanox-CX6DX": 100000000000,
+ "Mellanox-CX7VEAT": 200000000000,
+ "Amazon-Nitro-50G": 10000000000,
+ "Amazon-Nitro-100G": 10000000000,
+ "Amazon-Nitro-200G": 16000000000,
+ "virtual": 100000000,
}
# Mapping from NIC name to its pps limit.
NIC_NAME_TO_PPS_LIMIT = {
- u"Intel-X520-DA2": 14880952,
- u"Intel-X553": 14880952,
- u"Intel-X710": 14880952,
- u"Intel-XL710": 18750000,
- u"Intel-XXV710": 18750000,
- u"Intel-E810XXV": 29000000,
- u"Intel-E810CQ": 58500000,
- u"Mellanox-CX556A": 148809523,
- u"Amazon-Nitro-50G": 1200000,
- u"virtual": 14880952,
+ "Intel-X520-DA2": 14880952,
+ "Intel-X710": 14880952,
+ "Intel-XL710": 18750000,
+ "Intel-XXV710": 18750000,
+ "Intel-E810XXV": 29000000,
+ "Intel-E822CQ": 29000000,
+ "Intel-E823C": 29000000,
+ "Intel-E810CQ": 58500000,
+ "Mellanox-CX556A": 148809523,
+ "Mellanox-CX6DX": 148809523,
+ "Mellanox-CX7VEAT": 297619046,
+ "Amazon-Nitro-50G": 1500000,
+ "Amazon-Nitro-100G": 3000000,
+ "Amazon-Nitro-200G": 6000000,
+ "virtual": 14880952,
}
# Suite file names use codes for NICs.
NIC_NAME_TO_CODE = {
- u"Intel-X520-DA2": u"10ge2p1x520",
- u"Intel-X553": u"10ge2p1x553",
- u"Intel-X710": u"10ge2p1x710",
- u"Intel-XL710": u"40ge2p1xl710",
- u"Intel-XXV710": u"25ge2p1xxv710",
- u"Intel-E810XXV": u"25ge2p1e810xxv",
- u"Intel-E810CQ": u"100ge2p1e810cq",
- u"Amazon-Nitro-50G": u"50ge1p1ena",
- u"Mellanox-CX556A": u"100ge2p1cx556a",
+ "Intel-X520-DA2": "10ge2p1x520",
+ "Intel-X710": "10ge2p1x710",
+ "Intel-XL710": "40ge2p1xl710",
+ "Intel-XXV710": "25ge2p1xxv710",
+ "Intel-E810XXV": "25ge2p1e810xxv",
+ "Intel-E822CQ": "25ge2p1e822cq",
+ "Intel-E823C": "25ge2p1e823c",
+ "Intel-E810CQ": "100ge2p1e810cq",
+ "Amazon-Nitro-50G": "50ge1p1ena",
+ "Amazon-Nitro-100G": "100ge1p1ena",
+ "Amazon-Nitro-200G": "200ge1p1ena",
+ "Mellanox-CX556A": "100ge2p1cx556a",
+ "Mellanox-CX6DX": "100ge2p1cx6dx",
+ "Mellanox-CX7VEAT": "200ge2p1cx7veat",
+ "Mellanox-CX7VEAT": "200ge6p3cx7veat",
+ "virtual": "1ge1p82540em",
+ }
+ NIC_CODE_TO_NAME = {
+ "10ge2p1x520": "Intel-X520-DA2",
+ "10ge2p1x710": "Intel-X710",
+ "40ge2p1xl710": "Intel-XL710",
+ "25ge2p1xxv710": "Intel-XXV710",
+ "25ge2p1e810xxv": "Intel-E810XXV",
+ "25ge2p1e822cq": "Intel-E822CQ",
+ "25ge2p1e823c": "Intel-E823C",
+ "100ge2p1e810cq": "Intel-E810CQ",
+ "50ge1p1ena": "Amazon-Nitro-50G",
+ "100ge1p1ena": "Amazon-Nitro-100G",
+ "200ge1p1ena": "Amazon-Nitro-200G",
+ "100ge2p1cx556a": "Mellanox-CX556A",
+ "100ge2p1cx6dx": "Mellanox-CX6DX",
+ "200ge2p1cx7veat": "Mellanox-CX7VEAT",
+ "200ge6p3cx7veat": "Mellanox-CX7VEAT",
+ "1ge1p82540em": "virtual",
}
# Shortened lowercase NIC model name, useful for presentation.
NIC_CODE_TO_SHORT_NAME = {
- u"10ge2p1x520": u"x520",
- u"10ge2p1x553": u"x553",
- u"10ge2p1x710": u"x710",
- u"40ge2p1xl710": u"xl710",
- u"25ge2p1xxv710": u"xxv710",
- u"25ge2p1e810xxv": u"e810xxv",
- u"100ge2p1e810cq": u"e810cq",
- u"50ge1p1ena": u"ena",
- u"100ge2p1cx556a": u"cx556a",
+ "10ge2p1x520": "x520",
+ "10ge2p1x710": "x710",
+ "40ge2p1xl710": "xl710",
+ "25ge2p1xxv710": "xxv710",
+ "25ge2p1e810xxv": "e810xxv",
+ "25ge2p1e822cq": "e822cq",
+ "25ge2p1e823c": "e823c",
+ "100ge2p1e810cq": "e810cq",
+ "50ge1p1ena": "ena",
+ "100ge1p1ena": "ena100",
+ "200ge1p1ena": "ena200",
+ "100ge2p1cx556a": "cx556a",
+ "100ge2p1cx6dx": "cx6dx",
+ "200ge2p1cx7veat": "cx7veat",
+ "200ge6p3cx7veat": "cx7veat",
+ "1ge1p82540em": "82540em",
}
# Not each driver is supported by each NIC.
NIC_NAME_TO_DRIVER = {
- u"Intel-X520-DA2": [u"vfio-pci", u"af_xdp"],
- u"Intel-X553": [u"vfio-pci", u"af_xdp"],
- u"Intel-X710": [u"vfio-pci", u"avf", u"af_xdp"],
- u"Intel-XL710": [u"vfio-pci", u"avf", u"af_xdp"],
- u"Intel-XXV710": [u"vfio-pci", u"avf", u"af_xdp"],
- u"Intel-E810XXV": [u"vfio-pci", u"avf", u"af_xdp"],
- u"Intel-E810CQ": [u"vfio-pci", u"avf", u"af_xdp"],
- u"Amazon-Nitro-50G": [u"vfio-pci"],
- u"Mellanox-CX556A": [u"rdma-core", u"af_xdp"],
+ "Intel-X520-DA2": ["vfio-pci", "af_xdp"],
+ "Intel-X710": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-XL710": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-XXV710": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-E810XXV": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-E822CQ": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-E823C": ["vfio-pci", "avf", "af_xdp"],
+ "Intel-E810CQ": ["vfio-pci", "avf", "af_xdp"],
+ "Amazon-Nitro-50G": ["vfio-pci"],
+ "Amazon-Nitro-100G": ["vfio-pci"],
+ "Amazon-Nitro-200G": ["vfio-pci"],
+ "Mellanox-CX556A": ["rdma-core", "mlx5_core", "af_xdp"],
+ "Mellanox-CX6DX": ["rdma-core", "mlx5_core", "af_xdp"],
+ "Mellanox-CX7VEAT": ["rdma-core", "mlx5_core", "af_xdp"],
+ "virtual": ["vfio-pci"],
}
- # Each driver needs different prugin to work.
+ # Each driver needs a different plugin to work.
NIC_DRIVER_TO_PLUGINS = {
- u"vfio-pci": u"dpdk_plugin.so",
- u"avf": u"avf_plugin.so",
- u"rdma-core": u"rdma_plugin.so",
- u"af_xdp": u"af_xdp_plugin.so",
+ "vfio-pci": "dpdk_plugin.so",
+ "avf": "avf_plugin.so",
+ "rdma-core": "rdma_plugin.so",
+ "mlx5_core": "dpdk_plugin.so",
+ "af_xdp": "af_xdp_plugin.so",
}
# Tags to differentiate tests for different NIC driver.
NIC_DRIVER_TO_TAG = {
- u"vfio-pci": u"DRV_VFIO_PCI",
- u"avf": u"DRV_AVF",
- u"rdma-core": u"DRV_RDMA_CORE",
- u"af_xdp": u"DRV_AF_XDP",
+ "vfio-pci": "DRV_VFIO_PCI",
+ "avf": "DRV_AVF",
+ "rdma-core": "DRV_RDMA_CORE",
+ "mlx5_core": "DRV_MLX5_CORE",
+ "af_xdp": "DRV_AF_XDP",
}
# Suite names have to be different, add prefix.
NIC_DRIVER_TO_SUITE_PREFIX = {
- u"vfio-pci": u"",
- u"avf": u"avf-",
- u"rdma-core": u"rdma-",
- u"af_xdp": u"af-xdp-",
+ "vfio-pci": "",
+ "avf": "avf-",
+ "rdma-core": "rdma-",
+ "mlx5_core": "mlx5-",
+ "af_xdp": "af-xdp-",
}
# Number of virtual functions of physical nic.
NIC_DRIVER_TO_VFS = {
- u"vfio-pci": u"nic_vfs}= | 0",
- u"avf": u"nic_vfs}= | 1",
- u"rdma-core": u"nic_vfs}= | 0",
- u"af_xdp": u"nic_vfs}= | 0",
+ "vfio-pci": "nic_vfs}= | 0",
+ "avf": "nic_vfs}= | 1",
+ "rdma-core": "nic_vfs}= | 0",
+ "mlx5_core": "nic_vfs}= | 0",
+ "af_xdp": "nic_vfs}= | 0",
+ }
+
+ # Number of physical interfaces of physical nic.
+ NIC_CODE_TO_PFS = {
+ "10ge2p1x520": "nic_pfs}= | 2",
+ "10ge2p1x710": "nic_pfs}= | 2",
+ "40ge2p1xl710": "nic_pfs}= | 2",
+ "25ge2p1xxv710": "nic_pfs}= | 2",
+ "25ge2p1e810xxv": "nic_pfs}= | 2",
+ "25ge2p1e822cq": "nic_pfs}= | 2",
+ "25ge2p1e823c": "nic_pfs}= | 2",
+ "100ge2p1e810cq": "nic_pfs}= | 2",
+ "50ge1p1ena": "nic_pfs}= | 2",
+ "100ge1p1ena": "nic_pfs}= | 2",
+ "200ge1p1ena": "nic_pfs}= | 2",
+ "100ge2p1cx556a": "nic_pfs}= | 2",
+ "100ge2p1cx6dx": "nic_pfs}= | 2",
+ "200ge2p1cx7veat": "nic_pfs}= | 2",
+ "200ge6p3cx7veat": "nic_pfs}= | 6",
+ "1ge1p82540em": "nic_pfs}= | 2",
+ }
+
+ NIC_CODE_TO_CORESCALE = {
+ "10ge2p1x520": 1,
+ "10ge2p1x710": 1,
+ "40ge2p1xl710": 1,
+ "25ge2p1xxv710": 1,
+ "25ge2p1e810xxv": 1,
+ "25ge2p1e822cq": 1,
+ "25ge2p1e823c": 1,
+ "100ge2p1e810cq": 1,
+ "50ge1p1ena": 1,
+ "100ge1p1ena": 1,
+ "200ge1p1ena": 1,
+ "100ge2p1cx556a": 1,
+ "100ge2p1cx6dx": 1,
+ "200ge2p1cx7veat": 1,
+ "200ge6p3cx7veat": 3,
+ "1ge1p82540em": 1,
}
# Not each driver is supported by each NIC.
DPDK_NIC_NAME_TO_DRIVER = {
- u"Intel-X520-DA2": [u"vfio-pci"],
- u"Intel-X553": [u"vfio-pci"],
- u"Intel-X710": [u"vfio-pci"],
- u"Intel-XL710": [u"vfio-pci"],
- u"Intel-XXV710": [u"vfio-pci"],
- u"Intel-E810XXV": [u"vfio-pci"],
- u"Intel-E810CQ": [u"vfio-pci"],
- u"Amazon-Nitro-50G": [u"vfio-pci"],
- u"Mellanox-CX556A": [u"mlx5_core"],
+ "Intel-X520-DA2": ["vfio-pci"],
+ "Intel-X710": ["vfio-pci"],
+ "Intel-XL710": ["vfio-pci"],
+ "Intel-XXV710": ["vfio-pci"],
+ "Intel-E810XXV": ["vfio-pci"],
+ "Intel-E822CQ": ["vfio-pci"],
+ "Intel-E823C": ["vfio-pci"],
+ "Intel-E810CQ": ["vfio-pci"],
+ "Amazon-Nitro-50G": ["vfio-pci"],
+ "Amazon-Nitro-100G": ["vfio-pci"],
+ "Amazon-Nitro-200G": ["vfio-pci"],
+ "Mellanox-CX556A": ["mlx5_core"],
+ "Mellanox-CX6DX": ["mlx5_core"],
+ "Mellanox-CX7VEAT": ["mlx5_core"],
+ "virtual": ["vfio-pci"],
}
# Tags to differentiate tests for different NIC driver.
DPDK_NIC_DRIVER_TO_TAG = {
- u"vfio-pci": u"DRV_VFIO_PCI",
- u"mlx5_core": u"DRV_MLX5_CORE",
+ "vfio-pci": "DRV_VFIO_PCI",
+ "mlx5_core": "DRV_MLX5_CORE",
}
# Suite names have to be different, add prefix.
DPDK_NIC_DRIVER_TO_SUITE_PREFIX = {
- u"vfio-pci": u"",
- u"mlx5_core": u"mlx5-",
+ "vfio-pci": "",
+ "mlx5_core": "mlx5-",
}
# Some identifiers constructed from suite names
@@ -441,31 +548,33 @@ class Constants:
# TODO CSIT-1481: Crypto HW should be read from topology file instead.
NIC_NAME_TO_CRYPTO_HW = {
- u"Intel-X553": u"HW_C3xxx",
- u"Intel-X710": u"HW_DH895xcc",
- u"Intel-XL710": u"HW_DH895xcc",
+ "Intel-E810CQ": "HW_4xxx",
+ "Intel-E822CQ": "HW_C4xxx",
+ "Intel-E823C": "HW_C4xxx",
+ "Intel-X710": "HW_DH895xcc",
+ "Intel-XL710": "HW_DH895xcc",
}
DEVICE_TYPE_TO_KEYWORD = {
- u"scapy": None
+ "scapy": None
}
PERF_TYPE_TO_KEYWORD = {
- u"mrr": u"Traffic should pass with maximum rate",
- u"ndrpdr": u"Find NDR and PDR intervals using optimized search",
- u"soak": u"Find critical load using PLRsearch",
+ "mrr": "Traffic should pass with maximum rate",
+ "ndrpdr": "Find NDR and PDR intervals using optimized search",
+ "soak": "Find critical load using PLRsearch",
}
PERF_TYPE_TO_SUITE_DOC_VER = {
- u"mrr": u'''fication:** In MaxReceivedRate tests TG sends traffic at \\
+ "mrr": u'''fication:** In MaxReceivedRate tests TG sends traffic at \\
| ... | line rate and reports total received packets over trial period. \\''',
- u"ndrpdr": u'''rification:** TG finds and reports throughput NDR (Non \\
+ "ndrpdr": u'''rification:** TG finds and reports throughput NDR (Non \\
| ... | Drop Rate) with zero packet loss tolerance and throughput PDR \\
| ... | (Partial Drop Rate) with non-zero packet loss tolerance (LT) \\
| ... | expressed in percentage of packets transmitted. NDR and PDR are \\
| ... | discovered for different Ethernet L2 frame sizes using MLRsearch \\
| ... | library.''',
- u"soak": u'''rification:** TG sends traffic at dynamically computed \\
+ "soak": u'''rification:** TG sends traffic at dynamically computed \\
| ... | rate as PLRsearch algorithm gathers data and improves its estimate \\
| ... | of a rate at which a prescribed small fraction of packets \\
| ... | would be lost. After set time, the serarch stops \\
@@ -473,8 +582,8 @@ class Constants:
}
PERF_TYPE_TO_TEMPLATE_DOC_VER = {
- u"mrr": u'''Measure MaxReceivedRate for ${frame_size}B frames \\
+ "mrr": u'''Measure MaxReceivedRate for ${frame_size}B frames \\
| | ... | using burst trials throughput test. \\''',
- u"ndrpdr": u"Measure NDR and PDR values using MLRsearch algorithm.",
- u"soak": u"Estimate critical rate using PLRsearch algorithm. \\",
+ "ndrpdr": "Measure NDR and PDR values using MLRsearch algorithm.",
+ "soak": "Estimate critical rate using PLRsearch algorithm. \\",
}
diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py
index 7ec8258686..fc32248f6b 100644
--- a/resources/libraries/python/ContainerUtils.py
+++ b/resources/libraries/python/ContainerUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -205,8 +205,8 @@ class ContainerManager:
dut_cnt = len(
Counter(
[
- self.containers[container].node[u"host"]
- for container in self.containers
+ f"{container.node['host']}{container.node['port']}"
+ for container in self.containers.values()
]
)
)
@@ -256,6 +256,11 @@ class ContainerManager:
self._configure_vpp_chain_ipsec(
mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
guest_dir=guest_dir, nf_instance=idx, **kwargs)
+ elif chain_topology == u"chain_dma":
+ self._configure_vpp_chain_dma(
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir, **kwargs
+ )
else:
raise RuntimeError(
f"Container topology {chain_topology} not implemented"
@@ -278,6 +283,25 @@ class ContainerManager:
f"{self.engine.container.name}-{kwargs[u'sid2']}"
)
+ def _configure_vpp_chain_dma(self, **kwargs):
+ """Configure VPP in chain topology with l2xc (dma).
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ dma_wqs = kwargs[f"dma_wqs"]
+ self.engine.create_vpp_startup_config_dma(dma_wqs)
+
+ self.engine.create_vpp_exec_config(
+ u"memif_create_chain_dma.exec",
+ mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
+ sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
+ socket1=f"{kwargs[u'guest_dir']}/memif-"
+ f"{self.engine.container.name}-{kwargs[u'sid1']}",
+ socket2=f"{kwargs[u'guest_dir']}/memif-"
+ f"{self.engine.container.name}-{kwargs[u'sid2']}"
+ )
+
def _configure_vpp_cross_horiz(self, **kwargs):
"""Configure VPP in cross horizontal topology (single memif).
@@ -581,8 +605,7 @@ class ContainerEngine:
def start_vpp(self, verify=True):
"""Start VPP inside a container."""
self.execute(
- u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
- u">/tmp/vppd.log 2>&1 < /dev/null &")
+ u"/usr/bin/vpp -c /etc/vpp/startup.conf")
topo_instance = BuiltIn().get_library_instance(
u"resources.libraries.python.topology.Topology"
@@ -636,7 +659,7 @@ class ContainerEngine:
# Execute puts the command into single quotes,
# so inner arguments are enclosed in qouble quotes here.
self.execute(
- u'vppctl show pci 2>&1 | '
+ u'/usr/bin/vppctl show pci 2>&1 | '
u'fgrep -v "Connection refused" | '
u'fgrep -v "No such file or directory"'
)
@@ -694,7 +717,6 @@ class ContainerEngine:
vpp_config = VppConfigGenerator()
vpp_config.set_node(self.container.node)
vpp_config.add_unix_cli_listen()
- vpp_config.add_unix_nodaemon()
vpp_config.add_unix_exec(u"/tmp/running.exec")
vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
if cpuset_cpus:
@@ -770,6 +792,22 @@ class ContainerEngine:
f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
)
+ def create_vpp_startup_config_dma(self, dma_devices):
+ """Create startup configuration of VPP DMA.
+
+ :param dma_devices: DMA devices list.
+ :type dma_devices: list
+ """
+ vpp_config = self.create_base_vpp_startup_config()
+ vpp_config.add_plugin(u"enable", u"dma_intel_plugin.so")
+ vpp_config.add_dma_dev(dma_devices)
+
+ # Apply configuration
+ self.execute(u"mkdir -p /etc/vpp/")
+ self.execute(
+ f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
+ )
+
def create_vpp_exec_config(self, template_file, **kwargs):
"""Create VPP exec configuration on container.
@@ -808,31 +846,19 @@ class ContainerEngine:
:raises RuntimeError: If applying cgroup settings via cgset failed.
"""
ret, _, _ = self.container.ssh.exec_command_sudo(
- u"cgset -r cpuset.cpu_exclusive=0 /"
- )
- if int(ret) != 0:
- raise RuntimeError(u"Failed to apply cgroup settings.")
-
- ret, _, _ = self.container.ssh.exec_command_sudo(
- u"cgset -r cpuset.mem_exclusive=0 /"
- )
- if int(ret) != 0:
- raise RuntimeError(u"Failed to apply cgroup settings.")
-
- ret, _, _ = self.container.ssh.exec_command_sudo(
f"cgcreate -g cpuset:/{name}"
)
if int(ret) != 0:
raise RuntimeError(u"Failed to copy cgroup settings from root.")
ret, _, _ = self.container.ssh.exec_command_sudo(
- f"cgset -r cpuset.cpu_exclusive=0 /{name}"
+ f"cgset -r cpuset.cpus=0 /{name}"
)
if int(ret) != 0:
raise RuntimeError(u"Failed to apply cgroup settings.")
ret, _, _ = self.container.ssh.exec_command_sudo(
- f"cgset -r cpuset.mem_exclusive=0 /{name}"
+ f"cgset -r cpuset.mems=0 /{name}"
)
if int(ret) != 0:
raise RuntimeError(u"Failed to apply cgroup settings.")
@@ -863,7 +889,7 @@ class LXC(ContainerEngine):
else u"amd64"
image = self.container.image if self.container.image \
- else f"-d ubuntu -r focal -a {target_arch}"
+ else f"-d ubuntu -r jammy -a {target_arch}"
cmd = f"lxc-create -t download --name {self.container.name} " \
f"-- {image} --no-validate"
@@ -1128,8 +1154,8 @@ class Docker(ContainerEngine):
if self.container.mnt else u""
cmd = f"docker run --privileged --detach --interactive --tty --rm " \
- f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
- f"{env} {volume} --name {self.container.name} " \
+ f"--cgroup-parent docker.slice {cpuset_cpus} {cpuset_mems} " \
+ f"{publish} {env} {volume} --name {self.container.name} " \
f"{self.container.image} {command}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
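The cgroup hunk above drops the cgroup-v1 style cpu_exclusive/mem_exclusive resets on the root group and instead pins the named group to cpuset 0 and memory node 0. Assuming a group named "csit" (illustrative only), the commands now issued over SSH reduce to roughly this sequence:

def cgroup_commands(name="csit"):
    """Build the cgroup setup commands issued by the modified helper.

    The exclusivity resets on the root cgroup are gone; the named group is
    created and assigned cpuset.cpus=0 and cpuset.mems=0 before use.
    """
    return [
        f"cgcreate -g cpuset:/{name}",
        f"cgset -r cpuset.cpus=0 /{name}",
        f"cgset -r cpuset.mems=0 /{name}",
    ]

for cmd in cgroup_commands():
    print(cmd)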
diff --git a/resources/libraries/python/CoreDumpUtil.py b/resources/libraries/python/CoreDumpUtil.py
index 97948ad28a..b70afa858e 100644
--- a/resources/libraries/python/CoreDumpUtil.py
+++ b/resources/libraries/python/CoreDumpUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -139,11 +139,11 @@ class CoreDumpUtil:
for node in nodes.values():
if node[u"type"] == NodeType.DUT:
command = (
- f"for f in {Constants.CORE_DUMP_DIR}/*.core; do "
- f"sudo gdb /usr/bin/vpp ${{f}} "
- f"-ex 'source -v {Constants.REMOTE_FW_DIR}"
- f"/resources/tools/scripts/gdb-commands' -ex quit; "
- f"sudo rm -f ${{f}}; done"
+ f"for f in {Constants.CORE_DUMP_DIR}/*.core; do"
+ f" sleep 10; sudo gdb /usr/bin/vpp ${{f}}"
+ f" -ex 'source -v {Constants.REMOTE_FW_DIR}"
+ f"/resources/tools/scripts/gdb-commands' -ex quit;"
+ f" sudo rm -f ${{f}}; done"
)
try:
exec_cmd_no_error(node, command, timeout=3600)
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index 5805ba7787..c77d0f83b1 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,11 +13,13 @@
"""CPU utilities library."""
+from random import choice
+
from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, NodeType
__all__ = [u"CpuUtils"]
@@ -388,25 +390,25 @@ class CpuUtils:
@staticmethod
def get_affinity_trex(
- node, if1_pci, if2_pci, tg_mtc=1, tg_dtc=1, tg_ltc=1):
+ node, if_key, tg_mtc=1, tg_dtc=1, tg_ltc=1, tg_dtc_offset=0):
"""Get affinity for T-Rex. Result will be used to pin T-Rex threads.
:param node: TG node.
- :param if1_pci: TG first interface.
- :param if2_pci: TG second interface.
+ :param if_key: TG interface key.
:param tg_mtc: TG main thread count.
:param tg_dtc: TG dataplane thread count.
:param tg_ltc: TG latency thread count.
+ :param tg_dtc_offset: TG dataplane thread offset.
:type node: dict
- :type if1_pci: str
- :type if2_pci: str
+ :type if_key: str
:type tg_mtc: int
:type tg_dtc: int
:type tg_ltc: int
+ :type tg_dtc_offset: int
:returns: List of CPUs allocated to T-Rex including numa node.
:rtype: int, int, int, list
"""
- interface_list = [if1_pci, if2_pci]
+ interface_list = [if_key]
cpu_node = Topology.get_interfaces_numa_node(node, *interface_list)
master_thread_id = CpuUtils.cpu_slice_of_list_per_node(
@@ -414,12 +416,11 @@ class CpuUtils:
smt_used=False)
threads = CpuUtils.cpu_slice_of_list_per_node(
- node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_dtc,
- smt_used=False)
+ node, cpu_node, skip_cnt=tg_mtc + tg_ltc + tg_dtc_offset,
+ cpu_cnt=tg_dtc, smt_used=False)
latency_thread_id = CpuUtils.cpu_slice_of_list_per_node(
- node, cpu_node, skip_cnt=tg_mtc + tg_dtc, cpu_cnt=tg_ltc,
- smt_used=False)
+ node, cpu_node, skip_cnt=tg_mtc, cpu_cnt=tg_ltc, smt_used=False)
return master_thread_id[0], latency_thread_id[0], cpu_node, threads
@@ -501,17 +502,15 @@ class CpuUtils:
@staticmethod
def get_affinity_vswitch(
- nodes, node, phy_cores, rx_queues=None, rxd=None, txd=None):
- """Get affinity for vswitch.
+ nodes, phy_cores, rx_queues=None, rxd=None, txd=None):
+ """Get affinity for vswitch on all DUTs.
:param nodes: Topology nodes.
- :param node: Topology node string.
:param phy_cores: Number of physical cores to allocate.
:param rx_queues: Number of RX queues. (Optional, Default: None)
:param rxd: Number of RX descriptors. (Optional, Default: None)
:param txd: Number of TX descriptors. (Optional, Default: None)
:type nodes: dict
- :type node: str
:type phy_cores: int
:type rx_queues: int
:type rxd: int
@@ -519,76 +518,83 @@ class CpuUtils:
:returns: Compute resource information dictionary.
:rtype: dict
"""
- # Number of Data Plane physical cores.
- dp_cores_count = BuiltIn().get_variable_value(
- f"${{dp_cores_count}}", phy_cores
- )
- # Number of Feature Plane physical cores.
- fp_cores_count = BuiltIn().get_variable_value(
- f"${{fp_cores_count}}", phy_cores - dp_cores_count
- )
- # Ratio between RX queues and data plane threads.
- rxq_ratio = BuiltIn().get_variable_value(
- f"${{rxq_ratio}}", 1
- )
-
- dut_pf_keys = BuiltIn().get_variable_value(
- f"${{{node}_pf_keys}}"
- )
- # SMT override in case of non standard test cases.
- smt_used = BuiltIn().get_variable_value(
- f"${{smt_used}}", CpuUtils.is_smt_enabled(nodes[node][u"cpuinfo"])
- )
-
- cpu_node = Topology.get_interfaces_numa_node(nodes[node], *dut_pf_keys)
- skip_cnt = Constants.CPU_CNT_SYSTEM
- cpu_main = CpuUtils.cpu_list_per_node_str(
- nodes[node], cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=Constants.CPU_CNT_MAIN,
- smt_used=False
- )
- skip_cnt += Constants.CPU_CNT_MAIN
- cpu_dp = CpuUtils.cpu_list_per_node_str(
- nodes[node], cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=int(dp_cores_count),
- smt_used=smt_used
- ) if int(dp_cores_count) else u""
- skip_cnt = skip_cnt + int(dp_cores_count)
- cpu_fp = CpuUtils.cpu_list_per_node_str(
- nodes[node], cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=int(fp_cores_count),
- smt_used=smt_used
- ) if int(fp_cores_count) else u""
-
- fp_count_int = \
- int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
- else int(fp_cores_count)
- dp_count_int = \
- int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
- else int(dp_cores_count)
-
- rxq_count_int = rx_queues if rx_queues else int(dp_count_int/rxq_ratio)
- rxq_count_int = 1 if not rxq_count_int else rxq_count_int
-
compute_resource_info = dict()
- compute_resource_info[u"buffers_numa"] = 215040 if smt_used else 107520
- compute_resource_info[u"smt_used"] = smt_used
- compute_resource_info[u"cpu_main"] = cpu_main
- compute_resource_info[u"cpu_dp"] = cpu_dp
- compute_resource_info[u"cpu_fp"] = cpu_fp
- compute_resource_info[u"cpu_wt"] = \
- u",".join(filter(None, [cpu_dp, cpu_fp]))
- compute_resource_info[u"cpu_alloc_str"] = \
- u",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
- compute_resource_info[u"cpu_count_int"] = \
- int(dp_cores_count) + int(fp_cores_count)
- compute_resource_info[u"rxd_count_int"] = rxd
- compute_resource_info[u"txd_count_int"] = txd
- compute_resource_info[u"rxq_count_int"] = rxq_count_int
- compute_resource_info[u"fp_count_int"] = fp_count_int
- compute_resource_info[u"dp_count_int"] = dp_count_int
+ for node_name, node in nodes.items():
+ if node["type"] != NodeType.DUT:
+ continue
+ # Number of Data Plane physical cores.
+ dp_cores_count = BuiltIn().get_variable_value(
+ "${dp_cores_count}", phy_cores
+ )
+ # Number of Feature Plane physical cores.
+ fp_cores_count = BuiltIn().get_variable_value(
+ "${fp_cores_count}", phy_cores - dp_cores_count
+ )
+ # Ratio between RX queues and data plane threads.
+ rxq_ratio = BuiltIn().get_variable_value(
+ "${rxq_ratio}", 1
+ )
+
+ dut_pf_keys = BuiltIn().get_variable_value(
+ f"${{{node_name}_pf_keys}}"
+ )
+ # SMT override in case of non standard test cases.
+ smt_used = BuiltIn().get_variable_value(
+ "${smt_used}", CpuUtils.is_smt_enabled(node["cpuinfo"])
+ )
+
+ cpu_node = Topology.get_interfaces_numa_node(node, *dut_pf_keys)
+ skip_cnt = Constants.CPU_CNT_SYSTEM
+ cpu_main = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=Constants.CPU_CNT_MAIN if phy_cores else 0,
+ smt_used=False
+ )
+ cpu_main = cpu_main if phy_cores else choice(cpu_main.split(","))
+ skip_cnt += Constants.CPU_CNT_MAIN
+ cpu_dp = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(dp_cores_count),
+ smt_used=smt_used
+ ) if int(dp_cores_count) else ""
+ skip_cnt = skip_cnt + int(dp_cores_count)
+ cpu_fp = CpuUtils.cpu_list_per_node_str(
+ node, cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=int(fp_cores_count),
+ smt_used=smt_used
+ ) if int(fp_cores_count) else ""
+
+ fp_count_int = \
+ int(fp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(fp_cores_count)
+ dp_count_int = \
+ int(dp_cores_count) * CpuUtils.NR_OF_THREADS if smt_used \
+ else int(dp_cores_count)
+
+ rxq_count_int = \
+ int(rx_queues) if rx_queues \
+ else int(dp_count_int/rxq_ratio)
+ rxq_count_int = 1 if not rxq_count_int else rxq_count_int
+
+ compute_resource_info["buffers_numa"] = \
+ 215040 if smt_used else 107520
+ compute_resource_info["smt_used"] = smt_used
+ compute_resource_info[f"{node_name}_cpu_main"] = cpu_main
+ compute_resource_info[f"{node_name}_cpu_dp"] = cpu_dp
+ compute_resource_info[f"{node_name}_cpu_fp"] = cpu_fp
+ compute_resource_info[f"{node_name}_cpu_wt"] = \
+ ",".join(filter(None, [cpu_dp, cpu_fp]))
+ compute_resource_info[f"{node_name}_cpu_alloc_str"] = \
+ ",".join(filter(None, [cpu_main, cpu_dp, cpu_fp]))
+ compute_resource_info["cpu_count_int"] = \
+ int(dp_cores_count) + int(fp_cores_count)
+ compute_resource_info["rxd_count_int"] = rxd
+ compute_resource_info["txd_count_int"] = txd
+ compute_resource_info["rxq_count_int"] = rxq_count_int
+ compute_resource_info["fp_count_int"] = fp_count_int
+ compute_resource_info["dp_count_int"] = dp_count_int
return compute_resource_info
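The reworked get_affinity_trex consumes the per-NUMA core list in the order main thread(s), latency thread(s), then dataplane threads shifted by tg_dtc_offset (presumably so two TRex port pairs can share one NUMA node). A toy slicing sketch of that layout, not the topology-aware library call:

def trex_core_layout(cores, tg_mtc=1, tg_ltc=1, tg_dtc=4, tg_dtc_offset=0):
    """Slice a per-NUMA core list the way the new skip counts do.

    Main thread(s) first, latency thread(s) next, dataplane threads after an
    optional offset into the remaining cores.
    """
    main = cores[:tg_mtc]
    latency = cores[tg_mtc:tg_mtc + tg_ltc]
    start = tg_mtc + tg_ltc + tg_dtc_offset
    dataplane = cores[start:start + tg_dtc]
    return main, latency, dataplane

# Cores 0..9 on one NUMA node: main=[0], latency=[1], dataplane=[2, 3, 4, 5].
print(trex_core_layout(list(range(10))))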
diff --git a/resources/libraries/python/DMAUtil.py b/resources/libraries/python/DMAUtil.py
new file mode 100644
index 0000000000..f904ea4e3d
--- /dev/null
+++ b/resources/libraries/python/DMAUtil.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2024 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""DMA util library."""
+
+from re import search
+from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
+
+
+class DMAUtil:
+ """Common DMA utilities"""
+
+ @staticmethod
+ def get_dma_resource(node, dma_device):
+ """Get DMA resource from DMA device.
+
+ :param node: Topology node.
+ :param dma_device: DMA device.
+ :type node: dict
+ :type dma_device: str
+ :returns: DMA resource.
+ :rtype: dict
+ """
+
+ cmd = f"grep -H . /sys/bus/pci/devices/{dma_device}/dsa*/*"
+ _, stdout, stderr = exec_cmd(node, cmd, sudo=True)
+
+ dma_info = dict()
+ dma_info["dma_device"] = dma_device
+ dma_info["engine"] = list()
+ dma_info["wq"] = list()
+ dma_info["group"] = list()
+
+ for line in stdout.split():
+ g1 = search(r"/(dsa\d+)/(.+):(.+)", line)
+ if g1 is not None:
+ dma_info["dma_name"] = g1.group(1)
+ dma_info[f"{g1.group(2)}"] = g1.group(3)
+
+ for line in stderr.split():
+ g2 = search(r"/(dsa\d+)/((engine|group|wq)\d+\.\d+)", line)
+ if g2 is not None:
+ dev_type = g2.group(3)
+ dev = g2.group(2)
+ dma_info[dev_type].append(dev)
+
+ return dma_info
+
+ @staticmethod
+ def disable_dma_device(node, dma_name):
+ """Disable DMA device.
+
+ :param node: Topology node.
+ :param dma_name: DMA name.
+ :type node: dict
+ :type dma_name: str
+ """
+ cmd = f"cat /sys/bus/dsa/devices/{dma_name}/state"
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to get dma state.")
+ if stdout.strip() == "disabled":
+ return
+
+ cmd = f"accel-config disable-device -f {dma_name}"
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to disable DMA on DUT.")
+
+ @staticmethod
+ def enable_dma_device(node, dma_name, groups, engines, wqs, wq_size,
+ max_batch_size, max_transfer_size):
+ """Enable DMA device.
+
+ :param node: Topology node.
+ :param dma_name: DMA name.
+ :param groups: DMA groups.
+ :param engines: DMA engines.
+ :param wqs: DMA work queues.
+ :param wq_size: DMA work queue size.
+ :param max_batch_size: Wq max batch size.
+ :param max_transfer_size: Wq max transfer size.
+ :type node: dict
+ :type dma_name: str
+ :type groups: list
+ :type engines: list
+ :type wqs: list
+ :type wq_size: int
+ :type max_batch_size: int
+ :type max_transfer_size: int
+ """
+
+ # Configure Device
+ cmd = f"accel-config config-device {dma_name}"
+
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to configure DMA device on DUT.")
+
+ # Configure DMA group
+ for i, group in enumerate(groups):
+ cmd = f"accel-config config-group " \
+ f"{dma_name}/{group} --read-buffers-reserved=0"
+
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to configure DMA group on DUT.")
+
+ # Configure DMA engine
+ for i, engine in enumerate(engines):
+ cmd = f"accel-config config-engine " \
+ f"{dma_name}/{engine} --group-id={i}"
+
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to configure DMA engine on DUT.")
+
+ # Configure DMA work queue
+ for i, wq in enumerate(wqs):
+ cmd = f"accel-config config-wq {dma_name}/{wq} " \
+ f" --group-id={i%len(engines)} --type=user " \
+ f" --priority=10 --block-on-fault=1 " \
+ f" --wq-size={wq_size} --mode=dedicated " \
+ f" --name={dma_name}_{i} " \
+ f" --max-batch-size={max_batch_size} " \
+ f" --max-transfer-size={max_transfer_size} "
+
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to configure DMA work queue on DUT.")
+
+ # Enable DMA and work queues
+ cmd = f"accel-config enable-device {dma_name}"
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to enable DMA device on DUT.")
+
+ dma_wqs = [f"{dma_name}/{wq}" for wq in wqs]
+ cmd = f"accel-config enable-wq {' '.join(dma_wqs)}"
+ exec_cmd_no_error(
+ node, cmd, sudo=True,
+ message="Failed to enable DMA work queue on DUT.")
+
+ @staticmethod
+ def enable_dmas_and_wqs_on_dut(node, wq_num):
+ """Enable DMAs and work queues on DUT.
+
+ :param node: Topology node.
+ :param wq_num: Number of work queues.
+ :type node: dict
+ :type wq_num: int
+ :returns: DMA work queues enabled.
+ :rtype: list
+ """
+ if node["type"] == NodeType.DUT:
+ dma_devs = Topology.get_bus(node)
+
+ enabled_wqs = list()
+
+ for dev in dma_devs.values():
+ if "Intel-DSA" not in dev["model"]:
+ continue
+
+ dev_pci = dev["pci_address"]
+ dma_info = DMAUtil.get_dma_resource(node, dev_pci)
+
+ dma_name = dma_info["dma_name"]
+ groups = dma_info["group"]
+ engines = dma_info["engine"]
+ wqs = dma_info["wq"]
+ wq_num_per_dma = wq_num//len(dma_devs) if wq_num > 1 else 1
+ max_transfer_size = \
+ int(dma_info["max_transfer_size"])//wq_num_per_dma
+ wq_size = int(dma_info["max_work_queues_size"])//wq_num_per_dma
+ max_batch_size = int(dma_info["max_batch_size"])
+
+ DMAUtil.disable_dma_device(node, dma_name)
+
+ DMAUtil.enable_dma_device(node,
+ dma_name,
+ groups[:wq_num_per_dma],
+ engines[:wq_num_per_dma],
+ wqs[:wq_num_per_dma],
+ wq_size,
+ max_batch_size,
+ max_transfer_size
+ )
+ enabled_wqs += wqs[:wq_num_per_dma]
+
+ cmd = f"lspci -vvv -s {dev_pci}"
+ exec_cmd_no_error(
+ node, cmd, sudo=True, message="Failed")
+
+ cmd = "accel-config list"
+ exec_cmd_no_error(
+ node, cmd, sudo=True, message="Failed")
+
+ cmd = "cat /proc/cmdline"
+ exec_cmd_no_error(
+ node, cmd, sudo=True, message="Failed")
+
+ return enabled_wqs
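To make the accel-config flow in DMAUtil easier to follow, here is a sketch that only prints the command sequence enable_dma_device would issue for a hypothetical dsa0 device with a single group, engine and work queue; the device names and sizes are illustrative, not read from real sysfs entries:

def accel_config_commands(dma_name="dsa0", wq_size=128,
                          max_batch_size=1024, max_transfer_size=2097152):
    """Print the accel-config sequence for one group/engine/work queue."""
    cmds = [
        f"accel-config config-device {dma_name}",
        f"accel-config config-group {dma_name}/group0.0 --read-buffers-reserved=0",
        f"accel-config config-engine {dma_name}/engine0.0 --group-id=0",
        f"accel-config config-wq {dma_name}/wq0.0 --group-id=0 --type=user"
        f" --priority=10 --block-on-fault=1 --wq-size={wq_size}"
        f" --mode=dedicated --name={dma_name}_0"
        f" --max-batch-size={max_batch_size}"
        f" --max-transfer-size={max_transfer_size}",
        f"accel-config enable-device {dma_name}",
        f"accel-config enable-wq {dma_name}/wq0.0",
    ]
    for cmd in cmds:
        print(cmd)

accel_config_commands()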
diff --git a/resources/libraries/python/DPDK/L3fwdTest.py b/resources/libraries/python/DPDK/L3fwdTest.py
index 2ceeab2a51..178c747da5 100644
--- a/resources/libraries/python/DPDK/L3fwdTest.py
+++ b/resources/libraries/python/DPDK/L3fwdTest.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -55,12 +55,12 @@ class L3fwdTest:
"""
cpu_count_int = dp_count_int = int(phy_cores)
dp_cores = cpu_count_int+1
- for node in nodes:
- if u"DUT" in node:
- compute_resource_info = CpuUtils.get_affinity_vswitch(
- nodes, node, phy_cores, rx_queues=rx_queues,
- rxd=rxd, txd=txd
- )
+ tg_flip = topology_info[f"tg_if1_pci"] > topology_info[f"tg_if2_pci"]
+ compute_resource_info = CpuUtils.get_affinity_vswitch(
+ nodes, phy_cores, rx_queues=rx_queues, rxd=rxd, txd=txd
+ )
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
if dp_count_int > 1:
BuiltIn().set_tags('MTHREAD')
else:
@@ -69,14 +69,14 @@ class L3fwdTest:
f"{dp_count_int}T{cpu_count_int}C"
)
- cpu_dp = compute_resource_info[u"cpu_dp"]
- rxq_count_int = compute_resource_info[u"rxq_count_int"]
- if1 = topology_info[f"{node}_pf1"][0]
- if2 = topology_info[f"{node}_pf2"][0]
+ cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]
+ rxq_count_int = compute_resource_info["rxq_count_int"]
+ if1 = topology_info[f"{node_name}_pf1"][0]
+ if2 = topology_info[f"{node_name}_pf2"][0]
L3fwdTest.start_l3fwd(
- nodes, nodes[node], if1=if1, if2=if2, lcores_list=cpu_dp,
+ nodes, node, if1=if1, if2=if2, lcores_list=cpu_dp,
nb_cores=dp_count_int, queue_nums=rxq_count_int,
- jumbo_frames=jumbo_frames
+ jumbo_frames=jumbo_frames, tg_flip=tg_flip
)
for node in nodes:
if u"DUT" in node:
@@ -88,7 +88,8 @@ class L3fwdTest:
L3fwdTest.start_l3fwd(
nodes, nodes[node], if1=if1, if2=if2,
lcores_list=cpu_dp, nb_cores=dp_count_int,
- queue_nums=rxq_count_int, jumbo_frames=jumbo_frames
+ queue_nums=rxq_count_int, jumbo_frames=jumbo_frames,
+ tg_flip=tg_flip
)
else:
message = f"Failed to start l3fwd at node {node}"
@@ -97,10 +98,14 @@ class L3fwdTest:
@staticmethod
def start_l3fwd(
nodes, node, if1, if2, lcores_list, nb_cores, queue_nums,
- jumbo_frames):
+ jumbo_frames, tg_flip):
"""
Execute the l3fwd on the dut_node.
+ L3fwd uses the default IP forwarding table, but sorts ports by PCI address.
+ When that does not match the traffic profile (depends on topology),
+ the only way to fix it is to patch and recompile the l3fwd app.
+
:param nodes: All the nodes info in the topology file.
:param node: DUT node.
:param if1: The test link interface 1.
@@ -110,6 +115,7 @@ class L3fwdTest:
:param queue_nums: The queues number for the NIC
:param jumbo_frames: Indication if the jumbo frames are used (True) or
not (False).
+ :param tg_flip: Whether TG ports are reordered.
:type nodes: dict
:type node: dict
:type if1: str
@@ -118,10 +124,11 @@ class L3fwdTest:
:type nb_cores: str
:type queue_nums: str
:type jumbo_frames: bool
+ :type tg_flip: bool
"""
if node[u"type"] == NodeType.DUT:
adj_mac0, adj_mac1, if_pci0, if_pci1 = L3fwdTest.get_adj_mac(
- nodes, node, if1, if2
+ nodes, node, if1, if2, tg_flip
)
lcores = [int(item) for item in lcores_list.split(u",")]
@@ -184,18 +191,24 @@ class L3fwdTest:
exec_cmd_no_error(node, command, timeout=1800, message=message)
@staticmethod
- def get_adj_mac(nodes, node, if1, if2):
+ def get_adj_mac(nodes, node, if1, if2, tg_flip):
"""
Get adjacency MAC addresses of the DUT node.
+ Interfaces are re-ordered according to PCI address,
+ but the need to patch and recompile also depends on TG port order.
+ "tg_flip" signals whether TG ports are reordered.
+
:param nodes: All the nodes info in the topology file.
:param node: DUT node.
:param if1: The test link interface 1.
:param if2: The test link interface 2.
+ :param tg_flip: Whether TG ports are reordered.
:type nodes: dict
:type node: dict
:type if1: str
:type if2: str
+ :type tg_flip: bool
:returns: Returns MAC addresses of adjacency DUT nodes and PCI
addresses.
:rtype: str
@@ -205,9 +218,19 @@ class L3fwdTest:
if_pci0 = Topology.get_interface_pci_addr(node, if_key0)
if_pci1 = Topology.get_interface_pci_addr(node, if_key1)
+ # Flipping routes logic:
+ # If TG and DUT ports are reordered -> flip
+ # If TG reordered and DUT not reordered -> don't flip
+ # If DUT reordered and TG not reordered -> don't flip
+ # If DUT and TG not reordered -> flip
+
# Detect which is the port 0.
- if min(if_pci0, if_pci1) != if_pci0:
+ dut_flip = if_pci0 > if_pci1
+ if dut_flip:
if_key0, if_key1 = if_key1, if_key0
+ if tg_flip:
+ L3fwdTest.patch_l3fwd(node, u"patch_l3fwd_flip_routes")
+ elif not tg_flip:
L3fwdTest.patch_l3fwd(node, u"patch_l3fwd_flip_routes")
adj_node0, adj_if_key0 = Topology.get_adjacent_node_and_interface(
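The route-flipping rule documented in get_adj_mac reduces to an equality test between the two reorder flags; this condensed sketch (the library code additionally swaps interface keys and patches the l3fwd sources) spells out the truth table:

def needs_flipped_routes(tg_flip, dut_flip):
    """l3fwd routes get patched/flipped when both sides agree.

    Flip when both TG and DUT port pairs are reordered, or when neither is;
    keep the default routes when exactly one side is reordered.
    """
    return tg_flip == dut_flip

for tg in (False, True):
    for dut in (False, True):
        print(f"tg_flip={tg} dut_flip={dut} -> flip={needs_flipped_routes(tg, dut)}")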
diff --git a/resources/libraries/python/DPDK/TestpmdTest.py b/resources/libraries/python/DPDK/TestpmdTest.py
index 091110f129..3baba30715 100644
--- a/resources/libraries/python/DPDK/TestpmdTest.py
+++ b/resources/libraries/python/DPDK/TestpmdTest.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -27,7 +27,7 @@ class TestpmdTest:
"""
This class start testpmd on topology nodes and check if properly started.
"""
-
+
@staticmethod
def start_testpmd_on_all_duts(
nodes, topology_info, phy_cores, rx_queues=None, jumbo_frames=False,
@@ -60,12 +60,11 @@ class TestpmdTest:
cpu_count_int = dp_count_int = int(phy_cores)
dp_cores = cpu_count_int+1
- for node in nodes:
- if u"DUT" in node:
- compute_resource_info = CpuUtils.get_affinity_vswitch(
- nodes, node, phy_cores, rx_queues=rx_queues,
- rxd=rxd, txd=txd
- )
+ compute_resource_info = CpuUtils.get_affinity_vswitch(
+ nodes, phy_cores, rx_queues=rx_queues, rxd=rxd, txd=txd
+ )
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
if dp_count_int > 1:
BuiltIn().set_tags('MTHREAD')
else:
@@ -74,12 +73,12 @@ class TestpmdTest:
f"{dp_count_int}T{cpu_count_int}C"
)
- cpu_dp = compute_resource_info[u"cpu_dp"]
- rxq_count_int = compute_resource_info[u"rxq_count_int"]
- if1 = topology_info[f"{node}_pf1"][0]
- if2 = topology_info[f"{node}_pf2"][0]
+ cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]
+ rxq_count_int = compute_resource_info["rxq_count_int"]
+ if1 = topology_info[f"{node_name}_pf1"][0]
+ if2 = topology_info[f"{node_name}_pf2"][0]
TestpmdTest.start_testpmd(
- nodes[node], if1=if1, if2=if2, lcores_list=cpu_dp,
+ node, if1=if1, if2=if2, lcores_list=cpu_dp,
nb_cores=dp_count_int, queue_nums=rxq_count_int,
jumbo_frames=jumbo_frames, rxq_size=nic_rxq_size,
txq_size=nic_txq_size
@@ -88,6 +87,11 @@ class TestpmdTest:
if u"DUT" in node:
for i in range(3):
try:
+ nic_model = nodes[node]["interfaces"][if1]["model"]
+ if "Mellanox-CX7VEAT" in nic_model:
+ break
+ if "Mellanox-CX6DX" in nic_model:
+ break
TestpmdTest.check_testpmd(nodes[node])
break
except RuntimeError:
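
The refactor above relies on CpuUtils.get_affinity_vswitch returning a single dict covering all DUTs, with per-node keys such as "<node_name>_cpu_dp" alongside shared keys like "rxq_count_int". A minimal sketch of consuming that shape, with example values only rather than the library's actual output:

compute_resource_info = {
    "DUT1_cpu_dp": "2,3",   # example data-plane lcores for DUT1
    "DUT2_cpu_dp": "2,3",   # example data-plane lcores for DUT2
    "rxq_count_int": 1,     # shared RX queue count
}
for node_name in ("DUT1", "DUT2"):
    cpu_dp = compute_resource_info[f"{node_name}_cpu_dp"]
    rxq_count_int = compute_resource_info["rxq_count_int"]
    print(node_name, cpu_dp, rxq_count_int)
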
diff --git a/resources/libraries/python/DUTSetup.py b/resources/libraries/python/DUTSetup.py
index c7a560262c..f9758c5f9f 100644
--- a/resources/libraries/python/DUTSetup.py
+++ b/resources/libraries/python/DUTSetup.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -16,8 +16,7 @@
from time import sleep
from robot.api import logger
-from resources.libraries.python.Constants import Constants
-from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
@@ -33,11 +32,12 @@ class DUTSetup:
:type node: dict
:type service: str
"""
- command = u"cat /tmp/*supervisor*.log"\
- if DUTSetup.running_in_container(node) \
- else f"journalctl --no-pager _SYSTEMD_INVOCATION_ID=$(systemctl " \
+ if DUTSetup.running_in_container(node):
+ return
+ command = (
+ f"journalctl --no-pager _SYSTEMD_INVOCATION_ID=$(systemctl "
f"show -p InvocationID --value {service})"
-
+ )
message = f"Node {node[u'host']} failed to get logs from unit {service}"
exec_cmd_no_error(
@@ -66,9 +66,10 @@ class DUTSetup:
:type node: dict
:type service: str
"""
- command = f"supervisorctl restart {service}" \
- if DUTSetup.running_in_container(node) \
- else f"service {service} restart"
+ if DUTSetup.running_in_container(node):
+ command = f"supervisorctl restart {service}"
+ else:
+ command = f"systemctl restart {service}"
message = f"Node {node[u'host']} failed to restart service {service}"
exec_cmd_no_error(
@@ -99,10 +100,10 @@ class DUTSetup:
:type node: dict
:type service: str
"""
- # TODO: change command to start once all parent function updated.
- command = f"supervisorctl restart {service}" \
- if DUTSetup.running_in_container(node) \
- else f"service {service} restart"
+ if DUTSetup.running_in_container(node):
+ command = f"supervisorctl restart {service}"
+ else:
+ command = f"systemctl restart {service}"
message = f"Node {node[u'host']} failed to start service {service}"
exec_cmd_no_error(
@@ -135,9 +136,10 @@ class DUTSetup:
"""
DUTSetup.get_service_logs(node, service)
- command = f"supervisorctl stop {service}" \
- if DUTSetup.running_in_container(node) \
- else f"service {service} stop"
+ if DUTSetup.running_in_container(node):
+ command = f"supervisorctl stop {service}"
+ else:
+ command = f"systemctl stop {service}"
message = f"Node {node[u'host']} failed to stop service {service}"
exec_cmd_no_error(
@@ -207,42 +209,25 @@ class DUTSetup:
exec_cmd_no_error(node, cmd, message=f"{program} is not installed")
@staticmethod
- def get_pid(node, process):
+ def get_pid(node, process, retries=3):
"""Get PID of running process.
:param node: DUT node.
:param process: process name.
+ :param retries: How many times to retry on failure.
:type node: dict
:type process: str
+ :type retries: int
:returns: PID
:rtype: int
:raises RuntimeError: If it is not possible to get the PID.
"""
- ssh = SSH()
- ssh.connect(node)
-
- retval = None
- for i in range(3):
- logger.trace(f"Try {i}: Get {process} PID")
- ret_code, stdout, stderr = ssh.exec_command(f"pidof {process}")
-
- if int(ret_code):
- raise RuntimeError(
- f"Not possible to get PID of {process} process on node: "
- f"{node[u'host']}\n {stdout + stderr}"
- )
-
- pid_list = stdout.split()
- if len(pid_list) == 1:
- return [int(stdout)]
- if not pid_list:
- logger.debug(f"No {process} PID found on node {node[u'host']}")
- continue
- logger.debug(f"More than one {process} PID found " \
- f"on node {node[u'host']}")
- retval = [int(pid) for pid in pid_list]
-
- return retval
+ cmd = f"pidof {process}"
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, retries=retries,
+ message=f"No {process} PID found on node {node[u'host']}")
+ pid_list = stdout.split()
+ return [int(pid) for pid in pid_list]
@staticmethod
def get_vpp_pids(nodes):
@@ -260,81 +245,6 @@ class DUTSetup:
return pids
@staticmethod
- def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
- """Verify if Crypto QAT device virtual functions are initialized on all
- DUTs. If parameter force initialization is set to True, then try to
- initialize or remove VFs on QAT.
-
- :param node: DUT node.
- :crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
- :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
- :param force_init: If True then try to initialize to specific value.
- :type node: dict
- :type crypto_type: string
- :type numvfs: int
- :type force_init: bool
- :returns: nothing
- :raises RuntimeError: If QAT VFs are not created and force init is set
- to False.
- """
- pci_addr = Topology.get_cryptodev(node)
- sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)
-
- if sriov_numvfs != numvfs:
- if force_init:
- # QAT is not initialized and we want to initialize with numvfs
- DUTSetup.crypto_device_init(node, crypto_type, numvfs)
- else:
- raise RuntimeError(
- f"QAT device failed to create VFs on {node[u'host']}"
- )
-
- @staticmethod
- def crypto_device_init(node, crypto_type, numvfs):
- """Init Crypto QAT device virtual functions on DUT.
-
- :param node: DUT node.
- :crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
- :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
- :type node: dict
- :type crypto_type: string
- :type numvfs: int
- :returns: nothing
- :raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
- """
- if crypto_type == u"HW_DH895xcc":
- kernel_mod = u"qat_dh895xcc"
- kernel_drv = u"dh895xcc"
- elif crypto_type == u"HW_C3xxx":
- kernel_mod = u"qat_c3xxx"
- kernel_drv = u"c3xxx"
- else:
- raise RuntimeError(
- f"Unsupported crypto device type on {node[u'host']}"
- )
-
- pci_addr = Topology.get_cryptodev(node)
-
- # QAT device must be re-bound to kernel driver before initialization.
- DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)
-
- # Stop VPP to prevent deadlock.
- DUTSetup.stop_service(node, Constants.VPP_UNIT)
-
- current_driver = DUTSetup.get_pci_dev_driver(
- node, pci_addr.replace(u":", r"\:")
- )
- if current_driver is not None:
- DUTSetup.pci_driver_unbind(node, pci_addr)
-
- # Bind to kernel driver.
- DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)
-
- # Initialize QAT VFs.
- if numvfs > 0:
- DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)
-
- @staticmethod
def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
"""Get PCI address of Virtual Function.
@@ -388,19 +298,21 @@ class DUTSetup:
return sriov_numvfs
@staticmethod
- def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
+ def set_sriov_numvfs(node, pf_pci_addr, path="devices", numvfs=0):
"""Init or reset SR-IOV virtual functions by setting its number on PCI
device on DUT. Setting to zero removes all VFs.
:param node: DUT node.
:param pf_pci_addr: Physical Function PCI device address.
+ :param path: Either device or driver.
:param numvfs: Number of VFs to initialize, 0 - removes the VFs.
:type node: dict
:type pf_pci_addr: str
+ :type path: str
:type numvfs: int
:raises RuntimeError: Failed to create VFs on PCI.
"""
- cmd = f"test -f /sys/bus/pci/devices/{pf_pci_addr}/sriov_numvfs"
+ cmd = f"test -f /sys/bus/pci/{path}/{pf_pci_addr}/sriov_numvfs"
sriov_unsupported, _, _ = exec_cmd(node, cmd)
# if sriov_numvfs doesn't exist, then sriov_unsupported != 0
if int(sriov_unsupported):
@@ -416,7 +328,7 @@ class DUTSetup:
pci = pf_pci_addr.replace(u":", r"\:")
command = f"sh -c \"echo {numvfs} | " \
- f"tee /sys/bus/pci/devices/{pci}/sriov_numvfs\""
+ f"tee /sys/bus/pci/{path}/{pci}/sriov_numvfs\""
message = f"Failed to create {numvfs} VFs on {pf_pci_addr} device " \
f"on {node[u'host']}"
@@ -456,8 +368,10 @@ class DUTSetup:
:type pci_addrs: list
"""
for pci_addr in pci_addrs:
- if not driver or \
- DUTSetup.get_pci_dev_driver(node, pci_addr) != driver:
+ cur_driver = DUTSetup.get_pci_dev_driver(node, pci_addr)
+ if not cur_driver:
+ return
+ if not driver or cur_driver != driver:
DUTSetup.pci_driver_unbind(node, pci_addr)
@staticmethod
@@ -655,60 +569,6 @@ class DUTSetup:
exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
@staticmethod
- def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
- """Install VPP on all DUT nodes. Start the VPP service in case of
- systemd is not available or does not support autostart.
-
- :param nodes: Nodes in the topology.
- :param vpp_pkg_dir: Path to directory where VPP packages are stored.
- :type nodes: dict
- :type vpp_pkg_dir: str
- :raises RuntimeError: If failed to remove or install VPP.
- """
- for node in nodes.values():
- message = f"Failed to install VPP on host {node[u'host']}!"
- if node[u"type"] == NodeType.DUT:
- command = u"ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true"
- exec_cmd_no_error(node, command, sudo=True)
-
- command = u". /etc/lsb-release; echo \"${DISTRIB_ID}\""
- stdout, _ = exec_cmd_no_error(node, command)
-
- if stdout.strip() == u"Ubuntu":
- exec_cmd_no_error(
- node, u"apt-get purge -y '*vpp*' || true",
- timeout=120, sudo=True
- )
- # workaround to avoid installation of vpp-api-python
- exec_cmd_no_error(
- node, f"rm -f {vpp_pkg_dir}vpp-api-python.deb",
- timeout=120, sudo=True
- )
- exec_cmd_no_error(
- node, f"dpkg -i --force-all {vpp_pkg_dir}*.deb",
- timeout=120, sudo=True, message=message
- )
- exec_cmd_no_error(node, u"dpkg -l | grep vpp", sudo=True)
- if DUTSetup.running_in_container(node):
- DUTSetup.restart_service(node, Constants.VPP_UNIT)
- else:
- exec_cmd_no_error(
- node, u"yum -y remove '*vpp*' || true",
- timeout=120, sudo=True
- )
- # workaround to avoid installation of vpp-api-python
- exec_cmd_no_error(
- node, f"rm -f {vpp_pkg_dir}vpp-api-python.rpm",
- timeout=120, sudo=True
- )
- exec_cmd_no_error(
- node, f"rpm -ivh {vpp_pkg_dir}*.rpm",
- timeout=120, sudo=True, message=message
- )
- exec_cmd_no_error(node, u"rpm -qai '*vpp*'", sudo=True)
- DUTSetup.restart_service(node, Constants.VPP_UNIT)
-
- @staticmethod
def running_in_container(node):
"""This method tests if topology node is running inside container.
@@ -718,18 +578,15 @@ class DUTSetup:
to detect.
:rtype: bool
"""
- command = u"fgrep docker /proc/1/cgroup"
- message = u"Failed to get cgroup settings."
+ command = "cat /.dockerenv"
try:
- exec_cmd_no_error(
- node, command, timeout=30, sudo=False, message=message
- )
+ exec_cmd_no_error(node, command, timeout=30)
except RuntimeError:
return False
return True
@staticmethod
- def get_docker_mergeddir(node, uuid):
+ def get_docker_mergeddir(node, uuid=None):
"""Get Docker overlay for MergedDir diff.
:param node: DUT node.
@@ -740,8 +597,15 @@ class DUTSetup:
:rtype: str
:raises RuntimeError: If getting output failed.
"""
- command = f"docker inspect " \
+ if not uuid:
+ command = 'fgrep "hostname" /proc/self/mountinfo | cut -f 4 -d" "'
+ message = "Failed to get UUID!"
+ stdout, _ = exec_cmd_no_error(node, command, message=message)
+ uuid = stdout.split(sep="/")[-2]
+ command = (
+ f"docker inspect "
f"--format='{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}"
+ )
message = f"Failed to get directory of {uuid} on host {node[u'host']}"
stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
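
Two behavioural changes above are easy to miss: container detection now looks for /.dockerenv instead of grepping cgroups, and get_docker_mergeddir can derive the container UUID from /proc/self/mountinfo when none is supplied. A minimal sketch of that UUID extraction on a made-up mountinfo line (sample value, illustration only):

mountinfo_line = (
    "1234 567 0:50 /containers/0123abcd4567ef89/hostname"
    " /etc/hostname rw - ext4 /dev/sda1 rw"
)
# cut -f 4 -d" " selects the fourth space-separated field; the container
# UUID is the second-to-last component of that path.
source_path = mountinfo_line.split(" ")[3]
uuid = source_path.split(sep="/")[-2]
assert uuid == "0123abcd4567ef89"
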
diff --git a/resources/libraries/python/FlowUtil.py b/resources/libraries/python/FlowUtil.py
index 23293b6dc6..054356b9a2 100644
--- a/resources/libraries/python/FlowUtil.py
+++ b/resources/libraries/python/FlowUtil.py
@@ -1,4 +1,4 @@
-# copyright (c) 2022 Intel and/or its affiliates.
+# copyright (c) 2023 Intel and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -400,7 +400,7 @@ class FlowUtil:
:rtype: int
:raises ValueError: If action type is not supported.
"""
- cmd = u"flow_add"
+ cmd = u"flow_add_v2"
if action == u"redirect-to-queue":
flow_rule = {
@@ -454,7 +454,7 @@ class FlowUtil:
hw_if_index=int(sw_if_index)
)
- err_msg = u"Failed to enable flow on host {node[u'host']}"
+ err_msg = f"Failed to enable flow on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
diff --git a/resources/libraries/python/HoststackUtil.py b/resources/libraries/python/HoststackUtil.py
index 7e6ba56913..399395d41a 100644
--- a/resources/libraries/python/HoststackUtil.py
+++ b/resources/libraries/python/HoststackUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -17,9 +17,12 @@ from time import sleep
from robot.api import logger
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
-from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.model.ExportResult import (
+ export_hoststack_results
+)
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
class HoststackUtil():
"""Utilities for Host Stack tests."""
@@ -41,7 +44,7 @@ class HoststackUtil():
vpp_echo_cmd = {}
vpp_echo_cmd[u"name"] = u"vpp_echo"
vpp_echo_cmd[u"args"] = f"{vpp_echo_attributes[u'role']} " \
- f"socket-name {vpp_echo_attributes[u'vpp_api_socket']} " \
+ f"socket-name {vpp_echo_attributes[u'app_api_socket']} " \
f"{vpp_echo_attributes[u'json_output']} " \
f"uri {proto}://{addr}/{port} " \
f"nthreads {vpp_echo_attributes[u'nthreads']} " \
@@ -56,6 +59,8 @@ class HoststackUtil():
vpp_echo_cmd[u"args"] += u" rx-results-diff"
if vpp_echo_attributes[u"tx_results_diff"]:
vpp_echo_cmd[u"args"] += u" tx-results-diff"
+ if vpp_echo_attributes[u"use_app_socket_api"]:
+ vpp_echo_cmd[u"args"] += u" use-app-socket-api"
return vpp_echo_cmd
@staticmethod
@@ -84,7 +89,6 @@ class HoststackUtil():
ip_address = f" {iperf3_attributes[u'ip_address']}" if u"ip_address" \
in iperf3_attributes else u""
iperf3_cmd[u"name"] = u"iperf3"
- # TODO: Use OptionString library.
iperf3_cmd[u"args"] = f"--{iperf3_attributes[u'role']}{ip_address} " \
f"--interval 0{json_results} " \
f"--version{iperf3_attributes[u'ip_version']}"
@@ -152,15 +156,14 @@ class HoststackUtil():
raise
@staticmethod
- def get_hoststack_test_program_logs(node, program):
+ def _get_hoststack_test_program_logs(node, program_name):
"""Get HostStack test program stdout log.
:param node: DUT node.
- :param program: test program.
+ :param program_name: test program.
:type node: dict
- :type program: dict
+ :type program_name: str
"""
- program_name = program[u"name"]
cmd = f"sh -c \'cat /tmp/{program_name}_stdout.log\'"
stdout_log, _ = exec_cmd_no_error(node, cmd, sudo=True, \
message=f"Get {program_name} stdout log failed!")
@@ -168,9 +171,30 @@ class HoststackUtil():
cmd = f"sh -c \'cat /tmp/{program_name}_stderr.log\'"
stderr_log, _ = exec_cmd_no_error(node, cmd, sudo=True, \
message=f"Get {program_name} stderr log failed!")
+
return stdout_log, stderr_log
@staticmethod
+ def get_hoststack_test_program_logs(node, program):
+ """Get HostStack test program stdout log.
+
+ :param node: DUT node.
+ :param program: test program.
+ :type node: dict
+ :type program: dict
+ """
+ program_name = program[u"name"]
+ program_stdout_log, program_stderr_log = \
+ HoststackUtil._get_hoststack_test_program_logs(node,
+ program_name)
+ if len(program_stdout_log) == 0 and len(program_stderr_log) == 0:
+ logger.trace(f"Retrying {program_name} log retrieval")
+ program_stdout_log, program_stderr_log = \
+ HoststackUtil._get_hoststack_test_program_logs(node,
+ program_name)
+ return program_stdout_log, program_stderr_log
+
+ @staticmethod
def get_nginx_command(nginx_attributes, nginx_version, nginx_ins_dir):
"""Construct the NGINX command using the specified attributes.
@@ -274,22 +298,69 @@ class HoststackUtil():
exec_cmd_no_error(node, cmd, message=errmsg, sudo=True)
@staticmethod
- def hoststack_test_program_finished(node, program_pid):
+ def hoststack_test_program_finished(node, program_pid, program,
+ other_node, other_program):
"""Wait for the specified HostStack test program process to complete.
:param node: DUT node.
:param program_pid: test program pid.
+ :param program: test program
+ :param other_node: DUT node of other hoststack program
+ :param other_program: other test program
:type node: dict
:type program_pid: str
+ :type program: dict
+ :type other_node: dict
+ :type other_program: dict
:raises RuntimeError: If node subtype is not a DUT.
"""
if node[u"type"] != u"DUT":
raise RuntimeError(u"Node type is not a DUT!")
+ if other_node[u"type"] != u"DUT":
+ raise RuntimeError(u"Other node type is not a DUT!")
cmd = f"sh -c 'strace -qqe trace=none -p {program_pid}'"
- exec_cmd(node, cmd, sudo=True)
+ try:
+ exec_cmd(node, cmd, sudo=True)
+ except:
+ sleep(180)
+ if u"client" in program[u"args"]:
+ role = u"client"
+ else:
+ role = u"server"
+ program_stdout, program_stderr = \
+ HoststackUtil.get_hoststack_test_program_logs(node, program)
+ if len(program_stdout) > 0:
+ logger.debug(f"{program[u'name']} {role} stdout log:\n"
+ f"{program_stdout}")
+ else:
+ logger.debug(f"Empty {program[u'name']} {role} stdout log :(")
+ if len(program_stderr) > 0:
+ logger.debug(f"{program[u'name']} stderr log:\n"
+ f"{program_stderr}")
+ else:
+ logger.debug(f"Empty {program[u'name']} stderr log :(")
+ if u"client" in other_program[u"args"]:
+ role = u"client"
+ else:
+ role = u"server"
+ program_stdout, program_stderr = \
+ HoststackUtil.get_hoststack_test_program_logs(other_node,
+ other_program)
+ if len(program_stdout) > 0:
+ logger.debug(f"{other_program[u'name']} {role} stdout log:\n"
+ f"{program_stdout}")
+ else:
+ logger.debug(f"Empty {other_program[u'name']} "
+ f"{role} stdout log :(")
+ if len(program_stderr) > 0:
+ logger.debug(f"{other_program[u'name']} {role} stderr log:\n"
+ f"{program_stderr}")
+ else:
+ logger.debug(f"Empty {other_program[u'name']} "
+ f"{role} stderr log :(")
+ raise
# Wait a bit for stdout/stderr to be flushed to log files
- # TODO: see if sub-second sleep works e.g. sleep(0.1)
sleep(1)
@staticmethod
@@ -323,10 +394,6 @@ class HoststackUtil():
program_name = program[u"name"]
program_stdout, program_stderr = \
HoststackUtil.get_hoststack_test_program_logs(node, program)
- if len(program_stdout) == 0 and len(program_stderr) == 0:
- logger.trace(f"Retrying {program_name} log retrieval")
- program_stdout, program_stderr = \
- HoststackUtil.get_hoststack_test_program_logs(node, program)
env_vars = f"{program[u'env_vars']} " if u"env_vars" in program else u""
program_cmd = f"{env_vars}{program_name} {program[u'args']}"
@@ -346,7 +413,6 @@ class HoststackUtil():
f"bits/sec, pkt-drop-rate {nsim_attr[u'packets_per_drop']} " \
f"pkts/drop\n"
- # TODO: Incorporate show error stats into results analysis
test_results += \
f"\n{role} VPP 'show errors' on host {node[u'host']}:\n" \
f"{PapiSocketExecutor.run_cli_cmd(node, u'show error')}\n"
@@ -364,18 +430,28 @@ class HoststackUtil():
if u"JSON stats" in program_stdout and \
u'"has_failed": "0"' in program_stdout:
json_start = program_stdout.find(u"{")
- #TODO: Fix parsing once vpp_echo produces valid
- # JSON output. Truncate for now.
json_end = program_stdout.find(u',\n "closing"')
json_results = f"{program_stdout[json_start:json_end]}\n}}"
program_json = json.loads(json_results)
+ export_hoststack_results(
+ bandwidth=program_json["rx_bits_per_second"],
+ duration=float(program_json["time"])
+ )
else:
test_results += u"Invalid test data output!\n" + program_stdout
return (True, test_results)
elif program[u"name"] == u"iperf3":
test_results += program_stdout
- iperf3_json = json.loads(program_stdout)
- program_json = iperf3_json[u"intervals"][0][u"sum"]
+ program_json = json.loads(program_stdout)[u"intervals"][0][u"sum"]
+ try:
+ retransmits = program_json["retransmits"]
+ except KeyError:
+ retransmits = None
+ export_hoststack_results(
+ bandwidth=program_json["bits_per_second"],
+ duration=program_json["seconds"],
+ retransmits=retransmits
+ )
else:
test_results += u"Unknown HostStack Test Program!\n" + \
program_stdout
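
The export calls added above pull bandwidth and duration out of the parsed program output. A minimal sketch of the iperf3 branch on a stripped-down JSON sample (sample values; the export_hoststack_results call itself is omitted):

import json

sample = '{"intervals": [{"sum": {"bits_per_second": 9.5e9, "seconds": 30.0}}]}'
program_json = json.loads(sample)["intervals"][0]["sum"]
try:
    retransmits = program_json["retransmits"]
except KeyError:
    retransmits = None  # e.g. UDP runs carry no retransmit counter
print(program_json["bits_per_second"], program_json["seconds"], retransmits)
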
diff --git a/resources/libraries/python/IPTopology.py b/resources/libraries/python/IPTopology.py
new file mode 100644
index 0000000000..3b459cd156
--- /dev/null
+++ b/resources/libraries/python/IPTopology.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""IP Topology Library."""
+
+from robot.libraries.BuiltIn import BuiltIn
+
+from resources.libraries.python.IPUtil import IPUtil
+
+
+class IPTopology:
+ """IP Topology Library."""
+
+ @staticmethod
+ def initialize_ipv4_forwarding(count=1, pfs=2, route_prefix=32):
+ """
+ Custom setup of IPv4 forwarding with scalability of IP routes on all
+ DUT nodes in 2-node / 3-node circular topology.
+
+ :param count: Number of routes to configure.
+ :param pfs: Number of physical interfaces to configure.
+ :param route_prefix: Route prefix to configure.
+ :type count: int
+ :type pfs: int
+ :type route_prefix: int
+ """
+ topology = BuiltIn().get_variable_value("&{topology_info}")
+ dut = topology["duts"][-1]
+ ifl = BuiltIn().get_variable_value("${int}")
+
+ for l, i in zip(range(pfs // 2), range(1, pfs, 2)):
+ dut1_int1 = BuiltIn().get_variable_value(f"${{DUT1_{ifl}{i}}}[0]")
+ dut1_int2 = BuiltIn().get_variable_value(f"${{DUT1_{ifl}{i+1}}}[0]")
+ dut_int1 = BuiltIn().get_variable_value(f"${{{dut}_{ifl}{i}}}[0]")
+ dut_int2 = BuiltIn().get_variable_value(f"${{{dut}_{ifl}{i+1}}}[0]")
+
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT1"], dut1_int1, f"1.{l}.1.1",
+ topology[f"TG_pf{i}_mac"][0]
+ )
+ if dut == "DUT2":
+ dut_mac1 = BuiltIn().get_variable_value(
+ f"${{{dut}_{ifl}{i}_mac}}[0]"
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT1"], dut1_int2, f"3.{l}.3.2", dut_mac1
+ )
+ dut_mac2 = BuiltIn().get_variable_value(
+ f"${{DUT1_{ifl}{i+1}_mac}}[0]"
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT2"], dut_int1, f"3.{l}.3.1", dut_mac2
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology[dut], dut_int2, f"2.{l}.2.1",
+ topology[f"TG_pf{i+1}_mac"][0]
+ )
+
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT1"], dut1_int1, f"1.{l}.1.2", 30
+ )
+ if dut == "DUT2":
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT1"], dut1_int2, f"3.{l}.3.1", 30
+ )
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT2"], dut_int1, f"3.{l}.3.2", 30
+ )
+ IPUtil.vpp_interface_set_ip_address(
+ topology[dut], dut_int2, f"2.{l}.2.2", 30
+ )
+
+ IPUtil.vpp_route_add(
+ topology["DUT1"], f"{i}0.0.0.0", route_prefix,
+ gateway=f"1.{l}.1.1", interface=dut1_int1, count=count
+ )
+ if dut == "DUT2":
+ IPUtil.vpp_route_add(
+ topology["DUT1"], f"{i+1}0.0.0.0", route_prefix,
+ gateway=f"3.{l}.3.2", interface=dut1_int2, count=count
+ )
+ IPUtil.vpp_route_add(
+ topology["DUT2"], f"{i}0.0.0.0", route_prefix,
+ gateway=f"3.{l}.3.1", interface=dut_int1, count=count
+ )
+ IPUtil.vpp_route_add(
+ topology[dut], f"{i+1}0.0.0.0", route_prefix,
+ gateway=f"2.{l}.2.1", interface=dut_int2, count=count
+ )
+
+
+ @staticmethod
+ def initialize_ipv6_forwarding(count=1, pfs=2, route_prefix=128):
+ """
+ Custom setup of IPv6 forwarding with scalability of IP routes on all
+ DUT nodes in 2-node / 3-node circular topology.
+
+ :param count: Number of routes to configure.
+ :param pfs: Number of physical interfaces to configure.
+ :param route_prefix: Route prefix to configure.
+ :type count: int
+ :type pfs: int
+ :type route_prefix: int
+ """
+ topology = BuiltIn().get_variable_value("&{topology_info}")
+ dut = topology["duts"][-1]
+ ifl = BuiltIn().get_variable_value("${int}")
+
+ for l, i in zip(range(pfs // 2), range(1, pfs, 2)):
+ dut1_int1 = BuiltIn().get_variable_value(f"${{DUT1_{ifl}{i}}}[0]")
+ dut1_int2 = BuiltIn().get_variable_value(f"${{DUT1_{ifl}{i+1}}}[0]")
+ dut_int1 = BuiltIn().get_variable_value(f"${{{dut}_{ifl}{i}}}[0]")
+ dut_int2 = BuiltIn().get_variable_value(f"${{{dut}_{ifl}{i+1}}}[0]")
+
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT1"], dut1_int1, f"2001:{l}::1",
+ topology[f"TG_pf{i}_mac"][0]
+ )
+ if dut == "DUT2":
+ dut_mac1 = BuiltIn().get_variable_value(
+ f"${{{dut}_{ifl}{i}_mac}}[0]"
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT1"], dut1_int2, f"2003:{l}::2", dut_mac1
+ )
+ dut_mac2 = BuiltIn().get_variable_value(
+ f"${{DUT1_{ifl}{i+1}_mac}}[0]"
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology["DUT2"], dut_int1, f"2003:{l}::1", dut_mac2
+ )
+ IPUtil.vpp_add_ip_neighbor(
+ topology[dut], dut_int2, f"2002:{l}::1",
+ topology[f"TG_pf{i+1}_mac"][0]
+ )
+
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT1"], dut1_int1, f"2001:{l}::2", 64
+ )
+ if dut == "DUT2":
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT1"], dut1_int2, f"2003:{l}::1", 64
+ )
+ IPUtil.vpp_interface_set_ip_address(
+ topology["DUT2"], dut_int1, f"2003:{l}::2", 64
+ )
+ IPUtil.vpp_interface_set_ip_address(
+ topology[dut], dut_int2, f"2002:{l}::2", 64
+ )
+
+ IPUtil.vpp_route_add(
+ topology["DUT1"], f"2{i}00::0", route_prefix,
+ gateway=f"2001:{l}::1", interface=dut1_int1, count=count
+ )
+ if dut == "DUT2":
+ IPUtil.vpp_route_add(
+ topology["DUT1"], f"2{i+1}00::0", route_prefix,
+ gateway=f"2003:{l}::2", interface=dut1_int2, count=count
+ )
+ IPUtil.vpp_route_add(
+ topology["DUT2"], f"2{i}00::0", route_prefix,
+ gateway=f"2003:{l}::1", interface=dut_int1, count=count
+ )
+ IPUtil.vpp_route_add(
+ topology[dut], f"2{i+1}00::0", route_prefix,
+ gateway=f"2002:{l}::1", interface=dut_int2, count=count
+ )
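
The new library assigns /30 link networks per interface pair: 1.l.1.x on the TG-facing side of DUT1, 3.l.3.x on the DUT1-DUT2 link (3-node topologies), and 2.l.2.x on the TG-facing side of the last DUT, with matching route prefixes per pair. A minimal sketch of the IPv4 numbering produced by the loop above for pfs=4, illustration only:

pfs = 4
for l, i in zip(range(pfs // 2), range(1, pfs, 2)):
    print(
        f"pair {l}: TG-DUT1 link 1.{l}.1.0/30,"
        f" DUT1-DUT2 link 3.{l}.3.0/30,"
        f" lastDUT-TG link 2.{l}.2.0/30,"
        f" route prefixes {i}0.0.0.0 and {i+1}0.0.0.0"
    )
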
diff --git a/resources/libraries/python/IPUtil.py b/resources/libraries/python/IPUtil.py
index 4a5a413fc8..933fa34211 100644
--- a/resources/libraries/python/IPUtil.py
+++ b/resources/libraries/python/IPUtil.py
@@ -1,5 +1,5 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Copyright (c) 2021 PANTHEON.tech s.r.o.
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2023 PANTHEON.tech s.r.o.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -15,7 +15,6 @@
"""Common IP utilities library."""
import re
-import os
from enum import IntEnum
@@ -28,7 +27,6 @@ from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import exec_cmd_no_error, exec_cmd
from resources.libraries.python.topology import Topology
-from resources.libraries.python.VatExecutor import VatExecutor
from resources.libraries.python.Namespaces import Namespaces
@@ -738,52 +736,6 @@ class IPUtil:
"""
count = kwargs.get(u"count", 1)
- if count > 100:
- if not kwargs.get(u"multipath", True):
- raise RuntimeError(u"VAT exec supports only multipath behavior")
- gateway = kwargs.get(u"gateway", u"")
- interface = kwargs.get(u"interface", u"")
- local = kwargs.get(u"local", u"")
- if interface:
- interface = InterfaceUtil.vpp_get_interface_name(
- node, InterfaceUtil.get_interface_index(
- node, interface
- )
- )
- vrf = kwargs.get(u"vrf", None)
- trailers = list()
- if vrf:
- trailers.append(f"table {vrf}")
- if gateway:
- trailers.append(f"via {gateway}")
- if interface:
- trailers.append(interface)
- elif interface:
- trailers.append(f"via {interface}")
- if local:
- if gateway or interface:
- raise RuntimeError(u"Unsupported combination with local.")
- trailers.append(u"local")
- trailer = u" ".join(trailers)
- command_parts = [u"exec ip route add", u"network goes here"]
- if trailer:
- command_parts.append(trailer)
- netiter = NetworkIncrement(
- ip_network(f"{network}/{prefix_len}", strict=strict),
- format=u"slash"
- )
- tmp_filename = u"/tmp/routes.config"
- with open(tmp_filename, u"w") as tmp_file:
- for _ in range(count):
- command_parts[1] = netiter.inc_fmt()
- print(u" ".join(command_parts), file=tmp_file)
- VatExecutor().execute_script(
- tmp_filename, node, timeout=1800, json_out=False,
- copy_on_execute=True, history=False
- )
- os.remove(tmp_filename)
- return
-
cmd = u"ip_route_add_del"
args = dict(
is_add=True,
@@ -796,7 +748,7 @@ class IPUtil:
ip_network(f"{network}/{prefix_len}", strict=strict),
format=u"addr"
)
- with PapiSocketExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(count):
args[u"route"] = IPUtil.compose_vpp_route_structure(
node, netiter.inc_fmt(), prefix_len, **kwargs
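
With the VAT script fallback removed, large route counts now go through the same PAPI command, opened in asynchronous mode so requests are queued and replies are collected in bulk at the end. A minimal sketch of that batching pattern using a stand-in executor (the real one is PapiSocketExecutor(node, is_async=True), as used above; the class below is illustrative only):

class _FakeAsyncExecutor:
    """Stand-in for an asynchronous PAPI executor; illustration only."""
    def __init__(self):
        self.queued = []
    def add(self, cmd, **kwargs):
        self.queued.append((cmd, kwargs))   # requests are only queued here
        return self
    def get_replies(self, err_msg):
        # All queued requests are sent and their replies drained in bulk.
        return [f"reply to {cmd}" for cmd, _ in self.queued]

papi_exec = _FakeAsyncExecutor()
for i in range(3):
    papi_exec.add("ip_route_add_del", is_add=True, route=f"route {i}")
replies = papi_exec.get_replies("Failed to add routes")
assert len(replies) == 3
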
diff --git a/resources/libraries/python/IPsecUtil.py b/resources/libraries/python/IPsecUtil.py
index 6ed2db1eae..19995e547d 100644
--- a/resources/libraries/python/IPsecUtil.py
+++ b/resources/libraries/python/IPsecUtil.py
@@ -1,5 +1,5 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
-# Copyright (c) 2022 PANTHEON.tech s.r.o.
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Copyright (c) 2024 PANTHEON.tech s.r.o.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -14,33 +14,40 @@
"""IPsec utilities library."""
-import os
-
from enum import Enum, IntEnum
-from io import open
-from ipaddress import ip_network, ip_address
+from io import open, TextIOWrapper
+from ipaddress import ip_network, ip_address, IPv4Address, IPv6Address
from random import choice
from string import ascii_letters
+from typing import Iterable, List, Optional, Sequence, Tuple, Union
+
+from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.IncrementUtil import ObjIncrement
-from resources.libraries.python.InterfaceUtil import InterfaceUtil, \
- InterfaceStatusFlags
+from resources.libraries.python.InterfaceUtil import (
+ InterfaceUtil,
+ InterfaceStatusFlags,
+)
from resources.libraries.python.IPAddress import IPAddress
-from resources.libraries.python.IPUtil import IPUtil, IpDscp, \
- MPLS_LABEL_INVALID, NetworkIncrement
+from resources.libraries.python.IPUtil import (
+ IPUtil,
+ IpDscp,
+ MPLS_LABEL_INVALID,
+ NetworkIncrement,
+)
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import scp_node
from resources.libraries.python.topology import Topology, NodeType
-from resources.libraries.python.VatExecutor import VatExecutor
from resources.libraries.python.VPPUtil import VPPUtil
from resources.libraries.python.FlowUtil import FlowUtil
-IPSEC_UDP_PORT_NONE = 0xffff
+IPSEC_UDP_PORT_DEFAULT = 4500
+IPSEC_REPLAY_WINDOW_DEFAULT = 64
-def gen_key(length):
+def gen_key(length: int) -> bytes:
"""Generate random string as a key.
:param length: Length of generated payload.
@@ -48,36 +55,40 @@ def gen_key(length):
:returns: The generated payload.
:rtype: bytes
"""
- return u"".join(
- choice(ascii_letters) for _ in range(length)
- ).encode(encoding=u"utf-8")
+ return "".join(choice(ascii_letters) for _ in range(length)).encode(
+ encoding="utf-8"
+ )
class PolicyAction(Enum):
"""Policy actions."""
- BYPASS = (u"bypass", 0)
- DISCARD = (u"discard", 1)
- PROTECT = (u"protect", 3)
- def __init__(self, policy_name, policy_int_repr):
+ BYPASS = ("bypass", 0)
+ DISCARD = ("discard", 1)
+ PROTECT = ("protect", 3)
+
+ def __init__(self, policy_name: str, policy_int_repr: int):
self.policy_name = policy_name
self.policy_int_repr = policy_int_repr
- def __str__(self):
+ def __str__(self) -> str:
return self.policy_name
- def __int__(self):
+ def __int__(self) -> int:
return self.policy_int_repr
class CryptoAlg(Enum):
"""Encryption algorithms."""
- AES_CBC_128 = (u"aes-cbc-128", 1, u"AES-CBC", 16)
- AES_CBC_256 = (u"aes-cbc-256", 3, u"AES-CBC", 32)
- AES_GCM_128 = (u"aes-gcm-128", 7, u"AES-GCM", 16)
- AES_GCM_256 = (u"aes-gcm-256", 9, u"AES-GCM", 32)
- def __init__(self, alg_name, alg_int_repr, scapy_name, key_len):
+ AES_CBC_128 = ("aes-cbc-128", 1, "AES-CBC", 16)
+ AES_CBC_256 = ("aes-cbc-256", 3, "AES-CBC", 32)
+ AES_GCM_128 = ("aes-gcm-128", 7, "AES-GCM", 16)
+ AES_GCM_256 = ("aes-gcm-256", 9, "AES-GCM", 32)
+
+ def __init__(
+ self, alg_name: str, alg_int_repr: int, scapy_name: str, key_len: int
+ ):
self.alg_name = alg_name
self.alg_int_repr = alg_int_repr
self.scapy_name = scapy_name
@@ -86,10 +97,13 @@ class CryptoAlg(Enum):
class IntegAlg(Enum):
"""Integrity algorithm."""
- SHA_256_128 = (u"sha-256-128", 4, u"SHA2-256-128", 32)
- SHA_512_256 = (u"sha-512-256", 6, u"SHA2-512-256", 64)
- def __init__(self, alg_name, alg_int_repr, scapy_name, key_len):
+ SHA_256_128 = ("sha-256-128", 4, "SHA2-256-128", 32)
+ SHA_512_256 = ("sha-512-256", 6, "SHA2-512-256", 64)
+
+ def __init__(
+ self, alg_name: str, alg_int_repr: int, scapy_name: str, key_len: int
+ ):
self.alg_name = alg_name
self.alg_int_repr = alg_int_repr
self.scapy_name = scapy_name
@@ -98,12 +112,14 @@ class IntegAlg(Enum):
class IPsecProto(IntEnum):
"""IPsec protocol."""
+
IPSEC_API_PROTO_ESP = 50
IPSEC_API_PROTO_AH = 51
class IPsecSadFlags(IntEnum):
"""IPsec Security Association Database flags."""
+
IPSEC_API_SAD_FLAG_NONE = 0
# Enable extended sequence numbers
IPSEC_API_SAD_FLAG_USE_ESN = 0x01
@@ -122,6 +138,7 @@ class IPsecSadFlags(IntEnum):
class TunnelEncpaDecapFlags(IntEnum):
"""Flags controlling tunnel behaviour."""
+
TUNNEL_API_ENCAP_DECAP_FLAG_NONE = 0
# at encap, copy the DF bit of the payload into the tunnel header
TUNNEL_API_ENCAP_DECAP_FLAG_ENCAP_COPY_DF = 1
@@ -137,6 +154,7 @@ class TunnelEncpaDecapFlags(IntEnum):
class TunnelMode(IntEnum):
"""Tunnel modes."""
+
# point-to-point
TUNNEL_API_MODE_P2P = 0
# multi-point
@@ -147,7 +165,7 @@ class IPsecUtil:
"""IPsec utilities."""
@staticmethod
- def policy_action_bypass():
+ def policy_action_bypass() -> PolicyAction:
"""Return policy action bypass.
:returns: PolicyAction enum BYPASS object.
@@ -156,7 +174,7 @@ class IPsecUtil:
return PolicyAction.BYPASS
@staticmethod
- def policy_action_discard():
+ def policy_action_discard() -> PolicyAction:
"""Return policy action discard.
:returns: PolicyAction enum DISCARD object.
@@ -165,7 +183,7 @@ class IPsecUtil:
return PolicyAction.DISCARD
@staticmethod
- def policy_action_protect():
+ def policy_action_protect() -> PolicyAction:
"""Return policy action protect.
:returns: PolicyAction enum PROTECT object.
@@ -174,7 +192,7 @@ class IPsecUtil:
return PolicyAction.PROTECT
@staticmethod
- def crypto_alg_aes_cbc_128():
+ def crypto_alg_aes_cbc_128() -> CryptoAlg:
"""Return encryption algorithm aes-cbc-128.
:returns: CryptoAlg enum AES_CBC_128 object.
@@ -183,7 +201,7 @@ class IPsecUtil:
return CryptoAlg.AES_CBC_128
@staticmethod
- def crypto_alg_aes_cbc_256():
+ def crypto_alg_aes_cbc_256() -> CryptoAlg:
"""Return encryption algorithm aes-cbc-256.
:returns: CryptoAlg enum AES_CBC_256 object.
@@ -192,7 +210,7 @@ class IPsecUtil:
return CryptoAlg.AES_CBC_256
@staticmethod
- def crypto_alg_aes_gcm_128():
+ def crypto_alg_aes_gcm_128() -> CryptoAlg:
"""Return encryption algorithm aes-gcm-128.
:returns: CryptoAlg enum AES_GCM_128 object.
@@ -201,7 +219,7 @@ class IPsecUtil:
return CryptoAlg.AES_GCM_128
@staticmethod
- def crypto_alg_aes_gcm_256():
+ def crypto_alg_aes_gcm_256() -> CryptoAlg:
"""Return encryption algorithm aes-gcm-256.
:returns: CryptoAlg enum AES_GCM_256 object.
@@ -210,7 +228,7 @@ class IPsecUtil:
return CryptoAlg.AES_GCM_256
@staticmethod
- def get_crypto_alg_key_len(crypto_alg):
+ def get_crypto_alg_key_len(crypto_alg: CryptoAlg) -> int:
"""Return encryption algorithm key length.
:param crypto_alg: Encryption algorithm.
@@ -221,7 +239,7 @@ class IPsecUtil:
return crypto_alg.key_len
@staticmethod
- def get_crypto_alg_scapy_name(crypto_alg):
+ def get_crypto_alg_scapy_name(crypto_alg: CryptoAlg) -> str:
"""Return encryption algorithm scapy name.
:param crypto_alg: Encryption algorithm.
@@ -232,7 +250,7 @@ class IPsecUtil:
return crypto_alg.scapy_name
@staticmethod
- def integ_alg_sha_256_128():
+ def integ_alg_sha_256_128() -> IntegAlg:
"""Return integrity algorithm SHA-256-128.
:returns: IntegAlg enum SHA_256_128 object.
@@ -241,7 +259,7 @@ class IPsecUtil:
return IntegAlg.SHA_256_128
@staticmethod
- def integ_alg_sha_512_256():
+ def integ_alg_sha_512_256() -> IntegAlg:
"""Return integrity algorithm SHA-512-256.
:returns: IntegAlg enum SHA_512_256 object.
@@ -250,7 +268,7 @@ class IPsecUtil:
return IntegAlg.SHA_512_256
@staticmethod
- def get_integ_alg_key_len(integ_alg):
+ def get_integ_alg_key_len(integ_alg: Optional[IntegAlg]) -> int:
"""Return integrity algorithm key length.
None argument is accepted, returning zero.
@@ -263,7 +281,7 @@ class IPsecUtil:
return 0 if integ_alg is None else integ_alg.key_len
@staticmethod
- def get_integ_alg_scapy_name(integ_alg):
+ def get_integ_alg_scapy_name(integ_alg: Optional[IntegAlg]) -> str:
"""Return integrity algorithm scapy name.
:param integ_alg: Integrity algorithm.
@@ -274,7 +292,7 @@ class IPsecUtil:
return integ_alg.scapy_name
@staticmethod
- def ipsec_proto_esp():
+ def ipsec_proto_esp() -> int:
"""Return IPSec protocol ESP.
:returns: IPsecProto enum ESP object.
@@ -283,7 +301,7 @@ class IPsecUtil:
return int(IPsecProto.IPSEC_API_PROTO_ESP)
@staticmethod
- def ipsec_proto_ah():
+ def ipsec_proto_ah() -> int:
"""Return IPSec protocol AH.
:returns: IPsecProto enum AH object.
@@ -292,7 +310,9 @@ class IPsecUtil:
return int(IPsecProto.IPSEC_API_PROTO_AH)
@staticmethod
- def vpp_ipsec_select_backend(node, protocol, index=1):
+ def vpp_ipsec_select_backend(
+ node: dict, protocol: int, index: int = 1
+ ) -> None:
"""Select IPsec backend.
:param node: VPP node to select IPsec backend on.
@@ -304,19 +324,18 @@ class IPsecUtil:
:raises RuntimeError: If failed to select IPsec backend or if no API
reply received.
"""
- cmd = u"ipsec_select_backend"
- err_msg = f"Failed to select IPsec backend on host {node[u'host']}"
- args = dict(
- protocol=protocol,
- index=index
- )
+ cmd = "ipsec_select_backend"
+ err_msg = f"Failed to select IPsec backend on host {node['host']}"
+ args = dict(protocol=protocol, index=index)
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def vpp_ipsec_set_async_mode(node, async_enable=1):
+ def vpp_ipsec_set_async_mode(node: dict, async_enable: int = 1) -> None:
"""Set IPsec async mode on|off.
+ Unconditionally attempt to switch crypto dispatch into polling mode.
+
:param node: VPP node to set IPsec async mode.
:param async_enable: Async mode on or off.
:type node: dict
@@ -324,17 +343,26 @@ class IPsecUtil:
:raises RuntimeError: If failed to set IPsec async mode or if no API
reply received.
"""
- cmd = u"ipsec_set_async_mode"
- err_msg = f"Failed to set IPsec async mode on host {node[u'host']}"
- args = dict(
- async_enable=async_enable
- )
with PapiSocketExecutor(node) as papi_exec:
+ cmd = "ipsec_set_async_mode"
+ err_msg = f"Failed to set IPsec async mode on host {node['host']}"
+ args = dict(async_enable=async_enable)
papi_exec.add(cmd, **args).get_reply(err_msg)
+ cmd = "crypto_set_async_dispatch_v2"
+ err_msg = "Failed to set dispatch mode."
+ args = dict(mode=0, adaptive=False)
+ try:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+ except (AttributeError, RuntimeError):
+ # Expected when VPP build does not have the _v2 yet
+ # (after and before the first CRC check).
+ # TODO: Fail here when testing of pre-23.10 builds is over.
+ pass
@staticmethod
def vpp_ipsec_crypto_sw_scheduler_set_worker(
- node, workers, crypto_enable=False):
+ node: dict, workers: Iterable[int], crypto_enable: bool = False
+ ) -> None:
"""Enable or disable crypto on specific vpp worker threads.
:param node: VPP node to enable or disable crypto for worker threads.
@@ -347,39 +375,40 @@ class IPsecUtil:
thread or if no API reply received.
"""
for worker in workers:
- cmd = u"crypto_sw_scheduler_set_worker"
- err_msg = f"Failed to disable/enable crypto for worker thread " \
- f"on host {node[u'host']}"
- args = dict(
- worker_index=worker - 1,
- crypto_enable=crypto_enable
+ cmd = "crypto_sw_scheduler_set_worker"
+ err_msg = (
+ "Failed to disable/enable crypto for worker thread"
+ f" on host {node['host']}"
)
+ args = dict(worker_index=worker - 1, crypto_enable=crypto_enable)
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vpp_ipsec_crypto_sw_scheduler_set_worker_on_all_duts(
- nodes, workers, crypto_enable=False):
+ nodes: dict, crypto_enable: bool = False
+ ) -> None:
"""Enable or disable crypto on specific vpp worker threads.
:param nodes: VPP nodes to enable or disable crypto for worker threads.
- :param workers: List of VPP thread numbers.
:param crypto_enable: Disable or enable crypto work.
:type nodes: dict
- :type workers: Iterable[int]
:type crypto_enable: bool
:raises RuntimeError: If failed to enable or disable crypto for worker
thread or if no API reply received.
"""
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
thread_data = VPPUtil.vpp_show_threads(node)
worker_cnt = len(thread_data) - 1
if not worker_cnt:
- return None
+ return
worker_ids = list()
+ workers = BuiltIn().get_variable_value(
+ f"${{{node_name}_cpu_dp}}"
+ )
for item in thread_data:
- if str(item.cpu_id) in workers.split(u","):
+ if str(item.cpu_id) in workers.split(","):
worker_ids.append(item.id)
IPsecUtil.vpp_ipsec_crypto_sw_scheduler_set_worker(
@@ -388,8 +417,16 @@ class IPsecUtil:
@staticmethod
def vpp_ipsec_add_sad_entry(
- node, sad_id, spi, crypto_alg, crypto_key, integ_alg=None,
- integ_key=u"", tunnel_src=None, tunnel_dst=None):
+ node: dict,
+ sad_id: int,
+ spi: int,
+ crypto_alg: CryptoAlg,
+ crypto_key: str,
+ integ_alg: Optional[IntegAlg] = None,
+ integ_key: str = "",
+ tunnel_src: Optional[str] = None,
+ tunnel_dst: Optional[str] = None,
+ ) -> None:
"""Create Security Association Database entry on the VPP node.
:param node: VPP node to add SAD entry on.
@@ -410,21 +447,15 @@ class IPsecUtil:
:type crypto_key: str
:type integ_alg: Optional[IntegAlg]
:type integ_key: str
- :type tunnel_src: str
- :type tunnel_dst: str
+ :type tunnel_src: Optional[str]
+ :type tunnel_dst: Optional[str]
"""
if isinstance(crypto_key, str):
- crypto_key = crypto_key.encode(encoding=u"utf-8")
+ crypto_key = crypto_key.encode(encoding="utf-8")
if isinstance(integ_key, str):
- integ_key = integ_key.encode(encoding=u"utf-8")
- ckey = dict(
- length=len(crypto_key),
- data=crypto_key
- )
- ikey = dict(
- length=len(integ_key),
- data=integ_key if integ_key else 0
- )
+ integ_key = integ_key.encode(encoding="utf-8")
+ ckey = dict(length=len(crypto_key), data=crypto_key)
+ ikey = dict(length=len(integ_key), data=integ_key if integ_key else 0)
flags = int(IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE)
if tunnel_src and tunnel_dst:
@@ -432,15 +463,18 @@ class IPsecUtil:
src_addr = ip_address(tunnel_src)
dst_addr = ip_address(tunnel_dst)
if src_addr.version == 6:
- flags = \
- flags | int(IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6)
+ flags = flags | int(
+ IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6
+ )
else:
- src_addr = u""
- dst_addr = u""
+ src_addr = ""
+ dst_addr = ""
- cmd = u"ipsec_sad_entry_add_del_v3"
- err_msg = f"Failed to add Security Association Database entry " \
- f"on host {node[u'host']}"
+ cmd = "ipsec_sad_entry_add_v2"
+ err_msg = (
+ "Failed to add Security Association Database entry"
+ f" on host {node['host']}"
+ )
sad_entry = dict(
sad_id=int(sad_id),
spi=int(spi),
@@ -459,21 +493,28 @@ class IPsecUtil:
dscp=int(IpDscp.IP_API_DSCP_CS0),
),
protocol=int(IPsecProto.IPSEC_API_PROTO_ESP),
- udp_src_port=4500, # default value in api
- udp_dst_port=4500 # default value in api
- )
- args = dict(
- is_add=True,
- entry=sad_entry
+ udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+ udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+ anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
)
+ args = dict(entry=sad_entry)
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vpp_ipsec_add_sad_entries(
- node, n_entries, sad_id, spi, crypto_alg, crypto_key,
- integ_alg=None, integ_key=u"", tunnel_src=None,tunnel_dst=None,
- tunnel_addr_incr=True):
+ node: dict,
+ n_entries: int,
+ sad_id: int,
+ spi: int,
+ crypto_alg: CryptoAlg,
+ crypto_key: str,
+ integ_alg: Optional[IntegAlg] = None,
+ integ_key: str = "",
+ tunnel_src: Optional[str] = None,
+ tunnel_dst: Optional[str] = None,
+ tunnel_addr_incr: bool = True,
+ ) -> None:
"""Create multiple Security Association Database entries on VPP node.
:param node: VPP node to add SAD entry on.
@@ -500,59 +541,30 @@ class IPsecUtil:
:type crypto_key: str
:type integ_alg: Optional[IntegAlg]
:type integ_key: str
- :type tunnel_src: str
- :type tunnel_dst: str
+ :type tunnel_src: Optional[str]
+ :type tunnel_dst: Optional[str]
:type tunnel_addr_incr: bool
"""
if isinstance(crypto_key, str):
- crypto_key = crypto_key.encode(encoding=u"utf-8")
+ crypto_key = crypto_key.encode(encoding="utf-8")
if isinstance(integ_key, str):
- integ_key = integ_key.encode(encoding=u"utf-8")
+ integ_key = integ_key.encode(encoding="utf-8")
if tunnel_src and tunnel_dst:
src_addr = ip_address(tunnel_src)
dst_addr = ip_address(tunnel_dst)
else:
- src_addr = u""
- dst_addr = u""
+ src_addr = ""
+ dst_addr = ""
if tunnel_addr_incr:
- addr_incr = 1 << (128 - 96) if src_addr.version == 6 \
- else 1 << (32 - 24)
+ addr_incr = (
+ 1 << (128 - 96) if src_addr.version == 6 else 1 << (32 - 24)
+ )
else:
addr_incr = 0
- if int(n_entries) > 10:
- tmp_filename = f"/tmp/ipsec_sad_{sad_id}_add_del_entry.script"
-
- with open(tmp_filename, 'w') as tmp_file:
- for i in range(n_entries):
- integ = f"integ-alg {integ_alg.alg_name} " \
- f"integ-key {integ_key.hex()}" \
- if integ_alg else u""
- tunnel = f"tunnel src {src_addr + i * addr_incr} " \
- f"tunnel dst {dst_addr + i * addr_incr}" \
- if tunnel_src and tunnel_dst else u""
- conf = f"exec ipsec sa add {sad_id + i} esp spi {spi + i} "\
- f"crypto-alg {crypto_alg.alg_name} " \
- f"crypto-key {crypto_key.hex()} " \
- f"{integ} {tunnel}\n"
- tmp_file.write(conf)
- vat = VatExecutor()
- vat.execute_script(
- tmp_filename, node, timeout=300, json_out=False,
- copy_on_execute=True
- )
- os.remove(tmp_filename)
- return
-
- ckey = dict(
- length=len(crypto_key),
- data=crypto_key
- )
- ikey = dict(
- length=len(integ_key),
- data=integ_key if integ_key else 0
- )
+ ckey = dict(length=len(crypto_key), data=crypto_key)
+ ikey = dict(length=len(integ_key), data=integ_key if integ_key else 0)
flags = int(IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE)
if tunnel_src and tunnel_dst:
@@ -562,9 +574,11 @@ class IPsecUtil:
IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6
)
- cmd = u"ipsec_sad_entry_add_del_v3"
- err_msg = f"Failed to add Security Association Database entry " \
- f"on host {node[u'host']}"
+ cmd = "ipsec_sad_entry_add_v2"
+ err_msg = (
+ "Failed to add Security Association Database entry"
+ f" on host {node['host']}"
+ )
sad_entry = dict(
sad_id=int(sad_id),
@@ -584,24 +598,24 @@ class IPsecUtil:
dscp=int(IpDscp.IP_API_DSCP_CS0),
),
protocol=int(IPsecProto.IPSEC_API_PROTO_ESP),
- udp_src_port=4500, # default value in api
- udp_dst_port=4500 # default value in api
- )
- args = dict(
- is_add=True,
- entry=sad_entry
+ udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+ udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+ anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
)
- with PapiSocketExecutor(node) as papi_exec:
+ args = dict(entry=sad_entry)
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(n_entries):
- args[u"entry"][u"sad_id"] = int(sad_id) + i
- args[u"entry"][u"spi"] = int(spi) + i
- args[u"entry"][u"tunnel"][u"src"] = (
+ args["entry"]["sad_id"] = int(sad_id) + i
+ args["entry"]["spi"] = int(spi) + i
+ args["entry"]["tunnel"]["src"] = (
str(src_addr + i * addr_incr)
- if tunnel_src and tunnel_dst else src_addr
+ if tunnel_src and tunnel_dst
+ else src_addr
)
- args[u"entry"][u"tunnel"][u"dst"] = (
+ args["entry"]["tunnel"]["dst"] = (
str(dst_addr + i * addr_incr)
- if tunnel_src and tunnel_dst else dst_addr
+ if tunnel_src and tunnel_dst
+ else dst_addr
)
history = bool(not 1 < i < n_entries - 2)
papi_exec.add(cmd, history=history, **args)
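
The history flag above is a log-readability trick: when adding many nearly identical SA entries, only the first two and the last two PAPI calls are recorded in command history. A minimal check of which iterations keep history, assuming n_entries = 6:

n_entries = 6
kept = [i for i in range(n_entries) if not 1 < i < n_entries - 2]
assert kept == [0, 1, 4, 5]   # history recorded only for first two and last two
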
@@ -609,8 +623,15 @@ class IPsecUtil:
@staticmethod
def vpp_ipsec_set_ip_route(
- node, n_tunnels, tunnel_src, traffic_addr, tunnel_dst, interface,
- raddr_range, dst_mac=None):
+ node: dict,
+ n_tunnels: int,
+ tunnel_src: str,
+ traffic_addr: str,
+ tunnel_dst: str,
+ interface: str,
+ raddr_range: int,
+ dst_mac: Optional[str] = None,
+ ) -> None:
"""Set IP address and route on interface.
:param node: VPP node to add config on.
@@ -630,102 +651,80 @@ class IPsecUtil:
:type tunnel_dst: str
:type interface: str
:type raddr_range: int
- :type dst_mac: str
+ :type dst_mac: Optional[str]
"""
tunnel_src = ip_address(tunnel_src)
tunnel_dst = ip_address(tunnel_dst)
traffic_addr = ip_address(traffic_addr)
tunnel_dst_prefix = 128 if tunnel_dst.version == 6 else 32
- addr_incr = 1 << (128 - raddr_range) if tunnel_src.version == 6 \
+ addr_incr = (
+ 1 << (128 - raddr_range)
+ if tunnel_src.version == 6
else 1 << (32 - raddr_range)
+ )
- if int(n_tunnels) > 10:
- tmp_filename = u"/tmp/ipsec_set_ip.script"
-
- with open(tmp_filename, 'w') as tmp_file:
- if_name = Topology.get_interface_name(node, interface)
- for i in range(n_tunnels):
- tunnel_dst_addr = tunnel_dst + i * addr_incr
- conf = f"exec set interface ip address {if_name} " \
- f"{tunnel_src + i * addr_incr}/{raddr_range}\n" \
- f"exec ip route add {traffic_addr + i}/" \
- f"{tunnel_dst_prefix} " \
- f"via {tunnel_dst_addr} {if_name}\n" \
- f"exec ip route add {tunnel_dst_addr}/" \
- f"{tunnel_dst_prefix} " \
- f"via {tunnel_dst_addr} {if_name}\n"
- if dst_mac:
- conf = f"{conf}exec set ip neighbor {if_name} " \
- f"{tunnel_dst + i * addr_incr} {dst_mac}\n"
- tmp_file.write(conf)
-
- VatExecutor().execute_script(
- tmp_filename, node, timeout=300, json_out=False,
- copy_on_execute=True
- )
- os.remove(tmp_filename)
- return
-
- cmd1 = u"sw_interface_add_del_address"
+ cmd1 = "sw_interface_add_del_address"
args1 = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
is_add=True,
del_all=False,
- prefix=None
- )
- cmd2 = u"ip_route_add_del"
- args2 = dict(
- is_add=1,
- is_multipath=0,
- route=None
+ prefix=None,
)
- cmd3 = u"ip_neighbor_add_del"
+ cmd2 = "ip_route_add_del"
+ args2 = dict(is_add=1, is_multipath=0, route=None)
+ cmd3 = "ip_neighbor_add_del"
args3 = dict(
is_add=True,
neighbor=dict(
sw_if_index=Topology.get_interface_sw_index(node, interface),
flags=0,
mac_address=str(dst_mac),
- ip_address=None
- )
+ ip_address=None,
+ ),
+ )
+ err_msg = (
+ "Failed to configure IP addresses, IP routes and"
+ f" IP neighbor on interface {interface} on host {node['host']}"
+ if dst_mac
+ else "Failed to configure IP addresses and IP routes"
+ f" on interface {interface} on host {node['host']}"
)
- err_msg = f"Failed to configure IP addresses, IP routes and " \
- f"IP neighbor on interface {interface} on host {node[u'host']}" \
- if dst_mac \
- else f"Failed to configure IP addresses and IP routes " \
- f"on interface {interface} on host {node[u'host']}"
- with PapiSocketExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(n_tunnels):
tunnel_dst_addr = tunnel_dst + i * addr_incr
- args1[u"prefix"] = IPUtil.create_prefix_object(
+ args1["prefix"] = IPUtil.create_prefix_object(
tunnel_src + i * addr_incr, raddr_range
)
- args2[u"route"] = IPUtil.compose_vpp_route_structure(
- node, traffic_addr + i,
+ args2["route"] = IPUtil.compose_vpp_route_structure(
+ node,
+ traffic_addr + i,
prefix_len=tunnel_dst_prefix,
- interface=interface, gateway=tunnel_dst_addr
+ interface=interface,
+ gateway=tunnel_dst_addr,
)
history = bool(not 1 < i < n_tunnels - 2)
- papi_exec.add(cmd1, history=history, **args1).\
- add(cmd2, history=history, **args2)
+ papi_exec.add(cmd1, history=history, **args1)
+ papi_exec.add(cmd2, history=history, **args2)
- args2[u"route"] = IPUtil.compose_vpp_route_structure(
- node, tunnel_dst_addr,
+ args2["route"] = IPUtil.compose_vpp_route_structure(
+ node,
+ tunnel_dst_addr,
prefix_len=tunnel_dst_prefix,
- interface=interface, gateway=tunnel_dst_addr
+ interface=interface,
+ gateway=tunnel_dst_addr,
)
papi_exec.add(cmd2, history=history, **args2)
if dst_mac:
- args3[u"neighbor"][u"ip_address"] = ip_address(
+ args3["neighbor"]["ip_address"] = ip_address(
tunnel_dst_addr
)
papi_exec.add(cmd3, history=history, **args3)
papi_exec.get_replies(err_msg)
@staticmethod
- def vpp_ipsec_add_spd(node, spd_id):
+ def vpp_ipsec_add_spd(node: dict, spd_id: int) -> None:
"""Create Security Policy Database on the VPP node.
:param node: VPP node to add SPD on.
@@ -733,18 +732,18 @@ class IPsecUtil:
:type node: dict
:type spd_id: int
"""
- cmd = u"ipsec_spd_add_del"
- err_msg = f"Failed to add Security Policy Database " \
- f"on host {node[u'host']}"
- args = dict(
- is_add=True,
- spd_id=int(spd_id)
+ cmd = "ipsec_spd_add_del"
+ err_msg = (
+ f"Failed to add Security Policy Database on host {node['host']}"
)
+ args = dict(is_add=True, spd_id=int(spd_id))
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def vpp_ipsec_spd_add_if(node, spd_id, interface):
+ def vpp_ipsec_spd_add_if(
+ node: dict, spd_id: int, interface: Union[str, int]
+ ) -> None:
"""Add interface to the Security Policy Database.
:param node: VPP node.
@@ -754,22 +753,31 @@ class IPsecUtil:
:type spd_id: int
:type interface: str or int
"""
- cmd = u"ipsec_interface_add_del_spd"
- err_msg = f"Failed to add interface {interface} to Security Policy " \
- f"Database {spd_id} on host {node[u'host']}"
+ cmd = "ipsec_interface_add_del_spd"
+ err_msg = (
+ f"Failed to add interface {interface} to Security Policy"
+ f" Database {spd_id} on host {node['host']}"
+ )
args = dict(
is_add=True,
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
- spd_id=int(spd_id)
+ spd_id=int(spd_id),
)
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vpp_ipsec_create_spds_match_nth_entry(
- node, dir1_interface, dir2_interface, entry_amount,
- local_addr_range, remote_addr_range, action=PolicyAction.BYPASS,
- inbound=False, bidirectional=True):
+ node: dict,
+ dir1_interface: Union[str, int],
+ dir2_interface: Union[str, int],
+ entry_amount: int,
+ local_addr_range: Union[str, IPv4Address, IPv6Address],
+ remote_addr_range: Union[str, IPv4Address, IPv6Address],
+ action: PolicyAction = PolicyAction.BYPASS,
+ inbound: bool = False,
+ bidirectional: bool = True,
+ ) -> None:
"""Create one matching SPD entry for inbound or outbound traffic on
a DUT for each traffic direction and also create entry_amount - 1
non-matching SPD entries. Create a Security Policy Database on each
@@ -799,21 +807,21 @@ class IPsecUtil:
:param bidirectional: When True, will create SPDs in both directions
of traffic. When False, only in one direction.
:type node: dict
- :type dir1_interface: Union[string, int]
- :type dir2_interface: Union[string, int]
+ :type dir1_interface: Union[str, int]
+ :type dir2_interface: Union[str, int]
:type entry_amount: int
:type local_addr_range:
- Union[string, ipaddress.IPv4Address, ipaddress.IPv6Address]
+ Union[str, IPv4Address, IPv6Address]
:type remote_addr_range:
- Union[string, ipaddress.IPv4Address, ipaddress.IPv6Address]
- :type action: IPsecUtil.PolicyAction
+ Union[str, IPv4Address, IPv6Address]
+ :type action: PolicyAction
:type inbound: bool
:type bidirectional: bool
:raises NotImplementedError: When the action is PolicyAction.PROTECT.
"""
if action == PolicyAction.PROTECT:
- raise NotImplementedError('Policy action PROTECT is not supported.')
+ raise NotImplementedError("Policy action PROTECT is not supported.")
spd_id_dir1 = 1
spd_id_dir2 = 2
@@ -823,9 +831,13 @@ class IPsecUtil:
IPsecUtil.vpp_ipsec_spd_add_if(node, spd_id_dir1, dir1_interface)
# matching entry direction 1
IPsecUtil.vpp_ipsec_add_spd_entry(
- node, spd_id_dir1, matching_priority, action,
- inbound=inbound, laddr_range=local_addr_range,
- raddr_range=remote_addr_range
+ node,
+ spd_id_dir1,
+ matching_priority,
+ action,
+ inbound=inbound,
+ laddr_range=local_addr_range,
+ raddr_range=remote_addr_range,
)
if bidirectional:
@@ -834,9 +846,13 @@ class IPsecUtil:
# matching entry direction 2, the address ranges are switched
IPsecUtil.vpp_ipsec_add_spd_entry(
- node, spd_id_dir2, matching_priority, action,
- inbound=inbound, laddr_range=remote_addr_range,
- raddr_range=local_addr_range
+ node,
+ spd_id_dir2,
+ matching_priority,
+ action,
+ inbound=inbound,
+ laddr_range=remote_addr_range,
+ raddr_range=local_addr_range,
)
# non-matching entries
@@ -856,10 +872,14 @@ class IPsecUtil:
# non-matching entries direction 1
IPsecUtil.vpp_ipsec_add_spd_entries(
- node, no_match_entry_amount, spd_id_dir1,
- ObjIncrement(matching_priority + 1, 1), action,
- inbound=inbound, laddr_range=no_match_local_addr_range,
- raddr_range=no_match_remote_addr_range
+ node,
+ no_match_entry_amount,
+ spd_id_dir1,
+ ObjIncrement(matching_priority + 1, 1),
+ action,
+ inbound=inbound,
+ laddr_range=no_match_local_addr_range,
+ raddr_range=no_match_remote_addr_range,
)
if bidirectional:
@@ -876,22 +896,40 @@ class IPsecUtil:
next(no_match_local_addr_range)
# non-matching entries direction 2
IPsecUtil.vpp_ipsec_add_spd_entries(
- node, no_match_entry_amount, spd_id_dir2,
- ObjIncrement(matching_priority + 1, 1), action,
- inbound=inbound, laddr_range=no_match_local_addr_range,
- raddr_range=no_match_remote_addr_range
+ node,
+ no_match_entry_amount,
+ spd_id_dir2,
+ ObjIncrement(matching_priority + 1, 1),
+ action,
+ inbound=inbound,
+ laddr_range=no_match_local_addr_range,
+ raddr_range=no_match_remote_addr_range,
)
IPsecUtil.vpp_ipsec_show_all(node)
@staticmethod
- def vpp_ipsec_add_spd_entry(
- node, spd_id, priority, action, inbound=True, sa_id=None,
- proto=None, laddr_range=None, raddr_range=None, lport_range=None,
- rport_range=None, is_ipv6=False):
- """Create Security Policy Database entry on the VPP node.
-
- :param node: VPP node to add SPD entry on.
+ def _vpp_ipsec_add_spd_entry_internal(
+ executor: PapiSocketExecutor,
+ spd_id: int,
+ priority: int,
+ action: PolicyAction,
+ inbound: bool = True,
+ sa_id: Optional[int] = None,
+ proto: Optional[int] = None,
+ laddr_range: Optional[str] = None,
+ raddr_range: Optional[str] = None,
+ lport_range: Optional[str] = None,
+ rport_range: Optional[str] = None,
+ is_ipv6: bool = False,
+ ) -> None:
+ """Prepare to create Security Policy Database entry on the VPP node.
+
+ This just adds one more command to the executor.
+        The call site shall collect the replies once all entries are added,
+        to benefit from the speed of async PAPI.
+
+ :param executor: Open PAPI executor (async handling) to add commands to.
:param spd_id: SPD ID to add entry on.
:param priority: SPD entry priority, higher number = higher priority.
:param action: Policy action.
@@ -911,31 +949,29 @@ class IPsecUtil:
<port_start>-<port_end>.
:param is_ipv6: True in case of IPv6 policy when IPv6 address range is
not defined so it will default to address ::/0, otherwise False.
- :type node: dict
+ :type executor: PapiSocketExecutor
:type spd_id: int
:type priority: int
- :type action: IPsecUtil.PolicyAction
+ :type action: PolicyAction
:type inbound: bool
- :type sa_id: int
- :type proto: int
- :type laddr_range: string
- :type raddr_range: string
- :type lport_range: string
- :type rport_range: string
+ :type sa_id: Optional[int]
+ :type proto: Optional[int]
+ :type laddr_range: Optional[str]
+ :type raddr_range: Optional[str]
+ :type lport_range: Optional[str]
+ :type rport_range: Optional[str]
:type is_ipv6: bool
"""
if laddr_range is None:
- laddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0"
+ laddr_range = "::/0" if is_ipv6 else "0.0.0.0/0"
if raddr_range is None:
- raddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0"
+ raddr_range = "::/0" if is_ipv6 else "0.0.0.0/0"
local_net = ip_network(laddr_range, strict=False)
remote_net = ip_network(raddr_range, strict=False)
- cmd = u"ipsec_spd_entry_add_del"
- err_msg = f"Failed to add entry to Security Policy Database " \
- f"{spd_id} on host {node[u'host']}"
+ cmd = "ipsec_spd_entry_add_del_v2"
spd_entry = dict(
spd_id=int(spd_id),
@@ -943,7 +979,7 @@ class IPsecUtil:
is_outbound=not inbound,
sa_id=int(sa_id) if sa_id else 0,
policy=int(action),
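+            # The v2 SPD entry API uses 255 as the "any protocol" wildcard
+            # (the v1 message used 0 for that).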
- protocol=int(proto) if proto else 0,
+ protocol=255 if proto is None else int(proto),
remote_address_start=IPAddress.create_ip_address_object(
remote_net.network_address
),
@@ -956,27 +992,109 @@ class IPsecUtil:
local_address_stop=IPAddress.create_ip_address_object(
local_net.broadcast_address
),
- remote_port_start=int(rport_range.split(u"-")[0]) if rport_range
- else 0,
- remote_port_stop=int(rport_range.split(u"-")[1]) if rport_range
- else 65535,
- local_port_start=int(lport_range.split(u"-")[0]) if lport_range
- else 0,
- local_port_stop=int(lport_range.split(u"-")[1]) if rport_range
- else 65535
+ remote_port_start=(
+ int(rport_range.split("-")[0]) if rport_range else 0
+ ),
+ remote_port_stop=(
+ int(rport_range.split("-")[1]) if rport_range else 65535
+ ),
+ local_port_start=(
+ int(lport_range.split("-")[0]) if lport_range else 0
+ ),
+ local_port_stop=(
+                int(lport_range.split("-")[1]) if lport_range else 65535
+ ),
)
- args = dict(
- is_add=True,
- entry=spd_entry
+ args = dict(is_add=True, entry=spd_entry)
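+        # Only queue the command here; the caller drains all replies in one batch.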
+ executor.add(cmd, **args)
+
+ @staticmethod
+ def vpp_ipsec_add_spd_entry(
+ node: dict,
+ spd_id: int,
+ priority: int,
+ action: PolicyAction,
+ inbound: bool = True,
+ sa_id: Optional[int] = None,
+ proto: Optional[int] = None,
+ laddr_range: Optional[str] = None,
+ raddr_range: Optional[str] = None,
+ lport_range: Optional[str] = None,
+ rport_range: Optional[str] = None,
+ is_ipv6: bool = False,
+ ) -> None:
+ """Create Security Policy Database entry on the VPP node.
+
+ :param node: VPP node to add SPD entry on.
+ :param spd_id: SPD ID to add entry on.
+ :param priority: SPD entry priority, higher number = higher priority.
+ :param action: Policy action.
+ :param inbound: If True policy is for inbound traffic, otherwise
+ outbound.
+ :param sa_id: SAD entry ID for action PolicyAction.PROTECT.
+ :param proto: Policy selector next layer protocol number.
+ :param laddr_range: Policy selector local IPv4 or IPv6 address range
+ in format IP/prefix or IP/mask. If no mask is provided,
+ it's considered to be /32.
+ :param raddr_range: Policy selector remote IPv4 or IPv6 address range
+ in format IP/prefix or IP/mask. If no mask is provided,
+ it's considered to be /32.
+ :param lport_range: Policy selector local TCP/UDP port range in format
+ <port_start>-<port_end>.
+ :param rport_range: Policy selector remote TCP/UDP port range in format
+ <port_start>-<port_end>.
+ :param is_ipv6: True in case of IPv6 policy when IPv6 address range is
+ not defined so it will default to address ::/0, otherwise False.
+ :type node: dict
+ :type spd_id: int
+ :type priority: int
+ :type action: PolicyAction
+ :type inbound: bool
+ :type sa_id: Optional[int]
+ :type proto: Optional[int]
+ :type laddr_range: Optional[str]
+ :type raddr_range: Optional[str]
+ :type lport_range: Optional[str]
+ :type rport_range: Optional[str]
+ :type is_ipv6: bool
+ """
+ err_msg = (
+ "Failed to add entry to Security Policy Database"
+ f" {spd_id} on host {node['host']}"
)
- with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_reply(err_msg)
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
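+            # Even a single entry goes through the shared async helper,
+            # with replies collected below.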
+ IPsecUtil._vpp_ipsec_add_spd_entry_internal(
+ papi_exec,
+ spd_id,
+ priority,
+ action,
+ inbound,
+ sa_id,
+ proto,
+ laddr_range,
+ raddr_range,
+ lport_range,
+ rport_range,
+ is_ipv6,
+ )
+ papi_exec.get_replies(err_msg)
@staticmethod
def vpp_ipsec_add_spd_entries(
- node, n_entries, spd_id, priority, action, inbound, sa_id=None,
- proto=None, laddr_range=None, raddr_range=None, lport_range=None,
- rport_range=None, is_ipv6=False):
+ node: dict,
+ n_entries: int,
+ spd_id: int,
+ priority: Optional[ObjIncrement],
+ action: PolicyAction,
+ inbound: bool,
+ sa_id: Optional[ObjIncrement] = None,
+ proto: Optional[int] = None,
+ laddr_range: Optional[NetworkIncrement] = None,
+ raddr_range: Optional[NetworkIncrement] = None,
+ lport_range: Optional[str] = None,
+ rport_range: Optional[str] = None,
+ is_ipv6: bool = False,
+ ) -> None:
"""Create multiple Security Policy Database entries on the VPP node.
:param node: VPP node to add SPD entries on.
@@ -1003,298 +1121,51 @@ class IPsecUtil:
:type node: dict
:type n_entries: int
:type spd_id: int
- :type priority: IPsecUtil.ObjIncrement
- :type action: IPsecUtil.PolicyAction
+ :type priority: Optional[ObjIncrement]
+ :type action: PolicyAction
:type inbound: bool
- :type sa_id: IPsecUtil.ObjIncrement
- :type proto: int
- :type laddr_range: IPsecUtil.NetworkIncrement
- :type raddr_range: IPsecUtil.NetworkIncrement
- :type lport_range: string
- :type rport_range: string
+ :type sa_id: Optional[ObjIncrement]
+ :type proto: Optional[int]
+ :type laddr_range: Optional[NetworkIncrement]
+ :type raddr_range: Optional[NetworkIncrement]
+ :type lport_range: Optional[str]
+ :type rport_range: Optional[str]
:type is_ipv6: bool
"""
if laddr_range is None:
- laddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0"
+ laddr_range = "::/0" if is_ipv6 else "0.0.0.0/0"
laddr_range = NetworkIncrement(ip_network(laddr_range), 0)
if raddr_range is None:
- raddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0"
+ raddr_range = "::/0" if is_ipv6 else "0.0.0.0/0"
raddr_range = NetworkIncrement(ip_network(raddr_range), 0)
- lport_range_start = 0
- lport_range_stop = 65535
- if lport_range:
- lport_range_start, lport_range_stop = lport_range.split('-')
-
- rport_range_start = 0
- rport_range_stop = 65535
- if rport_range:
- rport_range_start, rport_range_stop = rport_range.split('-')
-
- if int(n_entries) > 10:
- tmp_filename = f"/tmp/ipsec_spd_{spd_id}_add_del_entry.script"
-
- with open(tmp_filename, 'w') as tmp_file:
- for _ in range(n_entries):
- direction = u'inbound' if inbound else u'outbound'
- sa = f' sa {sa_id.inc_fmt()}' if sa_id is not None else ''
- protocol = f' protocol {protocol}' if proto else ''
- local_port_range = f' local-port-range ' \
- f'{lport_range_start} - {lport_range_stop}' \
- if lport_range else ''
- remote_port_range = f' remote-port-range ' \
- f'{rport_range_start} - {rport_range_stop}' \
- if rport_range else ''
-
- spd_cfg = f"exec ipsec policy add spd {spd_id} " \
- f"priority {priority.inc_fmt()} {direction}" \
- f"{protocol} action {action}{sa} " \
- f"local-ip-range {laddr_range.inc_fmt()} " \
- f"remote-ip-range {raddr_range.inc_fmt()}" \
- f"{local_port_range}{remote_port_range}\n"
-
- tmp_file.write(spd_cfg)
-
- VatExecutor().execute_script(
- tmp_filename, node, timeout=300, json_out=False,
- copy_on_execute=True
- )
- os.remove(tmp_filename)
- return
-
- for _ in range(n_entries):
- IPsecUtil.vpp_ipsec_add_spd_entry(
- node, spd_id, next(priority), action, inbound,
- next(sa_id) if sa_id is not None else sa_id,
- proto, next(laddr_range), next(raddr_range), lport_range,
- rport_range, is_ipv6
- )
-
- @staticmethod
- def _ipsec_create_tunnel_interfaces_dut1_vat(
- nodes, tun_ips, if1_key, if2_key, n_tunnels, crypto_alg, integ_alg,
- raddr_ip2, addr_incr, spi_d, existing_tunnels=0):
- """Create multiple IPsec tunnel interfaces on DUT1 node using VAT.
-
- Generate random keys and return them (so DUT2 or TG can decrypt).
-
- :param nodes: VPP nodes to create tunnel interfaces.
- :param tun_ips: Dictionary with VPP node 1 ipsec tunnel interface
- IPv4/IPv6 address (ip1) and VPP node 2 ipsec tunnel interface
- IPv4/IPv6 address (ip2).
- :param if1_key: VPP node 1 interface key from topology file.
- :param if2_key: VPP node 2 / TG node (in case of 2-node topology)
- interface key from topology file.
- :param n_tunnels: Number of tunnel interfaces to be there at the end.
- :param crypto_alg: The encryption algorithm name.
- :param integ_alg: The integrity algorithm name.
- :param raddr_ip2: Policy selector remote IPv4/IPv6 start address for the
- first tunnel in direction node2->node1.
- :param spi_d: Dictionary with SPIs for VPP node 1 and VPP node 2.
- :param addr_incr: IP / IPv6 address incremental step.
- :param existing_tunnels: Number of tunnel interfaces before creation.
- Useful mainly for reconf tests. Default 0.
- :type nodes: dict
- :type tun_ips: dict
- :type if1_key: str
- :type if2_key: str
- :type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type integ_alg: Optional[IntegAlg]
- :type raddr_ip2: IPv4Address or IPv6Address
- :type addr_incr: int
- :type spi_d: dict
- :type existing_tunnels: int
- :returns: Generated ckeys and ikeys.
- :rtype: List[bytes], List[bytes]
- """
- tmp_fn1 = u"/tmp/ipsec_create_tunnel_dut1.config"
- if1_n = Topology.get_interface_name(nodes[u"DUT1"], if1_key)
-
- ckeys = [bytes()] * existing_tunnels
- ikeys = [bytes()] * existing_tunnels
-
- vat = VatExecutor()
- with open(tmp_fn1, u"w") as tmp_f1:
- rmac = Topology.get_interface_mac(nodes[u"DUT2"], if2_key) \
- if u"DUT2" in nodes.keys() \
- else Topology.get_interface_mac(nodes[u"TG"], if2_key)
- if not existing_tunnels:
- tmp_f1.write(
- f"exec create loopback interface\n"
- f"exec set interface state loop0 up\n"
- f"exec set interface ip address {if1_n} "
- f"{tun_ips[u'ip2'] - 1}/"
- f"{len(tun_ips[u'ip2'].packed)*8*3//4}\n"
- f"exec set ip neighbor {if1_n} {tun_ips[u'ip2']} {rmac} "
- f"static\n"
- )
- for i in range(existing_tunnels, n_tunnels):
- ckeys.append(
- gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg))
- )
- ikeys.append(
- gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg))
- )
- if integ_alg:
- integ = f"integ-alg {integ_alg.alg_name} " \
- f"integ-key {ikeys[i].hex()} "
- else:
- integ = u""
- tmp_f1.write(
- f"exec set interface ip address loop0 "
- f"{tun_ips[u'ip1'] + i * addr_incr}/32\n"
- f"exec create ipip tunnel "
- f"src {tun_ips[u'ip1'] + i * addr_incr} "
- f"dst {tun_ips[u'ip2']} "
- f"p2p\n"
- f"exec ipsec sa add {i} "
- f"spi {spi_d[u'spi_1'] + i} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"crypto-key {ckeys[i].hex()} "
- f"{integ}"
- f"esp\n"
- f"exec ipsec sa add {100000 + i} "
- f"spi {spi_d[u'spi_2'] + i} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"crypto-key {ckeys[i].hex()} "
- f"{integ}"
- f"esp\n"
- f"exec ipsec tunnel protect ipip{i} "
- f"sa-out {i} "
- f"sa-in {100000 + i} "
- f"add\n"
- )
- vat.execute_script(
- tmp_fn1, nodes[u"DUT1"], timeout=1800, json_out=False,
- copy_on_execute=True,
- history=bool(n_tunnels < 100)
+ err_msg = (
+ "Failed to add entry to Security Policy Database"
+ f" {spd_id} on host {node['host']}"
)
- os.remove(tmp_fn1)
-
- with open(tmp_fn1, 'w') as tmp_f1:
- for i in range(existing_tunnels, n_tunnels):
- tmp_f1.write(
- f"exec set interface unnumbered ipip{i} use {if1_n}\n"
- f"exec set interface state ipip{i} up\n"
- f"exec ip route add "
- f"{raddr_ip2 + i}/{len(raddr_ip2.packed)*8} "
- f"via ipip{i}\n"
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
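+            # Queue every entry first; replies are collected once after the loop.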
+ for _ in range(n_entries):
+ IPsecUtil._vpp_ipsec_add_spd_entry_internal(
+ papi_exec,
+ spd_id,
+ next(priority),
+ action,
+ inbound,
+ next(sa_id) if sa_id is not None else sa_id,
+ proto,
+ next(laddr_range),
+ next(raddr_range),
+ lport_range,
+ rport_range,
+ is_ipv6,
)
- vat.execute_script(
- tmp_fn1, nodes[u"DUT1"], timeout=1800, json_out=False,
- copy_on_execute=True,
- history=bool(n_tunnels < 100)
- )
- os.remove(tmp_fn1)
-
- return ckeys, ikeys
-
- @staticmethod
- def _ipsec_create_tunnel_interfaces_dut2_vat(
- nodes, tun_ips, if2_key, n_tunnels, crypto_alg, ckeys, integ_alg,
- ikeys, raddr_ip1, addr_incr, spi_d, existing_tunnels=0):
- """Create multiple IPsec tunnel interfaces on DUT2 node using VAT.
-
- This method accesses keys generated by DUT1 method
- and does not return anything.
-
- :param nodes: VPP nodes to create tunnel interfaces.
- :param tun_ips: Dictionary with VPP node 1 ipsec tunnel interface
- IPv4/IPv6 address (ip1) and VPP node 2 ipsec tunnel interface
- IPv4/IPv6 address (ip2).
- :param if2_key: VPP node 2 / TG node (in case of 2-node topology)
- interface key from topology file.
- :param n_tunnels: Number of tunnel interfaces to be there at the end.
- :param crypto_alg: The encryption algorithm name.
- :param ckeys: List of encryption keys.
- :param integ_alg: The integrity algorithm name.
- :param ikeys: List of integrity keys.
- :param spi_d: Dictionary with SPIs for VPP node 1 and VPP node 2.
- :param addr_incr: IP / IPv6 address incremental step.
- :param existing_tunnels: Number of tunnel interfaces before creation.
- Useful mainly for reconf tests. Default 0.
- :type nodes: dict
- :type tun_ips: dict
- :type if2_key: str
- :type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type ckeys: Sequence[bytes]
- :type integ_alg: Optional[IntegAlg]
- :type ikeys: Sequence[bytes]
- :type addr_incr: int
- :type spi_d: dict
- :type existing_tunnels: int
- """
- tmp_fn2 = u"/tmp/ipsec_create_tunnel_dut2.config"
- if2_n = Topology.get_interface_name(nodes[u"DUT2"], if2_key)
-
- vat = VatExecutor()
- with open(tmp_fn2, 'w') as tmp_f2:
- if not existing_tunnels:
- tmp_f2.write(
- f"exec set interface ip address {if2_n}"
- f" {tun_ips[u'ip2']}/{len(tun_ips[u'ip2'].packed)*8*3/4}\n"
- )
- for i in range(existing_tunnels, n_tunnels):
- if integ_alg:
- integ = f"integ-alg {integ_alg.alg_name} " \
- f"integ-key {ikeys[i].hex()} "
- else:
- integ = u""
- tmp_f2.write(
- f"exec create ipip tunnel "
- f"src {tun_ips[u'ip2']} "
- f"dst {tun_ips[u'ip1'] + i * addr_incr} "
- f"p2p\n"
- f"exec ipsec sa add {100000 + i} "
- f"spi {spi_d[u'spi_2'] + i} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"crypto-key {ckeys[i].hex()} "
- f"{integ}"
- f"esp\n"
- f"exec ipsec sa add {i} "
- f"spi {spi_d[u'spi_1'] + i} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"crypto-key {ckeys[i].hex()} "
- f"{integ}"
- f"esp\n"
- f"exec ipsec tunnel protect ipip{i} "
- f"sa-out {100000 + i} "
- f"sa-in {i} "
- f"add\n"
- )
- vat.execute_script(
- tmp_fn2, nodes[u"DUT2"], timeout=1800, json_out=False,
- copy_on_execute=True,
- history=bool(n_tunnels < 100)
- )
- os.remove(tmp_fn2)
-
- with open(tmp_fn2, 'w') as tmp_f2:
- if not existing_tunnels:
- tmp_f2.write(
- f"exec ip route add {tun_ips[u'ip1']}/8 "
- f"via {tun_ips[u'ip2'] - 1} {if2_n}\n"
- )
- for i in range(existing_tunnels, n_tunnels):
- tmp_f2.write(
- f"exec set interface unnumbered ipip{i} use {if2_n}\n"
- f"exec set interface state ipip{i} up\n"
- f"exec ip route add "
- f"{raddr_ip1 + i}/{len(raddr_ip1.packed)*8} "
- f"via ipip{i}\n"
- )
- vat.execute_script(
- tmp_fn2, nodes[u"DUT2"], timeout=1800, json_out=False,
- copy_on_execute=True,
- history=bool(n_tunnels < 100)
- )
- os.remove(tmp_fn2)
+ papi_exec.get_replies(err_msg)
@staticmethod
- def _ipsec_create_loopback_dut1_papi(nodes, tun_ips, if1_key, if2_key):
+ def _ipsec_create_loopback_dut1_papi(
+ nodes: dict, tun_ips: dict, if1_key: str, if2_key: str
+ ) -> int:
"""Create loopback interface and set IP address on VPP node 1 interface
using PAPI.
@@ -1309,60 +1180,66 @@ class IPsecUtil:
:type tun_ips: dict
:type if1_key: str
:type if2_key: str
+        :returns: sw_if_index of the created loopback interface.
+ :rtype: int
"""
- with PapiSocketExecutor(nodes[u"DUT1"]) as papi_exec:
+ with PapiSocketExecutor(nodes["DUT1"]) as papi_exec:
# Create loopback interface on DUT1, set it to up state
- cmd = u"create_loopback_instance"
+ cmd = "create_loopback_instance"
args = dict(
mac_address=0,
is_specified=False,
user_instance=0,
)
- err_msg = f"Failed to create loopback interface " \
- f"on host {nodes[u'DUT1'][u'host']}"
- loop_sw_if_idx = papi_exec.add(cmd, **args). \
- get_sw_if_index(err_msg)
- cmd = u"sw_interface_set_flags"
+ err_msg = (
+ "Failed to create loopback interface"
+ f" on host {nodes['DUT1']['host']}"
+ )
+ papi_exec.add(cmd, **args)
+ loop_sw_if_idx = papi_exec.get_sw_if_index(err_msg)
+ cmd = "sw_interface_set_flags"
args = dict(
sw_if_index=loop_sw_if_idx,
- flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value,
+ )
+ err_msg = (
+ "Failed to set loopback interface state up"
+ f" on host {nodes['DUT1']['host']}"
)
- err_msg = f"Failed to set loopback interface state up " \
- f"on host {nodes[u'DUT1'][u'host']}"
papi_exec.add(cmd, **args).get_reply(err_msg)
# Set IP address on VPP node 1 interface
- cmd = u"sw_interface_add_del_address"
+ cmd = "sw_interface_add_del_address"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(
- nodes[u"DUT1"], if1_key
+ nodes["DUT1"], if1_key
),
is_add=True,
del_all=False,
prefix=IPUtil.create_prefix_object(
- tun_ips[u"ip2"] - 1, 96 if tun_ips[u"ip2"].version == 6
- else 24
- )
+ tun_ips["ip2"] - 1,
+ 96 if tun_ips["ip2"].version == 6 else 24,
+ ),
+ )
+ err_msg = (
+ f"Failed to set IP address on interface {if1_key}"
+ f" on host {nodes['DUT1']['host']}"
)
- err_msg = f"Failed to set IP address on interface {if1_key} " \
- f"on host {nodes[u'DUT1'][u'host']}"
papi_exec.add(cmd, **args).get_reply(err_msg)
- cmd2 = u"ip_neighbor_add_del"
+ cmd2 = "ip_neighbor_add_del"
args2 = dict(
is_add=1,
neighbor=dict(
sw_if_index=Topology.get_interface_sw_index(
- nodes[u"DUT1"], if1_key
+ nodes["DUT1"], if1_key
),
flags=1,
mac_address=str(
- Topology.get_interface_mac(nodes[u"DUT2"], if2_key)
- if u"DUT2" in nodes.keys()
- else Topology.get_interface_mac(
- nodes[u"TG"], if2_key
- )
+ Topology.get_interface_mac(nodes["DUT2"], if2_key)
+ if "DUT2" in nodes.keys()
+ else Topology.get_interface_mac(nodes["TG"], if2_key)
),
- ip_address=tun_ips[u"ip2"].compressed
- )
+ ip_address=tun_ips["ip2"].compressed,
+ ),
)
err_msg = f"Failed to add IP neighbor on interface {if1_key}"
papi_exec.add(cmd2, **args2).get_reply(err_msg)
@@ -1371,8 +1248,18 @@ class IPsecUtil:
@staticmethod
def _ipsec_create_tunnel_interfaces_dut1_papi(
- nodes, tun_ips, if1_key, if2_key, n_tunnels, crypto_alg, integ_alg,
- raddr_ip2, addr_incr, spi_d, existing_tunnels=0):
+ nodes: dict,
+ tun_ips: dict,
+ if1_key: str,
+ if2_key: str,
+ n_tunnels: int,
+ crypto_alg: CryptoAlg,
+ integ_alg: Optional[IntegAlg],
+ raddr_ip2: Union[IPv4Address, IPv6Address],
+ addr_incr: int,
+ spi_d: dict,
+ existing_tunnels: int = 0,
+ ) -> Tuple[List[bytes], List[bytes]]:
"""Create multiple IPsec tunnel interfaces on DUT1 node using PAPI.
Generate random keys and return them (so DUT2 or TG can decrypt).
@@ -1400,7 +1287,7 @@ class IPsecUtil:
:type n_tunnels: int
:type crypto_alg: CryptoAlg
:type integ_alg: Optional[IntegAlg]
- :type raddr_ip2: IPv4Address or IPv6Address
+ :type raddr_ip2: Union[IPv4Address, IPv6Address]
:type addr_incr: int
:type spi_d: dict
:type existing_tunnels: int
@@ -1413,27 +1300,27 @@ class IPsecUtil:
)
else:
loop_sw_if_idx = InterfaceUtil.vpp_get_interface_sw_index(
- nodes[u"DUT1"], u"loop0"
+ nodes["DUT1"], "loop0"
)
- with PapiSocketExecutor(nodes[u"DUT1"]) as papi_exec:
+ with PapiSocketExecutor(nodes["DUT1"], is_async=True) as papi_exec:
# Configure IP addresses on loop0 interface
- cmd = u"sw_interface_add_del_address"
+ cmd = "sw_interface_add_del_address"
args = dict(
sw_if_index=loop_sw_if_idx,
is_add=True,
del_all=False,
- prefix=None
+ prefix=None,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"prefix"] = IPUtil.create_prefix_object(
- tun_ips[u"ip1"] + i * addr_incr,
- 128 if tun_ips[u"ip1"].version == 6 else 32
+ args["prefix"] = IPUtil.create_prefix_object(
+ tun_ips["ip1"] + i * addr_incr,
+ 128 if tun_ips["ip1"].version == 6 else 32,
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Configure IPIP tunnel interfaces
- cmd = u"ipip_add_tunnel"
+ cmd = "ipip_add_tunnel"
ipip_tunnel = dict(
instance=Constants.BITWISE_NON_ZERO,
src=None,
@@ -1443,43 +1330,38 @@ class IPsecUtil:
TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
),
mode=int(TunnelMode.TUNNEL_API_MODE_P2P),
- dscp=int(IpDscp.IP_API_DSCP_CS0)
- )
- args = dict(
- tunnel=ipip_tunnel
+ dscp=int(IpDscp.IP_API_DSCP_CS0),
)
+ args = dict(tunnel=ipip_tunnel)
ipip_tunnels = [None] * existing_tunnels
for i in range(existing_tunnels, n_tunnels):
- args[u"tunnel"][u"src"] = IPAddress.create_ip_address_object(
- tun_ips[u"ip1"] + i * addr_incr
+ ipip_tunnel["src"] = IPAddress.create_ip_address_object(
+ tun_ips["ip1"] + i * addr_incr
)
- args[u"tunnel"][u"dst"] = IPAddress.create_ip_address_object(
- tun_ips[u"ip2"]
+ ipip_tunnel["dst"] = IPAddress.create_ip_address_object(
+ tun_ips["ip2"]
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IPIP tunnel interfaces on host" \
- f" {nodes[u'DUT1'][u'host']}"
+ err_msg = (
+ "Failed to add IPIP tunnel interfaces on host"
+ f" {nodes['DUT1']['host']}"
+ )
ipip_tunnels.extend(
[
- reply[u"sw_if_index"]
+ reply["sw_if_index"]
for reply in papi_exec.get_replies(err_msg)
- if u"sw_if_index" in reply
+ if "sw_if_index" in reply
]
)
# Configure IPSec SAD entries
ckeys = [bytes()] * existing_tunnels
ikeys = [bytes()] * existing_tunnels
- cmd = u"ipsec_sad_entry_add_del_v3"
- c_key = dict(
- length=0,
- data=None
- )
- i_key = dict(
- length=0,
- data=None
- )
+ cmd = "ipsec_sad_entry_add_v2"
+ c_key = dict(length=0, data=None)
+ i_key = dict(length=0, data=None)
+ common_flags = IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE
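+            # Outbound (tx) SAs carry no extra flags; the inbound flag is OR-ed in
+            # for the rx-path entries below.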
sad_entry = dict(
sad_id=None,
spi=None,
@@ -1488,7 +1370,7 @@ class IPsecUtil:
crypto_key=c_key,
integrity_algorithm=integ_alg.alg_int_repr if integ_alg else 0,
integrity_key=i_key,
- flags=None,
+ flags=common_flags,
tunnel=dict(
src=0,
dst=0,
@@ -1499,13 +1381,11 @@ class IPsecUtil:
dscp=int(IpDscp.IP_API_DSCP_CS0),
),
salt=0,
- udp_src_port=IPSEC_UDP_PORT_NONE,
- udp_dst_port=IPSEC_UDP_PORT_NONE,
- )
- args = dict(
- is_add=True,
- entry=sad_entry
+ udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+ udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+ anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
)
+ args = dict(entry=sad_entry)
for i in range(existing_tunnels, n_tunnels):
ckeys.append(
gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg))
@@ -1514,118 +1394,118 @@ class IPsecUtil:
gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg))
)
# SAD entry for outband / tx path
- args[u"entry"][u"sad_id"] = i
- args[u"entry"][u"spi"] = spi_d[u"spi_1"] + i
+ sad_entry["sad_id"] = i
+ sad_entry["spi"] = spi_d["spi_1"] + i
- args[u"entry"][u"crypto_key"][u"length"] = len(ckeys[i])
- args[u"entry"][u"crypto_key"][u"data"] = ckeys[i]
+ sad_entry["crypto_key"]["length"] = len(ckeys[i])
+ sad_entry["crypto_key"]["data"] = ckeys[i]
if integ_alg:
- args[u"entry"][u"integrity_key"][u"length"] = len(ikeys[i])
- args[u"entry"][u"integrity_key"][u"data"] = ikeys[i]
- args[u"entry"][u"flags"] = int(
- IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE
- )
+ sad_entry["integrity_key"]["length"] = len(ikeys[i])
+ sad_entry["integrity_key"]["data"] = ikeys[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
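+            # Reuse the same entry template for the rx-path SAs, now flagged as inbound.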
+ sad_entry["flags"] |= IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_INBOUND
+ for i in range(existing_tunnels, n_tunnels):
# SAD entry for inband / rx path
- args[u"entry"][u"sad_id"] = 100000 + i
- args[u"entry"][u"spi"] = spi_d[u"spi_2"] + i
+ sad_entry["sad_id"] = 100000 + i
+ sad_entry["spi"] = spi_d["spi_2"] + i
- args[u"entry"][u"crypto_key"][u"length"] = len(ckeys[i])
- args[u"entry"][u"crypto_key"][u"data"] = ckeys[i]
+ sad_entry["crypto_key"]["length"] = len(ckeys[i])
+ sad_entry["crypto_key"]["data"] = ckeys[i]
if integ_alg:
- args[u"entry"][u"integrity_key"][u"length"] = len(ikeys[i])
- args[u"entry"][u"integrity_key"][u"data"] = ikeys[i]
- args[u"entry"][u"flags"] = int(
- IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE |
- IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_INBOUND
- )
+ sad_entry["integrity_key"]["length"] = len(ikeys[i])
+ sad_entry["integrity_key"]["data"] = ikeys[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IPsec SAD entries on host" \
- f" {nodes[u'DUT1'][u'host']}"
+ err_msg = (
+ "Failed to add IPsec SAD entries on host"
+ f" {nodes['DUT1']['host']}"
+ )
papi_exec.get_replies(err_msg)
# Add protection for tunnels with IPSEC
- cmd = u"ipsec_tunnel_protect_update"
+ cmd = "ipsec_tunnel_protect_update"
n_hop = dict(
address=0,
via_label=MPLS_LABEL_INVALID,
- obj_id=Constants.BITWISE_NON_ZERO
+ obj_id=Constants.BITWISE_NON_ZERO,
)
ipsec_tunnel_protect = dict(
- sw_if_index=None,
- nh=n_hop,
- sa_out=None,
- n_sa_in=1,
- sa_in=None
- )
- args = dict(
- tunnel=ipsec_tunnel_protect
+ sw_if_index=None, nh=n_hop, sa_out=None, n_sa_in=1, sa_in=None
)
+ args = dict(tunnel=ipsec_tunnel_protect)
for i in range(existing_tunnels, n_tunnels):
- args[u"tunnel"][u"sw_if_index"] = ipip_tunnels[i]
- args[u"tunnel"][u"sa_out"] = i
- args[u"tunnel"][u"sa_in"] = [100000 + i]
+ args["tunnel"]["sw_if_index"] = ipip_tunnels[i]
+ args["tunnel"]["sa_out"] = i
+ args["tunnel"]["sa_in"] = [100000 + i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add protection for tunnels with IPSEC " \
- f"on host {nodes[u'DUT1'][u'host']}"
+ err_msg = (
+ "Failed to add protection for tunnels with IPSEC"
+ f" on host {nodes['DUT1']['host']}"
+ )
papi_exec.get_replies(err_msg)
# Configure unnumbered interfaces
- cmd = u"sw_interface_set_unnumbered"
+ cmd = "sw_interface_set_unnumbered"
args = dict(
is_add=True,
sw_if_index=InterfaceUtil.get_interface_index(
- nodes[u"DUT1"], if1_key
+ nodes["DUT1"], if1_key
),
- unnumbered_sw_if_index=0
+ unnumbered_sw_if_index=0,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"unnumbered_sw_if_index"] = ipip_tunnels[i]
+ args["unnumbered_sw_if_index"] = ipip_tunnels[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Set interfaces up
- cmd = u"sw_interface_set_flags"
+ cmd = "sw_interface_set_flags"
args = dict(
sw_if_index=0,
- flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"sw_if_index"] = ipip_tunnels[i]
+ args["sw_if_index"] = ipip_tunnels[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Configure IP routes
- cmd = u"ip_route_add_del"
- args = dict(
- is_add=1,
- is_multipath=0,
- route=None
- )
+ cmd = "ip_route_add_del"
+ args = dict(is_add=1, is_multipath=0, route=None)
for i in range(existing_tunnels, n_tunnels):
- args[u"route"] = IPUtil.compose_vpp_route_structure(
- nodes[u"DUT1"], (raddr_ip2 + i).compressed,
+ args["route"] = IPUtil.compose_vpp_route_structure(
+ nodes["DUT1"],
+ (raddr_ip2 + i).compressed,
prefix_len=128 if raddr_ip2.version == 6 else 32,
- interface=ipip_tunnels[i]
+ interface=ipip_tunnels[i],
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IP routes on host " \
- f"{nodes[u'DUT1'][u'host']}"
+ err_msg = f"Failed to add IP routes on host {nodes['DUT1']['host']}"
papi_exec.get_replies(err_msg)
return ckeys, ikeys
@staticmethod
def _ipsec_create_tunnel_interfaces_dut2_papi(
- nodes, tun_ips, if2_key, n_tunnels, crypto_alg, ckeys, integ_alg,
- ikeys, raddr_ip1, addr_incr, spi_d, existing_tunnels=0):
+ nodes: dict,
+ tun_ips: dict,
+ if2_key: str,
+ n_tunnels: int,
+ crypto_alg: CryptoAlg,
+ ckeys: Sequence[bytes],
+ integ_alg: Optional[IntegAlg],
+ ikeys: Sequence[bytes],
+ raddr_ip1: Union[IPv4Address, IPv6Address],
+ addr_incr: int,
+ spi_d: dict,
+ existing_tunnels: int = 0,
+ ) -> None:
"""Create multiple IPsec tunnel interfaces on DUT2 node using PAPI.
This method accesses keys generated by DUT1 method
@@ -1642,6 +1522,8 @@ class IPsecUtil:
:param ckeys: List of encryption keys.
:param integ_alg: The integrity algorithm name.
:param ikeys: List of integrity keys.
+ :param raddr_ip1: Policy selector remote IPv4/IPv6 start address for the
+ first tunnel in direction node1->node2.
:param spi_d: Dictionary with SPIs for VPP node 1 and VPP node 2.
:param addr_incr: IP / IPv6 address incremental step.
:param existing_tunnels: Number of tunnel interfaces before creation.
@@ -1654,30 +1536,33 @@ class IPsecUtil:
:type ckeys: Sequence[bytes]
:type integ_alg: Optional[IntegAlg]
:type ikeys: Sequence[bytes]
+ :type raddr_ip1: Union[IPv4Address, IPv6Address]
:type addr_incr: int
:type spi_d: dict
:type existing_tunnels: int
"""
- with PapiSocketExecutor(nodes[u"DUT2"]) as papi_exec:
+ with PapiSocketExecutor(nodes["DUT2"], is_async=True) as papi_exec:
if not existing_tunnels:
# Set IP address on VPP node 2 interface
- cmd = u"sw_interface_add_del_address"
+ cmd = "sw_interface_add_del_address"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(
- nodes[u"DUT2"], if2_key
+ nodes["DUT2"], if2_key
),
is_add=True,
del_all=False,
prefix=IPUtil.create_prefix_object(
- tun_ips[u"ip2"], 96 if tun_ips[u"ip2"].version == 6
- else 24
- )
+ tun_ips["ip2"],
+ 96 if tun_ips["ip2"].version == 6 else 24,
+ ),
)
- err_msg = f"Failed to set IP address on interface {if2_key} " \
- f"on host {nodes[u'DUT2'][u'host']}"
- papi_exec.add(cmd, **args).get_reply(err_msg)
+ err_msg = (
+ f"Failed to set IP address on interface {if2_key}"
+ f" on host {nodes['DUT2']['host']}"
+ )
+ papi_exec.add(cmd, **args).get_replies(err_msg)
# Configure IPIP tunnel interfaces
- cmd = u"ipip_add_tunnel"
+ cmd = "ipip_add_tunnel"
ipip_tunnel = dict(
instance=Constants.BITWISE_NON_ZERO,
src=None,
@@ -1687,41 +1572,36 @@ class IPsecUtil:
TunnelEncpaDecapFlags.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
),
mode=int(TunnelMode.TUNNEL_API_MODE_P2P),
- dscp=int(IpDscp.IP_API_DSCP_CS0)
- )
- args = dict(
- tunnel=ipip_tunnel
+ dscp=int(IpDscp.IP_API_DSCP_CS0),
)
+ args = dict(tunnel=ipip_tunnel)
ipip_tunnels = [None] * existing_tunnels
for i in range(existing_tunnels, n_tunnels):
- args[u"tunnel"][u"src"] = IPAddress.create_ip_address_object(
- tun_ips[u"ip2"]
+ ipip_tunnel["src"] = IPAddress.create_ip_address_object(
+ tun_ips["ip2"]
)
- args[u"tunnel"][u"dst"] = IPAddress.create_ip_address_object(
- tun_ips[u"ip1"] + i * addr_incr
+ ipip_tunnel["dst"] = IPAddress.create_ip_address_object(
+ tun_ips["ip1"] + i * addr_incr
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IPIP tunnel interfaces on host" \
- f" {nodes[u'DUT2'][u'host']}"
+ err_msg = (
+ "Failed to add IPIP tunnel interfaces on host"
+ f" {nodes['DUT2']['host']}"
+ )
ipip_tunnels.extend(
[
- reply[u"sw_if_index"]
+ reply["sw_if_index"]
for reply in papi_exec.get_replies(err_msg)
- if u"sw_if_index" in reply
+ if "sw_if_index" in reply
]
)
# Configure IPSec SAD entries
- cmd = u"ipsec_sad_entry_add_del_v3"
- c_key = dict(
- length=0,
- data=None
- )
- i_key = dict(
- length=0,
- data=None
- )
+ cmd = "ipsec_sad_entry_add_v2"
+ c_key = dict(length=0, data=None)
+ i_key = dict(length=0, data=None)
+ common_flags = IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE
sad_entry = dict(
sad_id=None,
spi=None,
@@ -1730,7 +1610,7 @@ class IPsecUtil:
crypto_key=c_key,
integrity_algorithm=integ_alg.alg_int_repr if integ_alg else 0,
integrity_key=i_key,
- flags=None,
+ flags=common_flags,
tunnel=dict(
src=0,
dst=0,
@@ -1741,13 +1621,11 @@ class IPsecUtil:
dscp=int(IpDscp.IP_API_DSCP_CS0),
),
salt=0,
- udp_src_port=IPSEC_UDP_PORT_NONE,
- udp_dst_port=IPSEC_UDP_PORT_NONE,
- )
- args = dict(
- is_add=True,
- entry=sad_entry
+ udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+ udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+ anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
)
+ args = dict(entry=sad_entry)
for i in range(existing_tunnels, n_tunnels):
ckeys.append(
gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg))
@@ -1756,132 +1634,129 @@ class IPsecUtil:
gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg))
)
# SAD entry for outband / tx path
- args[u"entry"][u"sad_id"] = 100000 + i
- args[u"entry"][u"spi"] = spi_d[u"spi_2"] + i
+ sad_entry["sad_id"] = 100000 + i
+ sad_entry["spi"] = spi_d["spi_2"] + i
- args[u"entry"][u"crypto_key"][u"length"] = len(ckeys[i])
- args[u"entry"][u"crypto_key"][u"data"] = ckeys[i]
+ sad_entry["crypto_key"]["length"] = len(ckeys[i])
+ sad_entry["crypto_key"]["data"] = ckeys[i]
if integ_alg:
- args[u"entry"][u"integrity_key"][u"length"] = len(ikeys[i])
- args[u"entry"][u"integrity_key"][u"data"] = ikeys[i]
- args[u"entry"][u"flags"] = int(
- IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE
- )
+ sad_entry["integrity_key"]["length"] = len(ikeys[i])
+ sad_entry["integrity_key"]["data"] = ikeys[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
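+            # As on DUT1, switch the shared template to inbound for the rx-path SAs.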
+ sad_entry["flags"] |= IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_INBOUND
+ for i in range(existing_tunnels, n_tunnels):
# SAD entry for inband / rx path
- args[u"entry"][u"sad_id"] = i
- args[u"entry"][u"spi"] = spi_d[u"spi_1"] + i
+ sad_entry["sad_id"] = i
+ sad_entry["spi"] = spi_d["spi_1"] + i
- args[u"entry"][u"crypto_key"][u"length"] = len(ckeys[i])
- args[u"entry"][u"crypto_key"][u"data"] = ckeys[i]
+ sad_entry["crypto_key"]["length"] = len(ckeys[i])
+ sad_entry["crypto_key"]["data"] = ckeys[i]
if integ_alg:
- args[u"entry"][u"integrity_key"][u"length"] = len(ikeys[i])
- args[u"entry"][u"integrity_key"][u"data"] = ikeys[i]
- args[u"entry"][u"flags"] = int(
- IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE |
- IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_INBOUND
- )
+ sad_entry["integrity_key"]["length"] = len(ikeys[i])
+ sad_entry["integrity_key"]["data"] = ikeys[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IPsec SAD entries on host" \
- f" {nodes[u'DUT2'][u'host']}"
+ err_msg = (
+                "Failed to add IPsec SAD entries on host"
+ f" {nodes['DUT2']['host']}"
+ )
papi_exec.get_replies(err_msg)
# Add protection for tunnels with IPSEC
- cmd = u"ipsec_tunnel_protect_update"
+ cmd = "ipsec_tunnel_protect_update"
n_hop = dict(
address=0,
via_label=MPLS_LABEL_INVALID,
- obj_id=Constants.BITWISE_NON_ZERO
+ obj_id=Constants.BITWISE_NON_ZERO,
)
ipsec_tunnel_protect = dict(
- sw_if_index=None,
- nh=n_hop,
- sa_out=None,
- n_sa_in=1,
- sa_in=None
- )
- args = dict(
- tunnel=ipsec_tunnel_protect
+ sw_if_index=None, nh=n_hop, sa_out=None, n_sa_in=1, sa_in=None
)
+ args = dict(tunnel=ipsec_tunnel_protect)
for i in range(existing_tunnels, n_tunnels):
- args[u"tunnel"][u"sw_if_index"] = ipip_tunnels[i]
- args[u"tunnel"][u"sa_out"] = 100000 + i
- args[u"tunnel"][u"sa_in"] = [i]
+ args["tunnel"]["sw_if_index"] = ipip_tunnels[i]
+ args["tunnel"]["sa_out"] = 100000 + i
+ args["tunnel"]["sa_in"] = [i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add protection for tunnels with IPSEC " \
- f"on host {nodes[u'DUT2'][u'host']}"
+ err_msg = (
+ "Failed to add protection for tunnels with IPSEC"
+ f" on host {nodes['DUT2']['host']}"
+ )
papi_exec.get_replies(err_msg)
if not existing_tunnels:
# Configure IP route
- cmd = u"ip_route_add_del"
+ cmd = "ip_route_add_del"
route = IPUtil.compose_vpp_route_structure(
- nodes[u"DUT2"], tun_ips[u"ip1"].compressed,
- prefix_len=32 if tun_ips[u"ip1"].version == 6 else 8,
+ nodes["DUT2"],
+ tun_ips["ip1"].compressed,
+ prefix_len=32 if tun_ips["ip1"].version == 6 else 8,
interface=if2_key,
- gateway=(tun_ips[u"ip2"] - 1).compressed
- )
- args = dict(
- is_add=1,
- is_multipath=0,
- route=route
+ gateway=(tun_ips["ip2"] - 1).compressed,
)
+ args = dict(is_add=1, is_multipath=0, route=route)
papi_exec.add(cmd, **args)
# Configure unnumbered interfaces
- cmd = u"sw_interface_set_unnumbered"
+ cmd = "sw_interface_set_unnumbered"
args = dict(
is_add=True,
sw_if_index=InterfaceUtil.get_interface_index(
- nodes[u"DUT2"], if2_key
+ nodes["DUT2"], if2_key
),
- unnumbered_sw_if_index=0
+ unnumbered_sw_if_index=0,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"unnumbered_sw_if_index"] = ipip_tunnels[i]
+ args["unnumbered_sw_if_index"] = ipip_tunnels[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Set interfaces up
- cmd = u"sw_interface_set_flags"
+ cmd = "sw_interface_set_flags"
args = dict(
sw_if_index=0,
- flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value,
)
for i in range(existing_tunnels, n_tunnels):
- args[u"sw_if_index"] = ipip_tunnels[i]
+ args["sw_if_index"] = ipip_tunnels[i]
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
# Configure IP routes
- cmd = u"ip_route_add_del"
- args = dict(
- is_add=1,
- is_multipath=0,
- route=None
- )
+ cmd = "ip_route_add_del"
+ args = dict(is_add=1, is_multipath=0, route=None)
for i in range(existing_tunnels, n_tunnels):
- args[u"route"] = IPUtil.compose_vpp_route_structure(
- nodes[u"DUT1"], (raddr_ip1 + i).compressed,
+ args["route"] = IPUtil.compose_vpp_route_structure(
+ nodes["DUT1"],
+ (raddr_ip1 + i).compressed,
prefix_len=128 if raddr_ip1.version == 6 else 32,
- interface=ipip_tunnels[i]
+ interface=ipip_tunnels[i],
)
papi_exec.add(
cmd, history=bool(not 1 < i < n_tunnels - 2), **args
)
- err_msg = f"Failed to add IP routes " \
- f"on host {nodes[u'DUT2'][u'host']}"
+ err_msg = f"Failed to add IP routes on host {nodes['DUT2']['host']}"
papi_exec.get_replies(err_msg)
@staticmethod
def vpp_ipsec_create_tunnel_interfaces(
- nodes, tun_if1_ip_addr, tun_if2_ip_addr, if1_key, if2_key,
- n_tunnels, crypto_alg, integ_alg, raddr_ip1, raddr_ip2, raddr_range,
- existing_tunnels=0, return_keys=False):
+ nodes: dict,
+ tun_if1_ip_addr: str,
+ tun_if2_ip_addr: str,
+ if1_key: str,
+ if2_key: str,
+ n_tunnels: int,
+ crypto_alg: CryptoAlg,
+ integ_alg: Optional[IntegAlg],
+ raddr_ip1: str,
+ raddr_ip2: str,
+ raddr_range: int,
+ existing_tunnels: int = 0,
+ return_keys: bool = False,
+ ) -> Optional[Tuple[List[bytes], List[bytes], int, int]]:
"""Create multiple IPsec tunnel interfaces between two VPP nodes.
Some deployments (e.g. devicetest) need to know the generated keys.
@@ -1916,84 +1791,94 @@ class IPsecUtil:
:type if2_key: str
:type n_tunnels: int
:type crypto_alg: CryptoAlg
- :type integ_alg: Optonal[IntegAlg]
- :type raddr_ip1: string
- :type raddr_ip2: string
+ :type integ_alg: Optional[IntegAlg]
+ :type raddr_ip1: str
+ :type raddr_ip2: str
:type raddr_range: int
:type existing_tunnels: int
:type return_keys: bool
:returns: Ckeys, ikeys, spi_1, spi_2.
- :rtype: Optional[List[bytes], List[bytes], int, int]
+ :rtype: Optional[Tuple[List[bytes], List[bytes], int, int]]
"""
n_tunnels = int(n_tunnels)
existing_tunnels = int(existing_tunnels)
- spi_d = dict(
- spi_1=100000,
- spi_2=200000
- )
+ spi_d = dict(spi_1=100000, spi_2=200000)
tun_ips = dict(
- ip1=ip_address(tun_if1_ip_addr),
- ip2=ip_address(tun_if2_ip_addr)
+ ip1=ip_address(tun_if1_ip_addr), ip2=ip_address(tun_if2_ip_addr)
)
raddr_ip1 = ip_address(raddr_ip1)
raddr_ip2 = ip_address(raddr_ip2)
- addr_incr = 1 << (128 - raddr_range) if tun_ips[u"ip1"].version == 6 \
+ addr_incr = (
+ 1 << (128 - raddr_range)
+ if tun_ips["ip1"].version == 6
else 1 << (32 - raddr_range)
+ )
- if n_tunnels - existing_tunnels > 10:
- ckeys, ikeys = IPsecUtil._ipsec_create_tunnel_interfaces_dut1_vat(
- nodes, tun_ips, if1_key, if2_key, n_tunnels, crypto_alg,
- integ_alg, raddr_ip2, addr_incr, spi_d, existing_tunnels
- )
- if u"DUT2" in nodes.keys():
- IPsecUtil._ipsec_create_tunnel_interfaces_dut2_vat(
- nodes, tun_ips, if2_key, n_tunnels, crypto_alg, ckeys,
- integ_alg, ikeys, raddr_ip1, addr_incr, spi_d,
- existing_tunnels
- )
- else:
- ckeys, ikeys = IPsecUtil._ipsec_create_tunnel_interfaces_dut1_papi(
- nodes, tun_ips, if1_key, if2_key, n_tunnels, crypto_alg,
- integ_alg, raddr_ip2, addr_incr, spi_d, existing_tunnels
+ ckeys, ikeys = IPsecUtil._ipsec_create_tunnel_interfaces_dut1_papi(
+ nodes,
+ tun_ips,
+ if1_key,
+ if2_key,
+ n_tunnels,
+ crypto_alg,
+ integ_alg,
+ raddr_ip2,
+ addr_incr,
+ spi_d,
+ existing_tunnels,
+ )
+ if "DUT2" in nodes.keys():
+ IPsecUtil._ipsec_create_tunnel_interfaces_dut2_papi(
+ nodes,
+ tun_ips,
+ if2_key,
+ n_tunnels,
+ crypto_alg,
+ ckeys,
+ integ_alg,
+ ikeys,
+ raddr_ip1,
+ addr_incr,
+ spi_d,
+ existing_tunnels,
)
- if u"DUT2" in nodes.keys():
- IPsecUtil._ipsec_create_tunnel_interfaces_dut2_papi(
- nodes, tun_ips, if2_key, n_tunnels, crypto_alg, ckeys,
- integ_alg, ikeys, raddr_ip1, addr_incr, spi_d,
- existing_tunnels
- )
if return_keys:
- return ckeys, ikeys, spi_d[u"spi_1"], spi_d[u"spi_2"]
+ return ckeys, ikeys, spi_d["spi_1"], spi_d["spi_2"]
return None
@staticmethod
- def _create_ipsec_script_files(dut, instances):
+ def _create_ipsec_script_files(
+ dut: str, instances: int
+ ) -> List[TextIOWrapper]:
"""Create script files for configuring IPsec in containers
:param dut: DUT node on which to create the script files
:param instances: number of containers on DUT node
- :type dut: string
+ :type dut: str
:type instances: int
+    :returns: Opened file handles of the created script files.
+ :rtype: List[TextIOWrapper]
"""
scripts = []
for cnf in range(0, instances):
script_filename = (
f"/tmp/ipsec_create_tunnel_cnf_{dut}_{cnf + 1}.config"
)
- scripts.append(open(script_filename, 'w'))
+ scripts.append(open(script_filename, "w", encoding="utf-8"))
return scripts
@staticmethod
def _close_and_copy_ipsec_script_files(
- dut, nodes, instances, scripts):
+ dut: str, nodes: dict, instances: int, scripts: Sequence[TextIOWrapper]
+ ) -> None:
"""Close created scripts and copy them to containers
:param dut: DUT node on which to create the script files
:param nodes: VPP nodes
:param instances: number of containers on DUT node
:param scripts: dictionary holding the script files
- :type dut: string
+ :type dut: str
:type nodes: dict
:type instances: int
:type scripts: dict
@@ -2005,125 +1890,21 @@ class IPsecUtil:
)
scp_node(nodes[dut], script_filename, script_filename)
-
- @staticmethod
- def vpp_ipsec_create_tunnel_interfaces_in_containers(
- nodes, if1_ip_addr, if2_ip_addr, n_tunnels, crypto_alg, integ_alg,
- raddr_ip1, raddr_ip2, raddr_range, n_instances):
- """Create multiple IPsec tunnel interfaces between two VPP nodes.
-
- :param nodes: VPP nodes to create tunnel interfaces.
- :param if1_ip_addr: VPP node 1 interface IP4 address.
- :param if2_ip_addr: VPP node 2 interface IP4 address.
- :param n_tunnels: Number of tunnell interfaces to create.
- :param crypto_alg: The encryption algorithm name.
- :param integ_alg: The integrity algorithm name.
- :param raddr_ip1: Policy selector remote IPv4 start address for the
- first tunnel in direction node1->node2.
- :param raddr_ip2: Policy selector remote IPv4 start address for the
- first tunnel in direction node2->node1.
- :param raddr_range: Mask specifying range of Policy selector Remote
- IPv4 addresses. Valid values are from 1 to 32.
- :param n_instances: Number of containers.
- :type nodes: dict
- :type if1_ip_addr: str
- :type if2_ip_addr: str
- :type n_tunnels: int
- :type crypto_alg: CryptoAlg
- :type integ_alg: Optional[IntegAlg]
- :type raddr_ip1: string
- :type raddr_ip2: string
- :type raddr_range: int
- :type n_instances: int
- """
- spi_1 = 100000
- spi_2 = 200000
- addr_incr = 1 << (32 - raddr_range)
-
- dut1_scripts = IPsecUtil._create_ipsec_script_files(
- u"DUT1", n_instances
- )
- dut2_scripts = IPsecUtil._create_ipsec_script_files(
- u"DUT2", n_instances
- )
-
- for cnf in range(0, n_instances):
- dut1_scripts[cnf].write(
- u"create loopback interface\n"
- u"set interface state loop0 up\n\n"
- )
- dut2_scripts[cnf].write(
- f"ip route add {if1_ip_addr}/8 via "
- f"{ip_address(if2_ip_addr) + cnf + 100} memif1/{cnf + 1}\n\n"
- )
-
- for tnl in range(0, n_tunnels):
- cnf = tnl % n_instances
- ckey = getattr(
- gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg)), u"hex"
- )
- integ = u""
- ikey = getattr(
- gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg)), u"hex"
- )
- if integ_alg:
- integ = (
- f"integ-alg {integ_alg.alg_name} "
- f"local-integ-key {ikey} "
- f"remote-integ-key {ikey} "
- )
- # Configure tunnel end point(s) on left side
- dut1_scripts[cnf].write(
- u"set interface ip address loop0 "
- f"{ip_address(if1_ip_addr) + tnl * addr_incr}/32\n"
- f"create ipsec tunnel "
- f"local-ip {ip_address(if1_ip_addr) + tnl * addr_incr} "
- f"local-spi {spi_1 + tnl} "
- f"remote-ip {ip_address(if2_ip_addr) + cnf} "
- f"remote-spi {spi_2 + tnl} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"local-crypto-key {ckey} "
- f"remote-crypto-key {ckey} "
- f"instance {tnl // n_instances} "
- f"salt 0x0 "
- f"{integ} \n"
- f"set interface unnumbered ipip{tnl // n_instances} use loop0\n"
- f"set interface state ipip{tnl // n_instances} up\n"
- f"ip route add {ip_address(raddr_ip2)+tnl}/32 "
- f"via ipip{tnl // n_instances}\n\n"
- )
- # Configure tunnel end point(s) on right side
- dut2_scripts[cnf].write(
- f"set ip neighbor memif1/{cnf + 1} "
- f"{ip_address(if1_ip_addr) + tnl * addr_incr} "
- f"02:02:00:00:{17:02X}:{cnf:02X} static\n"
- f"create ipsec tunnel local-ip {ip_address(if2_ip_addr) + cnf} "
- f"local-spi {spi_2 + tnl} "
- f"remote-ip {ip_address(if1_ip_addr) + tnl * addr_incr} "
- f"remote-spi {spi_1 + tnl} "
- f"crypto-alg {crypto_alg.alg_name} "
- f"local-crypto-key {ckey} "
- f"remote-crypto-key {ckey} "
- f"instance {tnl // n_instances} "
- f"salt 0x0 "
- f"{integ}\n"
- f"set interface unnumbered ipip{tnl // n_instances} "
- f"use memif1/{cnf + 1}\n"
- f"set interface state ipip{tnl // n_instances} up\n"
- f"ip route add {ip_address(raddr_ip1) + tnl}/32 "
- f"via ipip{tnl // n_instances}\n\n"
- )
-
- IPsecUtil._close_and_copy_ipsec_script_files(
- u"DUT1", nodes, n_instances, dut1_scripts)
- IPsecUtil._close_and_copy_ipsec_script_files(
- u"DUT2", nodes, n_instances, dut2_scripts)
-
@staticmethod
def vpp_ipsec_add_multiple_tunnels(
- nodes, interface1, interface2, n_tunnels, crypto_alg, integ_alg,
- tunnel_ip1, tunnel_ip2, raddr_ip1, raddr_ip2, raddr_range,
- tunnel_addr_incr=True):
+ nodes: dict,
+ interface1: Union[str, int],
+ interface2: Union[str, int],
+ n_tunnels: int,
+ crypto_alg: CryptoAlg,
+ integ_alg: Optional[IntegAlg],
+ tunnel_ip1: str,
+ tunnel_ip2: str,
+ raddr_ip1: str,
+ raddr_ip2: str,
+ raddr_range: int,
+ tunnel_addr_incr: bool = True,
+ ) -> None:
"""Create multiple IPsec tunnels between two VPP nodes.
:param nodes: VPP nodes to create tunnels.
@@ -2143,15 +1924,15 @@ class IPsecUtil:
:param tunnel_addr_incr: Enable or disable tunnel IP address
incremental step.
:type nodes: dict
- :type interface1: str or int
- :type interface2: str or int
+ :type interface1: Union[str, int]
+ :type interface2: Union[str, int]
:type n_tunnels: int
:type crypto_alg: CryptoAlg
:type integ_alg: Optional[IntegAlg]
:type tunnel_ip1: str
:type tunnel_ip2: str
- :type raddr_ip1: string
- :type raddr_ip2: string
+ :type raddr_ip1: str
+ :type raddr_ip2: str
:type raddr_range: int
:type tunnel_addr_incr: bool
"""
@@ -2162,145 +1943,252 @@ class IPsecUtil:
sa_id_2 = 200000
spi_1 = 300000
spi_2 = 400000
- dut1_local_outbound_range = ip_network(f"{tunnel_ip1}/8", False).\
- with_prefixlen
- dut1_remote_outbound_range = ip_network(f"{tunnel_ip2}/8", False).\
- with_prefixlen
crypto_key = gen_key(
IPsecUtil.get_crypto_alg_key_len(crypto_alg)
).decode()
- integ_key = gen_key(
- IPsecUtil.get_integ_alg_key_len(integ_alg)
- ).decode() if integ_alg else u""
+ integ_key = (
+ gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg)).decode()
+ if integ_alg
+ else ""
+ )
- rmac = Topology.get_interface_mac(nodes[u"DUT2"], interface2) \
- if u"DUT2" in nodes.keys() \
- else Topology.get_interface_mac(nodes[u"TG"], interface2)
+ rmac = (
+ Topology.get_interface_mac(nodes["DUT2"], interface2)
+ if "DUT2" in nodes.keys()
+ else Topology.get_interface_mac(nodes["TG"], interface2)
+ )
IPsecUtil.vpp_ipsec_set_ip_route(
- nodes[u"DUT1"], n_tunnels, tunnel_ip1, raddr_ip2, tunnel_ip2,
- interface1, raddr_range, rmac)
-
- IPsecUtil.vpp_ipsec_add_spd(nodes[u"DUT1"], spd_id)
- IPsecUtil.vpp_ipsec_spd_add_if(nodes[u"DUT1"], spd_id, interface1)
- IPsecUtil.vpp_ipsec_add_spd_entry(
- nodes[u"DUT1"], spd_id, p_hi, PolicyAction.BYPASS, inbound=False,
- proto=50, laddr_range=dut1_local_outbound_range,
- raddr_range=dut1_remote_outbound_range
+ nodes["DUT1"],
+ n_tunnels,
+ tunnel_ip1,
+ raddr_ip2,
+ tunnel_ip2,
+ interface1,
+ raddr_range,
+ rmac,
)
- IPsecUtil.vpp_ipsec_add_spd_entry(
- nodes[u"DUT1"], spd_id, p_hi, PolicyAction.BYPASS, inbound=True,
- proto=50, laddr_range=dut1_remote_outbound_range,
- raddr_range=dut1_local_outbound_range
+
+ IPsecUtil.vpp_ipsec_add_spd(nodes["DUT1"], spd_id)
+ IPsecUtil.vpp_ipsec_spd_add_if(nodes["DUT1"], spd_id, interface1)
+
+ addr_incr = (
+ 1 << (128 - 96)
+ if ip_address(tunnel_ip1).version == 6
+ else 1 << (32 - 24)
)
+ for i in range(n_tunnels // (addr_incr**2) + 1):
+ dut1_local_outbound_range = ip_network(
+ f"{ip_address(tunnel_ip1) + i*(addr_incr**3)}/8", False
+ ).with_prefixlen
+ dut1_remote_outbound_range = ip_network(
+ f"{ip_address(tunnel_ip2) + i*(addr_incr**3)}/8", False
+ ).with_prefixlen
+
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ nodes["DUT1"],
+ spd_id,
+ p_hi,
+ PolicyAction.BYPASS,
+ inbound=False,
+ proto=50,
+ laddr_range=dut1_local_outbound_range,
+ raddr_range=dut1_remote_outbound_range,
+ )
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ nodes["DUT1"],
+ spd_id,
+ p_hi,
+ PolicyAction.BYPASS,
+ inbound=True,
+ proto=50,
+ laddr_range=dut1_remote_outbound_range,
+ raddr_range=dut1_local_outbound_range,
+ )
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes[u"DUT1"], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip1, tunnel_ip2, tunnel_addr_incr
+ nodes["DUT1"],
+ n_tunnels,
+ sa_id_1,
+ spi_1,
+ crypto_alg,
+ crypto_key,
+ integ_alg,
+ integ_key,
+ tunnel_ip1,
+ tunnel_ip2,
+ tunnel_addr_incr,
)
IPsecUtil.vpp_ipsec_add_spd_entries(
- nodes[u"DUT1"], n_tunnels, spd_id, priority=ObjIncrement(p_lo, 0),
- action=PolicyAction.PROTECT, inbound=False,
+ nodes["DUT1"],
+ n_tunnels,
+ spd_id,
+ priority=ObjIncrement(p_lo, 0),
+ action=PolicyAction.PROTECT,
+ inbound=False,
sa_id=ObjIncrement(sa_id_1, 1),
- raddr_range=NetworkIncrement(ip_network(raddr_ip2))
+ raddr_range=NetworkIncrement(ip_network(raddr_ip2)),
)
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes[u"DUT1"], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip2, tunnel_ip1, tunnel_addr_incr
+ nodes["DUT1"],
+ n_tunnels,
+ sa_id_2,
+ spi_2,
+ crypto_alg,
+ crypto_key,
+ integ_alg,
+ integ_key,
+ tunnel_ip2,
+ tunnel_ip1,
+ tunnel_addr_incr,
)
IPsecUtil.vpp_ipsec_add_spd_entries(
- nodes[u"DUT1"], n_tunnels, spd_id, priority=ObjIncrement(p_lo, 0),
- action=PolicyAction.PROTECT, inbound=True,
+ nodes["DUT1"],
+ n_tunnels,
+ spd_id,
+ priority=ObjIncrement(p_lo, 0),
+ action=PolicyAction.PROTECT,
+ inbound=True,
sa_id=ObjIncrement(sa_id_2, 1),
- raddr_range=NetworkIncrement(ip_network(raddr_ip1))
+ raddr_range=NetworkIncrement(ip_network(raddr_ip1)),
)
- if u"DUT2" in nodes.keys():
- rmac = Topology.get_interface_mac(nodes[u"DUT1"], interface1)
+ if "DUT2" in nodes.keys():
+ rmac = Topology.get_interface_mac(nodes["DUT1"], interface1)
IPsecUtil.vpp_ipsec_set_ip_route(
- nodes[u"DUT2"], n_tunnels, tunnel_ip2, raddr_ip1, tunnel_ip1,
- interface2, raddr_range, rmac)
-
- IPsecUtil.vpp_ipsec_add_spd(nodes[u"DUT2"], spd_id)
- IPsecUtil.vpp_ipsec_spd_add_if(nodes[u"DUT2"], spd_id, interface2)
- IPsecUtil.vpp_ipsec_add_spd_entry(
- nodes[u"DUT2"], spd_id, p_hi, PolicyAction.BYPASS,
- inbound=False, proto=50, laddr_range=dut1_remote_outbound_range,
- raddr_range=dut1_local_outbound_range
- )
- IPsecUtil.vpp_ipsec_add_spd_entry(
- nodes[u"DUT2"], spd_id, p_hi, PolicyAction.BYPASS,
- inbound=True, proto=50, laddr_range=dut1_local_outbound_range,
- raddr_range=dut1_remote_outbound_range
+ nodes["DUT2"],
+ n_tunnels,
+ tunnel_ip2,
+ raddr_ip1,
+ tunnel_ip1,
+ interface2,
+ raddr_range,
+ rmac,
)
+ IPsecUtil.vpp_ipsec_add_spd(nodes["DUT2"], spd_id)
+ IPsecUtil.vpp_ipsec_spd_add_if(nodes["DUT2"], spd_id, interface2)
+ for i in range(n_tunnels // (addr_incr**2) + 1):
+ dut2_local_outbound_range = ip_network(
+ f"{ip_address(tunnel_ip1) + i*(addr_incr**3)}/8", False
+ ).with_prefixlen
+ dut2_remote_outbound_range = ip_network(
+ f"{ip_address(tunnel_ip2) + i*(addr_incr**3)}/8", False
+ ).with_prefixlen
+
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ nodes["DUT2"],
+ spd_id,
+ p_hi,
+ PolicyAction.BYPASS,
+ inbound=False,
+ proto=50,
+ laddr_range=dut2_remote_outbound_range,
+ raddr_range=dut2_local_outbound_range,
+ )
+ IPsecUtil.vpp_ipsec_add_spd_entry(
+ nodes["DUT2"],
+ spd_id,
+ p_hi,
+ PolicyAction.BYPASS,
+ inbound=True,
+ proto=50,
+ laddr_range=dut2_local_outbound_range,
+ raddr_range=dut2_remote_outbound_range,
+ )
+
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes[u"DUT2"], n_tunnels, sa_id_1, spi_1, crypto_alg,
- crypto_key, integ_alg, integ_key, tunnel_ip1, tunnel_ip2,
- tunnel_addr_incr
+ nodes["DUT2"],
+ n_tunnels,
+ sa_id_1,
+ spi_1,
+ crypto_alg,
+ crypto_key,
+ integ_alg,
+ integ_key,
+ tunnel_ip1,
+ tunnel_ip2,
+ tunnel_addr_incr,
)
IPsecUtil.vpp_ipsec_add_spd_entries(
- nodes[u"DUT2"], n_tunnels, spd_id,
+ nodes["DUT2"],
+ n_tunnels,
+ spd_id,
priority=ObjIncrement(p_lo, 0),
- action=PolicyAction.PROTECT, inbound=True,
+ action=PolicyAction.PROTECT,
+ inbound=True,
sa_id=ObjIncrement(sa_id_1, 1),
- raddr_range=NetworkIncrement(ip_network(raddr_ip2))
+ raddr_range=NetworkIncrement(ip_network(raddr_ip2)),
)
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes[u"DUT2"], n_tunnels, sa_id_2, spi_2, crypto_alg,
- crypto_key, integ_alg, integ_key, tunnel_ip2, tunnel_ip1,
- tunnel_addr_incr
+ nodes["DUT2"],
+ n_tunnels,
+ sa_id_2,
+ spi_2,
+ crypto_alg,
+ crypto_key,
+ integ_alg,
+ integ_key,
+ tunnel_ip2,
+ tunnel_ip1,
+ tunnel_addr_incr,
)
IPsecUtil.vpp_ipsec_add_spd_entries(
- nodes[u"DUT2"], n_tunnels, spd_id,
+ nodes["DUT2"],
+ n_tunnels,
+ spd_id,
priority=ObjIncrement(p_lo, 0),
- action=PolicyAction.PROTECT, inbound=False,
+ action=PolicyAction.PROTECT,
+ inbound=False,
sa_id=ObjIncrement(sa_id_2, 1),
- raddr_range=NetworkIncrement(ip_network(raddr_ip1))
+ raddr_range=NetworkIncrement(ip_network(raddr_ip1)),
)
@staticmethod
- def vpp_ipsec_show_all(node):
+ def vpp_ipsec_show_all(node: dict) -> None:
"""Run "show ipsec all" debug CLI command.
:param node: Node to run command on.
:type node: dict
"""
- PapiSocketExecutor.run_cli_cmd(node, u"show ipsec all")
+ PapiSocketExecutor.run_cli_cmd(node, "show ipsec all")
@staticmethod
- def show_ipsec_security_association(node):
+ def show_ipsec_security_association(node: dict) -> None:
"""Show IPSec security association.
:param node: DUT node.
:type node: dict
"""
- cmds = [
- u"ipsec_sa_v3_dump"
- ]
- PapiSocketExecutor.dump_and_log(node, cmds)
+ cmd = "ipsec_sa_v5_dump"
+ PapiSocketExecutor.dump_and_log(node, [cmd])
@staticmethod
- def vpp_ipsec_flow_enale_rss(node, proto, type, function="default"):
+ def vpp_ipsec_flow_enable_rss(
+ node: dict, proto: str, rss_type: str, function: str = "default"
+ ) -> int:
"""Ipsec flow enable rss action.
:param node: DUT node.
:param proto: The flow protocol.
- :param type: RSS type.
+ :param rss_type: RSS type.
:param function: RSS function.
:type node: dict
:type proto: str
- :type type: str
+ :type rss_type: str
:type function: str
:returns: flow_index.
+ :rtype: int
"""
# TODO: to be fixed to use full PAPI when it is ready in VPP
- cmd = f"test flow add src-ip any proto {proto} rss function " \
- f"{function} rss types {type}"
+ cmd = (
+ f"test flow add src-ip any proto {proto} rss function"
+ f" {function} rss types {rss_type}"
+ )
stdout = PapiSocketExecutor.run_cli_cmd(node, cmd)
flow_index = stdout.split()[1]
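
For reference, the reflowed f-string above yields the same CLI text as before the patch; a minimal sketch with hypothetical argument values:

    proto, function, rss_type = "esp", "default", "esp"  # hypothetical values
    cmd = (
        f"test flow add src-ip any proto {proto} rss function"
        f" {function} rss types {rss_type}"
    )
    # -> "test flow add src-ip any proto esp rss function default rss types esp"
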
@@ -2308,7 +2196,8 @@ class IPsecUtil:
@staticmethod
def vpp_create_ipsec_flows_on_dut(
- node, n_flows, rx_queues, spi_start, interface):
+ node: dict, n_flows: int, rx_queues: int, spi_start: int, interface: str
+ ) -> None:
"""Create mutiple ipsec flows and enable flows onto interface.
:param node: DUT node.
@@ -2322,13 +2211,12 @@ class IPsecUtil:
:type rx_queues: int
:type spi_start: int
:type interface: str
- :returns: flow_index.
"""
for i in range(0, n_flows):
- rx_queue = i%rx_queues
-
+ rx_queue = i % rx_queues
spi = spi_start + i
flow_index = FlowUtil.vpp_create_ip4_ipsec_flow(
- node, "ESP", spi, "redirect-to-queue", value=rx_queue)
+ node, "ESP", spi, "redirect-to-queue", value=rx_queue
+ )
FlowUtil.vpp_flow_enable(node, interface, flow_index)
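
A minimal standalone sketch of the outbound-range arithmetic in the reworked SPD bypass loops above, using hypothetical IPv4 tunnel endpoints (the real keyword takes them from the test); only the range computation is shown, not the SPD API calls:

    from ipaddress import ip_address, ip_network

    tunnel_ip1, tunnel_ip2, n_tunnels = "100.0.0.1", "200.0.0.2", 10  # hypothetical
    addr_incr = 1 << (32 - 24)  # IPv4 branch of the expression in the patch
    for i in range(n_tunnels // (addr_incr**2) + 1):
        local_range = ip_network(
            f"{ip_address(tunnel_ip1) + i * (addr_incr**3)}/8", False
        ).with_prefixlen
        remote_range = ip_network(
            f"{ip_address(tunnel_ip2) + i * (addr_incr**3)}/8", False
        ).with_prefixlen
        print(local_range, remote_range)  # 100.0.0.0/8 200.0.0.0/8
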
diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 4a53f71a01..ff013307bc 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,18 +13,19 @@
"""Interface util library."""
+from json import loads
from time import sleep
from enum import IntEnum
from ipaddress import ip_address
from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.L2Util import L2Util
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
-from resources.libraries.python.parsers.JsonParser import JsonParser
from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.VPPUtil import VPPUtil
@@ -295,6 +296,21 @@ class InterfaceUtil:
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
+ def set_interface_xdp_off(node, pf_pcis):
+ """Detaches any currently attached XDP/BPF program from the specified
+ interfaces.
+
+ :param node: Topology node.
+ :param pf_pcis: List of node's interfaces PCI addresses.
+    :type node: dict
+ :type pf_pcis: list
+ """
+ for pf_pci in pf_pcis:
+ pf_eth = InterfaceUtil.pci_to_eth(node, pf_pci)
+ cmd = f"ip link set dev {pf_eth} xdp off"
+ exec_cmd_no_error(node, cmd, sudo=True)
+
+ @staticmethod
def set_interface_flow_control(node, pf_pcis, rxf=u"off", txf=u"off"):
"""Set Ethernet flow control for specified interfaces.
@@ -332,11 +348,13 @@ class InterfaceUtil:
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def vpp_set_interface_mtu(node, interface, mtu=9200):
- """Set Ethernet MTU on interface.
+ def vpp_set_interface_mtu(node, interface, mtu):
+ """Apply new MTU value to a VPP hardware interface.
+
+ The interface should be down when this is called.
:param node: VPP node.
- :param interface: Interface to setup MTU. Default: 9200.
+ :param interface: Interface to set MTU on.
:param mtu: Ethernet MTU size in Bytes.
:type node: dict
:type interface: str or int
@@ -346,18 +364,11 @@ class InterfaceUtil:
sw_if_index = Topology.get_interface_sw_index(node, interface)
else:
sw_if_index = interface
-
cmd = u"hw_interface_set_mtu"
err_msg = f"Failed to set interface MTU on host {node[u'host']}"
- args = dict(
- sw_if_index=sw_if_index,
- mtu=int(mtu)
- )
- try:
- with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_reply(err_msg)
- except AssertionError as err:
- logger.debug(f"Setting MTU failed.\n{err}")
+ args = dict(sw_if_index=sw_if_index, mtu=int(mtu))
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vpp_node_interfaces_ready_wait(node, retries=15):
@@ -712,9 +723,8 @@ class InterfaceUtil:
ret_code, stdout, _ = ssh.exec_command(cmd)
if int(ret_code) != 0:
raise RuntimeError(u"Get interface name and MAC failed")
- tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
- interfaces = JsonParser().parse_data(tmp)
+ interfaces = loads("{" + stdout.rstrip().replace("\n", ",") + "}")
for interface in node[u"interfaces"].values():
name = interfaces.get(interface[u"mac_address"])
if name is None:
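
The json.loads call above assumes the command prints one quoted "mac": "name" pair per line, which the added braces and the newline-to-comma substitution turn into a single JSON object; a small sketch with made-up values:

    from json import loads

    stdout = '"aa:bb:cc:dd:ee:01": "eth0"\n"aa:bb:cc:dd:ee:02": "eth1"'  # made up
    interfaces = loads("{" + stdout.rstrip().replace("\n", ",") + "}")
    # {'aa:bb:cc:dd:ee:01': 'eth0', 'aa:bb:cc:dd:ee:02': 'eth1'}
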
@@ -882,7 +892,7 @@ class InterfaceUtil:
err_msg = f"Failed to set VXLAN bypass on interface " \
f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args).get_replies(err_msg)
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vxlan_dump(node, interface=None):
@@ -1056,7 +1066,7 @@ class InterfaceUtil:
:raises RuntimeError: if it is unable to create GTPU interface on the
node.
"""
- cmd = u"gtpu_add_del_tunnel"
+ cmd = u"gtpu_add_del_tunnel_v2"
args = dict(
is_add=True,
src_address=IPAddress.create_ip_address_object(
@@ -1067,8 +1077,10 @@ class InterfaceUtil:
),
mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
encap_vrf_id=0,
- decap_next_index=2,
- teid=teid
+ decap_next_index=2, # ipv4
+ teid=teid,
+ # pdu_extension: Unused, false by default.
+ # qfi: Irrelevant when pdu_extension is not used.
)
err_msg = f"Failed to create GTPU tunnel interface " \
f"on host {node[u'host']}"
@@ -1311,7 +1323,7 @@ class InterfaceUtil:
node, u"set logging class af_xdp level debug"
)
- cmd = u"af_xdp_create"
+ cmd = u"af_xdp_create_v3"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1363,7 +1375,7 @@ class InterfaceUtil:
node, u"set logging class rdma level debug"
)
- cmd = u"rdma_create_v3"
+ cmd = u"rdma_create_v4"
pci_addr = Topology.get_interface_pci_addr(node, if_key)
args = dict(
name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1376,6 +1388,8 @@ class InterfaceUtil:
no_multi_seg=False,
max_pktlen=0,
# TODO: Apply desired RSS flags.
+ # rss4 kept 0 (auto) as API default.
+ # rss6 kept 0 (auto) as API default.
)
err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
@@ -1840,7 +1854,7 @@ class InterfaceUtil:
DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
# Initialize PCI VFs.
- DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
+ DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs=numvfs)
if not numvfs:
if osi_layer == u"L2":
@@ -1869,12 +1883,20 @@ class InterfaceUtil:
node, pf_dev, state=u"up"
)
- DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
- DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
+ vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
+ current_driver = DUTSetup.get_pci_dev_driver(
+ node, vf_pci_addr.replace(":", r"\:")
+ )
+ if current_driver:
+ DUTSetup.pci_vf_driver_unbind(
+ node, pf_pci_addr, vf_id
+ )
+ DUTSetup.pci_vf_driver_bind(
+ node, pf_pci_addr, vf_id, uio_driver
+ )
# Add newly created ports into topology file
vf_ifc_name = f"{ifc_key}_vif"
- vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
Topology.update_interface_name(
node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
@@ -1971,7 +1993,7 @@ class InterfaceUtil:
thread_data = VPPUtil.vpp_show_threads(node)
worker_cnt = len(thread_data) - 1
if not worker_cnt:
- return None
+ return
worker_ids = list()
if workers:
for item in thread_data:
@@ -1995,7 +2017,7 @@ class InterfaceUtil:
@staticmethod
def vpp_round_robin_rx_placement_on_all_duts(
- nodes, prefix, workers=None):
+ nodes, prefix, use_dp_cores=False):
"""Set Round Robin interface RX placement on worker threads
on all DUTs.
@@ -2006,14 +2028,18 @@ class InterfaceUtil:
:param nodes: Topology nodes.
:param prefix: Interface name prefix.
- :param workers: Comma separated worker index numbers intended for
- dataplane work.
+ :param use_dp_cores: Limit to dataplane cores.
:type nodes: dict
:type prefix: str
- :type workers: str
+ :type use_dp_cores: bool
"""
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
+ for node_name, node in nodes.items():
+ if node["type"] == NodeType.DUT:
+ workers = None
+ if use_dp_cores:
+ workers = BuiltIn().get_variable_value(
+ f"${{{node_name}_cpu_dp}}"
+ )
InterfaceUtil.vpp_round_robin_rx_placement(
node, prefix, workers
)
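
A small sketch of the Robot variable name the new use_dp_cores flag resolves; its value is assumed to be the same comma separated worker list the removed workers argument expected:

    node_name = "DUT1"  # hypothetical node name
    robot_variable = f"${{{node_name}_cpu_dp}}"  # -> "${DUT1_cpu_dp}"
    # The keyword reads this variable via BuiltIn().get_variable_value() and
    # passes its value (e.g. "1,2") as the workers string shown above.
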
diff --git a/resources/libraries/python/Iperf3.py b/resources/libraries/python/Iperf3.py
index 12d0633abc..a881ec9f06 100644
--- a/resources/libraries/python/Iperf3.py
+++ b/resources/libraries/python/Iperf3.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -34,11 +34,9 @@ class Iperf3:
self._c_affinity = None
@staticmethod
- def get_iperf_type(node):
+ def get_iperf_type():
"""Log and return the installed traffic generator type.
- :param node: Node from topology file.
- :type node: dict
:returns: Traffic generator type string.
:rtype: str
"""
diff --git a/resources/libraries/python/L2Util.py b/resources/libraries/python/L2Util.py
index 0f00787bd4..92c93ed9dd 100644
--- a/resources/libraries/python/L2Util.py
+++ b/resources/libraries/python/L2Util.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -156,7 +156,7 @@ class L2Util:
:type learn: bool
:type arp_term: bool
"""
- cmd = u"bridge_domain_add_del"
+ cmd = u"bridge_domain_add_del_v2"
err_msg = f"Failed to create L2 bridge domain on host {node[u'host']}"
args = dict(
bd_id=int(bd_id),
@@ -222,7 +222,7 @@ class L2Util:
sw_if_index1 = Topology.get_interface_sw_index(node, port_1)
sw_if_index2 = Topology.get_interface_sw_index(node, port_2)
- cmd1 = u"bridge_domain_add_del"
+ cmd1 = u"bridge_domain_add_del_v2"
args1 = dict(
bd_id=int(bd_id),
flood=True,
@@ -254,8 +254,10 @@ class L2Util:
f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd1, **args1).add(cmd2, **args2).add(cmd2, **args3)
- papi_exec.get_replies(err_msg)
+ # Cannot use get_replies due to VPP-2203.
+ papi_exec.add(cmd1, **args1).get_reply(err_msg)
+ papi_exec.add(cmd2, **args2).get_reply(err_msg)
+ papi_exec.add(cmd2, **args3).get_reply(err_msg)
@staticmethod
def vpp_setup_bidirectional_cross_connect(node, interface1, interface2):
@@ -293,7 +295,9 @@ class L2Util:
f"on host {node['host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg)
+ # Cannot use get_replies due to VPP-2203.
+ papi_exec.add(cmd, **args1).get_reply(err_msg)
+ papi_exec.add(cmd, **args2).get_reply(err_msg)
@staticmethod
def vpp_setup_bidirectional_l2_patch(node, interface1, interface2):
@@ -331,7 +335,9 @@ class L2Util:
f"on host {node['host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg)
+ # Cannot use get_replies due to VPP-2203.
+ papi_exec.add(cmd, **args1).get_reply(err_msg)
+ papi_exec.add(cmd, **args2).get_reply(err_msg)
@staticmethod
def linux_add_bridge(node, br_name, if_1, if_2, set_up=True):
diff --git a/resources/libraries/python/LispSetup.py b/resources/libraries/python/LispSetup.py
index 6579764596..9e3ef97aa3 100644
--- a/resources/libraries/python/LispSetup.py
+++ b/resources/libraries/python/LispSetup.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
diff --git a/resources/libraries/python/LoadBalancerUtil.py b/resources/libraries/python/LoadBalancerUtil.py
index 6810122257..471bc87e80 100644
--- a/resources/libraries/python/LoadBalancerUtil.py
+++ b/resources/libraries/python/LoadBalancerUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Intel and/or its affiliates.
+# Copyright (c) 2023 Intel and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -108,9 +108,10 @@ class LoadBalancerUtil:
target_port = kwargs.pop(u"target_port", 0)
node_port = kwargs.pop(u"node_port", 0)
new_len = kwargs.pop(u"new_len", 1024)
+ src_ip_sticky = kwargs.pop(u"src_ip_sticky", 0)
is_del = kwargs.pop(u"is_del", 0)
- cmd = u"lb_add_del_vip"
+ cmd = u"lb_add_del_vip_v2"
err_msg = f"Failed to add vip on host {node[u'host']}"
vip_addr = ip_address(vip_addr).packed
@@ -127,7 +128,8 @@ class LoadBalancerUtil:
target_port=target_port,
node_port=node_port,
new_flows_table_length=int(new_len),
- is_del=is_del
+ src_ip_sticky=src_ip_sticky,
+ is_del=is_del,
)
with PapiSocketExecutor(node) as papi_exec:
diff --git a/resources/libraries/python/MLRsearch/AbstractMeasurer.py b/resources/libraries/python/MLRsearch/AbstractMeasurer.py
deleted file mode 100644
index da66b4e174..0000000000
--- a/resources/libraries/python/MLRsearch/AbstractMeasurer.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining AbstractMeasurer class."""
-
-from abc import ABCMeta, abstractmethod
-
-
-class AbstractMeasurer(metaclass=ABCMeta):
- """Abstract class defining common API for measurement providers."""
-
- @abstractmethod
- def measure(self, duration, transmit_rate):
- """Perform trial measurement and return the result.
-
- :param duration: Trial duration [s].
- :param transmit_rate: Target transmit rate [tps].
- :type duration: float
- :type transmit_rate: float
- :returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement.ReceiveRateMeasurement
- """
diff --git a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py b/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py
deleted file mode 100644
index cca48ef798..0000000000
--- a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining AbstractSearchAlgorithm class."""
-
-from abc import ABCMeta, abstractmethod
-
-
-class AbstractSearchAlgorithm(metaclass=ABCMeta):
- """Abstract class defining common API for search algorithms."""
-
- def __init__(self, measurer):
- """Store the rate provider.
-
- :param measurer: Object able to perform trial or composite measurements.
- :type measurer: AbstractMeasurer.AbstractMeasurer
- """
- self.measurer = measurer
-
- @abstractmethod
- def narrow_down_intervals(
- self, min_rate, max_rate, packet_loss_ratios):
- """Perform measurements to narrow down intervals, return them.
-
- :param min_rate: Minimal target transmit rate [tps].
- Usually, tests are set to fail if search reaches this or below.
- :param max_rate: Maximal target transmit rate [tps].
- Usually computed from line rate and various other limits,
- to prevent failures or duration stretching in Traffic Generator.
- :param packet_loss_ratios: Ratios of packet loss to search for,
- e.g. [0.0, 0.005] for NDR and PDR.
- :type min_rate: float
- :type max_rate: float
- :type packet_loss_ratios: Iterable[float]
- :returns: Structure containing narrowed down intervals
- and their measurements.
- :rtype: List[ReceiveRateInterval.ReceiveRateInterval]
- """
diff --git a/resources/libraries/python/MLRsearch/MeasurementDatabase.py b/resources/libraries/python/MLRsearch/MeasurementDatabase.py
deleted file mode 100644
index 2f601d6260..0000000000
--- a/resources/libraries/python/MLRsearch/MeasurementDatabase.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining MeasurementDatabase class."""
-
-from .ReceiveRateInterval import ReceiveRateInterval
-from .PerDurationDatabase import PerDurationDatabase
-
-
-class MeasurementDatabase:
- """A structure holding measurement results.
-
- The implementation uses a dict from duration values
- to PerDurationDatabase instances.
-
- Several utility methods are added, accomplishing tasks useful for MLRsearch.
-
- This class contains the "find tightest bounds" parts of logic required
- by MLRsearch. One exception is lack of any special handling for maximal
- or minimal rates.
- """
-
- def __init__(self, measurements):
- """Store measurement results in per-duration databases.
-
- TODO: Move processing to a factory method,
- keep constructor only to store (presumably valid) values.
-
- If the measurements argument contains is a dict,
- the constructor assumes it contains the processed databases.
-
- :param measurements: The measurement results to store.
- :type measurements: Iterable[ReceiveRateMeasurement]
- """
- if isinstance(measurements, dict):
- self.data_for_duration = measurements
- else:
- self.data_for_duration = dict()
- # TODO: There is overlap with add() code. Worth extracting?
- for measurement in measurements:
- duration = measurement.duration
- if duration in self.data_for_duration:
- self.data_for_duration[duration].add(measurement)
- else:
- self.data_for_duration[duration] = PerDurationDatabase(
- duration, [measurement]
- )
- durations = sorted(self.data_for_duration.keys())
- self.current_duration = durations[-1] if duration else None
- self.previous_duration = durations[-2] if len(durations) > 1 else None
-
- def __repr__(self):
- """Return string executable to get equivalent instance.
-
- :returns: Code to construct equivalent instance.
- :rtype: str
- """
- return f"MeasurementDatabase(measurements={self.data_for_duration!r})"
-
- def set_current_duration(self, duration):
- """Remember what MLRsearch considers the current duration.
-
- Setting the same duration is allowed, setting smaller is not allowed.
-
- :param duration: Target trial duration of current phase, in seconds.
- :type duration: float
- :raises ValueError: If the duration is smaller than previous.
- """
- if duration < self.current_duration:
- raise ValueError(
- f"Duration {duration} shorter than current duration"
- f" {self.current_duration}"
- )
- if duration > self.current_duration:
- self.previous_duration = self.current_duration
- self.current_duration = duration
- self.data_for_duration[duration] = PerDurationDatabase(
- duration, list()
- )
- # Else no-op.
-
- def add(self, measurement):
- """Add a measurement. Duration has to match the set one.
-
- :param measurement: Measurement result to add to the database.
- :type measurement: ReceiveRateMeasurement
- """
- duration = measurement.duration
- if duration != self.current_duration:
- raise ValueError(
- f"{measurement!r} duration different than"
- f" {self.current_duration}"
- )
- self.data_for_duration[duration].add(measurement)
-
- def get_bounds(self, ratio):
- """Return 6 bounds: lower/upper, current/previous, tightest/second.
-
- Second tightest bounds are only returned for current duration.
- None instead of a measurement if there is no measurement of that type.
-
- The result cotains bounds in this order:
- 1. Tightest lower bound for current duration.
- 2. Tightest upper bound for current duration.
- 3. Tightest lower bound for previous duration.
- 4. Tightest upper bound for previous duration.
- 5. Second tightest lower bound for current duration.
- 6. Second tightest upper bound for current duration.
-
- :param ratio: Target ratio, valid has to be lower or equal.
- :type ratio: float
- :returns: Measurements acting as various bounds.
- :rtype: 6-tuple of Optional[PerDurationDatabase]
- """
- cur_lo1, cur_hi1, pre_lo, pre_hi, cur_lo2, cur_hi2 = [None] * 6
- duration = self.current_duration
- if duration is not None:
- data = self.data_for_duration[duration]
- cur_lo1, cur_hi1, cur_lo2, cur_hi2 = data.get_valid_bounds(ratio)
- duration = self.previous_duration
- if duration is not None:
- data = self.data_for_duration[duration]
- pre_lo, pre_hi, _, _ = data.get_valid_bounds(ratio)
- return cur_lo1, cur_hi1, pre_lo, pre_hi, cur_lo2, cur_hi2
-
- def get_results(self, ratio_list):
- """Return list of intervals for given ratios, from current duration.
-
- Attempt to construct valid intervals. If a valid bound is missing,
- use smallest/biggest target_tr for lower/upper bound.
- This can result in degenerate intervals.
-
- :param ratio_list: Ratios to create intervals for.
- :type ratio_list: Iterable[float]
- :returns: List of intervals.
- :rtype: List[ReceiveRateInterval]
- """
- ret_list = list()
- current_data = self.data_for_duration[self.current_duration]
- for ratio in ratio_list:
- lower_bound, upper_bound, _, _, _, _ = self.get_bounds(ratio)
- if lower_bound is None:
- lower_bound = current_data.measurements[0]
- if upper_bound is None:
- upper_bound = current_data.measurements[-1]
- ret_list.append(ReceiveRateInterval(lower_bound, upper_bound))
- return ret_list
diff --git a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py b/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py
deleted file mode 100644
index 0e6c8cfa58..0000000000
--- a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py
+++ /dev/null
@@ -1,485 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining MultipleLossRatioSearch class."""
-
-import logging
-import math
-import time
-
-from .MeasurementDatabase import MeasurementDatabase
-from .ProgressState import ProgressState
-from .ReceiveRateInterval import ReceiveRateInterval
-from .WidthArithmetics import (
- multiply_relative_width,
- step_down,
- step_up,
- multiple_step_down,
- multiple_step_up,
- half_step_up,
-)
-
-
-class MultipleLossRatioSearch:
- """Optimized binary search algorithm for finding bounds for multiple ratios.
-
- This is unofficially a subclass of AbstractSearchAlgorithm,
- but constructor signature is different.
-
- Traditional binary search algorithm needs initial interval
- (lower and upper bound), and returns final interval after bisecting
- (until some exit condition is met).
- The exit condition is usually related to the interval width,
- (upper bound value minus lower bound value).
-
- The optimized algorithm contains several improvements
- aimed to reduce overall search time.
-
- One improvement is searching for multiple intervals at once.
- The intervals differ by the target loss ratio. Lower bound
- has to have equal or smaller loss ratio, upper bound has to have larger.
-
- Next improvement is that the initial interval does not need to be valid.
- Imagine initial interval (10, 11) where loss at 11 is smaller
- than the searched ratio.
- The algorithm will try (11, 13) interval next, and if 13 is still smaller,
- (13, 17) and so on, doubling width until the upper bound is valid.
- The part when interval expands is called external search,
- the part when interval is bisected is called internal search.
-
- Next improvement is that trial measurements at small trial duration
- can be used to find a reasonable interval for full trial duration search.
- This results in more trials performed, but smaller overall duration
- in general.
-
- Next improvement is bisecting in logarithmic quantities,
- so that exit criteria can be independent of measurement units.
-
- Next improvement is basing the initial interval on receive rates.
-
- Final improvement is exiting early if the minimal value
- is not a valid lower bound.
-
- The complete search consist of several phases,
- each phase performing several trial measurements.
- Initial phase creates initial interval based on receive rates
- at maximum rate and at maximum receive rate (MRR).
- Final phase and preceding intermediate phases are performing
- external and internal search steps,
- each resulting interval is the starting point for the next phase.
- The resulting intervals of final phase is the result of the whole algorithm.
-
- Each non-initial phase uses its own trial duration.
- Any non-initial phase stops searching (for all ratios independently)
- when minimum is not a valid lower bound (at current duration),
- or all of the following is true:
- Both bounds are valid, bounds are measured at the current phase
- trial duration, interval width is less than the width goal
- for current phase.
-
- TODO: Review and update this docstring according to rst docs.
- """
-
- def __init__(
- self, measurer, final_relative_width=0.005,
- final_trial_duration=30.0, initial_trial_duration=1.0,
- number_of_intermediate_phases=2, timeout=600.0, debug=None,
- expansion_coefficient=2.0):
- """Store the measurer object and additional arguments.
-
- :param measurer: Rate provider to use by this search object.
- :param final_relative_width: Final lower bound transmit rate
- cannot be more distant that this multiple of upper bound [1].
- :param final_trial_duration: Trial duration for the final phase [s].
- :param initial_trial_duration: Trial duration for the initial phase
- and also for the first intermediate phase [s].
- :param number_of_intermediate_phases: Number of intermediate phases
- to perform before the final phase [1].
- :param timeout: The search will fail itself when not finished
- before this overall time [s].
- :param debug: Callable to use instead of logging.debug().
- :param expansion_coefficient: External search multiplies width by this.
- :type measurer: AbstractMeasurer.AbstractMeasurer
- :type final_relative_width: float
- :type final_trial_duration: float
- :type initial_trial_duration: float
- :type number_of_intermediate_phases: int
- :type timeout: float
- :type debug: Optional[Callable[[str], None]]
- :type expansion_coefficient: float
- """
- self.measurer = measurer
- self.final_trial_duration = float(final_trial_duration)
- self.final_relative_width = float(final_relative_width)
- self.number_of_intermediate_phases = int(number_of_intermediate_phases)
- self.initial_trial_duration = float(initial_trial_duration)
- self.timeout = float(timeout)
- self.state = None
- self.debug = logging.debug if debug is None else debug
- self.expansion_coefficient = float(expansion_coefficient)
-
- def narrow_down_intervals(self, min_rate, max_rate, packet_loss_ratios):
- """Perform initial phase, create state object, proceed with next phases.
-
- The current implementation requires the ratios so be unique and sorted.
- Also non-empty.
-
- :param min_rate: Minimal target transmit rate [tps].
- :param max_rate: Maximal target transmit rate [tps].
- :param packet_loss_ratios: Target ratios of packets loss to locate.
- :type min_rate: float
- :type max_rate: float
- :type packet_loss_ratios: Iterable[float]
- :returns: Structure containing narrowed down intervals
- and their measurements.
- :rtype: List[ReceiveRateInterval]
- :raises RuntimeError: If total duration is larger than timeout.
- Or if ratios list is (empty or) not sorted or unique.
- """
- min_rate = float(min_rate)
- max_rate = float(max_rate)
- packet_loss_ratios = [float(ratio) for ratio in packet_loss_ratios]
- if len(packet_loss_ratios) < 1:
- raise RuntimeError(u"At least one ratio is required!")
- if packet_loss_ratios != sorted(set(packet_loss_ratios)):
- raise RuntimeError(u"Input ratios have to be sorted and unique!")
- measurements = list()
- self.debug(f"First measurement at max rate: {max_rate}")
- measured = self.measurer.measure(
- duration=self.initial_trial_duration,
- transmit_rate=max_rate,
- )
- measurements.append(measured)
- initial_width_goal = self.final_relative_width
- for _ in range(self.number_of_intermediate_phases):
- initial_width_goal = multiply_relative_width(
- initial_width_goal, 2.0
- )
- max_lo = step_down(max_rate, initial_width_goal)
- mrr = max(min_rate, min(max_lo, measured.relative_receive_rate))
- self.debug(f"Second measurement at mrr: {mrr}")
- measured = self.measurer.measure(
- duration=self.initial_trial_duration,
- transmit_rate=mrr,
- )
- measurements.append(measured)
- # Attempt to get narrower width.
- if measured.loss_ratio > packet_loss_ratios[0]:
- max_lo = step_down(mrr, initial_width_goal)
- mrr2 = min(max_lo, measured.relative_receive_rate)
- else:
- mrr2 = step_up(mrr, initial_width_goal)
- if min_rate < mrr2 < max_rate:
- self.debug(f"Third measurement at mrr2: {mrr2}")
- measured = self.measurer.measure(
- duration=self.initial_trial_duration,
- transmit_rate=mrr2,
- )
- measurements.append(measured)
- # If mrr2 > mrr and mrr2 got zero loss,
- # it is better to do external search from mrr2 up.
- # To prevent bisection between mrr2 and max_rate,
- # we simply remove the max_rate measurement.
- # Similar logic applies to higher loss ratio goals.
- # Overall, with mrr2 measurement done, we never need
- # the first measurement done at max rate.
- measurements = measurements[1:]
- database = MeasurementDatabase(measurements)
- stop_time = time.monotonic() + self.timeout
- self.state = ProgressState(
- database, self.number_of_intermediate_phases,
- self.final_trial_duration, self.final_relative_width,
- packet_loss_ratios, min_rate, max_rate, stop_time
- )
- self.ndrpdr()
- return self.state.database.get_results(ratio_list=packet_loss_ratios)
-
- def ndrpdr(self):
- """Perform trials for this phase. State is updated in-place.
-
- Recursion to smaller durations is performed (if not performed yet).
-
- :raises RuntimeError: If total duration is larger than timeout.
- """
- state = self.state
- if state.phases > 0:
- # We need to finish preceding intermediate phases first.
- saved_phases = state.phases
- state.phases -= 1
- # Preceding phases have shorter duration.
- saved_duration = state.duration
- duration_multiplier = state.duration / self.initial_trial_duration
- phase_exponent = float(state.phases) / saved_phases
- state.duration = self.initial_trial_duration * math.pow(
- duration_multiplier, phase_exponent
- )
- # Shorter durations do not need that narrow widths.
- saved_width = state.width_goal
- state.width_goal = multiply_relative_width(saved_width, 2.0)
- # Recurse.
- self.ndrpdr()
- # Restore the state for current phase.
- state.width_goal = saved_width
- state.duration = saved_duration
- state.phases = saved_phases # Not needed, but just in case.
- self.debug(
- f"Starting phase with {state.duration} duration"
- f" and {state.width_goal} relative width goal."
- )
- failing_fast = False
- database = state.database
- database.set_current_duration(state.duration)
- while time.monotonic() < state.stop_time:
- for index, ratio in enumerate(state.packet_loss_ratios):
- new_tr = self._select_for_ratio(ratio)
- if new_tr is None:
- # Either this ratio is fine, or min rate got invalid result.
- # If fine, we will continue to handle next ratio.
- if index > 0:
- # First ratio passed, all next have a valid lower bound.
- continue
- lower_bound, _, _, _, _, _ = database.get_bounds(ratio)
- if lower_bound is None:
- failing_fast = True
- self.debug(u"No valid lower bound for this iteration.")
- break
- # First ratio is fine.
- continue
- # We have transmit rate to measure at.
- # We do not check duration versus stop_time here,
- # as some measurers can be unpredictably faster
- # than what duration suggests.
- measurement = self.measurer.measure(
- duration=state.duration,
- transmit_rate=new_tr,
- )
- database.add(measurement)
- # Restart ratio handling on updated database.
- break
- else:
- # No ratio needs measuring, we are done with this phase.
- self.debug(u"Phase done.")
- break
- # We have broken out of the for loop.
- if failing_fast:
- # Abort the while loop early.
- break
- # Not failing fast but database got updated, restart the while loop.
- else:
- # Time is up.
- raise RuntimeError(u"Optimized search takes too long.")
- # Min rate is not valid, but returning what we have
- # so next duration can recover.
-
- @staticmethod
- def improves(new_bound, lower_bound, upper_bound):
- """Return whether new bound improves upon old bounds.
-
- To improve, new_bound has to be not None,
- and between the old bounds (where the bound is not None).
-
- This piece of logic is commonly used, when we know old bounds
- from a primary source (e.g. current duration database)
- and new bound from a secondary source (e.g. previous duration database).
- Having a function allows "if improves(..):" construction to save space.
-
- :param new_bound: The bound we consider applying.
- :param lower_bound: Known bound, new_bound has to be higher to apply.
- :param upper_bound: Known bound, new_bound has to be lower to apply.
- :type new_bound: Optional[ReceiveRateMeasurement]
- :type lower_bound: Optional[ReceiveRateMeasurement]
- :type upper_bound: Optional[ReceiveRateMeasurement]
- :returns: Whether we can apply the new bound.
- :rtype: bool
- """
- if new_bound is None:
- return False
- if lower_bound is not None:
- if new_bound.target_tr <= lower_bound.target_tr:
- return False
- if upper_bound is not None:
- if new_bound.target_tr >= upper_bound.target_tr:
- return False
- return True
-
- def _select_for_ratio(self, ratio):
- """Return None or new target_tr to measure at.
-
- Returning None means either we have narrow enough valid interval
- for this ratio, or we are hitting min rate and should fail early.
-
- :param ratio: Loss ratio to ensure narrow valid bounds for.
- :type ratio: float
- :returns: The next target transmit rate to measure at.
- :rtype: Optional[float]
- :raises RuntimeError: If database inconsistency is detected.
- """
- state = self.state
- data = state.database
- bounds = data.get_bounds(ratio)
- cur_lo1, cur_hi1, pre_lo, pre_hi, cur_lo2, cur_hi2 = bounds
- pre_lo_improves = self.improves(pre_lo, cur_lo1, cur_hi1)
- pre_hi_improves = self.improves(pre_hi, cur_lo1, cur_hi1)
- # TODO: Detect also the other case for initial bisect, see below.
- if pre_lo_improves and pre_hi_improves:
- # We allowed larger width for previous phase
- # as single bisect here guarantees only one re-measurement.
- new_tr = self._bisect(pre_lo, pre_hi)
- if new_tr is not None:
- self.debug(f"Initial bisect for {ratio}, tr: {new_tr}")
- return new_tr
- if pre_lo_improves:
- new_tr = pre_lo.target_tr
- self.debug(f"Re-measuring lower bound for {ratio}, tr: {new_tr}")
- return new_tr
- if pre_hi_improves:
- # This can also happen when we did not do initial bisect
- # for this ratio yet, but the previous duration lower bound
- # for this ratio got already re-measured as previous duration
- # upper bound for previous ratio.
- new_tr = pre_hi.target_tr
- self.debug(f"Re-measuring upper bound for {ratio}, tr: {new_tr}")
- return new_tr
- if cur_lo1 is None and cur_hi1 is None:
- raise RuntimeError(u"No results found in databases!")
- if cur_lo1 is None:
- # Upper bound exists (cur_hi1).
- # We already tried previous lower bound.
- # So, we want to extend down.
- new_tr = self._extend_down(
- cur_hi1, cur_hi2, pre_hi, second_needed=False
- )
- self.debug(
- f"Extending down for {ratio}:"
- f" old {cur_hi1.target_tr} new {new_tr}"
- )
- return new_tr
- if cur_hi1 is None:
- # Lower bound exists (cur_lo1).
- # We already tried previous upper bound.
- # So, we want to extend up.
- new_tr = self._extend_up(cur_lo1, cur_lo2, pre_lo)
- self.debug(
- f"Extending up for {ratio}:"
- f" old {cur_lo1.target_tr} new {new_tr}"
- )
- return new_tr
- # Both bounds exist (cur_lo1 and cur_hi1).
- # cur_lo1 might have been selected for this ratio (we are bisecting)
- # or for previous ratio (we are extending down for this ratio).
- # Compute both estimates and choose the higher value.
- bisected_tr = self._bisect(cur_lo1, cur_hi1)
- extended_tr = self._extend_down(
- cur_hi1, cur_hi2, pre_hi, second_needed=True
- )
- # Only if both are not None we need to decide.
- if bisected_tr and extended_tr and extended_tr > bisected_tr:
- self.debug(
- f"Extending down for {ratio}:"
- f" old {cur_hi1.target_tr} new {extended_tr}"
- )
- new_tr = extended_tr
- else:
- self.debug(
- f"Bisecting for {ratio}: lower {cur_lo1.target_tr},"
- f" upper {cur_hi1.target_tr}, new {bisected_tr}"
- )
- new_tr = bisected_tr
- return new_tr
-
- def _extend_down(self, cur_hi1, cur_hi2, pre_hi, second_needed=False):
- """Return extended width below, or None if hitting min rate.
-
- If no second tightest (nor previous) upper bound is available,
- the behavior is governed by second_needed argument.
- If true, return None. If false, start from width goal.
- This is useful, as if a bisect is possible,
- we want to give it a chance.
-
- :param cur_hi1: Tightest upper bound for current duration. Has to exist.
- :param cur_hi2: Second tightest current upper bound, may not exist.
- :param pre_hi: Tightest upper bound, previous duration, may not exist.
- :param second_needed: Whether second tightest bound is required.
- :type cur_hi1: ReceiveRateMeasurement
- :type cur_hi2: Optional[ReceiveRateMeasurement]
- :type pre_hi: Optional[ReceiveRateMeasurement]
- :type second_needed: bool
- :returns: The next target transmit rate to measure at.
- :rtype: Optional[float]
- """
- state = self.state
- old_tr = cur_hi1.target_tr
- if state.min_rate >= old_tr:
- self.debug(u"Extend down hits min rate.")
- return None
- next_bound = cur_hi2
- if self.improves(pre_hi, cur_hi1, cur_hi2):
- next_bound = pre_hi
- if next_bound is None and second_needed:
- return None
- old_width = state.width_goal
- if next_bound is not None:
- old_width = ReceiveRateInterval(cur_hi1, next_bound).rel_tr_width
- old_width = max(old_width, state.width_goal)
- new_tr = multiple_step_down(
- old_tr, old_width, self.expansion_coefficient
- )
- new_tr = max(new_tr, state.min_rate)
- return new_tr
-
- def _extend_up(self, cur_lo1, cur_lo2, pre_lo):
- """Return extended width above, or None if hitting max rate.
-
- :param cur_lo1: Tightest lower bound for current duration. Has to exist.
- :param cur_lo2: Second tightest current lower bound, may not exist.
- :param pre_lo: Tightest lower bound, previous duration, may not exist.
- :type cur_lo1: ReceiveRateMeasurement
- :type cur_lo2: Optional[ReceiveRateMeasurement]
- :type pre_lo: Optional[ReceiveRateMeasurement]
- :returns: The next target transmit rate to measure at.
- :rtype: Optional[float]
- """
- state = self.state
- old_tr = cur_lo1.target_tr
- if state.max_rate <= old_tr:
- self.debug(u"Extend up hits max rate.")
- return None
- next_bound = cur_lo2
- if self.improves(pre_lo, cur_lo2, cur_lo1):
- next_bound = pre_lo
- old_width = state.width_goal
- if next_bound is not None:
- old_width = ReceiveRateInterval(cur_lo1, next_bound).rel_tr_width
- old_width = max(old_width, state.width_goal)
- new_tr = multiple_step_up(old_tr, old_width, self.expansion_coefficient)
- new_tr = min(new_tr, state.max_rate)
- return new_tr
-
- def _bisect(self, lower_bound, upper_bound):
- """Return middle rate or None if width is narrow enough.
-
- :param lower_bound: Measurement to use as a lower bound. Has to exist.
- :param upper_bound: Measurement to use as an upper bound. Has to exist.
- :type lower_bound: ReceiveRateMeasurement
- :type upper_bound: ReceiveRateMeasurement
- :returns: The next target transmit rate to measure at.
- :rtype: Optional[float]
- :raises RuntimeError: If database inconsistency is detected.
- """
- state = self.state
- width = ReceiveRateInterval(lower_bound, upper_bound).rel_tr_width
- if width <= state.width_goal:
- self.debug(u"No more bisects needed.")
- return None
- new_tr = half_step_up(lower_bound.target_tr, width, state.width_goal)
- return new_tr
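
The removed class docstring above describes external search as repeatedly widening the interval until a valid upper bound appears. A simplified, self-contained sketch of that idea, with linear widths and a plain callable standing in for trial measurements (function and argument names are illustrative, not from the deleted code, which works with relative widths):

    def external_search_up(lower, upper, is_valid_upper, max_rate, coeff=2.0):
        """Keep doubling the interval width until the upper bound is valid."""
        while not is_valid_upper(upper) and upper < max_rate:
            width = (upper - lower) * coeff
            lower, upper = upper, min(upper + width, max_rate)
        return lower, upper

    # Docstring example: start at (10, 11); if the loss ratio is exceeded only
    # above 16, the search visits (11, 13), then (13, 17) and stops there.
    print(external_search_up(10.0, 11.0, lambda rate: rate > 16.0, 100.0))
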
diff --git a/resources/libraries/python/MLRsearch/PerDurationDatabase.py b/resources/libraries/python/MLRsearch/PerDurationDatabase.py
deleted file mode 100644
index afdf48614b..0000000000
--- a/resources/libraries/python/MLRsearch/PerDurationDatabase.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining PerDurationDatabase class."""
-
-import copy
-
-
-class PerDurationDatabase:
- """List-like structure holding measurement results for one duration.
-
- This is a building block for MeasurementDatabase.
-
- This class hold measurements for single target duration value only,
- so the logic is quite simple.
-
- Several utility methods are added, accomplishing tasks useful for MLRsearch
- (to be called by MeasurementDatabase).
- """
-
- def __init__(self, duration, measurements):
- """Store (deep copy of) measurement results and normalize them.
-
- The results have to have the corresponding target duration,
- and there should be no duplicate target_tr values.
- Empty iterable (zero measurements) is an acceptable input.
-
- :param duration: All measurements have to have this target duration [s].
- :param measurements: The measurement results to store.
- :type duration: float
- :type measurements: Iterable[ReceiveRateMeasurement]
- :raises ValueError: If duration does not match or if TR duplicity.
- """
- self.duration = duration
- self.measurements = [copy.deepcopy(meas) for meas in measurements]
- self._normalize()
-
- def __repr__(self):
- """Return string executable to get equivalent instance.
-
- :returns: Code to construct equivalent instance.
- :rtype: str
- """
- return (
- u"PerDurationDatabase("
- f"duration={self.duration!r},"
- f"measurements={self.measurements!r})"
- )
-
- def _normalize(self):
- """Sort by target_tr, fail on detecting duplicate target_tr.
-
- Also set effective loss ratios.
-
- :raises ValueError: If duration does not match or if TR duplicity.
- """
- measurements = self.measurements
- measurements.sort(key=lambda measurement: measurement.target_tr)
- # Detect duplicated TRs.
- previous_tr = None
- for measurement in measurements:
- current_tr = measurement.target_tr
- if current_tr == previous_tr:
- raise ValueError(
- u"Transmit rate conflict:"
- f" {measurement!r} {previous_tr!r}"
- )
- previous_tr = current_tr
- # Update effective ratios.
- ratio_previous = None
- for measurement in measurements:
- if ratio_previous is None:
- ratio_previous = measurement.loss_ratio
- measurement.effective_loss_ratio = ratio_previous
- continue
- ratio_previous = max(ratio_previous, measurement.loss_ratio)
- measurement.effective_loss_ratio = ratio_previous
-
- def add(self, measurement):
- """Add measurement and normalize.
-
- :param measurement: Measurement result to add to the database.
- :type measurement: ReceiveRateMeasurement
- """
- # TODO: We should deepcopy either everywhere or nowhere.
- self.measurements.append(measurement)
- self._normalize()
-
- def get_valid_bounds(self, ratio):
- """Return None or a valid measurement for two tightest bounds.
-
- The validity of a measurement to act as a bound is determined
- by comparing the argument ratio with measurement's effective loss ratio.
-
- Both lower and upper bounds are returned, both tightest and second
- tightest. If some value is not available, None is returned instead.
-
- :param ratio: Target ratio, valid has to be lower or equal.
- :type ratio: float
- :returns: Tightest lower bound, tightest upper bound,
- second tightest lower bound, second tightest upper bound.
- :rtype: 4-tuple of Optional[ReceiveRateMeasurement]
- """
- lower_1, upper_1, lower_2, upper_2 = None, None, None, None
- for measurement in self.measurements:
- if measurement.effective_loss_ratio > ratio:
- if upper_1 is None:
- upper_1 = measurement
- continue
- upper_2 = measurement
- break
- lower_1, lower_2 = measurement, lower_1
- return lower_1, upper_1, lower_2, upper_2
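
A simplified sketch of the bound selection the removed get_valid_bounds() performs, using plain (target_tr, effective_loss_ratio) tuples instead of ReceiveRateMeasurement objects; values are hypothetical, and the effective ratios are non-decreasing (as _normalize() guarantees), so all lower bounds precede all upper bounds:

    ratio = 0.005  # target loss ratio
    measurements = [(1e6, 0.0), (2e6, 0.001), (3e6, 0.02), (4e6, 0.03)]
    lowers = [m for m in measurements if m[1] <= ratio]
    uppers = [m for m in measurements if m[1] > ratio]
    tightest_lower = lowers[-1] if lowers else None          # (2e6, 0.001)
    tightest_upper = uppers[0] if uppers else None           # (3e6, 0.02)
    second_lower = lowers[-2] if len(lowers) > 1 else None   # (1e6, 0.0)
    second_upper = uppers[1] if len(uppers) > 1 else None    # (4e6, 0.03)
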
diff --git a/resources/libraries/python/MLRsearch/ProgressState.py b/resources/libraries/python/MLRsearch/ProgressState.py
deleted file mode 100644
index 3610638990..0000000000
--- a/resources/libraries/python/MLRsearch/ProgressState.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ProgressState class."""
-
-
-class ProgressState:
- """Structure containing data to be passed around in recursion.
-
- This is basically a private class of MultipleRatioSearch,
- but keeping it in a separate file makes things more readable.
- """
-
- def __init__(
- self, database, phases, duration, width_goal, packet_loss_ratios,
- min_rate, max_rate, stop_time):
- """Convert and store the argument values.
-
- Also initializa the stored width for external search.
-
- :param result: Structure containing measured results.
- :param phases: How many intermediate phases to perform
- before the current one.
- :param duration: Trial duration to use in the current phase [s].
- :param width_goal: The goal relative width for the curreent phase.
- :param packet_loss_ratios: List of ratios for the current search.
- :param min_rate: Minimal target transmit rate available
- for the current search [tps].
- :param max_rate: Maximal target transmit rate available
- for the current search [tps].
- :param stop_time: Monotonic time [s] when we should fail on timeout.
- :type result: MeasurementDatabase
- :type phases: int
- :type duration: float
- :type width_goal: float
- :type packet_loss_ratios: Iterable[float]
- :type min_rate: float
- :type max_rate: float
- :type stop_time: float
- """
- self.database = database
- self.phases = int(phases)
- self.duration = float(duration)
- self.width_goal = float(width_goal)
- self.packet_loss_ratios = [
- float(ratio) for ratio in packet_loss_ratios
- ]
- self.min_rate = float(min_rate)
- self.max_rate = float(max_rate)
- self.stop_time = float(stop_time)
diff --git a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py b/resources/libraries/python/MLRsearch/ReceiveRateInterval.py
deleted file mode 100644
index 993561e396..0000000000
--- a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ReceiveRateInterval class."""
-
-import math
-
-
-class ReceiveRateInterval:
- """Structure defining two Rr measurements, and their relation."""
-
- def __init__(self, measured_low, measured_high):
- """Store the bound measurements and call sort.
-
- :param measured_low: Measurement for the lower bound.
- :param measured_high: Measurement for the upper bound.
- :type measured_low: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type measured_high: ReceiveRateMeasurement.ReceiveRateMeasurement
- """
- self.measured_low = measured_low
- self.measured_high = measured_high
- # Declare secondary quantities to appease pylint.
- self.abs_tr_width = None
- """Absolute width of target transmit rate. Upper minus lower."""
- self.rel_tr_width = None
- """Relative width of target transmit rate. Absolute divided by upper."""
- self.sort()
-
- def sort(self):
- """Sort bounds by target Tr, compute secondary quantities."""
- if self.measured_low.target_tr > self.measured_high.target_tr:
- self.measured_low, self.measured_high = (
- self.measured_high, self.measured_low
- )
- self.abs_tr_width = (
- self.measured_high.target_tr - self.measured_low.target_tr
- )
- self.rel_tr_width = self.abs_tr_width / self.measured_high.target_tr
-
- def __str__(self):
- """Return string as half-open interval."""
- return f"[{self.measured_low!s};{self.measured_high!s})"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"ReceiveRateInterval(measured_low={self.measured_low!r}," \
- f"measured_high={self.measured_high!r})"
-
- def width_in_goals(self, relative_width_goal):
- """Return float value.
-
- Relative width goal is some (negative) value on logarithmic scale.
- Current relative width is another logarithmic value.
- Return the latter divided by the former.
- This is useful when investigating how did surprising widths come to be.
-
- :param relative_width_goal: Upper bound times this is the goal
- difference between upper bound and lower bound.
- :type relative_width_goal: float
- :returns: Current width as logarithmic multiple of goal width [1].
- :rtype: float
- """
- return math.log(1.0 - self.rel_tr_width) / math.log(
- 1.0 - relative_width_goal)
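
A worked number for the logarithmic comparison in the removed width_in_goals() above (hypothetical values): a 2 % relative width measured against a 0.5 % goal comes out as about 4.03 goal widths rather than exactly 4.

    import math

    rel_tr_width, goal = 0.02, 0.005  # hypothetical width and goal
    print(math.log(1.0 - rel_tr_width) / math.log(1.0 - goal))  # ~4.0305
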
diff --git a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py b/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py
deleted file mode 100644
index c52934530e..0000000000
--- a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ReceiveRateMeasurement class."""
-
-
-class ReceiveRateMeasurement:
- """Structure defining the result of single Rr measurement."""
-
- def __init__(
- self, duration, target_tr, transmit_count, loss_count,
- approximated_duration=0.0, partial_transmit_count=0,
- effective_loss_ratio=None):
- """Constructor, normalize primary and compute secondary quantities.
-
- If approximated_duration is nonzero, it is stored.
- If approximated_duration is zero, duration value is stored.
- Either way, additional secondary quantities are computed
- from the stored value.
-
- If there is zero transmit_count, ratios are set to zero.
-
- In some cases, traffic generator does not attempt all the needed
- transactions. In that case, nonzero partial_transmit_count
- holds (an estimate of) count of the actually attempted transactions.
- This is used to populate some secondary quantities.
-
- TODO: Use None instead of zero?
-
- Field effective_loss_ratio is specific for use in MLRsearch,
- where measurements with lower loss ratio at higher target_tr
- cannot be relied upon if there is a measurement with higher loss ratio
- at lower target_tr. In this case, the higher loss ratio from
- other measurement is stored as effective loss ratio in this measurement.
- If None, the computed loss ratio of this measurement is used.
- If not None, the computed ratio can still be applied if it is larger.
-
- :param duration: Measurement duration [s].
- :param target_tr: Target transmit rate [pps].
- If bidirectional traffic is measured, this is bidirectional rate.
- :param transmit_count: Number of packets transmitted [1].
- :param loss_count: Number of packets transmitted but not received [1].
- :param approximated_duration: Estimate of the actual time of the trial.
- :param partial_transmit_count: Estimate count of actually attempted
- transactions.
- :param effective_loss_ratio: None or highest loss ratio so far.
- :type duration: float
- :type target_tr: float
- :type transmit_count: int
- :type loss_count: int
- :type approximated_duration: float
- :type partial_transmit_count: int
- """
- self.duration = float(duration)
- self.target_tr = float(target_tr)
- self.transmit_count = int(transmit_count)
- self.loss_count = int(loss_count)
- self.receive_count = transmit_count - loss_count
- self.transmit_rate = transmit_count / self.duration
- self.loss_rate = loss_count / self.duration
- self.receive_rate = self.receive_count / self.duration
- self.loss_ratio = (
- float(self.loss_count) / self.transmit_count
- if self.transmit_count > 0 else 1.0
- )
- self.effective_loss_ratio = self.loss_ratio
- if effective_loss_ratio is not None:
- if effective_loss_ratio > self.loss_ratio:
- self.effective_loss_ratio = float(effective_loss_ratio)
- self.receive_ratio = (
- float(self.receive_count) / self.transmit_count
- if self.transmit_count > 0 else 0.0
- )
- self.approximated_duration = (
- float(approximated_duration) if approximated_duration
- else self.duration
- )
- self.approximated_receive_rate = (
- self.receive_count / self.approximated_duration
- if self.approximated_duration > 0.0 else 0.0
- )
- # If the traffic generator is unreliable and sends less packets,
- # the absolute receive rate might be too low for next target.
- self.partial_transmit_count = (
- int(partial_transmit_count) if partial_transmit_count
- else self.transmit_count
- )
- self.partial_receive_ratio = (
- float(self.receive_count) / self.partial_transmit_count
- if self.partial_transmit_count > 0 else 0.0
- )
- self.partial_receive_rate = (
- self.target_tr * self.partial_receive_ratio
- )
- # We use relative packet ratios in order to support cases
- # where target_tr is in transactions per second,
- # but there are multiple packets per transaction.
- self.relative_receive_rate = (
- self.target_tr * self.receive_count / self.transmit_count
- )
-
- def __str__(self):
- """Return string reporting input and loss ratio."""
- return f"d={self.duration!s},Tr={self.target_tr!s}," \
- f"Df={self.loss_ratio!s}"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"ReceiveRateMeasurement(duration={self.duration!r}," \
- f"target_tr={self.target_tr!r}," \
- f"transmit_count={self.transmit_count!r}," \
- f"loss_count={self.loss_count!r}," \
- f"approximated_duration={self.approximated_duration!r}," \
- f"partial_transmit_count={self.partial_transmit_count!r}," \
- f"effective_loss_ratio={self.effective_loss_ratio!r})"
diff --git a/resources/libraries/python/MLRsearch/WidthArithmetics.py b/resources/libraries/python/MLRsearch/WidthArithmetics.py
deleted file mode 100644
index 21316c5441..0000000000
--- a/resources/libraries/python/MLRsearch/WidthArithmetics.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining utility functions for manipulating intervals."""
-
-import math
-
-
-ROUNDING_CONSTANT = 0.999999
-
-def multiply_relative_width(relative_width, coefficient):
- """Return relative width corresponding to multiplied logarithmic width.
-
- The multiplication happens in logarithmic space,
- so the resulting relative width is always less than 1.
-
- :param relative_width: The base relative width to multiply.
- :param coefficient: Multiply by this in logarithmic space.
- :type relative_width: float
- :type coefficient: float
- :returns: The relative width of multiplied logarithmic size.
- :rtype: float
- """
- old_log_width = math.log(1.0 - relative_width)
- # Slight decrease to prevent rounding errors from prolonging the search.
- # TODO: Make the nines configurable.
- new_log_width = old_log_width * coefficient * ROUNDING_CONSTANT
- return 1.0 - math.exp(new_log_width)
-
-def halve_relative_width(relative_width, goal_width):
- """Return relative width corresponding to half logarithmic width.
-
- The logic attempts to save some halvings in future by performing
- uneven split. If rounding error risk is detected,
- even split is used.
-
- :param relative_width: The base relative width to halve.
- :param goal_width: Width goal for final phase.
- :type relative_width: float
- :type goal_width: float
- :returns: The relative width of half logarithmic size.
- :rtype: float
- """
- fallback_width = 1.0 - math.sqrt(1.0 - relative_width)
- # Wig means Width In Goals.
- wig = math.log(1.0 - relative_width) / math.log(1.0 - goal_width)
- cwig = 2.0 * math.ceil(wig / 2.0)
- fwig = 2.0 * math.ceil(wig * ROUNDING_CONSTANT / 2.0)
- if wig <= 2.0 or cwig != fwig:
- # Avoid too uneven splits.
- return fallback_width
- coefficient = cwig / 2
- new_width = multiply_relative_width(goal_width, coefficient)
- return new_width
-
-def step_down(current_bound, relative_width):
- """Return rate of logarithmic width below.
-
- :param current_bound: The current target transmit rate to move [pps].
- :param relative_width: The base relative width to use.
- :type current_bound: float
- :type relative_width: float
- :returns: Transmit rate smaller by relative width [pps].
- :rtype: float
- """
- return current_bound * (1.0 - relative_width)
-
-def step_up(current_bound, relative_width):
- """Return rate of logarithmic width above.
-
- :param current_bound: The current target transmit rate to move [pps].
- :param relative_width: The base relative width to use.
- :type current_bound: float
- :type relative_width: float
- :returns: Transmit rate larger by logarithmically double width [pps].
- :rtype: float
- """
- return current_bound / (1.0 - relative_width)
-
-def multiple_step_down(current_bound, relative_width, coefficient):
- """Return rate of multiplied logarithmic width below.
-
- The multiplication happens in logarithmic space,
- so the resulting applied relative width is always less than 1.
-
- :param relative_width: The base relative width to double.
- :param current_bound: The current target transmit rate to move [pps].
- :param coefficient: Multiply by this in logarithmic space.
- :type relative_width: float
- :type current_bound: float
- :type coefficient: float
- :returns: Transmit rate smaller by logarithmically multiplied width [pps].
- :rtype: float
- """
- new_width = multiply_relative_width(relative_width, coefficient)
- return step_down(current_bound, new_width)
-
-def multiple_step_up(current_bound, relative_width, coefficient):
- """Return rate of double logarithmic width above.
-
- The multiplication happens in logarithmic space,
- so the resulting applied relative width is always less than 1.
-
- :param current_bound: The current target transmit rate to move [pps].
- :param relative_width: The base relative width to double.
- :param coefficient: Multiply by this in logarithmic space.
- :type current_bound: float
- :type relative_width: float
- :type coefficient: float
- :returns: Transmit rate larger by logarithmically multiplied width [pps].
- :rtype: float
- """
- new_width = multiply_relative_width(relative_width, coefficient)
- return step_up(current_bound, new_width)
-
-def half_step_up(current_bound, relative_width, goal_width):
- """Return rate of half logarithmic width above.
-
- :param relative_width: The base relative width to halve.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type current_bound: float
- :returns: Transmit rate larger by logarithmically half width [pps].
- :rtype: float
- """
- new_width = halve_relative_width(relative_width, goal_width)
- return step_up(current_bound, new_width)
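The removed helpers above all operate in logarithmic space: a relative width w stands for log(1.0 - w), so scaling a width means scaling that logarithm. A minimal standalone sketch of the identity with plain floats (omitting the ROUNDING_CONSTANT safety factor used above):

```python
import math

def multiply_relative_width(relative_width: float, coefficient: float) -> float:
    # Same log-space scaling as above, minus the rounding safety factor.
    return 1.0 - math.exp(math.log(1.0 - relative_width) * coefficient)

# Doubling a 0.5% relative width in log space gives slightly under 1%.
doubled = multiply_relative_width(0.005, 2.0)
assert 0.0099 < doubled < 0.0100

# step_down followed by step_up with the same width is an identity.
rate = 1e6
down = rate * (1.0 - doubled)   # step_down
back = down / (1.0 - doubled)   # step_up
assert abs(back - rate) < 1e-3
```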
diff --git a/resources/libraries/python/MLRsearch/__init__.py b/resources/libraries/python/MLRsearch/__init__.py
index 35ef812179..09ce7e6719 100644
--- a/resources/libraries/python/MLRsearch/__init__.py
+++ b/resources/libraries/python/MLRsearch/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -14,3 +14,17 @@
"""
__init__ file for Python package "MLRsearch".
"""
+
+# TODO: Move submodules to separate modules.
+# Not obvious how to do that from PyPI point of view
+# without affecting the current CSIT global "resources" package root.
+# Probably it can be done by specifying multiple directories
+# in PYTHONPATH used throughout CSIT.
+
+# Import user-facing (API) stuff, so users do not need to know submodules.
+from .config import Config
+from .goal_result import GoalResult
+from .multiple_loss_ratio_search import MultipleLossRatioSearch
+from .pep3140 import Pep3140Dict
+from .search_goal import SearchGoal
+from .trial_measurement import AbstractMeasurer, MeasurementResult
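With these re-exports in place, a caller only needs the package itself to configure and run a search. A minimal usage sketch, assuming the CSIT repository root is on PYTHONPATH; the MultipleLossRatioSearch constructor and search() signatures are not shown in this hunk, so the commented lines are assumptions for illustration only:

```python
# The public names resolve from the package root; submodules stay internal.
from resources.libraries.python.MLRsearch import (
    Config, MultipleLossRatioSearch, SearchGoal
)

config = Config()
config.goals = [SearchGoal()]   # one goal with the default criteria

# algorithm = MultipleLossRatioSearch(config=config)          # assumed signature
# result = algorithm.search(measurer=my_measurer)             # my_measurer: AbstractMeasurer
```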
diff --git a/resources/libraries/python/MLRsearch/candidate.py b/resources/libraries/python/MLRsearch/candidate.py
new file mode 100644
index 0000000000..16bbe60bae
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/candidate.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Candidate class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from functools import total_ordering
+from typing import Optional
+
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .selector import Selector
+
+
+@total_ordering
+@dataclass(frozen=True)
+class Candidate:
+ """Class describing next trial inputs, as nominated by a selector.
+
+ As each selector is notified by the controller when its nominated load
+ becomes the winner, a reference to the selector is also included here.
+
+ The rest of the code focuses on defining the ordering between candidates.
+ When two instances are compared, the lesser has higher priority
+ for choosing which trial is actually performed next.
+
+ As Python implicitly converts values to bool in many places
+ (e.g. in "if" statement), any instance is called "truthy" if it converts
+ to True, and "falsy" if it converts to False.
+ To make such places nice and readable, __bool__ method is implemented
+ in a way that a candidate instance is falsy if its load is None.
+ As a falsy candidate never gets measured,
+ other fields of a falsy instance are irrelevant.
+ """
+
+ load: Optional[DiscreteLoad] = None
+ """Measure at this intended load. None if no load nominated by selector."""
+ duration: float = None
+ """Trial duration as chosen by the selector."""
+ width: Optional[DiscreteWidth] = None
+ """Set the global width to this when this candidate becomes the winner."""
+ selector: Selector = None
+ """Reference to the selector instance which nominated this candidate."""
+
+ def __str__(self) -> str:
+ """Convert trial inputs into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"d={self.duration},l={self.load}"
+
+ def __eq__(self, other: Candidate) -> bool:
+ """Return wheter self is identical to the other candidate.
+
+ This is just a pretense for total ordering wrapper to work.
+ In reality, MLRsearch shall never test equivalence,
+ so we save space by just raising RuntimeError if this is ever called.
+
+ :param other: The other instance to compare to.
+ :type other: Candidate
+ :returns: True if the instances are equivalent.
+ :rtype: bool
+ :raises RuntimeError: Always, to prevent unintended usage.
+ """
+ raise RuntimeError("Candidate equality comparison shall not be needed.")
+
+ def __lt__(self, other: Candidate) -> bool:
+ """Return whether self should be measured before other.
+
+ In the decreasing order of importance:
+ Non-None load is preferred.
+ Self is less than other when both loads are None.
+ Lower offered load is preferred.
+ Longer trial duration is preferred.
+ Non-None width is preferred.
+ Larger width is preferred.
+ Self is preferred.
+
+ The logic comes from the desire to save time and being conservative.
+
+ :param other: The other instance to compare to.
+ :type other: Candidate
+ :returns: True if self should be measured sooner.
+ :rtype: bool
+ """
+ if not self.load:
+ if other.load:
+ return False
+ return True
+ if not other.load:
+ return True
+ if self.load < other.load:
+ return True
+ if self.load > other.load:
+ return False
+ if self.duration > other.duration:
+ return True
+ if self.duration < other.duration:
+ return False
+ if not self.width:
+ if other.width:
+ return False
+ return True
+ if not other.width:
+ return True
+ return self.width >= other.width
+
+ def __bool__(self) -> bool:
+ """Does this candidate choose to perform any trial measurement?
+
+ :returns: True if yes, it does choose to perform.
+ :rtype: bool
+ """
+ return bool(self.load)
+
+ @staticmethod
+ def nomination_from(selector: Selector) -> Candidate:
+ """Call nominate on selector, wrap into Candidate instance to return.
+
+ We avoid dependency cycle while letting candidate depend on selector,
+ therefore selector cannot know how to wrap its nomination
+ into a full candidate instance.
+ This factory method finishes the wrapping.
+
+ :param selector: Selector to call.
+ :type selector: Selector
+ :returns: Newly created Candidate instance with nominated trial inputs.
+ :rtype: Candidate
+ """
+ load, duration, width = selector.nominate()
+ return Candidate(
+ load=load,
+ duration=duration,
+ width=width,
+ selector=selector,
+ )
+
+ def won(self) -> None:
+ """Inform selector its candidate became a winner."""
+ self.selector.won(self.load)
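This ordering lets the controller pick the next trial simply by taking min() over all nominated candidates, with falsy (load is None) candidates losing to any real nomination. A toy re-statement of the pattern with plain floats; it is not the real Candidate, which compares DiscreteLoad and DiscreteWidth values and carries a Selector reference:

```python
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional

@total_ordering
@dataclass(frozen=True)
class ToyCandidate:
    """Illustrative stand-in: lower load wins, longer duration breaks ties."""
    load: Optional[float] = None
    duration: float = 0.0

    def __bool__(self) -> bool:
        return self.load is not None

    def __eq__(self, other: "ToyCandidate") -> bool:
        return (self.load, self.duration) == (other.load, other.duration)

    def __lt__(self, other: "ToyCandidate") -> bool:
        if self.load is None:
            return other.load is None
        if other.load is None:
            return True
        if self.load != other.load:
            return self.load < other.load
        return self.duration > other.duration

nominations = [ToyCandidate(), ToyCandidate(2e6, 1.0), ToyCandidate(1e6, 30.0)]
winner = min(nominations)
assert winner and winner.load == 1e6
```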
diff --git a/resources/libraries/python/MLRsearch/config.py b/resources/libraries/python/MLRsearch/config.py
new file mode 100644
index 0000000000..7aa8ed75a8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/config.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Config class."""
+
+from collections.abc import Iterable
+from dataclasses import dataclass
+from typing import Optional
+
+from .dataclass import DataclassProperty
+from .search_goal import SearchGoal
+from .search_goal_tuple import SearchGoalTuple
+
+
+@dataclass
+class Config:
+ """Structure containing several static config items.
+
+ The main MLRsearch algorithm uses multiple customizable values.
+ Pylint complains if the values appear as long argument lists
+ or multiple local variables.
+
+ This class offers a storage for values which do not contain
+ internally mutable state and are set at an unknown time
+ before the search starts. This way users can override only some values,
+ and do it over multiple calls.
+ All "official" user inputs are contained here.
+
+ Properties are defined to enforce the requirements on allowed values.
+ All fields have default values, so instances can be created without any.
+ It is still recommended to set all values after instantiation,
+ as the defaults may change in the next version.
+
+ As some relations between values of different fields are required,
+ users must take care to set them in the correct order.
+
+ For example, min_load has to be set to a value smaller
+ than the current value of max_load.
+ """
+
+ # Externally visible "fields" (but in fact redefined as properties).
+ goals: SearchGoalTuple = SearchGoalTuple((SearchGoal(),))
+ """Container holding search goals."""
+ min_load: float = 9001.0
+ """Each trial measurement must have intended load at least this [tps]."""
+ max_load: float = 1e9
+ """Each trial measurement must have intended load at most this [tps]."""
+ search_duration_max: float = 1200.0
+ """The search will end as a failure this long [s] after it is started."""
+ warmup_duration: float = 1.0
+ """If specified, one trial at max load and this duration is performed
+ before the usual search starts. None converts to zero and means no warmup.
+ The results of that one trial are ignored."""
+
+ @DataclassProperty
+ def goals(self) -> SearchGoalTuple:
+ """Return the reference to the current container of goals.
+
+ :returns: The current container instance.
+ :rtype: SearchGoalTuple
+ """
+ return self._goals
+
+ @goals.setter
+ def goals(self, goals: Iterable[SearchGoal]) -> None:
+ """Create and store the goal container.
+
+ :param goals: Search goals to add to the container to store.
+ :type goals: Iterable[SearchGoal]
+ :raises ValueError: If there are no goals.
+ :raises TypeError: If any of the goals is not a SearchGoal.
+ """
+ self._goals = SearchGoalTuple(goals)
+
+ @DataclassProperty
+ def min_load(self) -> float:
+ """Getter for min load, no logic here.
+
+ :returns: Currently set minimal intended load [tps].
+ :rtype: float
+ """
+ return self._min_load
+
+ @min_load.setter
+ def min_load(self, load: float) -> None:
+ """Set min load after converting type and checking value.
+
+ :param load: Minimal intended load [tps] to set.
+ :type load: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ load = float(load)
+ if load <= 0.0:
+ raise ValueError(f"Min load {load} must be positive.")
+ # At the time init is first called, _max_load is not set yet.
+ if hasattr(self, "_max_load") and load >= self.max_load:
+ raise ValueError(f"Min load {load} must be smaller.")
+ self._min_load = load
+
+ @DataclassProperty
+ def max_load(self) -> float:
+ """Getter for max load, no logic here.
+
+ :returns: Currently set maximal intended load [tps].
+ :rtype: float
+ """
+ return self._max_load
+
+ @max_load.setter
+ def max_load(self, load: float) -> None:
+ """Set max load after converting type and checking value.
+
+ :param load: Maximal intended load [tps] to set.
+ :type load: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ load = float(load)
+ if load <= self.min_load:
+ raise ValueError(f"Max load {load} must be bigger.")
+ self._max_load = load
+
+ @DataclassProperty
+ def search_duration_max(self) -> float:
+ """Getter for max search duration, no logic here.
+
+ :returns: Currently set max search duration [s].
+ :rtype: float
+ """
+ return self._search_duration_max
+
+ @search_duration_max.setter
+ def search_duration_max(self, duration: float) -> None:
+ """Set max search duration after converting and checking value.
+
+ :param duration: Search duration maximum [s] to set.
+ :type duration: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ duration = float(duration)
+ if duration <= 0.0:
+ raise ValueError(f"Search duration max too small: {duration}")
+ self._search_duration_max = duration
+
+ @DataclassProperty
+ def warmup_duration(self) -> float:
+ """Getter for warmup duration, no logic here.
+
+ :returns: Currently set warmup duration [s].
+ :rtype: float
+ """
+ return self._warmup_duration
+
+ @warmup_duration.setter
+ def warmup_duration(self, duration: Optional[float]) -> None:
+ """Set warmup duration after converting and checking value.
+
+ Zero duration is treated as None, meaning no warmup trial.
+
+ :param duration: Warmup duration [s] to set.
+ :type duration: Optional(float)
+ :raises ValueError: If the argument is found invalid.
+ """
+ if duration:
+ duration = float(duration)
+ if duration < 0.0:
+ raise ValueError(f"Warmup duration too small: {duration}")
+ else:
+ duration = 0.0
+ self._warmup_duration = duration
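Because the fields are properties, invalid values are rejected at assignment time, and assignments have to respect the relation between min_load and max_load (raise the maximum before raising the minimum above the old maximum). A short sketch against the defaults shown above, assuming the CSIT repository root is importable:

```python
from resources.libraries.python.MLRsearch import Config

config = Config()          # defaults: min_load=9001.0, max_load=1e9
config.max_load = 5e6      # fine: larger than the current min_load
config.min_load = 1e4      # fine: positive and below the current max_load

try:
    config.min_load = 1e7  # rejected: not smaller than max_load (5e6)
except ValueError as error:
    print(error)
```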
diff --git a/resources/libraries/python/parsers/__init__.py b/resources/libraries/python/MLRsearch/dataclass/__init__.py
index 1b58a3cf17..e546b090c9 100644
--- a/resources/libraries/python/parsers/__init__.py
+++ b/resources/libraries/python/MLRsearch/dataclass/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -12,5 +12,8 @@
# limitations under the License.
"""
-__init__ file for resources/libraries/python/parsers
+__init__ file for Python package "MLRsearch.dataclass".
"""
+
+from .dc_property import DataclassProperty
+from .field import secondary_field
diff --git a/resources/libraries/python/MLRsearch/dataclass/dc_property.py b/resources/libraries/python/MLRsearch/dataclass/dc_property.py
new file mode 100644
index 0000000000..7f3b49aeb8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/dc_property.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DataclassProperty class.
+
+The main issue that needs support is dataclasses with properties
+(including setters) and with (immutable) default values.
+
+First, this explains how property ends up passed as default constructor value:
+https://florimond.dev/en/posts/2018/10/
+/reconciling-dataclasses-and-properties-in-python/
+TL;DR: By the time __init__ is generated, original class variable (type hint)
+is replaced by property (method definition).
+
+Second, there are ways to deal with that:
+https://stackoverflow.com/a/61480946
+TL;DR: It relies on the underscored field being replaced by the value.
+
+But that does not work for field which use default_factory (or no default)
+(the underscored class field is deleted instead).
+So another way is needed to cover those cases,
+ideally without the need to define both original and underscored field.
+
+This implementation relies on a fact that decorators are executed
+when the class fields do yet exist, and decorated function
+does know its name, so the decorator can get the value stored in
+the class field, and store it as an additional attribute of the getter function.
+Then for setter, the property contains the getter (as an unbound function),
+so it can access the additional attribute to get the value.
+
+This approach circumvents the precautions dataclasses take to prevent mishaps
+when a single mutable object is shared between multiple instances.
+So it is up to setters to create an appropriate copy of the default object
+if the default value is mutable.
+
+The default value cannot be MISSING nor Field nor DataclassProperty,
+otherwise the intended logic breaks.
+"""
+
+from __future__ import annotations
+
+from dataclasses import Field, MISSING
+from functools import wraps
+from inspect import stack
+from typing import Callable, Optional, TypeVar, Union
+
+
+Self = TypeVar("Self")
+"""Type for the dataclass instances being created using properties."""
+Value = TypeVar("Value")
+"""Type for the value the property (getter, setter) handles."""
+
+
+def _calling_scope_variable(name: str) -> Value:
+ """Get a variable from a higher scope.
+
+ This feels dirty, but without this the syntactic sugar
+ would not be sweet enough.
+
+ The implementation is copied from https://stackoverflow.com/a/14694234
+ with the difference of raising RuntimeError (instead of returning None)
+ if no variable of that name is found in any of the scopes.
+
+ :param name: Name of the variable to access.
+ :type name: str
+ :returns: The value of the found variable.
+ :rtype: Value
+ :raises RuntimeError: If the variable is not found in any calling scope.
+ """
+ frame = stack()[1][0]
+ while name not in frame.f_locals:
+ frame = frame.f_back
+ if frame is None:
+ raise RuntimeError(f"Field {name} value not found.")
+ return frame.f_locals[name]
+
+
+class DataclassProperty(property):
+ """Subclass of property, handles default values for dataclass fields.
+
+ If a dataclass field does not specify a default value (nor default_factory),
+ this is not needed, and in fact it will not work (so use built-in property).
+
+ This implementation seamlessly finds and inserts the default value
+ (can be mutable) into a new attribute of the getter function.
+ Before calling a setter function in init (recognized by type),
+ the default value is retrieved and passed transparently to the setter.
+ It is the responsibility of the setter to appropriately clone the value,
+ in order to prevent multiple instances sharing the same mutable value.
+ """
+
+ def __init__(
+ self,
+ fget: Optional[Callable[[Self], Value]] = None,
+ fset: Optional[Callable[[Self, Value], None]] = None,
+ fdel: Optional[Callable[[Self], None]] = None,
+ doc: Optional[str] = None,
+ ):
+ """Find and store the default value, construct the property.
+
+ See this for how the superclass property works:
+ https://docs.python.org/3/howto/descriptor.html#properties
+
+ :param fget: Getter (unbound) function to use, if any.
+ :param fset: Setter (unbound) function to use, if any.
+ :param fdel: Deleter (unbound) function to use, if any.
+ :param doc: Docstring to display when examining the property.
+ :type fget: Optional[Callable[[Self], Value]]
+ :type fset: Optional[Callable[[Self, Value], None]]
+ :type fdel: Optional[Callable[[Self], None]]
+ :type doc: Optional[str]
+ """
+ variable_found = _calling_scope_variable(fget.__name__)
+ if not isinstance(variable_found, DataclassProperty):
+ if isinstance(variable_found, Field):
+ if variable_found.default is not MISSING:
+ fget.default_value = variable_found.default
+ # Else do not store any default value.
+ else:
+ fget.default_value = variable_found
+ # Else this is the second time init is called (when setting setter),
+ # in which case the default is already stored into fget.
+ super().__init__(fget=fget, fset=fset, fdel=fdel, doc=doc)
+
+ def setter(
+ self,
+ fset: Optional[Callable[[Self, Value], None]],
+ ) -> DataclassProperty:
+ """Return new instance with a wrapped setter function set.
+
+ If the argument is None, call superclass method.
+
+ The wrapped function recognizes when it is called in init
+ (by the fact the value argument is of type DataclassProperty)
+ and in that case it extracts the stored default and passes that
+ to the user-defined setter function.
+
+ :param fset: Setter function to wrap and apply.
+ :type fset: Optional[Callable[[Self, Value], None]]
+ :returns: New property instance with correct setter function set.
+ :rtype: DataclassProperty
+ """
+ if fset is None:
+ return super().setter(fset)
+
+ @wraps(fset)
+ def wrapped(sel_: Self, val: Union[Value, DataclassProperty]) -> None:
+ """Extract default from getter if needed, call the user setter.
+
+ The sel_ parameter is listed explicitly, to signify
+ this is an unbound function, not a bound method yet.
+
+ :param sel_: Instance of dataclass (not of DataclassProperty)
+ to set the value on.
+ :param val: Set this value, or the default value stored there.
+ :type sel_: Self
+ :type val: Union[Value, DataclassProperty]
+ """
+ if isinstance(val, DataclassProperty):
+ val = val.fget.default_value
+ fset(sel_, val)
+
+ return super().setter(wrapped)
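The net effect is that a dataclass can declare a field with a plain default and still route every assignment, including the one made by the generated __init__, through a validating setter. A condensed sketch of the pattern, mirroring how Config uses it earlier in this change; the Example class and its ratio field are illustrative only:

```python
from dataclasses import dataclass

from resources.libraries.python.MLRsearch.dataclass import DataclassProperty

@dataclass
class Example:
    ratio: float = 0.25   # plain default, later shadowed by the property

    @DataclassProperty
    def ratio(self) -> float:
        return self._ratio

    @ratio.setter
    def ratio(self, value: float) -> None:
        value = float(value)
        if not 0.0 < value < 1.0:
            raise ValueError(f"Ratio {value} must be between 0 and 1.")
        self._ratio = value

example = Example()   # generated __init__ passes the stored default (0.25)
example.ratio = 0.5   # later assignments go through the same setter
```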
diff --git a/resources/libraries/python/MLRsearch/dataclass/field.py b/resources/libraries/python/MLRsearch/dataclass/field.py
new file mode 100644
index 0000000000..55d9d0879f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/field.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining secondary_field function.
+
+Just a shorthand for a frequently repeated expression.
+
+The main point is that this dataclass field is not used in init.
+Maybe it is a derived value of a frozen dataclass.
+Maybe it is a cache to help avoiding repeated computation.
+Maybe it is a temporary value stored in one method and read in another method.
+In any case, the caller does not need to know it is here,
+so it is excluded from repr, hashing, ordering and similar.
+"""
+
+from dataclasses import Field, field
+
+
+def secondary_field() -> Field:
+ """Return newly created Field with non-default arguments
+
+ In practice, it seems to be fine to reuse the resulting Field instance
+ when defining multiple dataclass fields,
+ but we keep this as a function to improve readability.
+
+ :returns: A new Field instance useful for secondary fields.
+ :rtype: Field
+ """
+ return field(
+ default=None,
+ init=False,
+ repr=False,
+ compare=False,
+ )
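In practice the helper is used for derived fields that __post_init__ fills in, as DiscreteInterval does below. A minimal sketch with an illustrative Rectangle class, assuming the same import path as the package __init__ above:

```python
from dataclasses import dataclass

from resources.libraries.python.MLRsearch.dataclass import secondary_field

@dataclass
class Rectangle:
    width: float
    height: float
    area: float = secondary_field()   # derived, not an __init__ argument

    def __post_init__(self) -> None:
        self.area = self.width * self.height

rect = Rectangle(2.0, 3.0)     # only primary fields are passed
assert rect.area == 6.0
assert "area" not in repr(rect)   # excluded from repr as well
```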
diff --git a/resources/libraries/python/MLRsearch/discrete_interval.py b/resources/libraries/python/MLRsearch/discrete_interval.py
new file mode 100644
index 0000000000..0a3bf443a8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_interval.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteInterval class."""
+
+from dataclasses import dataclass
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+
+
+# TODO: Can this be frozen?
+@dataclass
+class DiscreteInterval:
+ """Interval class with more computations available.
+
+ Along discrete form of width,
+ a MLR specific way for halving the interval is also included.
+
+ The two primary field values do not have to be valid relevant bounds,
+ but at the end of the search, they usually are.
+
+ The load values must be round.
+ """
+
+ lower_bound: DiscreteLoad
+ """Value for the lower intended load (or load stats or similar)."""
+ upper_bound: DiscreteLoad
+ """Value for the higher intended load (or load stats or similar)."""
+ # Primary fields above, derived below.
+ discrete_width: DiscreteWidth = secondary_field()
+ """Discrete width between intended loads (upper_bound minus lower_bound)."""
+
+ def __post_init__(self) -> None:
+ """Sort bounds by intended load, compute secondary quantities.
+
+ :raises RuntimeError: If a result used non-rounded load.
+ """
+ if not self.lower_bound.is_round:
+ raise RuntimeError(f"Non-round lower bound: {self.lower_bound!r}")
+ if not self.upper_bound.is_round:
+ raise RuntimeError(f"Non-round upper bound: {self.upper_bound!r}")
+ if self.lower_bound > self.upper_bound:
+ tmp = self.lower_bound
+ self.lower_bound = self.upper_bound
+ self.upper_bound = tmp
+ self.discrete_width = self.upper_bound - self.lower_bound
+
+ def __str__(self) -> str:
+ """Convert to a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"lower_bound=({self.lower_bound}),upper_bound=({self.upper_bound})"
+ )
+
+ # TODO: Use "target" instad of "goal" in argument and variable names.
+
+ def width_in_goals(self, goal: DiscreteWidth) -> float:
+ """Return relative width as a multiple of the given goal (int form).
+
+ Integer forms are used for computation, safe as loads are rounded.
+ The result is a float, as self int may not be divisible by goal int.
+
+ :param goal: A relative width amount to be used as a unit.
+ :type goal: DiscreteWidth
+ :returns: Self width in multiples of (integer form of) goal width.
+ :rtype: float
+ """
+ return int(self.discrete_width) / int(goal)
+
+ def middle(self, goal: DiscreteWidth) -> DiscreteLoad:
+ """Return new intended load (discrete form) in the middle.
+
+ All calculations are based on int forms.
+
+ One of the halves is rounded to a power-of-two multiple of the goal.
+ The power that leads to most even split is used.
+ Lower width is the smaller one (if not exactly even).
+
+ This approach prefers lower loads (to remain conservative) and can save
+ some measurements (when all middle measurements have high loss).
+ Note that when competing with external search from above,
+ that search is already likely to produce widths that are
+ power-of-two multiples of the target width.
+
+ If the interval width is one goal (or less), RuntimeError is raised.
+ If the interval width is between one and two goals (not including),
+ a more even split is attempted (using half the goal value).
+
+ :param goal: Target width goal to use for uneven halving.
+ :type goal: DiscreteWidth
+ :returns: New load to use for bisecting.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ int_self, int_goal = int(self.discrete_width), int(goal)
+ if int_self <= int_goal:
+ raise RuntimeError(f"Do not halve small enough interval: {self!r}")
+ if int_self == 2 * int_goal:
+ # Even split, return here simplifies the while loop below.
+ return self.lower_bound + goal
+ if int_self < 2 * int_goal:
+ # This can only happen when int_goal >= 2.
+ # In this case, we do not have good enough split at this width goal,
+ # but maybe this is not the final target, so we can attempt
+ # a split at half width goal.
+ if not int_goal % 2:
+ return self.middle(goal=goal.half_rounded_down())
+ # Odd int_goal, so this must be the last phase. Do even split.
+ lo_width = self.discrete_width.half_rounded_down()
+ return self.lower_bound + lo_width
+ hi_width = goal
+ lo_width = self.discrete_width - hi_width
+ # We know lo_width > hi_width because we did not do the even split.
+ while 1:
+ hi2_width = hi_width * 2
+ lo2_width = self.discrete_width - hi2_width
+ if lo2_width <= hi2_width:
+ break
+ hi_width, lo_width = hi2_width, lo2_width
+ # Which of the two options is more even? Product decides.
+ if int(hi_width) * int(lo_width) > int(hi2_width) * int(lo2_width):
+ # Previous attempt was more even, but hi_width was the smaller one.
+ lo2_width = hi_width
+ # Else lo2_width is more even and no larger than hi2_width.
+ return self.lower_bound + lo2_width
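The loop at the end of middle() (the case when the interval is wider than two goals) can be followed with plain integers: start with one goal on the upper side, keep doubling it while the lower side stays larger, then keep whichever of the last two splits is more even. A standalone integer-only sketch of just that loop; real widths are DiscreteWidth instances:

```python
def uneven_split(width: int, goal: int) -> tuple:
    """Return (lower, upper) parts of width; one part is goal times a power of two."""
    hi = goal
    lo = width - hi
    while True:
        hi2 = hi * 2
        lo2 = width - hi2
        if lo2 <= hi2:
            break
        hi, lo = hi2, lo2
    if hi * lo > hi2 * lo2:
        lo2 = hi   # the previous split was more even; hi was its smaller part
    return lo2, width - lo2

assert uneven_split(13, 1) == (5, 8)   # upper part is 8 = 1 * 2**3
assert uneven_split(9, 1) == (4, 5)    # here the lower part is 4 = 1 * 2**2
```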
diff --git a/resources/libraries/python/MLRsearch/discrete_load.py b/resources/libraries/python/MLRsearch/discrete_load.py
new file mode 100644
index 0000000000..a75b4acf96
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_load.py
@@ -0,0 +1,316 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteLoad class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from functools import total_ordering
+from typing import Callable, Optional, Union
+
+from .load_rounding import LoadRounding
+from .discrete_width import DiscreteWidth
+
+
+@total_ordering
+@dataclass
+class DiscreteLoad:
+ """Structure to store load value together with its rounded integer form.
+
+ LoadRounding instance is needed to enable conversion between two forms.
+ Conversion methods and factories are added for convenience.
+
+ In general, the float form is allowed to differ from conversion from int.
+
+ Comparisons are supported, acting on the float load component.
+ Additive operations are supported, acting on int form.
+ Multiplication by a float constant is supported, acting on float form.
+
+ As for all user defined classes by default, all instances are truthy.
+ That is useful when dealing with Optional values, as None is falsy.
+
+ This dataclass is effectively frozen, but cannot be marked as such
+ as that would prevent LoadStats from being its subclass.
+ """
+
+ # For most debugs, rounding in repr just takes space.
+ rounding: LoadRounding = field(repr=False, compare=False)
+ """Rounding instance to use for conversion."""
+ float_load: float = None
+ """Float form of intended load [tps], usable for measurer."""
+ int_load: int = field(compare=False, default=None)
+ """Integer form, usable for exact computations."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, compute missing information.
+
+ At this point, it is allowed for float load to differ from
+ conversion from int load. MLRsearch should round explicitly later,
+ based on its additional information.
+
+ :raises RuntimeError: If both init arguments are None.
+ """
+ if self.float_load is None and self.int_load is None:
+ raise RuntimeError("Float or int value is needed.")
+ if self.float_load is None:
+ self.int_load = int(self.int_load)
+ self.float_load = self.rounding.int2float(self.int_load)
+ else:
+ self.float_load = float(self.float_load)
+ self.int_load = self.rounding.float2int(self.float_load)
+
+ def __str__(self) -> str:
+ """Convert to a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"int_load={int(self)}"
+
+ # Explicit comparison operators.
+ # Those generated with dataclass order=True do not allow subclass instances.
+
+ def __eq__(self, other: Optional[DiscreteLoad]) -> bool:
+ """Return whether the other instance has the same float form.
+
+ None is effectively considered to be an unequal instance.
+
+ :param other: Other instance to compare to, or None.
+ :type other: Optional[DiscreteLoad]
+ :returns: True only if float forms are exactly equal.
+ :rtype: bool
+ """
+ if other is None:
+ return False
+ return float(self) == float(other)
+
+ def __lt__(self, other: DiscreteLoad) -> bool:
+ """Return whether self has smaller float form than the other instance.
+
+ None is not supported, as MLRsearch does not need that
+ (so when None appears we want to raise).
+
+ :param other: Other instance to compare to.
+ :type other: DiscreteLoad
+ :returns: True only if float forms of self is strictly smaller.
+ :rtype: bool
+ """
+ return float(self) < float(other)
+
+ def __hash__(self) -> int:
+ """Return a hash based on the float value.
+
+ With this, the instance can be used as if it was immutable and hashable,
+ e.g. it can be a key in a dict.
+
+ :returns: Hash value for this instance.
+ :rtype: int
+ """
+ return hash(float(self))
+
+ @property
+ def is_round(self) -> bool:
+ """Return whether float load matches converted int load.
+
+ :returns: False if float load is not rounded.
+ :rtype: bool
+ """
+ expected = self.rounding.int2float(self.int_load)
+ return expected == self.float_load
+
+ def __int__(self) -> int:
+ """Return the int value.
+
+ :returns: The int field value.
+ :rtype: int
+ """
+ return self.int_load
+
+ def __float__(self) -> float:
+ """Return the float value.
+
+ :returns: The float field value [tps].
+ :rtype: float
+ """
+ return self.float_load
+
+ @staticmethod
+ def int_conver(rounding: LoadRounding) -> Callable[[int], DiscreteLoad]:
+ """Return a factory that turns an int load into a discrete load.
+
+ :param rounding: Rounding instance needed.
+ :type rounding: LoadRounding
+ :returns: Factory to use when converting from int.
+ :rtype: Callable[[int], DiscreteLoad]
+ """
+
+ def factory_int(int_load: int) -> DiscreteLoad:
+ """Use rounding and int load to create discrete load.
+
+ :param int_load: Intended load in integer form.
+ :type int_load: int
+ :returns: New discrete load instance matching the int load.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=rounding, int_load=int_load)
+
+ return factory_int
+
+ @staticmethod
+ def float_conver(rounding: LoadRounding) -> Callable[[float], DiscreteLoad]:
+ """Return a factory that turns a float load into a discrete load.
+
+ :param rounding: Rounding instance needed.
+ :type rounding: LoadRounding
+ :returns: Factory to use when converting from float.
+ :rtype: Callable[[float], DiscreteLoad]
+ """
+
+ def factory_float(float_load: float) -> DiscreteLoad:
+ """Use rounding instance and float load to create discrete load.
+
+ The float form is not rounded yet.
+
+ :param float_load: Intended load in float form [tps].
+ :type float_load: float
+ :returns: New discrete load instance matching the float load.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=rounding, float_load=float_load)
+
+ return factory_float
+
+ def rounded_down(self) -> DiscreteLoad:
+ """Create and return new instance with float form matching int.
+
+ :returns: New instance with same int form and float form rounded down.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=self.rounding, int_load=int(self))
+
+ def hashable(self) -> DiscreteLoad:
+ """Return new equivalent instance.
+
+ This is mainly useful for conversion from unhashable subclasses,
+ such as LoadStats.
+ Rounding instance (reference) is copied from self.
+
+ :returns: New instance with values based on float form of self.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=self.rounding, float_load=float(self))
+
+ def __add__(self, width: DiscreteWidth) -> DiscreteLoad:
+ """Return newly constructed instance with width added to int load.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding two loads by mistake
+ (or adding int to load and similar).
+
+ :param width: Value to add to int load.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {width!r}")
+ return DiscreteLoad(
+ rounding=self.rounding,
+ int_load=self.int_load + int(width),
+ )
+
+ def __sub__(
+ self, other: Union[DiscreteWidth, DiscreteLoad]
+ ) -> Union[DiscreteLoad, DiscreteWidth]:
+ """Return result based on the argument type.
+
+ Load minus load is width, load minus width is load.
+ This allows the same operator to support both operations.
+
+ Rounding instance (reference) is copied from self.
+
+ :param other: Value to subtract from int load.
+ :type other: Union[DiscreteWidth, DiscreteLoad]
+ :returns: Resulting width or load.
+ :rtype: Union[DiscreteLoad, DiscreteWidth]
+ :raises RuntimeError: If the argument type is not supported.
+ """
+ if isinstance(other, DiscreteWidth):
+ return self._minus_width(other)
+ if isinstance(other, DiscreteLoad):
+ return self._minus_load(other)
+ raise RuntimeError(f"Unsupported type {other!r}")
+
+ def _minus_width(self, width: DiscreteWidth) -> DiscreteLoad:
+ """Return newly constructed instance, width subtracted from int load.
+
+ Rounding instance (reference) is copied from self.
+
+ :param width: Value to subtract from int load.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(
+ rounding=self.rounding,
+ int_load=self.int_load - int(width),
+ )
+
+ def _minus_load(self, other: DiscreteLoad) -> DiscreteWidth:
+ """Return newly constructed width instance, difference of int loads.
+
+ Rounding instance (reference) is copied from self.
+
+ :param other: Value to subtract from int load.
+ :type other: DiscreteLoad
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ """
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_load - int(other),
+ )
+
+ def __mul__(self, coefficient: float) -> DiscreteLoad:
+ """Return newly constructed instance, float load multiplied by argument.
+
+ Rounding instance (reference) is copied from self.
+
+ :param coefficient: Value to multiply float load with.
+ :type coefficient: float
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If argument is unsupported.
+ """
+ if not isinstance(coefficient, float):
+ raise RuntimeError(f"Not float: {coefficient!r}")
+ if coefficient <= 0.0:
+ raise RuntimeError(f"Not positive: {coefficient!r}")
+ return DiscreteLoad(
+ rounding=self.rounding,
+ float_load=self.float_load * coefficient,
+ )
+
+ def __truediv__(self, coefficient: float) -> DiscreteLoad:
+ """Call multiplication with inverse argument.
+
+ :param coefficient: Value to divide float load with.
+ :type coefficient: float
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If argument is unsupported.
+ """
+ return self * (1.0 / coefficient)
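int_conver and float_conver return closures so callers can convert many plain values without passing the rounding instance each time. A generic sketch of that closure-factory idea with a hypothetical stand-in for the rounding object (the real LoadRounding lives in load_rounding.py and is not shown in this hunk):

```python
from typing import Callable

class ToyRounding:
    """Hypothetical stand-in: loads are kept as multiples of a fixed step."""
    def __init__(self, step: float) -> None:
        self.step = step
    def int2float(self, int_load: int) -> float:
        return int_load * self.step

def converter_from_int(rounding: ToyRounding) -> Callable[[int], float]:
    """Return a closure converting int form to float form, rounding captured."""
    def factory(int_load: int) -> float:
        return rounding.int2float(int_load)
    return factory

from_int = converter_from_int(ToyRounding(step=0.5))
assert from_int(7) == 3.5
```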
diff --git a/resources/libraries/python/MLRsearch/discrete_result.py b/resources/libraries/python/MLRsearch/discrete_result.py
new file mode 100644
index 0000000000..882b6081c6
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_result.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteResult class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .discrete_load import DiscreteLoad
+from .trial_measurement import MeasurementResult
+
+
+@dataclass
+class DiscreteResult(MeasurementResult):
+ """A measurement result where intended load is also given as discrete load.
+
+ The discrete load has to be round and has to match the intended load.
+ """
+
+ # Must have default as superclass has fields with default values.
+ discrete_load: DiscreteLoad = None
+ """Intended load [tps]; discrete, round and equal to intended load."""
+
+ def __post_init__(self) -> None:
+ """Call super, verify intended and discrete loads are the same.
+
+ :raises TypeError: If discrete load is not DiscreteLoad.
+ :raises ValueError: If the discrete load is not round.
+ :raises ValueError: If the load does not match intended load.
+ """
+ super().__post_init__()
+ if not isinstance(self.discrete_load, DiscreteLoad):
+ raise TypeError(f"Not a discrete load: {self.discrete_load!r}")
+ if not self.discrete_load.is_round:
+ raise ValueError(f"Discrete load not round: {self.discrete_load!r}")
+ if float(self.discrete_load) != self.intended_load:
+ raise ValueError(f"Load mismatch: {self!r}")
+
+ @staticmethod
+ def with_load(
+ result: MeasurementResult, load: DiscreteLoad
+ ) -> DiscreteResult:
+ """Return result with added load.
+
+ :param result: A result, possibly without discrete load.
+ :param load: Discrete load to add.
+ :type result: MeasurementResult
+ :type load: DiscreteLoad
+ :returns: Equivalent result with matching discrete load.
+ :rtype: DiscreteResult
+ :raises TypeError: If discrete load is not DiscreteLoad.
+ :raises ValueError: If the discrete load is not round.
+ :raises ValueError: If the load does not match intended load.
+ """
+ return DiscreteResult(
+ intended_duration=result.intended_duration,
+ intended_load=result.intended_load,
+ offered_count=result.offered_count,
+ loss_count=result.loss_count,
+ forwarding_count=result.forwarding_count,
+ offered_duration=result.offered_duration,
+ duration_with_overheads=result.duration_with_overheads,
+ intended_count=result.intended_count,
+ discrete_load=load,
+ )
diff --git a/resources/libraries/python/MLRsearch/discrete_width.py b/resources/libraries/python/MLRsearch/discrete_width.py
new file mode 100644
index 0000000000..8a4845a83f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_width.py
@@ -0,0 +1,197 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteWidth class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+
+from .load_rounding import LoadRounding
+
+
+# TODO: Make properly frozen.
+@dataclass(order=True)
+class DiscreteWidth:
+ """Structure to store float width together with its rounded integer form.
+
+ The width does not have to be positive, i.e. the computed integer width
+ does not have to be larger than zero.
+
+ LoadRounding instance is needed to enable conversion between two forms.
+
+ Conversion and arithmetic methods are added for convenience.
+ Division and non-integer multiplication are intentionally not supported,
+ as MLRsearch should not seek unround widths when round ones are available.
+
+ The instance is effectively immutable, but not hashable as it refers
+ to the rounding instance, which is implemented as mutable
+ (although the mutations are not visible).
+ """
+
+ # For most debugs, rounding in repr just takes space.
+ rounding: LoadRounding = field(repr=False, compare=False)
+ """Rounding instance to use for conversion."""
+ float_width: float = None
+ """Relative width of float intended load.
+ This is treated as a constructor argument, and does not need to match
+ the int width. Int width is computed to be no wider than this."""
+ int_width: int = field(compare=False, default=None)
+ """Integer form, difference of integer loads.
+ This is the primary quantity used by most computations."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, compute missing information.
+
+ At this point, it is allowed for float width to be slightly larger
+ than the implied int width.
+
+ If both forms are specified, the float form is taken as primary
+ (thus the integer form is recomputed to match).
+
+ :raises RuntimeError: If both init arguments are None.
+ """
+ if self.float_width is None and self.int_width is None:
+ raise RuntimeError("Float or int value is needed.")
+ if self.float_width is None:
+ self.int_width = int(self.int_width)
+ min_load = self.rounding.int2float(0)
+ increased_load = self.rounding.int2float(self.int_width)
+ self.float_width = (increased_load - min_load) / increased_load
+ return
+ self.float_width = float(self.float_width)
+ min_load = self.rounding.int2float(0)
+ increased_load = min_load / (1.0 - self.float_width)
+ int_load = self.rounding.float2int(increased_load)
+ verify_load = self.rounding.int2float(int_load)
+ if verify_load > increased_load:
+ int_load -= 1
+ self.int_width = int_load
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"int_width={int(self)}"
+
+ def __int__(self) -> int:
+ """Return the integer form.
+
+ :returns: The int field value.
+ :rtype: int
+ """
+ return self.int_width
+
+ def __float__(self) -> float:
+ """Return the float form.
+
+ :returns: The float field value.
+ :rtype: float
+ """
+ return self.float_width
+
+ def __hash__(self) -> int:
+ """Return a hash based on the float value.
+
+ With this, the instance can be used as if it was immutable and hashable,
+ e.g. it can be a key in a dict.
+
+ :returns: Hash value for this instance.
+ :rtype: int
+ """
+ return hash(float(self))
+
+ def rounded_down(self) -> DiscreteWidth:
+ """Create and return new instance with float form matching int.
+
+ :returns: New instance with same int form and float form rounded down.
+ :rtype: DiscreteWidth
+ """
+ return DiscreteWidth(rounding=self.rounding, int_width=int(self))
+
+ def __add__(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return newly constructed instance with int widths added.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding something unsupported.
+
+ :param width: Value to add to int width.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {width!r}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width + int(width),
+ )
+
+ def __sub__(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return newly constructed instance with int widths subtracted.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding something unsupported.
+ Non-positive results are disallowed by constructor.
+
+ :param width: Value to subtract from int width.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {type(width)}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width - int(width),
+ )
+
+ def __mul__(self, coefficient: int) -> DiscreteWidth:
+ """Construct new instance with int value multiplied.
+
+ Rounding instance (reference) is copied from self.
+
+ :param coefficient: Constant to multiply int width with.
+ :type coefficient: int
+ :returns: New instance with multiplied int width.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: If argument value does not meet requirements.
+ """
+ if not isinstance(coefficient, int):
+ raise RuntimeError(f"Coefficient not int: {coefficient!r}")
+ if coefficient < 1:
+ raise RuntimeError(f"Coefficient not positive: {coefficient!r}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width * coefficient,
+ )
+
+ def half_rounded_down(self) -> DiscreteWidth:
+ """Contruct new instance of half the integer width.
+
+ If the current integer width is odd, round the half width down.
+
+ :returns: New instance with half int width.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: If the resulting integer width is not positive.
+ """
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width // 2,
+ )
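
As a minimal sketch of the integer-realm arithmetic this class provides (assuming the CSIT package root is importable; the load limits and width goal below are made up for illustration):

    from resources.libraries.python.MLRsearch.load_rounding import LoadRounding
    from resources.libraries.python.MLRsearch.discrete_width import DiscreteWidth

    # Hypothetical limits and width goal, chosen only for illustration.
    rounding = LoadRounding(min_load=10000.0, max_load=1.0e7, float_goals=(0.005,))
    width = DiscreteWidth(rounding=rounding, float_width=0.005).rounded_down()
    doubled = width * 2                   # multiplication acts on the integer width
    halved = doubled.half_rounded_down()  # exactly reverses the doubling
    print(int(width), int(doubled), int(halved))
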
diff --git a/resources/libraries/python/MLRsearch/expander.py b/resources/libraries/python/MLRsearch/expander.py
new file mode 100644
index 0000000000..0e6800477e
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/expander.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetExpander class."""
+
+
+from dataclasses import dataclass, field
+from typing import Callable, Optional
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .global_width import GlobalWidth
+from .limit_handler import LimitHandler
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TargetedExpander:
+ """Utility class to track expanding width during external search.
+
+ One instance per selector but takes into consideration global current width.
+
+ Generally, many strategies may limit next_width immediately,
+ but next_width expands only after measurement
+ when external search fails to find its bound (global width is also bumped).
+ See strategy classes for specific details on external and internal search.
+ """
+
+ target: TargetSpec
+ """The target this strategy is focusing on."""
+ global_width: GlobalWidth
+ """Reference to the global width tracking instance."""
+ initial_lower_load: Optional[DiscreteLoad]
+ """Smaller of the two loads distinguished at instance creation.
+ Can be None if initial upper bound is the min load."""
+ initial_upper_load: Optional[DiscreteLoad]
+ """Larger of the two loads distinguished at instance creation.
+ Can be None if initial lower bound is the max load."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the class used to avoid too narrow intervals."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+ # Primary above, derived below.
+ next_width: DiscreteWidth = secondary_field()
+ """This will be used in next search step if no strategy intervenes."""
+
+ def __post_init__(self) -> None:
+ """Prepare next width."""
+ self.next_width = self.target.discrete_width
+ if self.initial_lower_load and self.initial_upper_load:
+ interval_width = self.initial_upper_load - self.initial_lower_load
+ self.next_width = max(self.next_width, interval_width)
+ self.expand(bump_global=False)
+
+ def expand(self, bump_global: bool = True) -> None:
+ """Multiply next width by expansion coefficient.
+
+ The global current width should be bumped when external search
+ is done but load is not the bound we were looking for.
+
+ For global width shrinking, set the field directly.
+
+ :param bump_global: False if called from limit or post init.
+ :type bump_global: bool
+ """
+ self.next_width *= self.target.expansion_coefficient
+ if bump_global:
+ self.global_width.width = self.next_width
+
+ def get_width(self) -> DiscreteWidth:
+ """Return next width corrected by global current width.
+
+ :returns: The width to use, see GlobalWidth.
+ :rtype: DiscreteWidth
+ """
+ return self.global_width.or_larger(self.next_width)
+
+ def limit(self, last_width: DiscreteWidth) -> None:
+ """Decrease the prepared next width.
+
+ This is called by other strategies when bounds are getting narrower.
+
+ Global current width is not updated yet,
+ as the other strategy may not end up becoming the winner
+ and we want to avoid interfering with other selector strategies.
+
+ :param last_width: As applied by other strategy, smaller of two halves.
+ :type last_width: DiscreteWidth
+ """
+ self.next_width = max(last_width, self.target.discrete_width)
+ self.expand(bump_global=False)
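
For intuition on the expansion schedule, each expand() call multiplies the prepared width by the goal's expansion coefficient, so the integer widths grow geometrically. A standalone sketch mirroring just that multiplication (not using the class; numbers are illustrative only):

    width_units, coefficient = 7, 2
    schedule = []
    for _ in range(4):
        width_units *= coefficient
        schedule.append(width_units)
    print(schedule)  # [14, 28, 56, 112]
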
diff --git a/resources/libraries/python/MLRsearch/global_width.py b/resources/libraries/python/MLRsearch/global_width.py
new file mode 100644
index 0000000000..6f7df8b894
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/global_width.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining GlobalWidth class."""
+
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+
+
+@dataclass
+class GlobalWidth:
+ """Primarily used to synchronize external search steps across selectors.
+
+ The full name is global current width, but that is too long for identifiers.
+
+ While each selector tracks its "local" (per goal) width using expander,
+ it is important we do not interleave upper external search for two goals.
+ That is why all selector instances refer to a singleton instance of this.
+
+ In general, this value remains constant when main loop iterates over
+ selectors and when selector iterates over strategies.
+ After the winner is measured, this width is set to the winner's width value,
+ and for some strategies that width is expanded when external search says so.
+
+ The two methods alone would not justify creating a new class;
+ the main reason is to have a name usable in type hints
+ that distinguishes this from various other "width" and "current" values.
+ """
+
+ width: DiscreteWidth
+ """Minimum width to apply at next external search step."""
+ # TODO: Add a setter, so it is easier to add debug logging.
+
+ @staticmethod
+ def from_loads(load0: DiscreteLoad, load1: DiscreteLoad) -> GlobalWidth:
+ """Initialize the value based on two loads from initial trials.
+
+ :param load0: Lower (or equal) load from the two most recent trials.
+ :param load1: Higher (or equal) load from the two most recent trials.
+ :type load0: DiscreteLoad
+ :type load1: DiscreteLoad
+ :returns: Newly created instance with computed width.
+ :rtype: GlobalWidth
+ """
+ return GlobalWidth(load1 - load0)
+
+ def or_larger(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return width from argument or self, whichever is larger.
+
+ :param width: A selector (strategy) asks if this width is large enough.
+ :type width: DiscreteWidth
+ :returns: Argument or current width.
+ :rtype: DiscreteWidth
+ """
+ return width if width > self.width else self.width
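
A small sketch of the synchronization idea, reusing the rounding and imports from the DiscreteWidth sketch above (the widths are made up):

    from resources.libraries.python.MLRsearch.global_width import GlobalWidth

    goal_width = DiscreteWidth(rounding=rounding, float_width=0.005)
    current = GlobalWidth(width=DiscreteWidth(rounding=rounding, float_width=0.02))
    # A selector asking for the narrower goal width gets the wider global width,
    # so external search steps of different selectors stay in sync.
    print(float(current.or_larger(goal_width)))  # 0.02
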
diff --git a/resources/libraries/python/MLRsearch/goal_result.py b/resources/libraries/python/MLRsearch/goal_result.py
new file mode 100644
index 0000000000..91dccec0bb
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/goal_result.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining GoalResult class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .discrete_load import DiscreteLoad
+from .relevant_bounds import RelevantBounds
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class GoalResult:
+ """Composite to be mapped for each search goal at the end of the search.
+
+ The values are stored as trimmed stats,
+ the conditional throughput is returned as a discrete load.
+ Thus, users interested only in float values have to convert explicitly.
+
+ Irregular goal results are supported as instances with a bound missing.
+ """
+
+ relevant_lower_bound: Optional[TrimmedStat]
+ """The relevant lower bound for the search goal."""
+ relevant_upper_bound: Optional[TrimmedStat]
+ """The relevant lower upper for the search goal."""
+
+ @staticmethod
+ def from_bounds(bounds: RelevantBounds) -> GoalResult:
+ """Factory, so that the call site can be shorter.
+
+ :param bounds: The relevant bounds as found in measurement database.
+ :type bounds: RelevantBounds
+ :returns: Newly created instance based on the bounds.
+ :rtype: GoalResult
+ """
+ return GoalResult(
+ relevant_lower_bound=bounds.clo,
+ relevant_upper_bound=bounds.chi,
+ )
+
+ @property
+ def conditional_throughput(self) -> Optional[DiscreteLoad]:
+ """Compute conditional throughput from the relevant lower bound.
+
+ If the relevant lower bound is missing, None is returned.
+
+ The conditional throughput has the same semantics as load,
+ so if load is unidirectional and user wants bidirectional
+ throughput, the manager has to compensate.
+
+ :return: Conditional throughput at the relevant lower bound.
+ :rtype: Optional[DiscreteLoad]
+ """
+ if not (rlb := self.relevant_lower_bound):
+ return None
+ stat = next(iter(rlb.target_to_stat.values()))
+ return rlb * (1.0 - stat.pessimistic_loss_ratio)
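
As a numeric illustration of the conditional throughput formula above (the values are made up):

    # If the relevant lower bound load is 2.0e6 tps and the pessimistic
    # (quantile) loss ratio recorded at that load is 0.002, then
    # conditional throughput = 2.0e6 * (1.0 - 0.002) = 1.996e6 tps.
    relevant_lower_bound_tps = 2.0e6
    pessimistic_loss_ratio = 0.002
    print(relevant_lower_bound_tps * (1.0 - pessimistic_loss_ratio))
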
diff --git a/resources/libraries/python/MLRsearch/limit_handler.py b/resources/libraries/python/MLRsearch/limit_handler.py
new file mode 100644
index 0000000000..5919f398f3
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/limit_handler.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LimitHandler class."""
+
+from dataclasses import dataclass
+from typing import Callable, Optional
+
+from .dataclass import secondary_field
+from .discrete_interval import DiscreteInterval
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .load_rounding import LoadRounding
+
+
+@dataclass
+class LimitHandler:
+ """Encapsulated methods for logic around handling limits.
+
+ In multiple places within MLRsearch code, an intended load value
+ is only useful if it is far enough from possible known values.
+ All such places can be served with the handle method
+ with appropriate arguments.
+ """
+
+ rounding: LoadRounding
+ """Rounding instance to use."""
+ debug: Callable[[str], None]
+ """Injectable logging function."""
+ # The two fields below are derived, extracted from rounding as a shortcut.
+ min_load: DiscreteLoad = secondary_field()
+ """Minimal load, as prescribed by Config."""
+ max_load: DiscreteLoad = secondary_field()
+ """Maximal load, as prescribed by Config."""
+
+ def __post_init__(self) -> None:
+ """Initialize derived quantities."""
+ from_float = DiscreteLoad.float_conver(rounding=self.rounding)
+ self.min_load = from_float(self.rounding.min_load)
+ self.max_load = from_float(self.rounding.max_load)
+
+ def handle(
+ self,
+ load: DiscreteLoad,
+ width: DiscreteWidth,
+ clo: Optional[DiscreteLoad],
+ chi: Optional[DiscreteLoad],
+ ) -> Optional[DiscreteLoad]:
+ """Return new intended load after considering limits and bounds.
+
+ Not only do we want to avoid measuring outside the minmax interval,
+ we also want to avoid measuring too close to known limits and bounds.
+ We either round or return None, depending on hints from bound loads.
+
+ When rounding away from hard limits, we may end up being
+ too close to an already measured bound.
+ In this case, pick a midpoint between the bound and the limit.
+
+ The last two arguments are just loads (not full measurement results)
+ to allow callers to exclude some loads without measuring them.
+ As a convenience, full results are also supported,
+ so that callers do not need to care about None when extracting load.
+
+ :param load: Intended load candidate, initial or from a load selector.
+ :param width: Relative width goal, considered narrow enough for now.
+ :param clo: Intended load of current relevant lower bound.
+ :param chi: Intended load of current relevant upper bound.
+ :type load: DiscreteLoad
+ :type width: DiscreteWidth
+ :type clo: Optional[DiscreteLoad]
+ :type chi: Optional[DiscreteLoad]
+ :return: Adjusted load to measure at, or None if narrow enough already.
+ :rtype: Optional[DiscreteLoad]
+ :raises RuntimeError: If unsupported corner case is detected.
+ """
+ if not load:
+ raise RuntimeError("Got None load to handle.")
+ load = load.rounded_down()
+ min_load, max_load = self.min_load, self.max_load
+ if clo and not clo.is_round:
+ raise RuntimeError(f"Clo {clo} should have been round.")
+ if chi and not chi.is_round:
+ raise RuntimeError(f"Chi {chi} should have been round.")
+ if not clo and not chi:
+ load = self._handle_load_with_excludes(
+ load, width, min_load, max_load, min_ex=False, max_ex=False
+ )
+ # The "return load" lines are separate from load computation,
+ # so that logging can be added more easily when debugging.
+ return load
+ if chi and not clo:
+ if chi <= min_load:
+ # Expected when hitting the min load.
+ return None
+ if load >= chi:
+ # This can happen when mrr2 forwarding rate is rounded to mrr2.
+ return None
+ load = self._handle_load_with_excludes(
+ load, width, min_load, chi, min_ex=False, max_ex=True
+ )
+ return load
+ if clo and not chi:
+ if clo >= max_load:
+ raise RuntimeError("Lower load expected.")
+ if load <= clo:
+ raise RuntimeError("Higher load expected.")
+ load = self._handle_load_with_excludes(
+ load, width, clo, max_load, min_ex=True, max_ex=False
+ )
+ return load
+ # We have both clo and chi defined.
+ if not clo < load < chi:
+ # Happens when bisect compares with bounded extend.
+ return None
+ load = self._handle_load_with_excludes(
+ load, width, clo, chi, min_ex=True, max_ex=True
+ )
+ return load
+
+ def _handle_load_with_excludes(
+ self,
+ load: DiscreteLoad,
+ width: DiscreteWidth,
+ minimum: DiscreteLoad,
+ maximum: DiscreteLoad,
+ min_ex: bool,
+ max_ex: bool,
+ ) -> Optional[DiscreteLoad]:
+ """Adjust load if too close to limits, respecting exclusions.
+
+ This is a reusable block.
+ Limits may come from previous bounds or from hard load limits.
+ When coming from bounds, rounding to that is not allowed.
+ When coming from hard limits, rounding to the limit value
+ is allowed in general (governed by the corresponding _ex flag).
+
+ :param load: The candidate intended load before accounting for limits.
+ :param width: Relative width of area around the limits to avoid.
+ :param minimum: The lower limit to round around.
+ :param maximum: The upper limit to round around.
+ :param min_ex: If false, rounding to the minimum is allowed.
+ :param max_ex: If false, rounding to the maximum is allowed.
+ :type load: DiscreteLoad
+ :type width: DiscreteWidth
+ :type minimum: DiscreteLoad
+ :type maximum: DiscreteLoad
+ :type min_ex: bool
+ :type max_ex: bool
+ :returns: Adjusted load value, or None if narrow enough.
+ :rtype: Optional[DiscreteLoad]
+ :raises RuntimeError: If internal inconsistency is detected.
+ """
+ if not minimum <= load <= maximum:
+ raise RuntimeError(
+ "Internal error: load outside limits:"
+ f" load {load} min {minimum} max {maximum}"
+ )
+ max_width = maximum - minimum
+ if width >= max_width:
+ self.debug("Warning: Handling called with wide width.")
+ if not min_ex:
+ self.debug("Minimum not excluded, rounding to it.")
+ return minimum
+ if not max_ex:
+ self.debug("Maximum not excluded, rounding to it.")
+ return maximum
+ self.debug("Both limits excluded, narrow enough.")
+ return None
+ soft_min = minimum + width
+ soft_max = maximum - width
+ if soft_min > soft_max:
+ self.debug("Whole interval is less than two goals.")
+ middle = DiscreteInterval(minimum, maximum).middle(width)
+ soft_min = soft_max = middle
+ if load < soft_min:
+ if min_ex:
+ self.debug("Min excluded, rounding to soft min.")
+ return soft_min
+ self.debug("Min not excluded, rounding to minimum.")
+ return minimum
+ if load > soft_max:
+ if max_ex:
+ self.debug("Max excluded, rounding to soft max.")
+ return soft_max
+ self.debug("Max not excluded, rounding to maximum.")
+ return maximum
+ # Far enough from all limits, no additional adjustment is needed.
+ return load
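
A rough sketch of the handle() behavior near a hard limit, reusing the rounding and DiscreteWidth from the sketches above (DiscreteLoad.float_conver is the conversion helper used elsewhere in this patch; the candidate load is made up):

    from resources.libraries.python.MLRsearch.discrete_load import DiscreteLoad
    from resources.libraries.python.MLRsearch.limit_handler import LimitHandler

    from_float = DiscreteLoad.float_conver(rounding=rounding)
    handler = LimitHandler(rounding=rounding, debug=print)
    width = DiscreteWidth(rounding=rounding, float_width=0.005).rounded_down()
    # With no known bounds yet, a candidate too close to max load
    # is rounded to the max load itself (hard limits are not excluded).
    adjusted = handler.handle(from_float(9.99e6), width, clo=None, chi=None)
    print(None if adjusted is None else float(adjusted))
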
diff --git a/resources/libraries/python/MLRsearch/load_rounding.py b/resources/libraries/python/MLRsearch/load_rounding.py
new file mode 100644
index 0000000000..0ac4487be9
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/load_rounding.py
@@ -0,0 +1,205 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadRounding class."""
+
+import math
+
+from dataclasses import dataclass
+from typing import List, Tuple
+
+from .dataclass import secondary_field
+
+
+@dataclass
+class LoadRounding:
+ """Class encapsulating stateful utilities that round intended load values.
+
+ For MLRsearch algorithm logic to be correct, it is important that
+ interval width expansion and narrowing are exactly reversible,
+ which is not true in general for floating point number arithmetics.
+
+ This class offers conversion to and from an integer quantity.
+ Operations in the integer realm are guaranteed to be reversible,
+ so the only risk is when converting between float and integer realm.
+
+ Which relative width corresponds to the unit integer
+ is computed in initialization from width goals,
+ striking a balance between memory requirements and precision.
+
+ There are two quality knobs. One restricts how far
+ an integer can be from the exact float value.
+ The other restricts how close it can be. That is to make sure that,
+ even with unpredictable rounding errors during the conversion,
+ the converted integer value is never bigger than the intended float value,
+ so the intervals returned from MLRsearch will always
+ meet the relative width goal.
+
+ An instance of this class is mutable only in the sense it contains
+ a growing cache of previously computed values.
+ """
+
+ # TODO: Hide the cache and present as frozen hashable object.
+
+ min_load: float
+ """Minimal intended load [tps] to support, must be positive."""
+ max_load: float
+ """Maximal intended load [tps] to support, must be bigger than min load."""
+ float_goals: Tuple[float]
+ """Relative width goals to approximate, each must be positive
+ and smaller than one. Deduplicated and sorted in post init."""
+ quality_lower: float = 0.99
+ """Minimal multiple of each goal to be achievable."""
+ quality_upper: float = 0.999999
+ """Maximal multiple of each goal to be achievable."""
+ # Primary fields above, computed fields below.
+ max_int_load: int = secondary_field()
+ """Integer for max load (min load int is zero)."""
+ _int2load: List[Tuple[int, float]] = secondary_field()
+ """Known int values (sorted) and their float equivalents."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, perform checks, initialize conversion structures.
+
+ :raises RuntimeError: If a requirement is not met.
+ """
+ self.min_load = float(self.min_load)
+ self.max_load = float(self.max_load)
+ if not 0.0 < self.min_load < self.max_load:
+ raise RuntimeError("Load limits not supported: {self}")
+ self.quality_lower = float(self.quality_lower)
+ self.quality_upper = float(self.quality_upper)
+ if not 0.0 < self.quality_lower < self.quality_upper < 1.0:
+ raise RuntimeError("Qualities not supported: {self}")
+ goals = []
+ for goal in self.float_goals:
+ goal = float(goal)
+ if not 0.0 < goal < 1.0:
+ raise RuntimeError(f"Goal width {goal} is not supported.")
+ goals.append(goal)
+ self.float_goals = tuple(sorted(set(goals)))
+ self.max_int_load = self._find_ints()
+ self._int2load = []
+ self._int2load.append((0, self.min_load))
+ self._int2load.append((self.max_int_load, self.max_load))
+
+ def _find_ints(self) -> int:
+ """Find and return value for max_int_load.
+
+ Separated out of post init, as this is less conversion and checking,
+ and more math and searching.
+
+ A dumb implementation would start with 1 and keep increasing by 1
+ until all goals are within quality limits.
+ The actual implementation is smarter about the increment,
+ so it is expected to find the resulting value somewhat faster.
+
+ :returns: Value to be stored as max_int_load.
+ :rtype: int
+ """
+ minmax_log_width = math.log(self.max_load) - math.log(self.min_load)
+ log_goals = [-math.log1p(-goal) for goal in self.float_goals]
+ candidate = 1
+ while 1:
+ log_width_unit = minmax_log_width / candidate
+ # Fallback to increment by one if rounding errors make tries bad.
+ next_tries = [candidate + 1]
+ acceptable = True
+ for log_goal in log_goals:
+ units = log_goal / log_width_unit
+ int_units = math.floor(units)
+ quality = int_units / units
+ if not self.quality_lower <= quality <= self.quality_upper:
+ acceptable = False
+ target = (int_units + 1) / self.quality_upper
+ next_try = (target / units) * candidate
+ next_tries.append(next_try)
+ # Else quality acceptable, not bumping the candidate.
+ if acceptable:
+ return candidate
+ candidate = int(math.ceil(max(next_tries)))
+
+ def int2float(self, int_load: int) -> float:
+ """Convert from int to float tps load. Expand internal table as needed.
+
+ Too low or too high ints result in min or max load respectively.
+
+ :param int_load: Integer quantity to turn back into float load.
+ :type int_load: int
+ :returns: Converted load in tps.
+ :rtype: float
+ :raises RuntimeError: If internal inconsistency is detected.
+ """
+ if int_load <= 0:
+ return self.min_load
+ if int_load >= self.max_int_load:
+ return self.max_load
+ lo_index, hi_index = 0, len(self._int2load)
+ lo_int, hi_int = 0, self.max_int_load
+ lo_load, hi_load = self.min_load, self.max_load
+ while hi_int - lo_int >= 2:
+ mid_index = (hi_index + lo_index + 1) // 2
+ if mid_index >= hi_index:
+ mid_int = (hi_int + lo_int) // 2
+ log_coeff = math.log(hi_load) - math.log(lo_load)
+ log_coeff *= (mid_int - lo_int) / (hi_int - lo_int)
+ mid_load = lo_load * math.exp(log_coeff)
+ self._int2load.insert(mid_index, (mid_int, mid_load))
+ hi_index += 1
+ mid_int, mid_load = self._int2load[mid_index]
+ if mid_int < int_load:
+ lo_index, lo_int, lo_load = mid_index, mid_int, mid_load
+ continue
+ if mid_int > int_load:
+ hi_index, hi_int, hi_load = mid_index, mid_int, mid_load
+ continue
+ return mid_load
+ raise RuntimeError("Bisect in int2float failed.")
+
+ def float2int(self, float_load: float) -> int:
+ """Convert and round from tps load to int. Maybe expand internal table.
+
+ Too low or too high loads result in zero or max int respectively.
+
+ Result value is rounded down to an integer.
+
+ :param float_load: Tps quantity to convert into int.
+ :type float_load: float
+ :returns: Converted integer value suitable for halving.
+ :rtype: int
+ """
+ if float_load <= self.min_load:
+ return 0
+ if float_load >= self.max_load:
+ return self.max_int_load
+ lo_index, hi_index = 0, len(self._int2load)
+ lo_int, hi_int = 0, self.max_int_load
+ lo_load, hi_load = self.min_load, self.max_load
+ while hi_int - lo_int >= 2:
+ mid_index = (hi_index + lo_index + 1) // 2
+ if mid_index >= hi_index:
+ mid_int = (hi_int + lo_int) // 2
+ log_coeff = math.log(hi_load) - math.log(lo_load)
+ log_coeff *= (mid_int - lo_int) / (hi_int - lo_int)
+ mid_load = lo_load * math.exp(log_coeff)
+ self._int2load.insert(mid_index, (mid_int, mid_load))
+ hi_index += 1
+ mid_int, mid_load = self._int2load[mid_index]
+ if mid_load < float_load:
+ lo_index, lo_int, lo_load = mid_index, mid_int, mid_load
+ continue
+ if mid_load > float_load:
+ hi_index, hi_int, hi_load = mid_index, mid_int, mid_load
+ continue
+ return mid_int
+ return lo_int
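
A minimal round-trip sketch of the conversion (limits and goal are made up; the exact integer values depend on the computed max_int_load):

    rounding = LoadRounding(min_load=10000.0, max_load=1.0e7, float_goals=(0.005,))
    int_load = rounding.float2int(1.0e6)       # float tps -> integer units, rounded down
    float_load = rounding.int2float(int_load)  # back to float tps, never above the request
    print(int_load, float_load, rounding.float2int(float_load) == int_load)
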
diff --git a/resources/libraries/python/MLRsearch/load_stats.py b/resources/libraries/python/MLRsearch/load_stats.py
new file mode 100644
index 0000000000..5f4757f488
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/load_stats.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadStats class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .target_spec import TargetSpec
+from .target_stat import TargetStat
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+
+
+# The eq=False part is needed to make sure comparison is inherited properly.
+@dataclass(eq=False)
+class LoadStats(DiscreteLoad):
+ """An offered load together with stats for all possible targets.
+
+ As LoadStats is frequently passed instead of a plain DiscreteLoad,
+ equality and ordering are dictated by the float load.
+ """
+
+ target_to_stat: Dict[TargetSpec, TargetStat] = None
+ """Mapping from target specification to its current stat for this load."""
+
+ def __post_init__(self) -> None:
+ """Initialize load value and check there are targets to track."""
+ super().__post_init__()
+ if not self.target_to_stat:
+ raise ValueError(f"No targets: {self.target_to_stat!r}")
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ This works well only for trimmed stats,
+ as only the stat for the first target present is shown.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"fl={self.float_load}"
+ f",s=({next(iter(self.target_to_stat.values()))})"
+ )
+
+ def __hash__(self) -> int:
+ """Raise as stats are mutable by definition.
+
+ :returns: Hash value for this instance if possible.
+ :rtype: int
+ :raises TypeError: Not immutable.
+ """
+ raise TypeError("Loadstats are mutable so constant hash is impossible.")
+
+ def add(self, result: DiscreteResult) -> None:
+ """Take into account one more trial measurement result.
+
+ :param result: The result to take into account.
+ :type result: DiscreteResult
+ :raises RuntimeError: If result load is not equal to the self load.
+ """
+ if result.intended_load != float(self):
+ raise RuntimeError(
+ f"Attempting to add load {result.intended_load}"
+ f" to result set for {float(self)}"
+ )
+ for stat in self.target_to_stat.values():
+ stat.add(result)
+
+ @staticmethod
+ def new_empty(load: DiscreteLoad, targets: Tuple[TargetSpec]) -> LoadStats:
+ """Factory method to initialize mapping for given targets.
+
+ :param load: The intended load value for the new instance.
+ :param targets: The target specifications to track stats for.
+ :type load: DiscreteLoad
+ :type targets: Tuple[TargetSpec]
+ :returns: New instance with empty stats initialized.
+ :rtype: LoadStats
+ :raises ValueError: If the load is not rounded.
+ """
+ if not load.is_round:
+ raise ValueError(f"Not round: {load!r}")
+ return LoadStats(
+ rounding=load.rounding,
+ int_load=int(load),
+ target_to_stat={target: TargetStat(target) for target in targets},
+ )
+
+ def estimates(self, target: TargetSpec) -> Tuple[bool, bool]:
+ """Classify this load according to given target.
+
+ :param target: According to which target this should be classified.
+ :type target: TargetSpec
+ :returns: Tuple of two estimates whether load can be lower bound.
+ (True, False) means target is not reached yet.
+ :rtype: Tuple[bool, bool]
+ """
+ return self.target_to_stat[target].estimates()
diff --git a/resources/libraries/python/MLRsearch/measurement_database.py b/resources/libraries/python/MLRsearch/measurement_database.py
new file mode 100644
index 0000000000..7a6618c0da
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/measurement_database.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MeasurementDatabase class."""
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+from .load_stats import LoadStats
+from .relevant_bounds import RelevantBounds
+from .target_spec import TargetSpec
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class MeasurementDatabase:
+ """Structure holding measurement results for multiple durations and loads.
+
+ Several utility methods are added, accomplishing tasks useful for MLRsearch.
+
+ While TargetStat can decide when a single load is a lower bound (or upper),
+ it does not deal with loss inversion (a higher load with a smaller loss).
+
+ This class introduces the concept of relevant bounds.
+ Relevant upper bound is simply the lowest load classified as an upper bound.
+ But relevant lower bound is only chosen from lower bound loads
+ strictly smaller than the relevant upper bound.
+ This way any higher loads with good results are ignored,
+ so relevant bounds give a conservative estimate of the SUT's true performance.
+ """
+
+ targets: Tuple[TargetSpec] = None
+ """Targets to track stats for."""
+ load_to_stats: Dict[DiscreteLoad, LoadStats] = None
+ """Mapping from loads to stats."""
+
+ def __post_init__(self) -> None:
+ """Check and sort initial values.
+
+ If no stats yet, initialize empty ones.
+
+ :raises ValueError: If there are no targets.
+ """
+ if not self.targets:
+ raise ValueError(f"Database needs targets: {self.targets!r}")
+ if not self.load_to_stats:
+ self.load_to_stats = {}
+ self._sort()
+
+ def _sort(self) -> None:
+ """Sort keys from low to high load."""
+ self.load_to_stats = dict(sorted(self.load_to_stats.items()))
+
+ def __getitem__(self, key: DiscreteLoad) -> LoadStats:
+ """Allow access to stats as if self was load_to_stats.
+
+ This also accepts LoadStats as key, so callers do not need
+ to care about hashability.
+
+ :param key: The load to get stats for.
+ :type key: DiscreteLoad
+ :returns: Stats for the given load.
+ :rtype: LoadStats
+ """
+ return self.load_to_stats[key.hashable()]
+
+ def add(self, result: DiscreteResult) -> None:
+ """Incorporate given trial measurement result.
+
+ :param result: Measurement result to add to the database.
+ :type result: DiscreteResult
+ """
+ discrete_load = result.discrete_load.hashable()
+ if not discrete_load.is_round:
+ raise ValueError(f"Not round load: {discrete_load!r}")
+ if discrete_load not in self.load_to_stats:
+ self.load_to_stats[discrete_load] = LoadStats.new_empty(
+ load=discrete_load,
+ targets=self.targets,
+ )
+ self._sort()
+ self.load_to_stats[discrete_load].add(result)
+
+ def get_relevant_bounds(self, target: TargetSpec) -> RelevantBounds:
+ """Return None or a valid trimmed stat, for the two relevant bounds.
+
+ A load is valid only if both optimistic and pessimistic estimates agree.
+
+ If some value is not available, None is returned instead.
+ The returned stats are trimmed to the argument target.
+
+ The implementation starts from low loads
+ and the search stops at lowest upper bound,
+ thus conforming to the conservative definition of relevant bounds.
+
+ :param target: Target to classify loads when finding bounds.
+ :type target: TargetSpec
+ :returns: Relevant lower bound, relevant upper bound.
+ :rtype: RelevantBounds
+ """
+ lower_bound, upper_bound = None, None
+ for load_stats in self.load_to_stats.values():
+ opt, pes = load_stats.estimates(target)
+ if opt != pes:
+ continue
+ if not opt:
+ upper_bound = load_stats
+ break
+ lower_bound = load_stats
+ if lower_bound:
+ lower_bound = TrimmedStat.for_target(lower_bound, target)
+ if upper_bound:
+ upper_bound = TrimmedStat.for_target(upper_bound, target)
+ return RelevantBounds(clo=lower_bound, chi=upper_bound)
diff --git a/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
new file mode 100644
index 0000000000..4d3ff7c4cb
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
@@ -0,0 +1,325 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MultipleLossRatioSearch class."""
+
+import logging
+import time
+
+from dataclasses import dataclass
+from typing import Callable, Optional, Tuple
+
+from .candidate import Candidate
+from .config import Config
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+from .expander import GlobalWidth
+from .goal_result import GoalResult
+from .limit_handler import LimitHandler
+from .load_rounding import LoadRounding
+from .measurement_database import MeasurementDatabase
+from .pep3140 import Pep3140Dict
+from .search_goal import SearchGoal
+from .selector import Selector
+from .target_scaling import TargetScaling
+from .trial_measurement import AbstractMeasurer
+
+
+@dataclass
+class MultipleLossRatioSearch:
+ """Implementation of the controller part of MLRsearch algorithm.
+
+ The manager part is creating and calling this,
+ the measurer part is injected.
+
+ Traditional binary search algorithm needs initial interval
+ (lower and upper bound), and returns final narrow bounds
+ (related to its search goal) after bisecting
+ (until some exit condition is met).
+ The exit condition is usually related to the interval width,
+ (upper bound value minus lower bound value).
+
+ The optimized algorithm in this class contains several improvements
+ aimed to reduce overall search time.
+
+ One improvement is searching for bounds for multiple search goals at once.
+ Specifically, the trial measurement results influence bounds for all goals,
+ even though the selection of trial inputs for next measurement
+ focuses only on one goal. The focus can switch between goals frequently.
+
+ Next improvement is that results of trial measurements
+ with small trial duration can be used to find a reasonable starting interval
+ for full trial duration search.
+ This results in more trials performed, but smaller overall duration
+ in general.
+ Internally, such shorter trials come from "preceding targets",
+ handled in the same way as a search goal's "final target".
+ Related improvement is that the "current" interval does not need to be valid
+ (e.g. one of the bounds is missing).
+ In that case, this algorithm will move and expand the interval,
+ in a process called external search. Only when both bounds are found,
+ the interval bisection (called internal search) starts making it narrow.
+
+ Next improvement is bisecting in logarithmic quantities,
+ so that target relative width is independent of measurement units.
+
+ Next improvement is basing the initial interval on forwarding rates
+ of few initial measurements, starting at max load and using forwarding rates
+ seen so far.
+
+ Next improvement is to allow the use of multiple shorter trials
+ instead of one big trial, allowing a percentage of trials
+ to exceed the loss ratio target.
+ This makes the result more stable in practice.
+ Conservative behavior (single long trial, zero exceed ratio)
+ is still available using corresponding goal definitions.
+
+ Final improvement is exiting early if the minimal load
+ is not a valid lower bound (at final duration)
+ and also exiting if the overall search duration is too long.
+
+ There are also subtle optimizations related to candidate selection
+ and uneven splitting of intervals, too numerous to list here.
+
+ The return values describe performance at the relevant lower bound
+ as "conditional throughput", which is based on loss ratio of one of trials
+ selected as a quantile based on exceed ratio parameter.
+ This value is usually quite pessimistic, as MLRsearch stops
+ measuring a load as soon as it becomes a lower bound,
+ so conditional throughput is usually based on the forwarding rate
+ of the worst of the good long trials.
+ """
+
+ config: Config
+ """Arguments required at construction time."""
+ # End of fields required at instance creation.
+ measurer: AbstractMeasurer = secondary_field()
+ """Measurer to use, set at calling search()."""
+ debug: Callable[[str], None] = secondary_field()
+ """Object to call for logging, None means logging.debug."""
+ # Fields below are computed from data above
+ rounding: LoadRounding = secondary_field()
+ """Derived from goals. Instance to use for intended load rounding."""
+ from_float: Callable[[float], DiscreteLoad] = secondary_field()
+ """Conversion method from float [tps] intended load values."""
+ limit_handler: LimitHandler = secondary_field()
+ """Load post-processing utility based on config and rounding."""
+ scaling: TargetScaling = secondary_field()
+ """Utility for creating target chains for search goals."""
+ database: MeasurementDatabase = secondary_field()
+ """Storage for (stats of) measurement results so far."""
+ stop_time: float = secondary_field()
+ """Monotonic time value at which the search should end with failure."""
+
+ def search(
+ self,
+ measurer: AbstractMeasurer,
+ debug: Optional[Callable[[str], None]] = None,
+ ) -> Pep3140Dict[SearchGoal, GoalResult]:
+ """Perform initial trials, create state object, proceed with main loop.
+
+ Stateful arguments (measurer and debug) are stored.
+ Derived objects are constructed from config.
+
+ :param measurer: Measurement provider to use by this search object.
+ :param debug: Callable to optionally use instead of logging.debug().
+ :type measurer: AbstractMeasurer
+ :type debug: Optional[Callable[[str], None]]
+ :returns: Structure containing conditional throughputs and other stats,
+ one for each search goal. If a value is None it means there is
+ no lower bound (min load turned out to be an upper bound).
+ :rtype: Pep3140Dict[SearchGoal, GoalResult]
+ :raises RuntimeError: If total duration is larger than timeout,
+ or if min load becomes an upper bound for a search goal
+ that has fail fast true.
+ """
+ self.measurer = measurer
+ self.debug = logging.debug if debug is None else debug
+ self.rounding = LoadRounding(
+ min_load=self.config.min_load,
+ max_load=self.config.max_load,
+ float_goals=[goal.relative_width for goal in self.config.goals],
+ )
+ self.from_float = DiscreteLoad.float_conver(rounding=self.rounding)
+ self.limit_handler = LimitHandler(
+ rounding=self.rounding,
+ debug=self.debug,
+ )
+ self.scaling = TargetScaling(
+ goals=self.config.goals,
+ rounding=self.rounding,
+ )
+ self.database = MeasurementDatabase(self.scaling.targets)
+ self.stop_time = time.monotonic() + self.config.search_duration_max
+ result0, result1 = self.run_initial_trials()
+ self.main_loop(result0.discrete_load, result1.discrete_load)
+ ret_dict = Pep3140Dict()
+ for goal in self.config.goals:
+ target = self.scaling.goal_to_final_target[goal]
+ bounds = self.database.get_relevant_bounds(target=target)
+ ret_dict[goal] = GoalResult.from_bounds(bounds=bounds)
+ return ret_dict
+
+ def measure(self, duration: float, load: DiscreteLoad) -> DiscreteResult:
+ """Call measurer and put the result to appropriate form in database.
+
+ Also check the argument types and load roundness,
+ and return the result to the caller.
+
+ :param duration: Intended duration for the trial measurement.
+ :param load: Intended load for the trial measurement.
+ :type duration: float
+ :type load: DiscreteLoad
+ :returns: The trial results.
+ :rtype: DiscreteResult
+ :raises RuntimeError: If an argument does not have the required type.
+ """
+ if not isinstance(duration, float):
+ raise RuntimeError(f"Duration has to be float: {duration!r}")
+ if not isinstance(load, DiscreteLoad):
+ raise RuntimeError(f"Load has to be discrete: {load!r}")
+ if not load.is_round:
+ raise RuntimeError(f"Told to measure unrounded: {load!r}")
+ self.debug(f"Measuring at d={duration},il={int(load)}")
+ result = self.measurer.measure(
+ intended_duration=duration,
+ intended_load=float(load),
+ )
+ self.debug(f"Measured lr={result.loss_ratio}")
+ result = DiscreteResult.with_load(result=result, load=load)
+ self.database.add(result)
+ return result
+
+ def run_initial_trials(self) -> Tuple[DiscreteResult, DiscreteResult]:
+ """Perform trials to get enough data to start the selectors.
+
+ Measurements are done with all initial targets in mind,
+ based on smallest target loss ratio, largest initial trial duration,
+ and largest initial target width.
+
+ Forwarding rate is used as a hint for next intended load.
+ The relative quantity is used, as load can use different units.
+ When the smallest target loss ratio is non-zero, a correction is needed
+ (forwarding rate is only a good hint for zero loss ratio load).
+ The correction is conservative (all increase in load turns to losses).
+
+ Also, warmup trial (if configured) is performed,
+ all other trials are added to the database.
+
+ This could return the initial width, but from an implementation perspective
+ it is easier to return two measurements (or the same one twice) here
+ and compute width later. The "one value twice" happens when max load
+ has small loss, or when min load has big loss.
+
+ :returns: Two last measured values, in any order. Or one value twice.
+ :rtype: Tuple[DiscreteResult, DiscreteResult]
+ """
+ max_load = self.limit_handler.max_load
+ ratio, duration, width = None, None, None
+ for target in self.scaling.targets:
+ if target.preceding:
+ continue
+ if ratio is None or ratio > target.loss_ratio:
+ ratio = target.loss_ratio
+ if not duration or duration < target.trial_duration:
+ duration = target.trial_duration
+ if not width or width < target.discrete_width:
+ width = target.discrete_width
+ self.debug(f"Init ratio {ratio} duration {duration} width {width}")
+ if self.config.warmup_duration:
+ self.debug("Warmup trial.")
+ self.measure(self.config.warmup_duration, max_load)
+ # Warmup should not affect the real results, reset the database.
+ self.database = MeasurementDatabase(self.scaling.targets)
+ self.debug(f"First trial at max rate: {max_load}")
+ result0 = self.measure(duration, max_load)
+ rfr = result0.relative_forwarding_rate
+ corrected_rfr = (self.from_float(rfr) / (1.0 - ratio)).rounded_down()
+ if corrected_rfr >= max_load:
+ self.debug("Small loss, no other initial trials are needed.")
+ return result0, result0
+ mrr = self.limit_handler.handle(corrected_rfr, width, None, max_load)
+ self.debug(f"Second trial at (corrected) mrr: {mrr}")
+ result1 = self.measure(duration, mrr)
+ # Attempt to get narrower width.
+ result_ratio = result1.loss_ratio
+ if result_ratio > ratio:
+ rfr2 = result1.relative_forwarding_rate
+ crfr2 = (self.from_float(rfr2) / (1.0 - ratio)).rounded_down()
+ mrr2 = self.limit_handler.handle(crfr2, width, None, mrr)
+ else:
+ mrr2 = mrr + width
+ mrr2 = self.limit_handler.handle(mrr2, width, mrr, max_load)
+ if not mrr2:
+ self.debug("Close enough, measuring at mrr2 is not needed.")
+ return result1, result1
+ self.debug(f"Third trial at (corrected) mrr2: {mrr2}")
+ result2 = self.measure(duration, mrr2)
+ return result1, result2
+
+ def main_loop(self, load0: DiscreteLoad, load1: DiscreteLoad) -> None:
+ """Initialize selectors and keep measuring the winning candidate.
+
+ Selectors are created, the two input loads are useful starting points.
+
+ The search ends when no selector nominates any candidate,
+ or if the search takes too long (or if a selector raises).
+
+ Winner is selected according to ordering defined in Candidate class.
+ In case of a tie, selectors for earlier goals are preferred.
+
+ As a selector is only allowed to update current width as the winner,
+ the update is done here explicitly.
+
+ :param load0: Discrete load of one of results from run_initial_trials.
+ :param load1: Discrete load of other of results from run_initial_trials.
+ :type load0: DiscreteLoad
+ :type load1: DiscreteLoad
+ :raises RuntimeError: If the search takes too long,
+ or if min load becomes an upper bound for any search goal
+ """
+ if load1 < load0:
+ load0, load1 = load1, load0
+ global_width = GlobalWidth.from_loads(load0, load1)
+ selectors = []
+ for target in self.scaling.goal_to_final_target.values():
+ selector = Selector(
+ final_target=target,
+ global_width=global_width,
+ initial_lower_load=load0,
+ initial_upper_load=load1,
+ database=self.database,
+ handler=self.limit_handler,
+ debug=self.debug,
+ )
+ selectors.append(selector)
+ while time.monotonic() < self.stop_time:
+ winner = Candidate()
+ for selector in selectors:
+ # Order of arguments is important
+ # when two targets nominate the same candidate.
+ winner = min(Candidate.nomination_from(selector), winner)
+ if not winner:
+ break
+ # We do not check duration versus stop_time here,
+ # as some measurers can be unpredictably faster
+ # than their intended duration suggests.
+ self.measure(duration=winner.duration, load=winner.load)
+ # Delayed updates.
+ if winner.width:
+ global_width.width = winner.width
+ winner.won()
+ else:
+ raise RuntimeError("Optimized search takes too long.")
+ self.debug("Search done.")
diff --git a/resources/libraries/python/MLRsearch/pep3140/__init__.py b/resources/libraries/python/MLRsearch/pep3140/__init__.py
new file mode 100644
index 0000000000..f8e2ffaa8f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/pep3140/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "pep3140".
+"""
+
+# TODO: Move submodules to separate modules.
+# Not obvious how to do that from PyPI point of view
+# without affecting the current CSIT global "resources" package root.
+# Probably it can be done by specifying multiple directories
+# in PYTHONPATH used throughout CSIT.
+
+from .classes import Pep3140Dict
diff --git a/resources/libraries/python/MLRsearch/pep3140/classes.py b/resources/libraries/python/MLRsearch/pep3140/classes.py
new file mode 100644
index 0000000000..9ab6e25c7c
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/pep3140/classes.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining a subclass of dict with an alternative str method."""
+
+
+class Pep3140Dict(dict):
+ """A dict with str support as proposed in PEP 3140.
+
+ Python implemented str acting on dict such that the resulting string
+ shows both keys and values in their repr form.
+ Therefore, str() of a dict gives the same result as repr().
+
+ This class shows both keys and values their str form instead.
+ """
+
+ def __str__(self) -> str:
+ """Return comma+space separated str of items in curly brackets.
+
+ :returns: PEP 3140 string form of the dict data.
+ :rtype: str
+ """
+ body = ", ".join(f"{key}: {value}" for key, value in self.items())
+ return f"{{{body}}}"
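
A short illustration of the difference (plain Python, no other dependencies):

    from resources.libraries.python.MLRsearch.pep3140 import Pep3140Dict

    plain = {1.5: "textual value"}
    pep = Pep3140Dict(plain)
    print(str(plain))  # {1.5: 'textual value'} - repr of keys and values
    print(str(pep))    # {1.5: textual value} - str of keys and values
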
diff --git a/resources/libraries/python/MLRsearch/relevant_bounds.py b/resources/libraries/python/MLRsearch/relevant_bounds.py
new file mode 100644
index 0000000000..4bc6796f71
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/relevant_bounds.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RelevantBounds class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class RelevantBounds:
+ """Container for the pair of relevant bounds for a target.
+
+ If there is no valid bound, None is used.
+
+ Relevant upper bound is smallest load acting as an upper bound.
+ Relevant lower bound acts as a lower bound, has to be strictly smaller
+ than the relevant upper bound, and is largest among such loads.
+
+ The short names "clo" and "chi" are also commonly used
+ in logging and technical comments.
+
+ Trimming could be done here, but it needs to known the target explicitly,
+ so it is done in MeasurementDatabase instead.
+ """
+
+ clo: Optional[TrimmedStat]
+ """The relevant lower bound (trimmed) for the current target."""
+ chi: Optional[TrimmedStat]
+ """The relevant upper bound (trimmed) for the current target."""
+
+ # TODO: Check types in post init?
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ clo = int(self.clo) if self.clo else None
+ chi = int(self.chi) if self.chi else None
+ return f"clo={clo},chi={chi}"
diff --git a/resources/libraries/python/MLRsearch/search_goal.py b/resources/libraries/python/MLRsearch/search_goal.py
new file mode 100644
index 0000000000..777ad5b991
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/search_goal.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining SearchGoal class."""
+
+from dataclasses import dataclass
+
+
+@dataclass(frozen=True, eq=True)
+class SearchGoal:
+ """Storage class for search goal attributes.
+
+ This is the part of controller inputs that can be repeated
+ with different values. MLRsearch saves time by searching
+ for conditional throughput for each goal at the same time,
+ compared to repeated calls with separate goals.
+
+ Most fields (called attributes) of this composite
+ are relevant to the definition of conditional throughput.
+ The rest are not, but they can affect the overall search time.
+ """
+
+ loss_ratio: float = 0.0
+ """The goal loss ratio.
+ A trial can satisfy the goal only when its trial loss ratio is not higher
+ than this. See MeasurementResult.loss_ratio for details.
+ A trial that does not satisfy this goal is called a bad trial."""
+ exceed_ratio: float = 0.5
+ """What portion of the duration sum can consist of bad trial seconds
+ while still being classified as lower bound (assuming no short trials)."""
+ relative_width: float = 0.005
+ """Target is achieved when the relevant lower bound
+ is no more than this (in units of the tightest upper bound) far
+ from the relevant upper bound."""
+ initial_trial_duration: float = 1.0
+ """Shortest trial duration employed when searching for this goal."""
+ final_trial_duration: float = 1.0
+ """Longest trial duration employed when searching for this goal."""
+ duration_sum: float = 21.0
+ """Minimal sum of durations of relevant trials sufficient to declare a load
+ to be upper or lower bound for this goal."""
+ preceding_targets: int = 2
+ """Number of increasingly coarser search targets to insert,
+ hoping to speed up searching for the final target of this goal."""
+ expansion_coefficient: int = 2
+ """External search multiplies width (in logarithmic space) by this."""
+ fail_fast: bool = True
+ """If true and min load is not an upper bound, raise.
+ If false, search will return None instead of lower bound."""
+
+ def __post_init__(self) -> None:
+ """Convert fields to correct types and call validate."""
+ super().__setattr__("loss_ratio", float(self.loss_ratio))
+ super().__setattr__("exceed_ratio", float(self.exceed_ratio))
+ super().__setattr__("relative_width", float(self.relative_width))
+ super().__setattr__(
+ "final_trial_duration", float(self.final_trial_duration)
+ )
+ super().__setattr__(
+ "initial_trial_duration", float(self.initial_trial_duration)
+ )
+ super().__setattr__("duration_sum", float(self.duration_sum))
+ super().__setattr__("preceding_targets", int(self.preceding_targets))
+ super().__setattr__(
+ "expansion_coefficient", int(self.expansion_coefficient)
+ )
+ super().__setattr__("fail_fast", bool(self.fail_fast))
+ self.validate()
+
+ def validate(self) -> None:
+ """Make sure the initialized values conform to requirements.
+
+ :raises ValueError: If a field value is outside allowed bounds.
+ """
+ if self.loss_ratio < 0.0:
+ raise ValueError(f"Loss ratio cannot be negative: {self}")
+ if self.loss_ratio >= 1.0:
+ raise ValueError(f"Loss ratio must be lower than 1: {self}")
+ if self.exceed_ratio < 0.0:
+ raise ValueError(f"Exceed ratio cannot be negative: {self}")
+ if self.exceed_ratio >= 1.0:
+ raise ValueError(f"Exceed ratio must be lower than 1: {self}")
+ if self.relative_width <= 0.0:
+ raise ValueError(f"Relative width must be positive: {self}")
+ if self.relative_width >= 1.0:
+ raise ValueError(f"Relative width must be less than 1: {self}")
+ if self.initial_trial_duration <= 0.0:
+ raise ValueError(f"Initial trial duration must be positive: {self}")
+ if self.final_trial_duration < self.initial_trial_duration:
+ raise ValueError(
+ f"Single duration max must be at least initial: {self}"
+ )
+ if self.duration_sum < self.final_trial_duration:
+ raise ValueError(
+ "Min duration sum cannot be smaller"
+ f" than final trial duration: {self}"
+ )
+ if self.expansion_coefficient <= 1:
+ raise ValueError(f"Expansion coefficient is too small: {self}")
+ too_small = False
+ if self.preceding_targets < 0:
+ too_small = True
+ elif self.preceding_targets < 1:
+ if self.initial_trial_duration < self.duration_sum:
+ too_small = True
+ if too_small:
+ raise ValueError(
+ f"Number of preceding targets is too small: {self}"
+ )
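As an illustration of the class above (the import path is an assumption; within CSIT the module lives under resources/libraries/python/MLRsearch), a caller could construct a goal and rely on validate() to reject out-of-range values:

    # Illustrative sketch only; the import path is an assumption.
    from resources.libraries.python.MLRsearch.search_goal import SearchGoal

    goal = SearchGoal(loss_ratio="0.005", exceed_ratio=0.5)
    print(goal.loss_ratio)    # 0.005, converted to float by __post_init__
    print(goal.duration_sum)  # 21.0, the default

    try:
        SearchGoal(loss_ratio=1.5)
    except ValueError as err:
        print(err)            # Loss ratio must be lower than 1: ...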
diff --git a/resources/libraries/python/MLRsearch/search_goal_tuple.py b/resources/libraries/python/MLRsearch/search_goal_tuple.py
new file mode 100644
index 0000000000..d40ba99b4b
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/search_goal_tuple.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining SearchGoalTuple class."""
+
+from collections.abc import Iterator
+from dataclasses import dataclass
+from typing import Tuple
+
+from .search_goal import SearchGoal
+
+
+@dataclass(frozen=True)
+class SearchGoalTuple:
+ """Container class holding multiple search goals.
+
+ Just a convenience for checking their number and types.
+ """
+
+ goals: Tuple[SearchGoal, ...]
+ """Goals extracted from user-provided Iterable of search goals."""
+
+ def __post_init__(self) -> None:
+ """Check type and number of search goals.
+
+ :raises ValueError: If there are no goals.
+ :raises TypeError: If a goal is not a SearchGoal.
+ """
+ super().__setattr__("goals", tuple(self.goals))
+ if not self.goals:
+ raise ValueError(f"Cannot be empty: {self.goals}")
+ for goal in self.goals:
+ if not isinstance(goal, SearchGoal):
+ raise TypeError(f"Must be a SearchGoal instance: {goal}")
+ copied = list(self.goals)
+ deduplicated = set(self.goals)
+ for goal in copied:
+ if goal not in deduplicated:
+ raise ValueError(f"Duplicate goal: {goal}")
+ deduplicated.remove(goal)
+ if deduplicated:
+ raise ValueError(f"Error processing goals: {deduplicated}")
+
+ def __iter__(self) -> Iterator[SearchGoal]:
+ """Enable itertion over goals.
+
+ :returns: Iterator iteratinc over contained goals.
+ :rtype: Iterator[SearchGoal]
+ """
+ return iter(self.goals)
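A minimal usage sketch (import paths assumed): goals compare by value, so passing two equal goals triggers the duplicate check.

    # Illustrative sketch only; import paths are assumptions.
    from resources.libraries.python.MLRsearch.search_goal import SearchGoal
    from resources.libraries.python.MLRsearch.search_goal_tuple import SearchGoalTuple

    goals = SearchGoalTuple((SearchGoal(loss_ratio=0.0), SearchGoal(loss_ratio=0.005)))
    for goal in goals:
        print(goal.loss_ratio)  # 0.0, then 0.005

    try:
        SearchGoalTuple((SearchGoal(), SearchGoal()))
    except ValueError as err:
        print(err)              # Duplicate goal: ...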
diff --git a/resources/libraries/python/MLRsearch/selector.py b/resources/libraries/python/MLRsearch/selector.py
new file mode 100644
index 0000000000..4a6d2e2574
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/selector.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Selector class."""
+
+
+from dataclasses import dataclass, field
+from typing import Callable, List, Optional, Tuple
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .expander import TargetedExpander
+from .global_width import GlobalWidth
+from .limit_handler import LimitHandler
+from .measurement_database import MeasurementDatabase
+from .relevant_bounds import RelevantBounds
+from .target_spec import TargetSpec
+from .strategy import StrategyBase, STRATEGY_CLASSES
+
+
+@dataclass
+class Selector:
+ """A selector is an abstraction that focuses on only one of search goals.
+
+ While lower-level logic is hidden in strategy classes,
+ the code in this class is responsible for initializing strategies
+ and shifting targets towards the final target.
+
+ While the public methods have the same names and meanings as the ones
+ in strategy classes, their signatures are different.
+ Selector adds the current target trial duration to the output of nominate(),
+ and adds the current bounds to the input of won().
+
+ The nominate method does not return a complete Candidate instance,
+ as we need to avoid circular dependencies
+ (candidate will refer to selector).
+ """
+
+ final_target: TargetSpec
+ """The target this selector is trying to ultimately achieve."""
+ global_width: GlobalWidth
+ """Reference to the global width tracking instance."""
+ initial_lower_load: DiscreteLoad
+ """Smaller of the two loads distinguished at instance creation.
+ During operation, this field is reused to store preceding target bound."""
+ initial_upper_load: DiscreteLoad
+ """Larger of the two loads distinguished at instance creation.
+ During operation, this field is reused to store preceding target bound."""
+ database: MeasurementDatabase = field(repr=False)
+ """Reference to the common database used by all selectors."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the class used to avoid too narrow intervals."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+ # Primary above, derived below.
+ current_target: TargetSpec = secondary_field()
+ """The target the selector is focusing on currently."""
+ target_stack: List[TargetSpec] = secondary_field()
+ """Stack of targets. When current target is achieved, next is popped."""
+ strategies: Tuple[StrategyBase] = secondary_field()
+ """Instances implementing particular selection strategies."""
+ current_strategy: Optional[StrategyBase] = secondary_field()
+ """Reference to strategy used for last nomination, needed for won()."""
+ # Cache.
+ bounds: RelevantBounds = secondary_field()
+ """New relevant bounds for this round of candidate selection."""
+
+ def __post_init__(self) -> None:
+ """Initialize derived values."""
+ self.target_stack = [self.final_target]
+ while preceding_target := self.target_stack[-1].preceding:
+ self.target_stack.append(preceding_target)
+ self.current_target = self.target_stack.pop()
+ self._recreate_strategies()
+
+ def _recreate_strategies(self) -> None:
+ """Recreate strategies after current target has changed.
+
+ Width expander is recreated as target width is now smaller.
+ For convenience, strategies get injectable debug
+ which prints also the current target.
+ """
+ expander = TargetedExpander(
+ target=self.current_target,
+ global_width=self.global_width,
+ initial_lower_load=self.initial_lower_load,
+ initial_upper_load=self.initial_upper_load,
+ handler=self.handler,
+ debug=self.debug,
+ )
+
+ def wrapped_debug(text: str) -> None:
+ """Call self debug with current target info prepended.
+
+ :param text: Message to log at debug level.
+ :type text: str
+ """
+ self.debug(f"Target {self.current_target}: {text}")
+
+ self.strategies = tuple(
+ cls(
+ target=self.current_target,
+ expander=expander,
+ initial_lower_load=self.initial_lower_load,
+ initial_upper_load=self.initial_upper_load,
+ handler=self.handler,
+ debug=wrapped_debug,
+ )
+ for cls in STRATEGY_CLASSES
+ )
+ self.current_strategy = None
+ self.debug(f"Created strategies for: {self.current_target}")
+
+ def _update_bounds(self) -> None:
+ """Before each iteration, call this to update bounds cache."""
+ self.bounds = self.database.get_relevant_bounds(self.current_target)
+
+ def nominate(
+ self,
+ ) -> Tuple[Optional[DiscreteLoad], float, Optional[DiscreteWidth]]:
+ """Find first strategy that wants to nominate, return trial inputs.
+
+ Returned load is None if no strategy wants to nominate.
+
+ The current target is shifted when the (now preceding) target is reached.
+ As no strategy becomes done before at least one
+ bound relevant to the current target becomes available,
+ there is never a need to revert to the preceding target after the shift.
+
+ As the initial trials had inputs relevant to all initial targets,
+ the only way for this not to nominate a load
+ is when the final target is reached (including hitting min or max load).
+ The case of hitting min load raises, so search fails early.
+
+ :returns: Nominated load, duration, and global width to set if winning.
+ :rtype: Tuple[Optional[DiscreteLoad], float, Optional[DiscreteWidth]]
+ :raises RuntimeError: If internal inconsistency is detected,
+ or if min load becomes an upper bound.
+ """
+ self._update_bounds()
+ self.current_strategy = None
+ while 1:
+ for strategy in self.strategies:
+ load, width = strategy.nominate(self.bounds)
+ if load:
+ self.current_strategy = strategy
+ return load, self.current_target.trial_duration, width
+ if not self.bounds.clo and not self.bounds.chi:
+ raise RuntimeError("Internal error: no clo nor chi.")
+ if not self.target_stack:
+ if not self.bounds.clo and self.current_target.fail_fast:
+ raise RuntimeError(f"No lower bound: {self.bounds.chi!r}")
+ self.debug(f"Goal {self.current_target} reached: {self.bounds}")
+ return None, self.current_target.trial_duration, None
+ # Everything is ready for next target in the chain.
+ self.current_target = self.target_stack.pop()
+ # Debug logs look better if we forget bounds are TrimmedStat.
+ # Abuse rounding (if not None) to convert to pure DiscreteLoad.
+ clo, chi = self.bounds.clo, self.bounds.chi
+ self.initial_lower_load = clo.rounded_down() if clo else clo
+ self.initial_upper_load = chi.rounded_down() if chi else chi
+ self._update_bounds()
+ self._recreate_strategies()
+
+ def won(self, load: DiscreteLoad) -> None:
+ """Update any private info when candidate became a winner.
+
+ :param load: The load previously nominated by current strategy.
+ :type load: DiscreteLoad
+ """
+ self._update_bounds()
+ self.current_strategy.won(bounds=self.bounds, load=load)
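The target chain unwinding in __post_init__ can be pictured with a simplified stand-in for TargetSpec (the real class carries more attributes); the coarsest target ends up on top of the stack and is searched first:

    # Simplified sketch, not the real TargetSpec.
    from dataclasses import dataclass
    from typing import Optional

    @dataclass(frozen=True)
    class FakeTarget:
        name: str
        preceding: Optional["FakeTarget"]

    final = FakeTarget("final", FakeTarget("coarse", FakeTarget("coarsest", None)))
    stack = [final]
    while preceding := stack[-1].preceding:
        stack.append(preceding)
    current = stack.pop()
    print(current.name)  # coarsest, the first target to focus on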
diff --git a/resources/libraries/python/MLRsearch/strategy/__init__.py b/resources/libraries/python/MLRsearch/strategy/__init__.py
new file mode 100644
index 0000000000..a1e0225a17
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "strategy".
+"""
+
+from .base import StrategyBase
+from .bisect import BisectStrategy
+from .extend_hi import ExtendHiStrategy
+from .extend_lo import ExtendLoStrategy
+from .halve import HalveStrategy
+from .refine_hi import RefineHiStrategy
+from .refine_lo import RefineLoStrategy
+
+
+STRATEGY_CLASSES = (
+ HalveStrategy,
+ RefineLoStrategy,
+ RefineHiStrategy,
+ ExtendLoStrategy,
+ ExtendHiStrategy,
+ BisectStrategy,
+)
+"""Tuple of strategy constructors, in order of priority decreasing."""
diff --git a/resources/libraries/python/MLRsearch/strategy/base.py b/resources/libraries/python/MLRsearch/strategy/base.py
new file mode 100644
index 0000000000..0724f882bf
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/base.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining StrategyBase class."""
+
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import Callable, Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..expander import TargetedExpander
+from ..limit_handler import LimitHandler
+from ..relevant_bounds import RelevantBounds
+from ..target_spec import TargetSpec
+
+
+@dataclass
+class StrategyBase(ABC):
+ """Abstract class encompassing data common to most strategies.
+
+ A strategy is one piece of logic a selector may use
+ when nominating a candidate according to its current target.
+
+ The two initial bound arguments may not be bounds at all.
+ For initial targets, the two values are usually mrr and mrr2.
+ For subsequent targets, the initial values are usually
+ the relevant bounds of the preceding target,
+ but one of them may be None if hitting min or max load.
+
+ The initial values are mainly used as stable alternatives
+ to relevant bounds of preceding target,
+ because those bounds may have been unpredictably altered
+ by nominations from unrelated search goals.
+ This greatly simplifies reasoning about strategies making progress.
+ """
+
+ target: TargetSpec
+ """The target this strategy is focusing on."""
+ expander: TargetedExpander
+ """Instance to track width expansion during search (if applicable)."""
+ initial_lower_load: Optional[DiscreteLoad]
+ """Smaller of the two loads distinguished at instance creation.
+ Can be None if upper bound is the min load."""
+ initial_upper_load: Optional[DiscreteLoad]
+ """Larger of the two loads distinguished at instance creation.
+ Can be None if lower bound is the max load."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the limit handler instance."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+
+ @abstractmethod
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate a load candidate if the conditions activate this strategy.
+
+ A complete candidate refers also to the nominating selector.
+ To prevent circular dependence (selector refers to nominating strategy),
+ this function returns only load and width.
+
+ Width should only be non-None if global current width should be updated
+ when the candidate based on this becomes winner.
+ But currently all strategies return non-None width
+ if they return non-None load.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ return None, None
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Notify the strategy its candidate became the winner.
+
+ Most strategies have no use for this information,
+ but some strategies may need to update their private information.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ return
+
+ def not_worth(self, bounds: RelevantBounds, load: DiscreteLoad) -> bool:
+ """A check on bounds common for multiple strategies.
+
+ The load is worth measuring only if it can create or improve
+ either relevant bound.
+
+ Each strategy is designed to create a relevant bound for current target,
+ which is only needed if that (or better) bound does not exist yet.
+ Conversely, if a strategy does not nominate, it is because
+ the load it would nominate (if any) is found not worth by this method.
+
+ :param bounds: Current relevant bounds.
+ :param load: Load of a possible candidate.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ :returns: True if the load should NOT be nominated.
+ :rtype: bool
+ """
+ if bounds.clo and bounds.clo >= load:
+ return True
+ if bounds.chi and bounds.chi <= load:
+ return True
+ if bounds.clo and bounds.chi:
+ # We are not hitting min nor max load.
+ # Measuring at this load will create or improve clo or chi.
+ # The only reason not to nominate is if interval is narrow already.
+ wig = DiscreteInterval(
+ lower_bound=bounds.clo,
+ upper_bound=bounds.chi,
+ ).width_in_goals(self.target.discrete_width)
+ if wig <= 1.0:
+ return True
+ return False
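The same check can be pictured with plain floats standing in for discrete loads (illustrative only; the real code compares DiscreteLoad instances and uses the logarithmic width_in_goals instead of an absolute width):

    def not_worth_sketch(clo, chi, load, goal_width):
        """Simplified float-based stand-in for StrategyBase.not_worth."""
        if clo is not None and clo >= load:
            return True   # cannot improve the relevant lower bound
        if chi is not None and chi <= load:
            return True   # cannot improve the relevant upper bound
        if clo is not None and chi is not None and chi - clo <= goal_width:
            return True   # interval already narrow enough for this target
        return False

    print(not_worth_sketch(clo=9.0, chi=10.0, load=9.5, goal_width=2.0))   # True
    print(not_worth_sketch(clo=None, chi=10.0, load=8.0, goal_width=2.0))  # False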
diff --git a/resources/libraries/python/MLRsearch/strategy/bisect.py b/resources/libraries/python/MLRsearch/strategy/bisect.py
new file mode 100644
index 0000000000..894544695e
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/bisect.py
@@ -0,0 +1,193 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining BisectStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class BisectStrategy(StrategyBase):
+ """Strategy to use when both bounds relevant to curent target are present.
+
+ Primarily, this strategy is there to perform internal search.
+ As powers of two are friendly to binary search,
+ this strategy relies on the splitting logic described in DiscreteInterval.
+
+ The main reason why this class is so long is that a mere existence
+ of a valid bound for the current target does not imply
+ that bound is a good approximation of the final conditional throughput.
+ The bound might become valid due to efforts of a strategy
+ focusing on an entirely different search goal.
+
+ On the other hand, initial bounds may be better approximations,
+ but they also may be bad approximations (for example
+ when SUT behavior strongly depends on trial duration).
+
+ Based on a comparison of the existing current bounds with the initial bounds,
+ this strategy also mimics what external search would do
+ (if one current bound was missing and the other initial bound was current).
+ If that load value is closer to the appropriate initial bound
+ (than the simple bisect between the current bounds is),
+ that load is nominated instead.
+
+ It turns out those "conditional" external search nominations
+ are quite different from unconditional ones,
+ at least when it comes to handling limits
+ and tracking when width expansion should be applied.
+ That is why that logic is here
+ and not in some generic external search class.
+ """
+
+ expand_on_clo: bool = False
+ """If extending up, width should be expanded when load becomes clo."""
+ expand_on_chi: bool = False
+ """If extending down, width should be expanded when load becomes chi."""
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate a load candidate between bounds or extending from them.
+
+ The external search logic is offloaded into private methods.
+ If they return a truthy load, that is returned from here as well.
+
+ Only if the actual bisect is selected
+ is the per-selector expander limited to the (smaller) new width.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not bounds.clo or bounds.clo >= self.handler.max_load:
+ return None, None
+ if not bounds.chi or bounds.chi <= self.handler.min_load:
+ return None, None
+ interval = DiscreteInterval(bounds.clo, bounds.chi)
+ if interval.width_in_goals(self.target.discrete_width) <= 1.0:
+ return None, None
+ bisect_load = interval.middle(self.target.discrete_width)
+ load, width = self._extend_lo(bounds, bisect_load)
+ if load:
+ self.expand_on_clo, self.expand_on_chi = False, True
+ self.debug(f"Preferring to extend down: {load}")
+ return load, width
+ load, width = self._extend_hi(bounds, bisect_load)
+ if load:
+ self.expand_on_clo, self.expand_on_chi = True, False
+ self.debug(f"Preferring to extend up: {load}")
+ return load, width
+ load = bisect_load
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.expand_on_clo, self.expand_on_chi = False, False
+ self.debug(f"Preferring to bisect: {load}")
+ width_lo = DiscreteInterval(bounds.clo, load).discrete_width
+ width_hi = DiscreteInterval(load, bounds.chi).discrete_width
+ width = min(width_lo, width_hi)
+ self.expander.limit(width)
+ return load, width
+
+ def _extend_lo(
+ self, bounds: RelevantBounds, bisect_load: DiscreteLoad
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Compute load as if extending down, return it if preferred.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param bisect_load: Load when bisection is preferred.
+ :type bounds: RelevantBounds
+ :type bisect_load: DiscreteLoad
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ # TODO: Simplify all the conditions or explain them better.
+ if not self.initial_upper_load:
+ return None, None
+ if bisect_load >= self.initial_upper_load:
+ return None, None
+ width = self.expander.get_width()
+ load = bounds.chi - width
+ load = self.handler.handle(
+ load=load,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if not load:
+ return None, None
+ if load <= bisect_load:
+ return None, None
+ if load >= self.initial_upper_load:
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ raise RuntimeError(f"Load not worth: {load}")
+ return load, width
+
+ def _extend_hi(
+ self, bounds: RelevantBounds, bisect_load: DiscreteLoad
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Compute load as if extending up, return it if preferred.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param bisect_load: Load when bisection is preferred.
+ :type bounds: RelevantBounds
+ :type bisect_load: DiscreteLoad
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ # TODO: Simplify all the conditions or explain them better.
+ if not self.initial_lower_load:
+ return None, None
+ if bisect_load <= self.initial_lower_load:
+ return None, None
+ width = self.expander.get_width()
+ load = bounds.clo + width
+ load = self.handler.handle(
+ load=load,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if not load:
+ return None, None
+ if load >= bisect_load:
+ return None, None
+ if load <= self.initial_lower_load:
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ raise RuntimeError(f"Load not worth: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width when appropriate.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if self.expand_on_clo and load == bounds.clo:
+ self.expander.expand()
+ elif self.expand_on_chi and load == bounds.chi:
+ self.expander.expand()
diff --git a/resources/libraries/python/MLRsearch/strategy/extend_hi.py b/resources/libraries/python/MLRsearch/strategy/extend_hi.py
new file mode 100644
index 0000000000..79c4ad7cf2
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/extend_hi.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining ExtendHiStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class ExtendHiStrategy(StrategyBase):
+ """This strategy is applied when there is no relevant upper bound.
+
+ Typically this is needed after RefineHiStrategy turned initial upper bound
+ into a current relevant lower bound.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate current relevant lower bound plus expander width.
+
+ This performs external search in upwards direction,
+ until a valid upper bound for the current target is found,
+ or until max load is hit.
+ Limit handling is used to avoid nominating too close to
+ (or above) the max rate.
+
+ Width expansion is only applied if the candidate becomes a lower bound,
+ so that is detected in the won() method.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if bounds.chi or not bounds.clo or bounds.clo >= self.handler.max_load:
+ return None, None
+ width = self.expander.get_width()
+ load = self.handler.handle(
+ load=bounds.clo + width,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"No chi, extending up: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width if the load became the new lower bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if load == bounds.clo:
+ self.expander.expand()
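The overall effect of this strategy together with the expander resembles the following float-based sketch of an upward external search (the real code expands widths in logarithmic space via TargetedExpander and clamps loads via LimitHandler):

    # Simplified sketch with linear widths; every probe is assumed
    # to become a new lower bound, so the width keeps expanding.
    clo, width, coefficient, max_load = 4.0, 1.0, 2, 20.0
    probes = []
    while clo + width <= max_load:
        load = clo + width
        probes.append(load)
        clo = load
        width *= coefficient
    print(probes)  # [5.0, 7.0, 11.0, 19.0]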
diff --git a/resources/libraries/python/MLRsearch/strategy/extend_lo.py b/resources/libraries/python/MLRsearch/strategy/extend_lo.py
new file mode 100644
index 0000000000..68d20b6a6a
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/extend_lo.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining ExtendLoStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class ExtendLoStrategy(StrategyBase):
+ """This strategy is applied when there is no relevant lower bound.
+
+ Typically this is needed after RefineLoStrategy turned initial lower bound
+ into a current relevant upper bound.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate current relevant upper bound minus expander width.
+
+ This performs external search in downwards direction,
+ until a valid lower bound for the current target is found,
+ or until min load is hit.
+ Limit handling is used to avoid nominating too close to
+ (or below) the min rate.
+
+ Width expansion is only applied if the candidate becomes an upper bound,
+ so that is detected in the won() method.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if bounds.clo or not bounds.chi or bounds.chi <= self.handler.min_load:
+ return None, None
+ width = self.expander.get_width()
+ load = self.handler.handle(
+ load=bounds.chi - width,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"No clo, extending down: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width if the load became new upper bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if load == bounds.chi:
+ self.expander.expand()
diff --git a/resources/libraries/python/MLRsearch/strategy/halve.py b/resources/libraries/python/MLRsearch/strategy/halve.py
new file mode 100644
index 0000000000..3188a041c6
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/halve.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining HalveStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class HalveStrategy(StrategyBase):
+ """First strategy to apply for a new current target.
+
+ Pick a load between initial lower bound and initial upper bound,
+ nominate it if it is (still) worth it.
+
+ In a sense, this can be viewed as an extension of preceding target's
+ bisect strategy. But as the current target may require a different
+ trial duration, it is better to do it for the new target.
+
+ Alternatively, this is a way to save one application
+ of the subsequent refine strategy, thus reducing the risk of triggering
+ an external search (a slight time saver for highly unstable SUTs).
+ Either way, a minor time saving is achieved because the preceding target
+ only needs to reach double the current target width.
+
+ If the distance between initial bounds is already at or below
+ current target width, the middle point is not nominated.
+ The reasoning is that in this case external search is likely
+ to get triggered by the subsequent refine strategies,
+ so attaining a relevant bound here is not as likely to help.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the middle between initial lower and upper bound.
+
+ The returned width is the target width, even if initial bounds
+ happened to be closer together.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not self.initial_lower_load or not self.initial_upper_load:
+ return None, None
+ interval = DiscreteInterval(
+ lower_bound=self.initial_lower_load,
+ upper_bound=self.initial_upper_load,
+ )
+ wig = interval.width_in_goals(self.target.discrete_width)
+ if wig > 2.0:
+ # Can happen for initial target.
+ return None, None
+ if wig <= 1.0:
+ # Already was narrow enough, refinements shall be sufficient.
+ return None, None
+ load = interval.middle(self.target.discrete_width)
+ if self.not_worth(bounds, load):
+ return None, None
+ self.debug(f"Halving available: {load}")
+ # TODO: Report possibly smaller width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
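The activation window described above can be summarised as a predicate on the distance between the initial bounds, expressed in multiples of the current target width (illustrative sketch):

    def halve_nominates(width_in_goals):
        """True when one halving of the initial interval reaches target width."""
        return 1.0 < width_in_goals <= 2.0

    print(halve_nominates(2.5))  # False: bounds too far apart (can happen for initial target)
    print(halve_nominates(1.5))  # True: halving lands within the target width
    print(halve_nominates(0.9))  # False: already narrow, refinements suffice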
diff --git a/resources/libraries/python/MLRsearch/strategy/refine_hi.py b/resources/libraries/python/MLRsearch/strategy/refine_hi.py
new file mode 100644
index 0000000000..caa8fc4a7d
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/refine_hi.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RefineHiStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class RefineHiStrategy(StrategyBase):
+ """If initial upper bound is still worth it, nominate it.
+
+ This usually happens when halving resulted in relevant lower bound,
+ or if there was no halving (and RefineLoStrategy confirmed initial
+ lower bound became a relevant lower bound for the new current target).
+
+ This either ensures a matching upper bound (target is achieved)
+ or moves the relevant lower bound higher (triggering external search).
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the initial upper bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not (load := self.initial_upper_load):
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"Upperbound refinement available: {load}")
+ # TODO: Limit to possibly smaller than target width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
diff --git a/resources/libraries/python/MLRsearch/strategy/refine_lo.py b/resources/libraries/python/MLRsearch/strategy/refine_lo.py
new file mode 100644
index 0000000000..7927798505
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/refine_lo.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RefineLoStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class RefineLoStrategy(StrategyBase):
+ """If initial lower bound is still worth it, nominate it.
+
+ This usually happens when halving resulted in relevant upper bound,
+ or if there was no halving.
+ This ensures a relevant bound (upper or lower) for the current target
+ exists.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the initial lower bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not (load := self.initial_lower_load):
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"Lowerbound refinement available: {load}")
+ # TODO: Limit to possibly smaller than target width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
diff --git a/resources/libraries/python/MLRsearch/target_scaling.py b/resources/libraries/python/MLRsearch/target_scaling.py
new file mode 100644
index 0000000000..25114c311c
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_scaling.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetScaling class."""
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .dataclass import secondary_field
+from .discrete_width import DiscreteWidth
+from .load_rounding import LoadRounding
+from .search_goal import SearchGoal
+from .search_goal_tuple import SearchGoalTuple
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TargetScaling:
+ """Encapsulate targets derived from goals.
+
+ No default values for primaries, the constructor call has to specify everything.
+ """
+
+ goals: SearchGoalTuple
+ """Set of goals to generate targets for."""
+ rounding: LoadRounding
+ """Rounding instance to use (targets have discrete width)."""
+ # Derived quantities.
+ targets: Tuple[TargetSpec] = secondary_field()
+ """The generated targets, linked into chains."""
+ goal_to_final_target: Dict[SearchGoal, TargetSpec] = secondary_field()
+ """Mapping from a goal to its corresponding final target."""
+
+ def __post_init__(self) -> None:
+ """For each goal create final, and non-final targets and link them."""
+ linked_targets = []
+ self.goal_to_final_target = {}
+ for goal in self.goals:
+ standalone_targets = []
+ # Final target.
+ width = DiscreteWidth(
+ rounding=self.rounding,
+ float_width=goal.relative_width,
+ ).rounded_down()
+ duration_sum = goal.duration_sum
+ target = TargetSpec(
+ loss_ratio=goal.loss_ratio,
+ exceed_ratio=goal.exceed_ratio,
+ discrete_width=width,
+ trial_duration=goal.final_trial_duration,
+ duration_sum=duration_sum,
+ expansion_coefficient=goal.expansion_coefficient,
+ fail_fast=goal.fail_fast,
+ preceding=None,
+ )
+ standalone_targets.append(target)
+ # Non-final targets.
+ preceding_targets = goal.preceding_targets
+ multiplier = (
+ pow(
+ goal.initial_trial_duration / duration_sum,
+ 1.0 / preceding_targets,
+ )
+ if preceding_targets
+ else 1.0
+ )
+ for count in range(preceding_targets):
+ preceding_sum = duration_sum * pow(multiplier, count + 1)
+ if count + 1 >= preceding_targets:
+ preceding_sum = goal.initial_trial_duration
+ trial_duration = min(goal.final_trial_duration, preceding_sum)
+ width *= 2
+ target = TargetSpec(
+ loss_ratio=goal.loss_ratio,
+ exceed_ratio=goal.exceed_ratio,
+ discrete_width=width,
+ trial_duration=trial_duration,
+ duration_sum=preceding_sum,
+ expansion_coefficient=goal.expansion_coefficient,
+ fail_fast=False,
+ preceding=None,
+ )
+ standalone_targets.append(target)
+ # Link preceding targets.
+ preceding_target = None
+ for target in reversed(standalone_targets):
+ linked_target = target.with_preceding(preceding_target)
+ linked_targets.append(linked_target)
+ preceding_target = linked_target
+ # Associate final target to the goal.
+ self.goal_to_final_target[goal] = linked_targets[-1]
+ # Store all targets as a tuple.
+ self.targets = tuple(linked_targets)
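A worked numeric example (illustrative, plain floats) of the geometric scaling above, using the SearchGoal defaults of initial_trial_duration=1.0, duration_sum=21.0 and preceding_targets=2; the coarsest target needs only about one second of trials while the final target needs the full 21 seconds:

    initial_trial_duration = 1.0
    duration_sum = 21.0
    preceding_targets = 2

    multiplier = (initial_trial_duration / duration_sum) ** (1.0 / preceding_targets)
    preceding_sums = []
    for count in range(preceding_targets):
        preceding_sum = duration_sum * multiplier ** (count + 1)
        if count + 1 >= preceding_targets:
            preceding_sum = initial_trial_duration
        preceding_sums.append(round(preceding_sum, 3))
    print(preceding_sums)  # [4.583, 1.0]; target widths double at each step: w, 2w, 4w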
diff --git a/resources/libraries/python/MLRsearch/target_spec.py b/resources/libraries/python/MLRsearch/target_spec.py
new file mode 100644
index 0000000000..5279ba00a1
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_spec.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetSpec class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Optional
+
+from .discrete_width import DiscreteWidth
+
+
+@dataclass(frozen=True, eq=True)
+class TargetSpec:
+ """Composite object holding attributes specifying one search target.
+
+ Abstractly, this serves several related purposes.
+ With the discrete_width attribute, this specifies when a selector is done.
+ With the expansion_coefficient attribute, it tells the selector how quickly
+ it should expand the interval in external search.
+ With the "preceding" attribute, it helps the selector, so it does not need
+ to point to the preceding target separately from its current target.
+ Without those three attributes, this object is still sufficient
+ for LoadStats to classify loads as lower bound, upper bound, or unknown.
+ """
+
+ loss_ratio: float
+ """Target loss ratio. Equal and directly analogous to goal loss ratio,
+ but applicable also for non-final targets."""
+ exceed_ratio: float
+ """Target exceed ratio. Equal and directly analogous to goal exceed ratio,
+ but applicable also for non-final targets."""
+ discrete_width: DiscreteWidth
+ """Target relative width. Analogous to goal relative width,
+ but coarser for non-final targets."""
+ trial_duration: float
+ """Duration to use for trials for this target. Shorter trials have lesser
+ (and more complicated) impact when determining upper and lower bounds."""
+ duration_sum: float
+ """Sum of trial durations sufficient to classify a load
+ as an upper or lower bound.
+ For non-final targets, this is shorter than goal duration_sum."""
+ expansion_coefficient: int = field(repr=False)
+ """Equal and directly analogous to goal expansion coefficient,
+ but applicable also for non-final targets."""
+ fail_fast: bool = field(repr=False)
+ """Copied from goal. If true and min load is not an upper bound, raise."""
+ preceding: Optional[TargetSpec] = field(repr=False)
+ """Reference to next coarser target (if any) belonging to the same goal."""
+
+ # No conversions or validations, as this is an internal structure.
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"lr={self.loss_ratio},er={self.exceed_ratio}"
+ f",ds={self.duration_sum}"
+ )
+
+ def with_preceding(self, preceding: Optional[TargetSpec]) -> TargetSpec:
+ """Create an equivalent instance but with different preceding field.
+
+ This is useful in initialization. Create semi-initialized targets
+ starting from the final one, then add references in reversed order.
+
+ :param preceding: New value for the preceding field, may be None.
+ :type preceding: Optional[TargetSpec]
+ :returns: Instance with the new value applied.
+ :rtype: TargetSpec
+ """
+ return TargetSpec(
+ loss_ratio=self.loss_ratio,
+ exceed_ratio=self.exceed_ratio,
+ discrete_width=self.discrete_width,
+ trial_duration=self.trial_duration,
+ duration_sum=self.duration_sum,
+ expansion_coefficient=self.expansion_coefficient,
+ fail_fast=self.fail_fast,
+ preceding=preceding,
+ )
diff --git a/resources/libraries/python/MLRsearch/target_stat.py b/resources/libraries/python/MLRsearch/target_stat.py
new file mode 100644
index 0000000000..18e1ff4161
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_stat.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadStat class."""
+
+from dataclasses import dataclass, field
+from typing import Dict, Tuple
+
+from .target_spec import TargetSpec
+from .discrete_result import DiscreteResult
+
+
+@dataclass
+class TargetStat:
+ """Class for aggregating trial results for a single load and target.
+
+ Reference to the target is included for convenience.
+
+ The main usage is for load classification, done in estimates method.
+ If both estimates agree, the load is classified as either a lower bound
+ or an upper bound. For additional logic for dealing with loss inversion
+ see MeasurementDatabase.
+
+ Also, data needed for conditional throughput is gathered here,
+ exposed only as a pessimistic loss ratio
+ (as the load value is not stored here).
+ """
+
+ target: TargetSpec = field(repr=False)
+ """The target for which this instance is aggregating results."""
+ good_long: float = 0.0
+ """Sum of durations of long enough trials satisfying target loss ratio."""
+ bad_long: float = 0.0
+ """Sum of durations of long trials not satisfying target loss ratio."""
+ good_short: float = 0.0
+ """Sum of durations of shorter trials satisfying target loss ratio."""
+ bad_short: float = 0.0
+ """Sum of durations of shorter trials not satisfying target loss ratio."""
+ long_losses: Dict[float, float] = field(repr=False, default_factory=dict)
+ """If a loss ratio value occured in a long trial, map it to duration sum."""
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"gl={self.good_long},bl={self.bad_long}"
+ f",gs={self.good_short},bs={self.bad_short}"
+ )
+
+ def add(self, result: DiscreteResult) -> None:
+ """Take into account one more trial result.
+
+ Use intended duration for deciding between long and short trials,
+ but use offered duration (with overheads) to increase the duration sums.
+
+ :param result: The trial result to add to the stats.
+ :type result: DiscreteResult
+ """
+ dwo = result.duration_with_overheads
+ rlr = result.loss_ratio
+ if result.intended_duration >= self.target.trial_duration:
+ if rlr not in self.long_losses:
+ self.long_losses[rlr] = 0.0
+ self.long_losses = dict(sorted(self.long_losses.items()))
+ self.long_losses[rlr] += dwo
+ if rlr > self.target.loss_ratio:
+ self.bad_long += dwo
+ else:
+ self.good_long += dwo
+ else:
+ if rlr > self.target.loss_ratio:
+ self.bad_short += dwo
+ else:
+ self.good_short += dwo
+
+ def estimates(self) -> Tuple[bool, bool]:
+ """Return whether this load can become a lower bound.
+
+ This returns two estimates, hence the weird nonverb name of this method.
+ One estimate assumes all following results will satisfy the loss ratio,
+ the other assumes all results will not satisfy the loss ratio.
+ The sum of durations of the assumed results
+ is the minimum to reach target duration sum, or zero if already reached.
+
+ If both estimates are the same, it means the load is a definite bound.
+ This may happen even when the sum of durations of already
+ measured trials is less than the target, when the missing measurements
+ cannot change the classification.
+
+ :returns: Tuple of two estimates whether the load can be a lower bound.
+ (True, False) means more trial results are needed.
+ :rtype: Tuple[bool, bool]
+ """
+ coeff = self.target.exceed_ratio
+ decrease = self.good_short * coeff / (1.0 - coeff)
+ short_excess = self.bad_short - decrease
+ effective_excess = self.bad_long + max(0.0, short_excess)
+ effective_dursum = max(
+ self.good_long + effective_excess,
+ self.target.duration_sum,
+ )
+ limit_dursum = effective_dursum * self.target.exceed_ratio
+ optimistic = effective_excess <= limit_dursum
+ pessimistic = (effective_dursum - self.good_long) <= limit_dursum
+ return optimistic, pessimistic
+
+ @property
+ def pessimistic_loss_ratio(self) -> float:
+ """Return the loss ratio for conditional throughput computation.
+
+ It adds missing dursum as full-loss trials to long_losses
+ and returns a quantile corresponding to exceed ratio.
+ In case of tie (as in median for even number of samples),
+ this returns the lower value (as being equal to goal exceed ratio
+ is allowed).
+
+ For loads classified as a lower bound, the return value
+ ends up being no larger than the target loss ratio.
+ This is because the excess short bad trials would only come
+ after the quantile in question (as would full-loss missing trials).
+ For other loads, anything can happen, but conditional throughput
+ should not be computed for those anyway.
+ Those two facts allow the logic here to be simpler than in estimates().
+
+ :returns: Effective loss ratio based on long trial results.
+ :rtype: float
+ """
+ all_long = max(self.target.duration_sum, self.good_long + self.bad_long)
+ remaining = all_long * (1.0 - self.target.exceed_ratio)
+ ret = None
+ for ratio, dursum in self.long_losses.items():
+ if ret is None or remaining > 0.0:
+ ret = ratio
+ remaining -= dursum
+ else:
+ break
+ else:
+ if remaining > 0.0:
+ ret = 1.0
+ return ret
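A worked numeric example of estimates() (illustrative, assuming exceed_ratio 0.5, duration_sum 21.0 and no short trials, which removes the short-trial correction):

    def estimates_sketch(good_long, bad_long, duration_sum=21.0, exceed_ratio=0.5):
        """Simplified stand-in for TargetStat.estimates without short trials."""
        effective_excess = bad_long
        effective_dursum = max(good_long + effective_excess, duration_sum)
        limit_dursum = effective_dursum * exceed_ratio
        optimistic = effective_excess <= limit_dursum
        pessimistic = (effective_dursum - good_long) <= limit_dursum
        return optimistic, pessimistic

    print(estimates_sketch(good_long=11.0, bad_long=2.0))  # (True, True): a lower bound already
    print(estimates_sketch(good_long=5.0, bad_long=2.0))   # (True, False): more trials needed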
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/__init__.py b/resources/libraries/python/MLRsearch/trial_measurement/__init__.py
new file mode 100644
index 0000000000..034ae41819
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "trial_measurement".
+"""
+
+from .abstract_measurer import AbstractMeasurer
+from .measurement_result import MeasurementResult
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py b/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py
new file mode 100644
index 0000000000..6fab79c8dc
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining AbstractMeasurer class."""
+
+from abc import ABCMeta, abstractmethod
+
+from .measurement_result import MeasurementResult as Result
+
+
+class AbstractMeasurer(metaclass=ABCMeta):
+ """Abstract class defining common API for trial measurement providers.
+
+ The original use of this class was in the realm of
+ RFC 2544 Throughput search, which explains the terminology
+ related to networks, frames, packets, offered load, forwarding rate
+ and similar.
+
+ But the same logic can be used in higher level networking scenarios
+ (e.g. https requests) or even outside networking (database transactions).
+
+ The current code uses language from packet forwarding,
+ docstrings sometimes mention transactions as an alternative view.
+ """
+
+ @abstractmethod
+ def measure(self, intended_duration: float, intended_load: float) -> Result:
+ """Perform trial measurement and return the result.
+
+ It is assumed the measurer has already been configured with anything else
+ needed to perform the measurement (e.g. traffic profile
+ or transaction limit).
+
+ Duration and load are the only values expected to vary
+ during the search.
+
+ :param intended_duration: Intended trial duration [s].
+ :param intended_load: Intended rate of transactions (packets) [tps].
+ It is a per-port rate, e.g. uni-directional for SUTs
+ with two ports.
+ :type intended_duration: float
+ :type intended_load: float
+ :returns: Structure detailing the result of the measurement.
+ :rtype: measurement_result.MeasurementResult
+ """
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py b/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py
new file mode 100644
index 0000000000..9dc1ccf5f1
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MeasurementResult class."""
+
+from dataclasses import dataclass
+
+
+@dataclass
+class MeasurementResult:
+ """Structure defining the result of a single trial measurement.
+
+ There are a few primary (required) quantities. Various secondary (derived)
+ quantities are calculated and can be queried.
+
+ The constructor allows broader argument types;
+ the post-init function converts them to the stricter types.
+
+ Integer quantities (counts) are preferred, as float values
+ can suffer from rounding errors, and sometimes they are measured
+ at unknown (possibly very limited) precision and accuracy.
+
+ There are relations between the counts (e.g. offered count
+ should be equal to a sum of forwarding count and loss count).
+ This implementation does not perform consistency checks, but uses them
+ for computing quantities the caller left unspecified.
+
+ In some cases, the units of intended load are different from units
+ of loss count (e.g. load in transactions but loss in packets).
+ Quantities with relative_ prefix can be used to get load candidates
+ from forwarding results.
+
+ Sometimes, the measurement provider is unable to reach the intended load,
+ and it can react by spending longer than the intended duration
+ to reach its intended count. To signal irregular situations like this,
+ several optional fields can be given, and various secondary quantities
+ are populated, so the measurement consumer can query the quantity
+ it wants to rely on in these irregular situations.
+
+ The current implementation intentionally limits the secondary quantities
+ to the few that proved useful in practice.
+ """
+
+ # Required primary quantities.
+ intended_duration: float
+ """Intended trial measurement duration [s]."""
+ intended_load: float
+ """Intended load [tps]. If bidirectional (or multi-port) traffic is used,
+ most users will put the unidirectional (single-port) value here,
+ as bandwidth and pps limits are usually per-port."""
+ # Two of the next three primary quantities are required.
+ offered_count: int = None
+ """Number of packets actually transmitted (transactions attempted).
+ This should be the aggregate (bidirectional, multi-port) value,
+ so that asymmetric traffic profiles are supported."""
+ loss_count: int = None
+ """Number of packets transmitted but not received (transactions failed)."""
+ forwarding_count: int = None
+ """Number of packets successfully forwarded (transactions succeeded)."""
+ # Optional primary quantities.
+ offered_duration: float = None
+ """Estimate of the time [s] the trial was actually transmitting traffic."""
+ duration_with_overheads: float = None
+ """Estimate of the time [s] it took to get the trial result
+ since the measurement started."""
+ intended_count: int = None
+ """Expected number of packets to transmit. If not known,
+ the value of offered_count is used."""
+
+ def __post_init__(self) -> None:
+ """Convert types, compute missing values.
+
+ Current caveats:
+ A failing assumption looks like a conversion error.
+ Negative counts are allowed, which can lead to errors later.
+ """
+ self.intended_duration = float(self.intended_duration)
+ if self.offered_duration is None:
+ self.offered_duration = self.intended_duration
+ else:
+ self.offered_duration = float(self.offered_duration)
+ if self.duration_with_overheads is None:
+ self.duration_with_overheads = self.offered_duration
+ else:
+ self.duration_with_overheads = float(self.duration_with_overheads)
+ self.intended_load = float(self.intended_load)
+ if self.forwarding_count is None:
+ self.forwarding_count = int(self.offered_count) - int(
+ self.loss_count
+ )
+ else:
+ self.forwarding_count = int(self.forwarding_count)
+ if self.offered_count is None:
+ self.offered_count = self.forwarding_count + int(self.loss_count)
+ else:
+ self.offered_count = int(self.offered_count)
+ if self.loss_count is None:
+ self.loss_count = self.offered_count - self.forwarding_count
+ else:
+ self.loss_count = int(self.loss_count)
+ if self.intended_count is None:
+ self.intended_count = self.offered_count
+ else:
+ self.intended_count = int(self.intended_count)
+ # TODO: Handle (somehow) situations where offered > intended?
+
+ @property
+ def unsent_count(self) -> int:
+ """How many packets were not transmitted (transactions not started).
+
+ :returns: Intended count minus offered count.
+ :rtype: int
+ """
+ return self.intended_count - self.offered_count
+
+ @property
+ def loss_ratio(self) -> float:
+ """Bad count divided by overall count, zero if the latter is zero.
+
+ The bad count includes not only loss count, but also unsent count.
+ If unsent count is negative, its absolute value is used.
+ The overall count is intended count or offered count,
+ whichever is bigger.
+
+ Together, the resulting formula tends to increase loss ratio
+ (but not above 100%) in irregular situations,
+ thus guiding search algorithms towards lower loads
+ where there should be fewer irregularities.
+ The zero default is there to prevent search algorithms from
+ getting stuck on an intended load that is too low.
+
+ :returns: Bad count divided by overall count.
+ :rtype: float
+ """
+ overall = max(self.offered_count, self.intended_count)
+ bad = abs(self.loss_count) + abs(self.unsent_count)
+ return bad / overall if overall else 0.0
+
+ @property
+ def relative_forwarding_rate(self) -> float:
+ """Forwarding rate in load units as if duration and load was intended.
+
+ The result is based purely on intended load and loss ratio.
+ While the resulting value may be far from what really happened,
+ it has nice behavior with respect to common assumptions
+ of search algorithms.
+
+ :returns: Forwarding rate in load units estimated from loss ratio.
+ :rtype: float
+ """
+ return self.intended_load * (1.0 - self.loss_ratio)
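A quick hedged illustration (values made up) of how the derived quantities above interact for an irregular trial where some packets were never sent:

    result = MeasurementResult(
        intended_duration=1.0,
        intended_load=100.0,
        intended_count=100,
        offered_count=90,  # 10 packets were never transmitted
        loss_count=5,
    )
    # __post_init__ fills forwarding_count = 90 - 5 = 85.
    print(result.unsent_count)              # 10
    print(result.loss_ratio)                # 0.15, unsent packets count as bad
    print(result.relative_forwarding_rate)  # 85.0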
diff --git a/resources/libraries/python/MLRsearch/trimmed_stat.py b/resources/libraries/python/MLRsearch/trimmed_stat.py
new file mode 100644
index 0000000000..74918d78b0
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trimmed_stat.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TrimmedStat class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .load_stats import LoadStats
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TrimmedStat(LoadStats):
+ """Load stats trimmed to a single target.
+
+ Useful mainly for reporting the overall results.
+ """
+
+ def __post_init__(self) -> None:
+ """Initialize load value and check there is one target to track."""
+ super().__post_init__()
+ if len(self.target_to_stat) != 1:
+ raise ValueError(f"No single target: {self.target_to_stat!r}")
+
+ @staticmethod
+ def for_target(stats: LoadStats, target: TargetSpec) -> TrimmedStat:
+ """Return new instance with only one target in the mapping.
+
+ :param stats: The load stats instance to trim.
+ :param target: The one target which should remain in the mapping.
+ :type stats: LoadStats
+ :type target: TargetSpec
+ :returns: Newly created instance.
+ :rtype: TrimmedStat
+ """
+ return TrimmedStat(
+ rounding=stats.rounding,
+ int_load=stats.int_load,
+ target_to_stat={target: stats.target_to_stat[target]},
+ )
diff --git a/resources/libraries/python/Memif.py b/resources/libraries/python/Memif.py
index d38f5000a7..32096d0ca5 100644
--- a/resources/libraries/python/Memif.py
+++ b/resources/libraries/python/Memif.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -74,7 +74,7 @@ class Memif:
includes only retval.
:rtype: dict
"""
- cmd = u"memif_socket_filename_add_del"
+ cmd = u"memif_socket_filename_add_del_v2"
err_msg = f"Failed to create memif socket on host {node[u'host']}"
args = dict(
is_add=is_add,
@@ -85,7 +85,7 @@ class Memif:
return papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def _memif_create(node, mid, sid, rxq=1, txq=1, role=1):
+ def _memif_create(node, mid, sid, rxq=1, txq=1, role=1, use_dma=False):
"""Create Memif interface on the given node, return its sw_if_index.
:param node: Given node to create Memif interface on.
@@ -94,16 +94,18 @@ class Memif:
:param rxq: Number of RX queues; 0 means do not set.
:param txq: Number of TX queues; 0 means do not set.
:param role: Memif interface role [master=0|slave=1]. Default is slave.
+ :param use_dma: Use DMA acceleration. Requires hardware support.
:type node: dict
:type mid: str
:type sid: str
:type rxq: int
:type txq: int
:type role: int
+ :type use_dma: bool
:returns: sw_if_index
:rtype: int
"""
- cmd = u"memif_create"
+ cmd = u"memif_create_v2"
err_msg = f"Failed to create memif interface on host {node[u'host']}"
args = dict(
role=role,
@@ -111,7 +113,8 @@ class Memif:
tx_queues=int(txq),
socket_id=int(sid),
id=int(mid),
- secret=u""
+ secret=u"",
+ use_dma=use_dma,
)
with PapiSocketExecutor(node) as papi_exec:
@@ -119,7 +122,8 @@ class Memif:
@staticmethod
def create_memif_interface(
- node, filename, mid, sid, rxq=1, txq=1, role=u"SLAVE"):
+ node, filename, mid, sid, rxq=1, txq=1, role=u"SLAVE", use_dma=False
+ ):
"""Create Memif interface on the given node.
:param node: Given node to create Memif interface on.
@@ -129,6 +133,7 @@ class Memif:
:param rxq: Number of RX queues; 0 means do not set.
:param txq: Number of TX queues; 0 means do not set.
:param role: Memif interface role [master=0|slave=1]. Default is master.
+ :param use_dma: Use DMA acceleration. Requires hardware support.
:type node: dict
:type filename: str
:type mid: str
@@ -136,6 +141,7 @@ class Memif:
:type rxq: int
:type txq: int
:type role: str
+ :type use_dma: bool
:returns: SW interface index.
:rtype: int
:raises ValueError: If command 'create memif' fails.
@@ -147,7 +153,7 @@ class Memif:
# Create memif
sw_if_index = Memif._memif_create(
- node, mid, sid, rxq=rxq, txq=txq, role=role
+ node, mid, sid, rxq=rxq, txq=txq, role=role, use_dma=use_dma
)
# Update Topology
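A hedged usage sketch of the extended keyword (node is a CSIT topology dict; the socket path and ids are illustrative):

    sw_if_index = Memif.create_memif_interface(
        node, filename="/tmp/memif1.sock", mid="1", sid="1",
        rxq=1, txq=1, role="SLAVE", use_dma=True,
    )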
diff --git a/resources/libraries/python/NATUtil.py b/resources/libraries/python/NATUtil.py
index 5d3d131c80..e5f530ab46 100644
--- a/resources/libraries/python/NATUtil.py
+++ b/resources/libraries/python/NATUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -182,10 +182,9 @@ class NATUtil:
"""Delete and re-add the NAT range setting."""
with PapiSocketExecutor(node) as papi_exec:
args_in[u"is_add"] = False
- papi_exec.add(cmd, **args_in)
+ papi_exec.add(cmd, **args_in).get_reply(err_msg)
args_in[u"is_add"] = True
- papi_exec.add(cmd, **args_in)
- papi_exec.get_replies(err_msg)
+ papi_exec.add(cmd, **args_in).get_reply(err_msg)
return resetter
@@ -427,10 +426,9 @@ class NATUtil:
"""Delete and re-add the deterministic NAT mapping."""
with PapiSocketExecutor(node) as papi_exec:
args_in[u"is_add"] = False
- papi_exec.add(cmd, **args_in)
+ papi_exec.add(cmd, **args_in).get_reply(err_msg)
args_in[u"is_add"] = True
- papi_exec.add(cmd, **args_in)
- papi_exec.get_replies(err_msg)
+ papi_exec.add(cmd, **args_in).get_reply(err_msg)
return resetter
diff --git a/resources/libraries/python/NGINX/NGINXTools.py b/resources/libraries/python/NGINX/NGINXTools.py
index 9418484f15..941fe733e7 100644
--- a/resources/libraries/python/NGINX/NGINXTools.py
+++ b/resources/libraries/python/NGINX/NGINXTools.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Intel and/or its affiliates.
+# Copyright (c) 2022 Intel and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -74,21 +74,15 @@ class NGINXTools:
:type nginx_version: str
:raises RuntimeError: If command returns nonzero return code.
"""
- nginx_path = f"{pkg_dir}/nginx-{nginx_version}/sbin/nginx"
- cmd_options = NginxUtil.get_cmd_options(path=nginx_path)
- ret_code, _, stderr = exec_cmd(node, cmd_options, sudo=True)
- if nginx_version in stderr and ret_code == 0:
- logger.info(f"NGINX Version: {stderr}")
+ cmd = f"test -f {pkg_dir}/nginx-{nginx_version}/sbin/nginx"
+ ret_code, _, _ = exec_cmd(node, cmd, sudo=True)
+ if ret_code == 0:
return
command = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}" \
f"/entry/install_nginx.sh nginx-{nginx_version}"
message = u"Install the NGINX failed!"
exec_cmd_no_error(node, command, sudo=True, timeout=600,
message=message)
- _, stderr = exec_cmd_no_error(node, cmd_options, sudo=True,
- message=message)
-
- logger.info(f"NGINX Version: {stderr}")
@staticmethod
def install_vsap_nginx_on_dut(node, pkg_dir):
diff --git a/resources/libraries/python/NodePath.py b/resources/libraries/python/NodePath.py
index dd68506914..5b445bc593 100644
--- a/resources/libraries/python/NodePath.py
+++ b/resources/libraries/python/NodePath.py
@@ -243,8 +243,11 @@ class NodePath:
:raises RuntimeError: If unsupported combination of parameters.
"""
t_dict = dict()
+ t_dict[u"hosts"] = set()
if topo_has_dut:
duts = [key for key in nodes if u"DUT" in key]
+ for host in [nodes[dut][u"host"] for dut in duts]:
+ t_dict[u"hosts"].add(host)
t_dict[u"duts"] = duts
t_dict[u"duts_count"] = len(duts)
t_dict[u"int"] = u"pf"
@@ -259,6 +262,7 @@ class NodePath:
for dut in duts:
self.append_node(nodes[dut], filter_list=filter_list)
if topo_has_tg:
+ t_dict[u"hosts"].add(nodes[u"TG"][u"host"])
if topo_has_dut:
self.append_node(nodes[u"TG"])
else:
diff --git a/resources/libraries/python/PLRsearch/Integrator.py b/resources/libraries/python/PLRsearch/Integrator.py
index a7a59391ed..cc8f838fe6 100644
--- a/resources/libraries/python/PLRsearch/Integrator.py
+++ b/resources/libraries/python/PLRsearch/Integrator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -189,12 +189,15 @@ def estimate_nd(communication_pipe, scale_coeff=8.0, trace_enabled=False):
:raises numpy.linalg.LinAlgError: If the focus shape gets singular
(due to rounding errors). Try changing scale_coeff.
"""
- debug_list = list()
- trace_list = list()
+ debug_list = []
+ trace_list = []
# Block until input object appears.
- dimension, dilled_function, param_focus_tracker, max_samples = (
- communication_pipe.recv()
- )
+ (
+ dimension,
+ dilled_function,
+ param_focus_tracker,
+ max_samples,
+ ) = communication_pipe.recv()
debug_list.append(
f"Called with param_focus_tracker {param_focus_tracker!r}"
)
@@ -237,39 +240,47 @@ def estimate_nd(communication_pipe, scale_coeff=8.0, trace_enabled=False):
if max_samples and samples >= max_samples:
break
sample_point = generate_sample(
- param_focus_tracker.averages, param_focus_tracker.covariance_matrix,
- dimension, scale_coeff
+ param_focus_tracker.averages,
+ param_focus_tracker.covariance_matrix,
+ dimension,
+ scale_coeff,
)
- trace(u"sample_point", sample_point)
+ trace("sample_point", sample_point)
samples += 1
- trace(u"samples", samples)
+ trace("samples", samples)
value, log_weight = value_logweight_function(trace, *sample_point)
- trace(u"value", value)
- trace(u"log_weight", log_weight)
- trace(u"focus tracker before adding", param_focus_tracker)
+ trace("value", value)
+ trace("log_weight", log_weight)
+ trace("focus tracker before adding", param_focus_tracker)
# Update focus related statistics.
param_distance = param_focus_tracker.add_without_dominance_get_distance(
sample_point, log_weight
)
# The code above looked at weight (not importance).
# The code below looks at importance (not weight).
- log_rarity = param_distance / 2.0
- trace(u"log_rarity", log_rarity)
+ log_rarity = param_distance / 2.0 / scale_coeff
+ trace("log_rarity", log_rarity)
log_importance = log_weight + log_rarity
- trace(u"log_importance", log_importance)
+ trace("log_importance", log_importance)
value_tracker.add(value, log_importance)
# Update sampled statistics.
param_sampled_tracker.add_get_shift(sample_point, log_importance)
debug_list.append(f"integrator used {samples!s} samples")
debug_list.append(
- u" ".join([
- u"value_avg", str(value_tracker.average),
- u"param_sampled_avg", repr(param_sampled_tracker.averages),
- u"param_sampled_cov", repr(param_sampled_tracker.covariance_matrix),
- u"value_log_variance", str(value_tracker.log_variance),
- u"value_log_secondary_variance",
- str(value_tracker.secondary.log_variance)
- ])
+ " ".join(
+ [
+ "value_avg",
+ str(value_tracker.average),
+ "param_sampled_avg",
+ repr(param_sampled_tracker.averages),
+ "param_sampled_cov",
+ repr(param_sampled_tracker.covariance_matrix),
+ "value_log_variance",
+ str(value_tracker.log_variance),
+ "value_log_secondary_variance",
+ str(value_tracker.secondary.log_variance),
+ ]
+ )
)
communication_pipe.send(
(value_tracker, param_focus_tracker, debug_list, trace_list, samples)
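A hedged reading of the log_rarity change above: if generate_sample draws from the tracked focus distribution with its covariance inflated by scale_coeff, the proposal density at squared Mahalanobis distance d² (measured against the uninflated covariance) is proportional to exp(-d² / (2 * scale_coeff)), so the importance correction in log space would be:

    def log_importance(log_weight, param_distance, scale_coeff):
        """Combine sample weight with rarity under the inflated proposal.

        Sketch only, assuming param_distance is the squared Mahalanobis
        distance with respect to the uninflated covariance.
        """
        # Log of inverse proposal density, up to an additive constant.
        log_rarity = param_distance / 2.0 / scale_coeff
        return log_weight + log_rarity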
diff --git a/resources/libraries/python/PLRsearch/PLRsearch.py b/resources/libraries/python/PLRsearch/PLRsearch.py
index 0e78cc936d..326aa2e2d2 100644
--- a/resources/libraries/python/PLRsearch/PLRsearch.py
+++ b/resources/libraries/python/PLRsearch/PLRsearch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -53,8 +53,14 @@ class PLRsearch:
log_xerfcx_10 = math.log(xerfcx_limit - math.exp(10) * erfcx(math.exp(10)))
def __init__(
- self, measurer, trial_duration_per_trial, packet_loss_ratio_target,
- trial_number_offset=0, timeout=7200.0, trace_enabled=False):
+ self,
+ measurer,
+ trial_duration_per_trial,
+ packet_loss_ratio_target,
+ trial_number_offset=0,
+ timeout=7200.0,
+ trace_enabled=False,
+ ):
"""Store rate measurer and additional parameters.
The measurer must never report negative loss count.
@@ -176,7 +182,7 @@ class PLRsearch:
f"Started search with min_rate {min_rate!r}, "
f"max_rate {max_rate!r}"
)
- trial_result_list = list()
+ trial_result_list = []
trial_number = self.trial_number_offset
focus_trackers = (None, None)
transmit_rate = (min_rate + max_rate) / 2.0
@@ -186,34 +192,54 @@ class PLRsearch:
trial_number += 1
logging.info(f"Trial {trial_number!r}")
results = self.measure_and_compute(
- self.trial_duration_per_trial * trial_number, transmit_rate,
- trial_result_list, min_rate, max_rate, focus_trackers
+ self.trial_duration_per_trial * trial_number,
+ transmit_rate,
+ trial_result_list,
+ min_rate,
+ max_rate,
+ focus_trackers,
)
measurement, average, stdev, avg1, avg2, focus_trackers = results
+ # Workaround for unsent packets and other anomalies.
+ measurement.plr_loss_count = min(
+ measurement.intended_count,
+ int(measurement.intended_count * measurement.loss_ratio + 0.9),
+ )
+ logging.debug(
+ f"loss ratio {measurement.plr_loss_count}"
+ f" / {measurement.intended_count}"
+ )
zeros += 1
# TODO: Ratio of fill rate to drain rate seems to have
# exponential impact. Make it configurable, or is 4:3 good enough?
- if measurement.loss_ratio >= self.packet_loss_ratio_target:
+ if measurement.plr_loss_count >= (
+ measurement.intended_count * self.packet_loss_ratio_target
+ ):
for _ in range(4 * zeros):
- lossy_loads.append(measurement.target_tr)
- if measurement.loss_count > 0:
+ lossy_loads.append(measurement.intended_load)
+ lossy_loads.sort()
zeros = 0
- lossy_loads.sort()
+ logging.debug("High enough loss, lossy loads added.")
+ else:
+ logging.debug(
+ f"Not a high loss, zero counter bumped to {zeros}."
+ )
if stop_time <= time.time():
return average, stdev
trial_result_list.append(measurement)
if (trial_number - self.trial_number_offset) <= 1:
next_load = max_rate
elif (trial_number - self.trial_number_offset) <= 3:
- next_load = (measurement.relative_receive_rate / (
- 1.0 - self.packet_loss_ratio_target))
+ next_load = measurement.relative_forwarding_rate / (
+ 1.0 - self.packet_loss_ratio_target
+ )
else:
next_load = (avg1 + avg2) / 2.0
if zeros > 0:
if lossy_loads[0] > next_load:
diminisher = math.pow(2.0, 1 - zeros)
next_load = lossy_loads[0] + diminisher * next_load
- next_load /= (1.0 + diminisher)
+ next_load /= 1.0 + diminisher
# On zero measurement, we need to drain obsoleted low losses
# even if we did not use them to increase next_load,
# in order to get to usable loses at higher loads.
@@ -263,22 +289,22 @@ class PLRsearch:
# TODO: chi is from https://en.wikipedia.org/wiki/Nondimensionalization
chi = (load - mrr) / spread
chi0 = -mrr / spread
- trace(u"stretch: load", load)
- trace(u"mrr", mrr)
- trace(u"spread", spread)
- trace(u"chi", chi)
- trace(u"chi0", chi0)
+ trace("stretch: load", load)
+ trace("mrr", mrr)
+ trace("spread", spread)
+ trace("chi", chi)
+ trace("chi0", chi0)
if chi > 0:
log_lps = math.log(
load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread
)
- trace(u"big loss direct log_lps", log_lps)
+ trace("big loss direct log_lps", log_lps)
else:
two_positive = log_plus(chi, 2 * chi0 - log_2)
two_negative = log_plus(chi0, 2 * chi - log_2)
if two_positive <= two_negative:
log_lps = log_minus(chi, chi0) + log_spread
- trace(u"small loss crude log_lps", log_lps)
+ trace("small loss crude log_lps", log_lps)
return log_lps
two = log_minus(two_positive, two_negative)
three_positive = log_plus(two_positive, 3 * chi - log_3)
@@ -286,11 +312,11 @@ class PLRsearch:
three = log_minus(three_positive, three_negative)
if two == three:
log_lps = two + log_spread
- trace(u"small loss approx log_lps", log_lps)
+ trace("small loss approx log_lps", log_lps)
else:
log_lps = math.log(log_plus(0, chi) - log_plus(0, chi0))
log_lps += log_spread
- trace(u"small loss direct log_lps", log_lps)
+ trace("small loss direct log_lps", log_lps)
return log_lps
@staticmethod
@@ -329,26 +355,26 @@ class PLRsearch:
# TODO: The stretch sign is just to have less minuses. Worth changing?
chi = (mrr - load) / spread
chi0 = mrr / spread
- trace(u"Erf: load", load)
- trace(u"mrr", mrr)
- trace(u"spread", spread)
- trace(u"chi", chi)
- trace(u"chi0", chi0)
+ trace("Erf: load", load)
+ trace("mrr", mrr)
+ trace("spread", spread)
+ trace("chi", chi)
+ trace("chi0", chi0)
if chi >= -1.0:
- trace(u"positive, b roughly bigger than m", None)
+ trace("positive, b roughly bigger than m", None)
if chi > math.exp(10):
first = PLRsearch.log_xerfcx_10 + 2 * (math.log(chi) - 10)
- trace(u"approximated first", first)
+ trace("approximated first", first)
else:
first = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi))
- trace(u"exact first", first)
+ trace("exact first", first)
first -= chi * chi
second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
second -= chi0 * chi0
intermediate = log_minus(first, second)
- trace(u"first", first)
+ trace("first", first)
else:
- trace(u"negative, b roughly smaller than m", None)
+ trace("negative, b roughly smaller than m", None)
exp_first = PLRsearch.xerfcx_limit + chi * erfcx(-chi)
exp_first *= math.exp(-chi * chi)
exp_first -= 2 * chi
@@ -359,17 +385,17 @@ class PLRsearch:
second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
second -= chi0 * chi0
intermediate = math.log(exp_first - math.exp(second))
- trace(u"exp_first", exp_first)
- trace(u"second", second)
- trace(u"intermediate", intermediate)
+ trace("exp_first", exp_first)
+ trace("second", second)
+ trace("intermediate", intermediate)
result = intermediate + math.log(spread) - math.log(erfc(-chi0))
- trace(u"result", result)
+ trace("result", result)
return result
@staticmethod
def find_critical_rate(
- trace, lfit_func, min_rate, max_rate, loss_ratio_target,
- mrr, spread):
+ trace, lfit_func, min_rate, max_rate, loss_ratio_target, mrr, spread
+ ):
"""Given ratio target and parameters, return the achieving offered load.
This is basically an inverse function to lfit_func
@@ -411,12 +437,12 @@ class PLRsearch:
loss_rate = math.exp(lfit_func(trace, rate, mrr, spread))
loss_ratio = loss_rate / rate
if loss_ratio > loss_ratio_target:
- trace(u"halving down", rate)
+ trace("halving down", rate)
rate_hi = rate
elif loss_ratio < loss_ratio_target:
- trace(u"halving up", rate)
+ trace("halving up", rate)
rate_lo = rate
- trace(u"found", rate)
+ trace("found", rate)
return rate
@staticmethod
@@ -441,7 +467,7 @@ class PLRsearch:
Instead, the expected average loss is scaled according to the number
of packets actually sent.
- TODO: Copy ReceiveRateMeasurement from MLRsearch.
+ TODO: Copy MeasurementResult from MLRsearch.
:param trace: A multiprocessing-friendly logging function (closure).
:param lfit_func: Fitting function, typically lfit_spread or lfit_erf.
@@ -450,40 +476,47 @@ class PLRsearch:
:param spread: The spread parameter for the fitting function.
:type trace: function (str, object) -> None
:type lfit_func: Function from 3 floats to float.
- :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
+ :type trial_result_list: list of MLRsearch.MeasurementResult
:type mrr: float
:type spread: float
:returns: Logarithm of result weight for given function and parameters.
:rtype: float
"""
log_likelihood = 0.0
- trace(u"log_weight for mrr", mrr)
- trace(u"spread", spread)
+ trace("log_weight for mrr", mrr)
+ trace("spread", spread)
for result in trial_result_list:
- trace(u"for tr", result.target_tr)
- trace(u"lc", result.loss_count)
- trace(u"d", result.duration)
- # _rel_ values use units of target_tr (transactions per second).
+ trace("for tr", result.intended_load)
+ trace("plc", result.plr_loss_count)
+ trace("d", result.intended_duration)
+ # _rel_ values use units of intended_load (transactions per second).
log_avg_rel_loss_per_second = lfit_func(
- trace, result.target_tr, mrr, spread
+ trace, result.intended_load, mrr, spread
)
# _abs_ values use units of loss count (maybe packets).
# There can be multiple packets per transaction.
log_avg_abs_loss_per_trial = log_avg_rel_loss_per_second + math.log(
- result.transmit_count / result.target_tr
+ result.offered_count / result.intended_load
)
# Geometric probability computation for logarithms.
log_trial_likelihood = log_plus(0.0, -log_avg_abs_loss_per_trial)
- log_trial_likelihood *= -result.loss_count
+ log_trial_likelihood *= -result.plr_loss_count
log_trial_likelihood -= log_plus(0.0, +log_avg_abs_loss_per_trial)
log_likelihood += log_trial_likelihood
- trace(u"avg_loss_per_trial", math.exp(log_avg_abs_loss_per_trial))
- trace(u"log_trial_likelihood", log_trial_likelihood)
+ trace("avg_loss_per_trial", math.exp(log_avg_abs_loss_per_trial))
+ trace("log_trial_likelihood", log_trial_likelihood)
return log_likelihood
def measure_and_compute(
- self, trial_duration, transmit_rate, trial_result_list,
- min_rate, max_rate, focus_trackers=(None, None), max_samples=None):
+ self,
+ trial_duration,
+ transmit_rate,
+ trial_result_list,
+ min_rate,
+ max_rate,
+ focus_trackers=(None, None),
+ max_samples=None,
+ ):
"""Perform both measurement and computation at once.
High level steps: Prepare and launch computation worker processes,
@@ -524,7 +557,7 @@ class PLRsearch:
:param max_samples: Limit for integrator samples, for debugging.
:type trial_duration: float
:type transmit_rate: float
- :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
+ :type trial_result_list: list of MLRsearch.MeasurementResult
:type min_rate: float
:type max_rate: float
:type focus_trackers: 2-tuple of None or stat_trackers.VectorStatTracker
@@ -572,7 +605,7 @@ class PLRsearch:
# See https://stackoverflow.com/questions/15137292/large-objects-and-multiprocessing-pipes-and-send
worker = multiprocessing.Process(
target=Integrator.try_estimate_nd,
- args=(worker_pipe_end, 10.0, self.trace_enabled)
+ args=(worker_pipe_end, 5.0, self.trace_enabled),
)
worker.daemon = True
worker.start()
@@ -616,8 +649,13 @@ class PLRsearch:
)
value = math.log(
self.find_critical_rate(
- trace, fitting_function, min_rate, max_rate,
- self.packet_loss_ratio_target, mrr, spread
+ trace,
+ fitting_function,
+ min_rate,
+ max_rate,
+ self.packet_loss_ratio_target,
+ mrr,
+ spread,
)
)
return value, logweight
@@ -664,14 +702,18 @@ class PLRsearch:
raise RuntimeError(f"Worker {name} did not finish!")
result_or_traceback = pipe.recv()
try:
- value_tracker, focus_tracker, debug_list, trace_list, sampls = (
- result_or_traceback
- )
- except ValueError:
+ (
+ value_tracker,
+ focus_tracker,
+ debug_list,
+ trace_list,
+ sampls,
+ ) = result_or_traceback
+ except ValueError as exc:
raise RuntimeError(
f"Worker {name} failed with the following traceback:\n"
f"{result_or_traceback}"
- )
+ ) from exc
logging.info(f"Logs from worker {name!r}:")
for message in debug_list:
logging.info(message)
@@ -682,8 +724,8 @@ class PLRsearch:
)
return _PartialResult(value_tracker, focus_tracker, sampls)
- stretch_result = stop_computing(u"stretch", stretch_pipe)
- erf_result = stop_computing(u"erf", erf_pipe)
+ stretch_result = stop_computing("stretch", stretch_pipe)
+ erf_result = stop_computing("erf", erf_pipe)
result = PLRsearch._get_result(measurement, stretch_result, erf_result)
logging.info(
f"measure_and_compute finished with trial result "
@@ -705,7 +747,7 @@ class PLRsearch:
:param measurement: The trial measurement obtained during computation.
:param stretch_result: Computation output for stretch fitting function.
:param erf_result: Computation output for erf fitting function.
- :type measurement: ReceiveRateMeasurement
+ :type measurement: MeasurementResult
:type stretch_result: _PartialResult
:type erf_result: _PartialResult
:returns: Combined results.
@@ -730,7 +772,7 @@ class PLRsearch:
# Named tuples, for multiple local variables to be passed as return value.
_PartialResult = namedtuple(
- u"_PartialResult", u"value_tracker focus_tracker samples"
+ "_PartialResult", "value_tracker focus_tracker samples"
)
"""Two stat trackers and sample counter.
@@ -743,8 +785,8 @@ _PartialResult = namedtuple(
"""
_ComputeResult = namedtuple(
- u"_ComputeResult",
- u"measurement avg stdev stretch_exp_avg erf_exp_avg trackers"
+ "_ComputeResult",
+ "measurement avg stdev stretch_exp_avg erf_exp_avg trackers",
)
"""Measurement, 4 computation result values, pair of trackers.
@@ -754,7 +796,7 @@ _ComputeResult = namedtuple(
:param stretch_exp_avg: Stretch fitting function estimate average exponentiated.
:param erf_exp_avg: Erf fitting function estimate average, exponentiated.
:param trackers: Pair of focus trackers to start next iteration with.
-:type measurement: ReceiveRateMeasurement
+:type measurement: MeasurementResult
:type avg: float
:type stdev: float
:type stretch_exp_avg: float
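For readers of the "Geometric probability computation" hunk above, a hedged standalone form of the per-trial log-likelihood (assuming a = exp(log_a) is the expected absolute loss per trial and k is the observed plr_loss_count; this sketch skips the overflow-safe log_plus handling the real code uses):

    import math

    def log_trial_likelihood(k, log_a):
        """log P(k) for a geometric distribution with mean a = exp(log_a).

        P(k) = (a / (a + 1)) ** k / (a + 1), i.e. success probability 1 / (a + 1).
        """
        log_ratio = -math.log1p(math.exp(-log_a))  # log(a / (a + 1))
        return k * log_ratio - math.log1p(math.exp(log_a))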
diff --git a/resources/libraries/python/PLRsearch/log_plus.py b/resources/libraries/python/PLRsearch/log_plus.py
index 8ede2909c6..aabefdb5be 100644
--- a/resources/libraries/python/PLRsearch/log_plus.py
+++ b/resources/libraries/python/PLRsearch/log_plus.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -76,14 +76,14 @@ def log_minus(first, second):
:raises RuntimeError: If the difference would be non-positive.
"""
if first is None:
- raise RuntimeError(u"log_minus: does not support None first")
+ raise RuntimeError("log_minus: does not support None first")
if second is None:
return first
if second >= first:
- raise RuntimeError(u"log_minus: first has to be bigger than second")
+ raise RuntimeError("log_minus: first has to be bigger than second")
factor = -math.expm1(second - first)
if factor <= 0.0:
- msg = u"log_minus: non-positive number to log"
+ msg = "log_minus: non-positive number to log"
else:
return first + math.log(factor)
raise RuntimeError(msg)
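For reference, a minimal sketch of the numerically stable identities these helpers implement (the real module additionally treats a None operand as the logarithm of zero):

    import math

    def log_plus(first, second):
        """Return log(exp(first) + exp(second)) without overflow."""
        big, small = (first, second) if first >= second else (second, first)
        return big + math.log1p(math.exp(small - big))

    def log_minus(first, second):
        """Return log(exp(first) - exp(second)); requires first > second."""
        return first + math.log(-math.expm1(second - first))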
diff --git a/resources/libraries/python/PLRsearch/stat_trackers.py b/resources/libraries/python/PLRsearch/stat_trackers.py
index e0b21dc3a9..e598fd840e 100644
--- a/resources/libraries/python/PLRsearch/stat_trackers.py
+++ b/resources/libraries/python/PLRsearch/stat_trackers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -64,8 +64,10 @@ class ScalarStatTracker:
:returns: Expression constructing an equivalent instance.
:rtype: str
"""
- return f"ScalarStatTracker(log_sum_weight={self.log_sum_weight!r}," \
+ return (
+ f"ScalarStatTracker(log_sum_weight={self.log_sum_weight!r},"
f"average={self.average!r},log_variance={self.log_variance!r})"
+ )
def copy(self):
"""Return new ScalarStatTracker instance with the same state as self.
@@ -110,7 +112,8 @@ class ScalarStatTracker:
if absolute_shift > 0.0:
log_square_shift = 2 * math.log(absolute_shift)
log_variance = log_plus(
- log_variance, log_square_shift + log_sample_ratio)
+ log_variance, log_square_shift + log_sample_ratio
+ )
if log_variance is not None:
log_variance += old_log_sum_weight - new_log_sum_weight
self.log_sum_weight = new_log_sum_weight
@@ -133,10 +136,17 @@ class ScalarDualStatTracker(ScalarStatTracker):
One typical use is for Monte Carlo integrator to decide whether
the partial sums so far are reliable enough.
"""
+
def __init__(
- self, log_sum_weight=None, average=0.0, log_variance=None,
- log_sum_secondary_weight=None, secondary_average=0.0,
- log_secondary_variance=None, max_log_weight=None):
+ self,
+ log_sum_weight=None,
+ average=0.0,
+ log_variance=None,
+ log_sum_secondary_weight=None,
+ secondary_average=0.0,
+ log_secondary_variance=None,
+ max_log_weight=None,
+ ):
"""Initialize new tracker instance, empty by default.
:param log_sum_weight: Natural logarithm of sum of weights
@@ -177,12 +187,14 @@ class ScalarDualStatTracker(ScalarStatTracker):
:rtype: str
"""
sec = self.secondary
- return f"ScalarDualStatTracker(log_sum_weight={self.log_sum_weight!r},"\
- f"average={self.average!r},log_variance={self.log_variance!r}," \
- f"log_sum_secondary_weight={sec.log_sum_weight!r}," \
- f"secondary_average={sec.average!r}," \
- f"log_secondary_variance={sec.log_variance!r}," \
+ return (
+ f"ScalarDualStatTracker(log_sum_weight={self.log_sum_weight!r},"
+ f"average={self.average!r},log_variance={self.log_variance!r},"
+ f"log_sum_secondary_weight={sec.log_sum_weight!r},"
+ f"secondary_average={sec.average!r},"
+ f"log_secondary_variance={sec.log_variance!r},"
f"max_log_weight={self.max_log_weight!r})"
+ )
def add(self, scalar_value, log_weight=0.0):
"""Return updated both stats after addition of another sample.
@@ -197,7 +209,7 @@ class ScalarDualStatTracker(ScalarStatTracker):
"""
# Using super() as copy() and add() are not expected to change
# signature, so this way diamond inheritance will be supported.
- primary = super(ScalarDualStatTracker, self)
+ primary = super()
if self.max_log_weight is None or log_weight >= self.max_log_weight:
self.max_log_weight = log_weight
self.secondary = primary.copy()
@@ -242,8 +254,12 @@ class VectorStatTracker:
"""
def __init__(
- self, dimension=2, log_sum_weight=None, averages=None,
- covariance_matrix=None):
+ self,
+ dimension=2,
+ log_sum_weight=None,
+ averages=None,
+ covariance_matrix=None,
+ ):
"""Initialize new tracker instance, two-dimensional empty by default.
If any of latter two arguments is None, it means
@@ -272,10 +288,12 @@ class VectorStatTracker:
:returns: Expression constructing an equivalent instance.
:rtype: str
"""
- return f"VectorStatTracker(dimension={self.dimension!r}," \
- f"log_sum_weight={self.log_sum_weight!r}," \
- f"averages={self.averages!r}," \
+ return (
+ f"VectorStatTracker(dimension={self.dimension!r},"
+ f"log_sum_weight={self.log_sum_weight!r},"
+ f"averages={self.averages!r},"
f"covariance_matrix={self.covariance_matrix!r})"
+ )
def copy(self):
"""Return new instance with the same state as self.
@@ -287,8 +305,10 @@ class VectorStatTracker:
:rtype: VectorStatTracker
"""
return VectorStatTracker(
- self.dimension, self.log_sum_weight, self.averages[:],
- copy.deepcopy(self.covariance_matrix)
+ self.dimension,
+ self.log_sum_weight,
+ self.averages[:],
+ copy.deepcopy(self.covariance_matrix),
)
def reset(self):
diff --git a/resources/libraries/python/PapiExecutor.py b/resources/libraries/python/PapiExecutor.py
index ecee70c9c5..a55638ab7c 100644
--- a/resources/libraries/python/PapiExecutor.py
+++ b/resources/libraries/python/PapiExecutor.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -12,19 +12,21 @@
# limitations under the License.
"""Python API executor library.
+
+TODO: Document sync and async handling properly.
"""
import copy
import glob
import json
+import logging
import shutil
import struct # vpp-papi can raise struct.error
import subprocess
import sys
import tempfile
import time
-from collections import UserDict
-
+from collections import deque, UserDict
from pprint import pformat
from robot.api import logger
@@ -34,15 +36,19 @@ from resources.libraries.python.LocalExecution import run
from resources.libraries.python.FilteredLogger import FilteredLogger
from resources.libraries.python.PapiHistory import PapiHistory
from resources.libraries.python.ssh import (
- SSH, SSHTimeout, exec_cmd_no_error, scp_node)
+ SSH,
+ SSHTimeout,
+ exec_cmd_no_error,
+ scp_node,
+)
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppApiCrc import VppApiCrcChecker
__all__ = [
- u"PapiExecutor",
- u"PapiSocketExecutor",
- u"Disconnector",
+ "PapiExecutor",
+ "PapiSocketExecutor",
+ "Disconnector",
]
@@ -65,47 +71,50 @@ def dictize(obj):
:param obj: Arbitrary object to dictize.
:type obj: object
:returns: Dictized object.
- :rtype: same as obj type or collections.OrderedDict
+ :rtype: same as obj type or collections.UserDict
"""
- if not hasattr(obj, u"_asdict"):
+ if not hasattr(obj, "_asdict"):
return obj
overriden = UserDict(obj._asdict())
old_get = overriden.__getitem__
- new_get = lambda self, key: dictize(old_get(self, key))
- overriden.__getitem__ = new_get
+ overriden.__getitem__ = lambda self, key: dictize(old_get(self, key))
return overriden
-class PapiSocketExecutor:
- """Methods for executing VPP Python API commands on forwarded socket.
+def dictize_and_check_retval(obj, err_msg):
+ """Make namedtuple-like object accessible as dict, check retval if exists.
- Previously, we used an implementation with single client instance
- and connection being handled by a resource manager.
- On "with" statement, the instance connected, and disconnected
- on exit from the "with" block.
- This was limiting (no nested with blocks) and mainly it was slow:
- 0.7 seconds per disconnect cycle on Skylake, more than 3 second on Taishan.
+ If the object contains a "retval" field, raise when the value is non-zero.
- The currently used implementation caches the connected client instances,
- providing speedup and making "with" blocks unnecessary.
- But with many call sites, "with" blocks are still the main usage pattern.
- Documentation still lists that as the intended pattern.
+ See dictize() for what it means to dictize.
- As a downside, clients need to be explicitly told to disconnect
- before VPP restart.
- There is some amount of retries and disconnects on disconnect
- (so unresponsive VPPs do not breach test much more than needed),
- but it is hard to verify all that works correctly.
- Especially, if Robot crashes, files and ssh processes may leak.
+ :param obj: Arbitrary object to dictize.
+ :param err_msg: The (additional) text for the raised exception.
+ :type obj: object
+ :type err_msg: str
+ :returns: Dictized object.
+ :rtype: same as obj type or collections.UserDict
+ :raises AssertionError: If retval field is present with nonzero value.
+ """
+ ret = dictize(obj)
+ # *_details messages do not contain retval.
+ retval = ret.get("retval", 0)
+ if retval != 0:
+ raise AssertionError(f"{err_msg}\nRetval nonzero in object {ret!r}")
+ return ret
- Delay for accepting socket connection is 10s.
- TODO: Decrease 10s to value that is long enough for creating connection
- and short enough to not affect performance.
+
+class PapiSocketExecutor:
+ """Methods for executing VPP Python API commands on forwarded socket.
The current implementation downloads and parses .api.json files only once
and caches client instances for reuse.
Cleanup metadata is added as additional attributes
- directly to client instances.
+ directly to the client instances.
+
+ The current implementation caches the connected client instances.
+ As a downside, clients need to be explicitly told to disconnect
+ before VPP restart.
The current implementation seems to run into read error occasionally.
Not sure if the error is in Python code on Robot side, ssh forwarding,
@@ -113,14 +122,16 @@ class PapiSocketExecutor:
seems to help, hoping repeated command execution does not lead to surprises.
The reconnection is logged at WARN level, so it is prominently shown
in log.html, so we can see how frequently it happens.
+ There are similar retries and cleanups in other places
+ (so unresponsive VPPs do not break the test much more than needed),
+ but it is hard to verify all of that works correctly.
+ Especially if Robot crashes, files and ssh processes may leak.
- TODO: Support handling of retval!=0 without try/except in caller.
-
- Note: Use only with "with" statement, e.g.:
+ TODO: Decrease current timeout value when creating connections
+ so a broken VPP does not prolong job duration too much
+ while a good VPP (almost) never fails to connect.
- cmd = 'show_version'
- with PapiSocketExecutor(node) as papi_exec:
- reply = papi_exec.add(cmd).get_reply(err_msg)
+ TODO: Support handling of retval!=0 without try/except in caller.
This class processes two classes of VPP PAPI methods:
1. Simple request / reply: method='request'.
@@ -130,27 +141,37 @@ class PapiSocketExecutor:
The recommended ways of use are (examples):
- 1. Simple request / reply
-
- a. One request with no arguments:
+ 1. Simple request / reply. Example with no arguments:
- cmd = 'show_version'
+ cmd = "show_version"
with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd).get_reply(err_msg)
- b. Three requests with arguments, the second and the third ones are the same
- but with different arguments.
+ 2. Dump functions:
+ cmd = "sw_interface_rx_placement_dump"
with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, sw_if_index=ifc["vpp_sw_index"])
+ details = papi_exec.get_details(err_msg)
+
+ 3. Multiple requests with one reply each.
+ In this example, there are three requests with arguments,
+ the second and the third use the same command but with different arguments.
+ This example also showcases method chaining.
+
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
replies = papi_exec.add(cmd1, **args1).add(cmd2, **args2).\
add(cmd2, **args3).get_replies(err_msg)
- 2. Dump functions
+ The "is_async=True" part in the last example enables "async handling mode",
+ which imposes limitations but gains speed and saves memory.
+ This is different from the async mode of VPP PAPI, as the default handling mode
+ also uses async PAPI connections.
- cmd = 'sw_interface_rx_placement_dump'
- with PapiSocketExecutor(node) as papi_exec:
- details = papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index']).\
- get_details(err_msg)
+ The implementation contains more hidden details, such as
+ support for old VPP PAPI async mode behavior, API CRC checking
+ conditional usage of control ping, and possible susceptibility to VPP-2033.
+ See docstring of methods for more detailed info.
"""
# Class cache for reuse between instances.
@@ -174,16 +195,21 @@ class PapiSocketExecutor:
conn_cache = dict()
"""Mapping from node key to connected client instance."""
- def __init__(self, node, remote_vpp_socket=Constants.SOCKSVR_PATH):
+ def __init__(
+ self, node, remote_vpp_socket=Constants.SOCKSVR_PATH, is_async=False
+ ):
"""Store the given arguments, declare managed variables.
:param node: Node to connect to and forward unix domain socket from.
:param remote_vpp_socket: Path to remote socket to tunnel to.
+ :param is_async: Whether to use async handling.
:type node: dict
:type remote_vpp_socket: str
+ :type is_async: bool
"""
self._node = node
self._remote_vpp_socket = remote_vpp_socket
+ self._is_async = is_async
# The list of PAPI commands to be executed on the node.
self._api_command_list = list()
@@ -198,32 +224,40 @@ class PapiSocketExecutor:
cls = self.__class__
if cls.api_package_path:
return
- cls.api_root_dir = tempfile.TemporaryDirectory(dir=u"/tmp")
+ # Pylint suggests to use a "with" statement, which we cannot do,
+ # as the dir should stay for multiple ensure_vpp_instance calls.
+ cls.api_root_dir = tempfile.TemporaryDirectory(dir="/tmp")
root_path = cls.api_root_dir.name
# Pack, copy and unpack Python part of VPP installation from _node.
# TODO: Use rsync or recursive version of ssh.scp_node instead?
node = self._node
- exec_cmd_no_error(node, [u"rm", u"-rf", u"/tmp/papi.txz"])
+ exec_cmd_no_error(node, ["rm", "-rf", "/tmp/papi.txz"])
# Papi python version depends on OS (and time).
- # Python 2.7 or 3.4, site-packages or dist-packages.
- installed_papi_glob = u"/usr/lib/python3*/*-packages/vpp_papi"
+ # Python 3.4 or higher, site-packages or dist-packages.
+ installed_papi_glob = "/usr/lib/python3*/*-packages/vpp_papi"
# We need to wrap this command in bash, in order to expand globs,
# and as ssh does join, the inner command has to be quoted.
- inner_cmd = u" ".join([
- u"tar", u"cJf", u"/tmp/papi.txz", u"--exclude=*.pyc",
- installed_papi_glob, u"/usr/share/vpp/api"
- ])
- exec_cmd_no_error(node, [u"bash", u"-c", u"'" + inner_cmd + u"'"])
- scp_node(node, root_path + u"/papi.txz", u"/tmp/papi.txz", get=True)
- run([u"tar", u"xf", root_path + u"/papi.txz", u"-C", root_path])
- cls.api_json_path = root_path + u"/usr/share/vpp/api"
+ inner_cmd = " ".join(
+ [
+ "tar",
+ "cJf",
+ "/tmp/papi.txz",
+ "--exclude=*.pyc",
+ installed_papi_glob,
+ "/usr/share/vpp/api",
+ ]
+ )
+ exec_cmd_no_error(node, ["bash", "-c", f"'{inner_cmd}'"])
+ scp_node(node, root_path + "/papi.txz", "/tmp/papi.txz", get=True)
+ run(["tar", "xf", root_path + "/papi.txz", "-C", root_path])
+ cls.api_json_path = root_path + "/usr/share/vpp/api"
# Perform initial checks before .api.json files are gone,
# by creating the checker instance.
cls.crc_checker = VppApiCrcChecker(cls.api_json_path)
# When present locally, we finally can find the installation path.
cls.api_package_path = glob.glob(root_path + installed_papi_glob)[0]
# Package path has to be one level above the vpp_papi directory.
- cls.api_package_path = cls.api_package_path.rsplit(u"/", 1)[0]
+ cls.api_package_path = cls.api_package_path.rsplit("/", 1)[0]
def ensure_vpp_instance(self):
"""Create or reuse a closed client instance, return it.
@@ -251,14 +285,39 @@ class PapiSocketExecutor:
# It is right, we should refactor the code and move initialization
# of package outside.
from vpp_papi.vpp_papi import VPPApiClient as vpp_class
- vpp_class.apidir = cls.api_json_path
- # We need to create instance before removing from sys.path.
- vpp_instance = vpp_class(
- use_socket=True, server_address=u"TBD", async_thread=False,
- read_timeout=14, logger=FilteredLogger(logger, u"INFO")
- )
- # Cannot use loglevel parameter, robot.api.logger lacks support.
- # TODO: Stop overriding read_timeout when VPP-1722 is fixed.
+ try:
+ # The old way. Deduplicate when pre-2402 support is not needed.
+
+ vpp_class.apidir = cls.api_json_path
+ # We need to create instance before removing from sys.path.
+ # Cannot use loglevel parameter, robot.api.logger lacks the support.
+ vpp_instance = vpp_class(
+ use_socket=True,
+ server_address="TBD",
+ async_thread=False,
+ # Large read timeout was originally there for VPP-1722,
+ # it may still be helping against AVF device creation failures.
+ read_timeout=14,
+ logger=FilteredLogger(logger, "INFO"),
+ )
+ except vpp_class.VPPRuntimeError:
+ # The 39871 way.
+
+ # We need to create instance before removing from sys.path.
+ # Cannot use loglevel parameter, robot.api.logger lacks the support.
+ vpp_instance = vpp_class(
+ apidir=cls.api_json_path,
+ use_socket=True,
+ server_address="TBD",
+ async_thread=False,
+ # Large read timeout was originally there for VPP-1722,
+ # it may still be helping against AVF device creation failures.
+ read_timeout=14,
+ logger=FilteredLogger(logger, "INFO"),
+ )
+ # The following is needed to prevent union (e.g. Ip4) debug logging
+ # of VPP part of PAPI from spamming robot logs.
+ logging.getLogger("vpp_papi.serializer").setLevel(logging.INFO)
finally:
if sys.path[-1] == cls.api_package_path:
sys.path.pop()
@@ -284,8 +343,8 @@ class PapiSocketExecutor:
:rtype: tuple of str
"""
return (
- node[u"host"],
- node[u"port"],
+ node["host"],
+ node["port"],
remote_socket,
# TODO: Do we support sockets paths such as "~/vpp/api.socket"?
# If yes, add also:
@@ -302,7 +361,8 @@ class PapiSocketExecutor:
:rtype: tuple of str
"""
return self.__class__.key_for_node_and_socket(
- self._node, self._remote_vpp_socket,
+ self._node,
+ self._remote_vpp_socket,
)
def set_connected_client(self, client):
@@ -329,10 +389,11 @@ class PapiSocketExecutor:
If check_connected, RuntimeError is raised when the client is
not in cache. None is returned if client is not in cache
(and the check is disabled).
+ Successful retrieval from cache is logged only when check_connected.
This hides details of what the node key is.
- :param check_connected: Whether cache miss raises.
+ :param check_connected: Whether cache miss raises (and success logs).
:type check_connected: bool
:returns: Connected client instance, or None if uncached and no check.
:rtype: Optional[vpp_papi.VPPApiClient]
@@ -340,11 +401,9 @@ class PapiSocketExecutor:
"""
key = self.key_for_self()
ret = self.__class__.conn_cache.get(key, None)
-
- if ret is None:
- if check_connected:
+ if check_connected:
+ if ret is None:
raise RuntimeError(f"Client not cached for key: {key}")
- else:
# When reading logs, it is good to see which VPP is accessed.
logger.debug(f"Activated cached PAPI client for key: {key}")
return ret
@@ -366,6 +425,8 @@ class PapiSocketExecutor:
- This socket controls the local ssh process doing the forwarding.
csit_local_vpp_socket
- This is the forwarded socket to talk with remote VPP.
+ csit_deque
+ - Queue for responses.
The attribute names do not start with underscore,
so pylint does not complain about accessing private attribute.
@@ -380,7 +441,7 @@ class PapiSocketExecutor:
if vpp_instance is not None:
return self
# No luck, create and connect a new instance.
- time_enter = time.time()
+ time_enter = time.monotonic()
node = self._node
# Parsing takes longer than connecting, prepare instance before tunnel.
vpp_instance = self.ensure_vpp_instance()
@@ -388,44 +449,55 @@ class PapiSocketExecutor:
# If connection fails, it is better to attempt disconnect anyway.
self.set_connected_client(vpp_instance)
# Set additional attributes.
- vpp_instance.csit_temp_dir = tempfile.TemporaryDirectory(dir=u"/tmp")
+ vpp_instance.csit_temp_dir = tempfile.TemporaryDirectory(dir="/tmp")
temp_path = vpp_instance.csit_temp_dir.name
- api_socket = temp_path + u"/vpp-api.sock"
+ api_socket = temp_path + "/vpp-api.sock"
vpp_instance.csit_local_vpp_socket = api_socket
- ssh_socket = temp_path + u"/ssh.sock"
+ ssh_socket = temp_path + "/ssh.sock"
vpp_instance.csit_control_socket = ssh_socket
# Cleanup possibilities.
- ret_code, _ = run([u"ls", ssh_socket], check=False)
+ ret_code, _ = run(["ls", ssh_socket], check=False)
if ret_code != 2:
# This branch never seems to be hit in CI,
# but may be useful when testing manually.
run(
- [u"ssh", u"-S", ssh_socket, u"-O", u"exit", u"0.0.0.0"],
- check=False, log=True
+ ["ssh", "-S", ssh_socket, "-O", "exit", "0.0.0.0"],
+ check=False,
+ log=True,
)
# TODO: Is any sleep necessary? How to prove if not?
- run([u"sleep", u"0.1"])
- run([u"rm", u"-vrf", ssh_socket])
+ run(["sleep", "0.1"])
+ run(["rm", "-vrf", ssh_socket])
# Even if ssh can perhaps reuse this file,
# we need to remove it for readiness detection to work correctly.
- run([u"rm", u"-rvf", api_socket])
+ run(["rm", "-rvf", api_socket])
# We use sleep command. The ssh command will exit in 30 second,
# unless a local socket connection is established,
# in which case the ssh command will exit only when
# the ssh connection is closed again (via control socket).
# The log level is to suppress "Warning: Permanently added" messages.
ssh_cmd = [
- u"ssh", u"-S", ssh_socket, u"-M", u"-L",
- api_socket + u":" + self._remote_vpp_socket,
- u"-p", str(node[u"port"]),
- u"-o", u"LogLevel=ERROR",
- u"-o", u"UserKnownHostsFile=/dev/null",
- u"-o", u"StrictHostKeyChecking=no",
- u"-o", u"ExitOnForwardFailure=yes",
- node[u"username"] + u"@" + node[u"host"],
- u"sleep", u"30"
+ "ssh",
+ "-S",
+ ssh_socket,
+ "-M",
+ "-L",
+ f"{api_socket}:{self._remote_vpp_socket}",
+ "-p",
+ str(node["port"]),
+ "-o",
+ "LogLevel=ERROR",
+ "-o",
+ "UserKnownHostsFile=/dev/null",
+ "-o",
+ "StrictHostKeyChecking=no",
+ "-o",
+ "ExitOnForwardFailure=yes",
+ f"{node['username']}@{node['host']}",
+ "sleep",
+ "30",
]
- priv_key = node.get(u"priv_key")
+ priv_key = node.get("priv_key")
if priv_key:
# This is tricky. We need a file to pass the value to ssh command.
# And we need ssh command, because paramiko does not support sockets
@@ -434,48 +506,52 @@ class PapiSocketExecutor:
key_file.write(priv_key)
# Make sure the content is written, but do not close yet.
key_file.flush()
- ssh_cmd[1:1] = [u"-i", key_file.name]
- password = node.get(u"password")
+ ssh_cmd[1:1] = ["-i", key_file.name]
+ password = node.get("password")
if password:
# Prepend sshpass command to set password.
- ssh_cmd[:0] = [u"sshpass", u"-p", password]
- time_stop = time.time() + 10.0
+ ssh_cmd[:0] = ["sshpass", "-p", password]
+ time_stop = time.monotonic() + 10.0
# subprocess.Popen seems to be the best way to run commands
# on background. Other ways (shell=True with "&" and ssh with -f)
# seem to be too dependent on shell behavior.
# In particular, -f does NOT return values for run().
subprocess.Popen(ssh_cmd)
# Check socket presence on local side.
- while time.time() < time_stop:
+ while time.monotonic() < time_stop:
# It can take a moment for ssh to create the socket file.
- ret_code, _ = run(
- [u"ls", u"-l", api_socket], check=False
- )
+ ret_code, _ = run(["ls", "-l", api_socket], check=False)
if not ret_code:
break
- time.sleep(0.1)
+ time.sleep(0.01)
else:
- raise RuntimeError(u"Local side socket has not appeared.")
+ raise RuntimeError("Local side socket has not appeared.")
if priv_key:
# Socket up means the key has been read. Delete file by closing it.
key_file.close()
# Everything is ready, set the local socket address and connect.
vpp_instance.transport.server_address = api_socket
# It seems we can get read error even if every preceding check passed.
- # Single retry seems to help.
+ # Single retry seems to help. TODO: Confirm this is still needed.
for _ in range(2):
try:
- vpp_instance.connect_sync(u"csit_socket")
+ vpp_instance.connect("csit_socket", do_async=True)
except (IOError, struct.error) as err:
logger.warn(f"Got initial connect error {err!r}")
vpp_instance.disconnect()
else:
break
else:
- raise RuntimeError(u"Failed to connect to VPP over a socket.")
- logger.trace(
- f"Establishing socket connection took {time.time()-time_enter}s"
- )
+ raise RuntimeError("Failed to connect to VPP over a socket.")
+ # Only after rls2302 all relevant VPP builds should have do_async.
+ if hasattr(vpp_instance.transport, "do_async"):
+ deq = deque()
+ vpp_instance.csit_deque = deq
+ vpp_instance.register_event_callback(lambda x, y: deq.append(y))
+ else:
+ vpp_instance.csit_deque = None
+ duration_conn = time.monotonic() - time_enter
+ logger.trace(f"Establishing socket connection took {duration_conn}s.")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
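The callback registered just above only appends decoded messages to a collections.deque, and the _read_internal helper added later in this change pops them from the test thread. Below is a minimal standalone sketch of that producer/consumer shape; the names are illustrative and not part of the library. deque.append and deque.popleft are atomic in CPython, which is why no explicit lock is needed.

    import time
    from collections import deque

    messages = deque()  # appended to by the PAPI reader thread, popped by the test

    def on_event(msg_name, msg):
        # Mirrors the registered lambda: ignore the name, keep the decoded message.
        messages.append(msg)

    def pop_with_timeout(queue, timeout):
        """Pop one message, waiting up to timeout seconds; return None on timeout."""
        deadline = time.monotonic() + timeout
        while True:
            try:
                return queue.popleft()
            except IndexError:
                time.sleep(0.01)  # avoid busy-waiting that would starve the reader
                if time.monotonic() > deadline:
                    return None

    # Simulate the reader thread delivering one reply, then consume it.
    on_event("sw_interface_details", {"sw_if_index": 1})
    print(pop_with_timeout(messages, 0.5))  # -> {'sw_if_index': 1}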
@@ -500,10 +576,17 @@ class PapiSocketExecutor:
return
logger.debug(f"Disconnecting by key: {key}")
client_instance.disconnect()
- run([
- u"ssh", u"-S", client_instance.csit_control_socket, u"-O",
- u"exit", u"0.0.0.0"
- ], check=False)
+ run(
+ [
+ "ssh",
+ "-S",
+ client_instance.csit_control_socket,
+ "-O",
+ "exit",
+ "0.0.0.0",
+ ],
+ check=False,
+ )
# Temp dir has autoclean, but deleting explicitly
# as an error can happen.
try:
@@ -519,8 +602,8 @@ class PapiSocketExecutor:
@classmethod
def disconnect_by_node_and_socket(
- cls, node, remote_socket=Constants.SOCKSVR_PATH
- ):
+ cls, node, remote_socket=Constants.SOCKSVR_PATH
+ ):
"""Disconnect a connected client instance, noop it not connected.
Also remove the local sockets by deleting the temporary directory.
@@ -578,10 +661,8 @@ class PapiSocketExecutor:
"""Add next command to internal command list; return self.
Unless disabled, new entry to papi history is also added at this point.
- The argument name 'csit_papi_command' must be unique enough as it cannot
- be repeated in kwargs.
- The kwargs dict is deep-copied, so it is safe to use the original
- with partial modifications for subsequent commands.
+ The kwargs dict is serialized or deep-copied, so it is safe to use
+ the original with partial modifications for subsequent calls.
Any pending conflicts from .api.json processing are raised.
Then the command name is checked for known CRCs.
@@ -591,6 +672,16 @@ class PapiSocketExecutor:
Each CRC issue is raised only once, so subsequent tests
can raise other issues.
+ In async handling mode, this method also serializes and sends
+ the command, skips reply CRC checking to gain speed, and saves memory
+ by putting a sentinel (instead of a deepcopy) into the api command list.
+
+ For scale tests, the call sites are responsible for setting history values
+ in a way that hints at what is done without overwhelming the papi history.
+
+ Note to contributors: Do not rename "csit_papi_command"
+ to anything VPP could possibly use as an API field name.
+
:param csit_papi_command: VPP API command.
:param history: Enable/disable adding command to PAPI command history.
:param kwargs: Optional key-value arguments.
@@ -603,23 +694,39 @@ class PapiSocketExecutor:
"""
self.crc_checker.report_initial_conflicts()
if history:
+ # No need for deepcopy yet, serialization isolates from edits.
PapiHistory.add_to_papi_history(
self._node, csit_papi_command, **kwargs
)
self.crc_checker.check_api_name(csit_papi_command)
- self._api_command_list.append(
- dict(
- api_name=csit_papi_command,
- api_args=copy.deepcopy(kwargs)
+ if self._is_async:
+ # Save memory but still count the number of expected replies.
+ self._api_command_list.append(0)
+ api_object = self.get_connected_client(check_connected=False).api
+ func = getattr(api_object, csit_papi_command)
+ # No need for deepcopy yet, serialization isolates from edits.
+ func(**kwargs)
+ else:
+ # No serialization, so deepcopy is needed here.
+ self._api_command_list.append(
+ dict(api_name=csit_papi_command, api_args=copy.deepcopy(kwargs))
)
- )
return self
def get_replies(self, err_msg="Failed to get replies."):
- """Get replies from VPP Python API.
+ """Get reply for each command from VPP Python API.
+
+ This method expects one reply per command,
+ and gains performance by reading replies only after
+ sending all commands.
The replies are parsed into dict-like objects,
- "retval" field is guaranteed to be zero on success.
+ "retval" field (if present) is guaranteed to be zero on success.
+
+ Do not use this for messages with a variable number of replies,
+ use get_details instead.
+ Do not use it for commands triggering VPP-2033,
+ use a series of get_reply calls instead.
:param err_msg: The message used if the PAPI command(s) execution fails.
:type err_msg: str
@@ -627,15 +734,18 @@ class PapiSocketExecutor:
:rtype: list of dict
:raises RuntimeError: If retval is nonzero, parsing or ssh error.
"""
- return self._execute(err_msg=err_msg)
+ if not self._is_async:
+ raise RuntimeError("Sync handling does not suport get_replies.")
+ return self._execute(err_msg=err_msg, do_async=True)
- def get_reply(self, err_msg=u"Failed to get reply."):
- """Get reply from VPP Python API.
+ def get_reply(self, err_msg="Failed to get reply."):
+ """Get reply to single command from VPP Python API.
- The reply is parsed into dict-like object,
- "retval" field is guaranteed to be zero on success.
+ This method waits for a single reply (no control ping),
+ thus avoiding bugs like VPP-2033.
- TODO: Discuss exception types to raise, unify with inner methods.
+ The reply is parsed into a dict-like object,
+ "retval" field (if present) is guaranteed to be zero on success.
:param err_msg: The message used if the PAPI command(s) execution fails.
:type err_msg: str
@@ -643,18 +753,19 @@ class PapiSocketExecutor:
:rtype: dict
:raises AssertionError: If retval is nonzero, parsing or ssh error.
"""
- replies = self.get_replies(err_msg=err_msg)
+ if self._is_async:
+ raise RuntimeError("Async handling does not suport get_reply.")
+ replies = self._execute(err_msg=err_msg, do_async=False)
if len(replies) != 1:
raise RuntimeError(f"Expected single reply, got {replies!r}")
return replies[0]
- def get_sw_if_index(self, err_msg=u"Failed to get reply."):
+ def get_sw_if_index(self, err_msg="Failed to get reply."):
"""Get sw_if_index from reply from VPP Python API.
Frequently, the caller is only interested in sw_if_index field
- of the reply, this wrapper makes such call sites shorter.
-
- TODO: Discuss exception types to raise, unify with inner methods.
+ of the reply; this wrapper around get_reply (thus safe against VPP-2033)
+ makes such call sites shorter.
:param err_msg: The message used if the PAPI command(s) execution fails.
:type err_msg: str
@@ -662,12 +773,13 @@ class PapiSocketExecutor:
:rtype: int
:raises AssertionError: If retval is nonzero, parsing or ssh error.
"""
+ if self._is_async:
+ raise RuntimeError("Async handling does not suport get_sw_if_index")
reply = self.get_reply(err_msg=err_msg)
- logger.trace(f"Getting index from {reply!r}")
- return reply[u"sw_if_index"]
+ return reply["sw_if_index"]
def get_details(self, err_msg="Failed to get dump details."):
- """Get dump details from VPP Python API.
+ """Get details (for possibly multiple dumps) from VPP Python API.
The details are parsed into dict-like objects.
The number of details per single dump command can vary,
@@ -676,19 +788,27 @@ class PapiSocketExecutor:
logging everything at once for debugging purposes),
it is recommended to call get_details for each dump (type) separately.
+ This method uses control ping to detect end of replies,
+ so it is not suitable for commands which trigger VPP-2033
+ (but arguably no dump currently triggers it).
+
:param err_msg: The message used if the PAPI command(s) execution fails.
:type err_msg: str
:returns: Details, dict objects with fields due to API without "retval".
:rtype: list of dict
"""
- return self._execute(err_msg)
+ if self._is_async:
+ raise RuntimeError("Async handling does not suport get_details.")
+ return self._execute(err_msg, do_async=False, single_reply=False)
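Taken together, the sync-mode getters divide the work as follows: get_reply for exactly one reply per command, get_details for dumps terminated by control ping. A hedged usage sketch based only on the methods above; show_version and sw_interface_dump are standard VPP API calls used here as placeholders:

    from resources.libraries.python.PapiExecutor import PapiSocketExecutor

    def show_version_and_interfaces(node):
        """Sketch: one single-reply call and one dump on the same node."""
        with PapiSocketExecutor(node) as papi_exec:
            # Exactly one reply expected, no control ping (safe against VPP-2033).
            version = papi_exec.add("show_version").get_reply("show_version failed")
            # Variable number of replies, end detected via control ping.
            details = papi_exec.add("sw_interface_dump").get_details("dump failed")
        return version, details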
@staticmethod
def run_cli_cmd(
- node, cli_cmd, log=True, remote_vpp_socket=Constants.SOCKSVR_PATH):
+ node, cli_cmd, log=True, remote_vpp_socket=Constants.SOCKSVR_PATH
+ ):
"""Run a CLI command as cli_inband, return the "reply" field of reply.
Optionally, log the field value.
+ This is a convenience wrapper around get_reply.
:param node: Node to run command on.
:param cli_cmd: The CLI command to be run on the node.
@@ -701,18 +821,18 @@ class PapiSocketExecutor:
:returns: CLI output.
:rtype: str
"""
- cmd = u"cli_inband"
- args = dict(
- cmd=cli_cmd
+ cmd = "cli_inband"
+ args = dict(cmd=cli_cmd)
+ err_msg = (
+ f"Failed to run 'cli_inband {cli_cmd}' PAPI command"
+ f" on host {node['host']}"
)
- err_msg = f"Failed to run 'cli_inband {cli_cmd}' PAPI command " \
- f"on host {node[u'host']}"
with PapiSocketExecutor(node, remote_vpp_socket) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)["reply"]
if log:
logger.info(
- f"{cli_cmd} ({node[u'host']} - {remote_vpp_socket}):\n"
+ f"{cli_cmd} ({node['host']} - {remote_vpp_socket}):\n"
f"{reply.strip()}"
)
return reply
@@ -721,6 +841,8 @@ class PapiSocketExecutor:
def run_cli_cmd_on_all_sockets(node, cli_cmd, log=True):
"""Run a CLI command as cli_inband, on all sockets in topology file.
+ Just a run_cli_cmd, looping over sockets.
+
:param node: Node to run command on.
:param cli_cmd: The CLI command to be run on the node.
:param log: If True, the response is logged.
@@ -739,6 +861,8 @@ class PapiSocketExecutor:
def dump_and_log(node, cmds):
"""Dump and log requested information, return None.
+ Just a get_details (with logging), looping over commands.
+
:param node: DUT node.
:param cmds: Dump commands to be executed.
:type node: dict
@@ -749,65 +873,231 @@ class PapiSocketExecutor:
dump = papi_exec.add(cmd).get_details()
logger.debug(f"{cmd}:\n{pformat(dump)}")
- def _execute(self, err_msg=u"Undefined error message", exp_rv=0):
+ @staticmethod
+ def _read_internal(vpp_instance, timeout=None):
+ """Blockingly read within timeout.
+
+ This covers behaviors both before and after 37758.
+ One read attempt is guaranteed even with zero timeout.
+
+ TODO: Simplify after 2302 RCA is done.
+
+ :param vpp_instance: Client instance to read from.
+ :param timeout: How long to wait for reply (or transport default).
+ :type vpp_instance: vpp_papi.VPPApiClient
+ :type timeout: Optional[float]
+ :returns: Message read or None if nothing got read.
+ :rtype: Optional[namedtuple]
+ """
+ timeout = vpp_instance.read_timeout if timeout is None else timeout
+ if vpp_instance.csit_deque is None:
+ return vpp_instance.read_blocking(timeout=timeout)
+ time_stop = time.monotonic() + timeout
+ while 1:
+ try:
+ return vpp_instance.csit_deque.popleft()
+ except IndexError:
+ # We could busy-wait but that seems to starve the reader thread.
+ time.sleep(0.01)
+ if time.monotonic() > time_stop:
+ return None
+
+ @staticmethod
+ def _read(vpp_instance, tries=3):
+ """Blockingly read within timeout, retry on early None.
+
+ For (sometimes) unknown reasons, VPP client in async mode likes
+ to return None occasionally before time runs out.
+ This function retries in that case.
+
+ Most of the time, early None means VPP crashed (see VPP-2033),
+ but it is better to give VPP more chances to respond without failure.
+
+ TODO: Perhaps CSIT now never triggers VPP-2033,
+ so investigate and remove this layer if even more speed is needed.
+
+ :param vpp_instance: Client instance to read from.
+ :param tries: Maximum number of tries to attempt.
+ :type vpp_instance: vpp_papi.VPPApiClient
+ :type tries: int
+ :returns: Message read or None if nothing got read even with retries.
+ :rtype: Optional[namedtuple]
+ """
+ timeout = vpp_instance.read_timeout
+ for _ in range(tries):
+ time_stop = time.monotonic() + 0.9 * timeout
+ reply = PapiSocketExecutor._read_internal(vpp_instance)
+ if reply is None and time.monotonic() < time_stop:
+ logger.trace("Early None. Retry?")
+ continue
+ return reply
+ logger.trace(f"Got {tries} early Nones, probably a real None.")
+ return None
+
+ @staticmethod
+ def _drain(vpp_instance, err_msg, timeout=30.0):
+ """Keep reading with until None or timeout.
+
+ This is needed to mitigate the risk of a state with unread responses
+ (e.g. after non-zero retval in the middle of get_replies)
+ causing failures in everything subsequent (until disconnect).
+
+ The reads are done without any waiting.
+
+ It is possible some responses have not arrived yet,
+ but that is unlikely as Python is usually slower than VPP.
+
+ :param vpp_instance: Client instance to read from.
+ :param err_msg: Error message to use when overstepping timeout.
+ :param timeout: How long to try before giving up.
+ :type vpp_instance: vpp_papi.VPPApiClient
+ :type err_msg: str
+ :type timeout: float
+ :raises RuntimeError: If reads keep returning non-None until the timeout.
+ """
+ time_stop = time.monotonic() + timeout
+ while time.monotonic() < time_stop:
+ if PapiSocketExecutor._read_internal(vpp_instance, 0.0) is None:
+ return
+ raise RuntimeError(f"{err_msg}\nTimed out while draining.")
+
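All three helpers above compute deadlines from time.monotonic(), which this change also substitutes for time.time() elsewhere, so wall-clock adjustments cannot distort a wait. The drain loop's shape, reduced to a standalone sketch independent of PAPI types (it only mirrors the structure of _drain, not its exact implementation):

    import time

    def drain(read_once, timeout=30.0):
        """Call read_once() until it returns None or the deadline passes."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if read_once() is None:
                return
        raise RuntimeError("Timed out while draining.")

    # Example: a source that still holds two stale replies before going quiet.
    leftovers = iter(["reply-1", "reply-2", None])
    drain(lambda: next(leftovers))  # consumes both replies, stops on None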
+ def _execute(self, err_msg, do_async, single_reply=True):
"""Turn internal command list into data and execute; return replies.
This method also clears the internal command list.
- IMPORTANT!
- Do not use this method in L1 keywords. Use:
- - get_replies()
- - get_reply()
- - get_sw_if_index()
- - get_details()
-
:param err_msg: The message used if the PAPI command(s) execution fails.
+ :param do_async: If true, assume one reply per command and do not wait
+ for each reply before sending next request.
+ Dump commands (and calls causing VPP-2033) need False.
+ :param single_reply: Used in sync emulation mode only (must remain True
+ when do_async is True). When False, use control ping to detect
+ the end of replies; when True, wait for a single reply.
:type err_msg: str
- :returns: Papi responses parsed into a dict-like object,
+ :type do_async: bool
+ :type single_reply: bool
+ :returns: Papi replies parsed into a dict-like object,
with fields due to API (possibly including retval).
- :rtype: list of dict
+ :rtype: NoneType or list of dict
:raises RuntimeError: If the replies are not all correct.
"""
- vpp_instance = self.get_connected_client()
local_list = self._api_command_list
# Clear first as execution may fail.
self._api_command_list = list()
- replies = list()
+ if do_async:
+ if not single_reply:
+ raise RuntimeError("Async papi needs one reply per request.")
+ return self._execute_async(local_list, err_msg=err_msg)
+ return self._execute_sync(
+ local_list, err_msg=err_msg, single_reply=single_reply
+ )
+
+ def _execute_sync(self, local_list, err_msg, single_reply):
+ """Execute commands waiting for replies one by one; return replies.
+
+ This implementation either expects a single response per request,
+ or uses control ping to emulate sync PAPI calls.
+ Reliable, but slow. Required for dumps. Needed for calls
+ which trigger VPP-2033.
+
+ CRC checking is done for the replies (requests are checked in .add).
+
+ :param local_list: The list of PAPI commands to be executed on the node.
+ :param err_msg: The message used if the PAPI command(s) execution fails.
+ :param single_reply: When False, use control ping to detect the end
+ of replies; when True, wait for a single reply.
+ :type local_list: list of dict
+ :type err_msg: str
+ :type single_reply: bool
+ :returns: Papi replies parsed into a dict-like object,
+ with fields due to API (possibly including retval).
+ :rtype: List[UserDict]
+ :raises AttributeError: If VPP does not know the command.
+ :raises RuntimeError: If the replies are not all correct.
+ """
+ vpp_instance = self.get_connected_client()
+ control_ping_fn = getattr(vpp_instance.api, "control_ping")
+ ret_list = list()
for command in local_list:
- api_name = command[u"api_name"]
+ api_name = command["api_name"]
papi_fn = getattr(vpp_instance.api, api_name)
+ replies = list()
try:
- try:
- reply = papi_fn(**command[u"api_args"])
- except (IOError, struct.error) as err:
- # Occasionally an error happens, try reconnect.
- logger.warn(f"Reconnect after error: {err!r}")
- vpp_instance.disconnect()
- # Testing shows immediate reconnect fails.
- time.sleep(1)
- vpp_instance.connect_sync(u"csit_socket")
- logger.trace(u"Reconnected.")
- reply = papi_fn(**command[u"api_args"])
+ # Send the command maybe followed by control ping.
+ main_context = papi_fn(**command["api_args"])
+ if single_reply:
+ replies.append(PapiSocketExecutor._read(vpp_instance))
+ else:
+ ping_context = control_ping_fn()
+ # Receive the replies.
+ while 1:
+ reply = PapiSocketExecutor._read(vpp_instance)
+ if reply is None:
+ raise RuntimeError(
+ f"{err_msg}\nSync PAPI timed out."
+ )
+ if reply.context == ping_context:
+ break
+ if reply.context != main_context:
+ raise RuntimeError(
+ f"{err_msg}\nUnexpected context: {reply!r}"
+ )
+ replies.append(reply)
except (AttributeError, IOError, struct.error) as err:
- raise AssertionError(err_msg) from err
- # *_dump commands return list of objects, convert, ordinary reply.
- if not isinstance(reply, list):
- reply = [reply]
- for item in reply:
- message_name = item.__class__.__name__
- self.crc_checker.check_api_name(message_name)
- dict_item = dictize(item)
- if u"retval" in dict_item.keys():
- # *_details messages do not contain retval.
- retval = dict_item[u"retval"]
- if retval != exp_rv:
- raise AssertionError(
- f"Retval {retval!r} does not match expected "
- f"retval {exp_rv!r} in message {message_name} "
- f"for command {command}."
- )
- replies.append(dict_item)
- return replies
+ # TODO: Add retry if it is still needed.
+ raise AssertionError(f"{err_msg}") from err
+ finally:
+ # Discard any unprocessed replies to avoid secondary failures.
+ PapiSocketExecutor._drain(vpp_instance, err_msg)
+ # Process replies for this command.
+ for reply in replies:
+ self.crc_checker.check_api_name(reply.__class__.__name__)
+ dictized_reply = dictize_and_check_retval(reply, err_msg)
+ ret_list.append(dictized_reply)
+ return ret_list
+
+ def _execute_async(self, local_list, err_msg):
+ """Read, process and return replies.
+
+ The messages were already sent by .add() in this mode,
+ local_list is used just so we know how many replies to read.
+
+ Beware: It is not clear what to do when socket read fails
+ in the middle of async processing.
+
+ The implementation assumes each command results in exactly one reply,
+ there is no reordering of either commands or replies,
+ and context numbers increase one by one (and are matching for replies).
+
+ To speed processing up, reply CRC values are not checked.
+
+ The current implementation does not limit the number of messages
+ in flight; we rely on the VPP PAPI background thread to move replies
+ from the socket to the queue fast enough.
+
+ :param local_list: The list of PAPI commands to get replies for.
+ :param err_msg: The message used if the PAPI command(s) execution fails.
+ :type local_list: list
+ :type err_msg: str
+ :returns: Papi replies parsed into a dict-like object, with fields
+ according to API (possibly including retval).
+ :rtype: List[UserDict]
+ :raises RuntimeError: If the replies are not all correct.
+ """
+ vpp_instance = self.get_connected_client()
+ ret_list = list()
+ try:
+ for index, _ in enumerate(local_list):
+ # Blocks up to timeout.
+ reply = PapiSocketExecutor._read(vpp_instance)
+ if reply is None:
+ time_msg = f"PAPI async timeout: idx {index}"
+ raise RuntimeError(f"{err_msg}\n{time_msg}")
+ ret_list.append(dictize_and_check_retval(reply, err_msg))
+ finally:
+ # Discard any unprocessed replies to avoid secondary failures.
+ PapiSocketExecutor._drain(vpp_instance, err_msg)
+ return ret_list
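Both executors funnel replies through dictize_and_check_retval, which is defined elsewhere in this file and not shown in this excerpt. Its contract, as used here, is roughly the following hypothetical stand-in; the real helper may differ in details:

    def dictize_and_check_retval_sketch(reply, err_msg):
        """Hypothetical minimal stand-in, not the real helper.

        Convert the namedtuple-like reply into a dict and raise when a
        "retval" field is present and nonzero, as the docstrings above promise.
        """
        item = dict(reply._asdict())
        if "retval" in item and item["retval"] != 0:
            raise RuntimeError(f"{err_msg}\nNonzero retval in {item!r}")
        return item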
class Disconnector:
@@ -832,8 +1122,7 @@ class Disconnector:
"""
cls = PapiSocketExecutor
# Iterate over copy of entries so deletions do not mess with iterator.
- keys_copy = list(cls.conn_cache.keys())
- for key in keys_copy:
+ for key in list(cls.conn_cache.keys()):
cls.disconnect_by_key(key)
@@ -894,17 +1183,15 @@ class PapiExecutor:
def __enter__(self):
try:
self._ssh.connect(self._node)
- except IOError:
- raise RuntimeError(
- f"Cannot open SSH connection to host {self._node[u'host']} "
- f"to execute PAPI command(s)"
- )
+ except IOError as err:
+ msg = f"PAPI: Cannot open SSH connection to {self._node['host']}"
+ raise RuntimeError(msg) from err
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._ssh.disconnect(self._node)
- def add(self, csit_papi_command=u"vpp-stats", history=True, **kwargs):
+ def add(self, csit_papi_command="vpp-stats", history=True, **kwargs):
"""Add next command to internal command list; return self.
The argument name 'csit_papi_command' must be unique enough as it cannot
@@ -926,15 +1213,16 @@ class PapiExecutor:
self._node, csit_papi_command, **kwargs
)
self._api_command_list.append(
- dict(
- api_name=csit_papi_command, api_args=copy.deepcopy(kwargs)
- )
+ dict(api_name=csit_papi_command, api_args=copy.deepcopy(kwargs))
)
return self
def get_stats(
- self, err_msg=u"Failed to get statistics.", timeout=120,
- socket=Constants.SOCKSTAT_PATH):
+ self,
+ err_msg="Failed to get statistics.",
+ timeout=120,
+ socket=Constants.SOCKSTAT_PATH,
+ ):
"""Get VPP Stats from VPP Python API.
:param err_msg: The message used if the PAPI command(s) execution fails.
@@ -946,12 +1234,15 @@ class PapiExecutor:
:returns: Requested VPP statistics.
:rtype: list of dict
"""
- paths = [cmd[u"api_args"][u"path"] for cmd in self._api_command_list]
+ paths = [cmd["api_args"]["path"] for cmd in self._api_command_list]
self._api_command_list = list()
stdout = self._execute_papi(
- paths, method=u"stats", err_msg=err_msg, timeout=timeout,
- socket=socket
+ paths,
+ method="stats",
+ err_msg=err_msg,
+ timeout=timeout,
+ socket=socket,
)
return json.loads(stdout)
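For stats, PapiExecutor goes through the remote PAPI provider script rather than the socket client. A hedged call-site sketch, with path expressions taken from the PapiHistory docstring example further below:

    from resources.libraries.python.PapiExecutor import PapiExecutor

    def read_basic_stats(node):
        """Sketch: fetch a few stats segments via the remote provider."""
        with PapiExecutor(node) as papi_exec:
            stats = papi_exec.add(
                "vpp-stats", path=["^/if", "/err/ip4-input", "/sys/node/ip4-input"]
            ).get_stats()
        return stats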
@@ -991,19 +1282,16 @@ class PapiExecutor:
api_data_processed = list()
for api in api_d:
api_args_processed = dict()
- for a_k, a_v in api[u"api_args"].items():
+ for a_k, a_v in api["api_args"].items():
api_args_processed[str(a_k)] = process_value(a_v)
api_data_processed.append(
- dict(
- api_name=api[u"api_name"],
- api_args=api_args_processed
- )
+ dict(api_name=api["api_name"], api_args=api_args_processed)
)
return api_data_processed
def _execute_papi(
- self, api_data, method=u"request", err_msg=u"", timeout=120,
- socket=None):
+ self, api_data, method="request", err_msg="", timeout=120, socket=None
+ ):
"""Execute PAPI command(s) on remote node and store the result.
:param api_data: List of APIs with their arguments.
@@ -1022,15 +1310,19 @@ class PapiExecutor:
:raises AssertionError: If PAPI command(s) execution has failed.
"""
if not api_data:
- raise RuntimeError(u"No API data provided.")
+ raise RuntimeError("No API data provided.")
- json_data = json.dumps(api_data) \
- if method in (u"stats", u"stats_request") \
+ json_data = (
+ json.dumps(api_data)
+ if method in ("stats", "stats_request")
else json.dumps(self._process_api_data(api_data))
+ )
- sock = f" --socket {socket}" if socket else u""
- cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_PAPI_PROVIDER}" \
+ sock = f" --socket {socket}" if socket else ""
+ cmd = (
+ f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_PAPI_PROVIDER}"
f" --method {method} --data '{json_data}'{sock}"
+ )
try:
ret_code, stdout, _ = self._ssh.exec_command_sudo(
cmd=cmd, timeout=timeout, log_stdout_err=False
@@ -1038,14 +1330,14 @@ class PapiExecutor:
# TODO: Fail on non-empty stderr?
except SSHTimeout:
logger.error(
- f"PAPI command(s) execution timeout on host "
- f"{self._node[u'host']}:\n{api_data}"
+ f"PAPI command(s) execution timeout on host"
+ f" {self._node['host']}:\n{api_data}"
)
raise
except Exception as exc:
raise RuntimeError(
- f"PAPI command(s) execution on host {self._node[u'host']} "
- f"failed: {api_data}"
+ f"PAPI command(s) execution on host {self._node['host']}"
+ f" failed: {api_data}"
) from exc
if ret_code != 0:
raise AssertionError(err_msg)
diff --git a/resources/libraries/python/PapiHistory.py b/resources/libraries/python/PapiHistory.py
index 32429c4f64..18b2774908 100644
--- a/resources/libraries/python/PapiHistory.py
+++ b/resources/libraries/python/PapiHistory.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -48,7 +48,7 @@ class PapiHistory:
PapiHistory.reset_papi_history(node)
@staticmethod
- def add_to_papi_history(node, csit_papi_command, papi=True, **kwargs):
+ def add_to_papi_history(node, csit_papi_command, **kwargs):
"""Add command to PAPI command history on DUT node.
Repr strings are used for argument values.
@@ -70,29 +70,17 @@ class PapiHistory:
VPP Stats:
vpp-stats(path=['^/if', '/err/ip4-input', '/sys/node/ip4-input'])
- VAT:
- sw_interface_set_flags sw_if_index 3 admin-up link-up
-
:param node: DUT node to add command to PAPI command history for.
:param csit_papi_command: Command to be added to PAPI command history.
- :param papi: Says if the command to store is PAPi or VAT. Remove when
- VAT executor is completely removed.
:param kwargs: Optional key-value arguments.
:type node: dict
:type csit_papi_command: str
- :type papi: bool
:type kwargs: dict
"""
- if papi:
- args = list()
- for key, val in kwargs.items():
- args.append(f"{key}={val!r}")
- item = f"{csit_papi_command}({u','.join(args)})"
- else:
- # This else part is here to store VAT commands.
- # VAT history is not used.
- # TODO: Remove when VatExecutor is completely removed.
- item = f"{csit_papi_command}"
+ args = list()
+ for key, val in kwargs.items():
+ args.append(f"{key}={val!r}")
+ item = f"{csit_papi_command}({u','.join(args)})"
DICT__DUTS_PAPI_HISTORY[node[u"host"]].append(item)
@staticmethod
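With the VAT branch removed, every history entry now has the same repr-based shape; a standalone illustration of the resulting string (values are made up):

    kwargs = dict(sw_if_index=3, flags=1)
    args = [f"{key}={val!r}" for key, val in kwargs.items()]
    item = f"sw_interface_set_flags({','.join(args)})"
    print(item)  # sw_interface_set_flags(sw_if_index=3,flags=1)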
diff --git a/resources/libraries/python/Policer.py b/resources/libraries/python/Policer.py
index 6d3bf86462..28ed0b0aa9 100644
--- a/resources/libraries/python/Policer.py
+++ b/resources/libraries/python/Policer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -72,7 +72,7 @@ class Policer:
def policer_set_configuration(
node, policer_name, cir, eir, cbs, ebs, rate_type, round_type,
policer_type, conform_action_type, exceed_action_type,
- violate_action_type, color_aware, is_add=True, conform_dscp=None,
+ violate_action_type, color_aware, conform_dscp=None,
exceed_dscp=None, violate_dscp=None):
"""Configure policer on VPP node.
@@ -89,7 +89,6 @@ class Policer:
:param exceed_action_type: Exceed action type.
:param violate_action_type: Violate action type.
:param color_aware: Color-blind (cb) or color-aware (ca).
- :param is_add: Add policer if True, else delete.
:param conform_dscp: DSCP for conform mark_and_transmit action.
:param exceed_dscp: DSCP for exceed mark_and_transmit action.
:param violate_dscp: DSCP for vilate mark_and_transmit action.
@@ -106,7 +105,6 @@ class Policer:
:type exceed_action_type: str
:type violate_action_type: str
:type color_aware: str
- :type is_add: bool
:type conform_dscp: str
:type exceed_dscp: str
:type violate_dscp: str
@@ -130,10 +128,8 @@ class Policer:
else 0
)
- cmd = u"policer_add_del"
- args = dict(
- is_add=is_add,
- name=str(policer_name),
+ cmd = u"policer_add"
+ infos = dict(
cir=int(cir),
eir=int(eir),
cb=int(cbs),
@@ -148,6 +144,10 @@ class Policer:
violate_action=violate_action,
color_aware=bool(color_aware == u"'ca'")
)
+ args = dict(
+ name=str(policer_name),
+ infos=infos,
+ )
err_msg = f"Failed to configure policer {policer_name} " \
f"on host {node['host']}"
diff --git a/resources/libraries/python/QATUtil.py b/resources/libraries/python/QATUtil.py
new file mode 100644
index 0000000000..e16221fb30
--- /dev/null
+++ b/resources/libraries/python/QATUtil.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""QAT util library."""
+
+from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.VPPUtil import VPPUtil
+from resources.libraries.python.ssh import exec_cmd_no_error
+
+
+class QATUtil:
+ """Contains methods for setting up QATs."""
+
+ @staticmethod
+ def crypto_device_verify_on_all_duts(nodes):
+ """Verify if Crypto QAT device and its virtual functions are initialized
+ on all DUTs.
+
+ :param nodes: Nodes in the topology.
+ :type nodes: dict
+ """
+ VPPUtil.stop_vpp_service_on_all_duts(nodes)
+
+ for node in nodes.values():
+ if node["type"] == NodeType.DUT:
+ cryptodevs = Topology.get_cryptodev(node)
+ if not cryptodevs:
+ return
+ for device in cryptodevs.values():
+ QATUtil.crypto_device_init(node, device)
+
+ @staticmethod
+ def crypto_device_init(node, device):
+ """Init Crypto QAT device virtual functions on DUT.
+
+ :param node: DUT node.
+ :param device: Crypto device entry from topology file.
+ :type node: dict
+ :type device: dict
+ """
+ DUTSetup.verify_kernel_module(node, device["module"], force_load=True)
+
+ current_driver = DUTSetup.get_pci_dev_driver(
+ node, device["pci_address"].replace(":", r"\:")
+ )
+ if current_driver is not None:
+ DUTSetup.pci_driver_unbind(node, device["pci_address"])
+ # Bind to kernel driver.
+ DUTSetup.pci_driver_bind(node, device["pci_address"], device["driver"])
+
+ cmd = f"adf_ctl status | grep {device['pci_address']} | "
+ cmd += "awk '{print $1}'"
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, sudo=True, message="Failed to check crypto device!"
+ )
+ if stdout.strip():
+ qat_dev = stdout.split("_")[-1]
+ conf_file = f"/etc/{device['driver']}_{qat_dev.strip()}.conf"
+ exec_cmd_no_error(
+ node, f"adf_ctl --config {conf_file} {stdout.strip()} restart",
+ sudo=True, message="Failed to restart crypto device!"
+ )
+ else:
+ raise ValueError("Crypto device error")
+
+ # Initialize QAT VFs.
+ if int(device["numvfs"]) > 0:
+ path = f"drivers/{device['driver']}"
+ DUTSetup.set_sriov_numvfs(
+ node, device["pci_address"], path=path,
+ numvfs=device["numvfs"]
+ )
+
+ if device["driver"] not in ["c4xxx"]:
+ for cvf in range(int(device["numvfs"])):
+ DUTSetup.pci_vf_driver_unbind(
+ node, device["pci_address"], cvf
+ )
+ DUTSetup.pci_vf_driver_bind(
+ node, device["pci_address"], cvf, "vfio-pci"
+ )
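The init path above reads module, pci_address, driver and numvfs from each cryptodev entry in the topology. A hedged sketch of driving it for a single device; all device values here are illustrative placeholders, not a real topology entry:

    from resources.libraries.python.QATUtil import QATUtil

    def init_single_qat(dut_node):
        """Sketch: initialize one QAT device described inline."""
        device = {
            "module": "qat_c4xxx",          # kernel module to verify/load (placeholder)
            "pci_address": "0000:3d:00.0",  # PF address (placeholder)
            "driver": "c4xxx",              # kernel driver to bind the PF to
            "numvfs": 4,                    # how many VFs to create and rebind
        }
        QATUtil.crypto_device_init(dut_node, device)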
diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index 64fb5a0e87..2df89ee87c 100644
--- a/resources/libraries/python/QemuUtils.py
+++ b/resources/libraries/python/QemuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022-2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -258,7 +258,7 @@ class QemuUtils:
logger.debug(u"Jumbo frames temporarily disabled!")
self._params.add_with_value(
u"chardev", f"socket,id=char{self._nic_id},"
- f"path={socket}{u',server' if server is True else u''}"
+ f"path={socket}{u',server=on' if server is True else u''}"
)
self._params.add_with_value(
u"netdev", f"vhost-user,id=vhost{self._nic_id},"
@@ -605,7 +605,7 @@ class QemuUtils:
except AttributeError:
self._wait_default()
- def _wait_default(self, retries=60):
+ def _wait_default(self, retries=120):
"""Wait until QEMU with VPP is booted.
:param retries: Number of retries.
diff --git a/resources/libraries/python/SRv6.py b/resources/libraries/python/SRv6.py
index eca22a2b17..0170df5ef6 100644
--- a/resources/libraries/python/SRv6.py
+++ b/resources/libraries/python/SRv6.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -222,13 +222,14 @@ class SRv6:
:type sid_list: list
:type mode: str
"""
- cmd = u"sr_policy_add"
+ cmd = u"sr_policy_add_v2"
args = dict(
bsid_addr=IPv6Address(bsid).packed,
weight=1,
is_encap=bool(mode == u"encap"),
- is_spray=False,
- sids=SRv6.create_srv6_sid_list(sid_list)
+ type=0, # Neither SPRAY nor TEF is needed yet.
+ sids=SRv6.create_srv6_sid_list(sid_list),
+ # encap_src is optional, do not set yet.
)
err_msg = f"Failed to add SR policy for BindingSID {bsid} " \
f"on host {node[u'host']}"
@@ -243,7 +244,7 @@ class SRv6:
:param node: Given node to show SRv6 policies on.
:type node: dict
"""
- cmd = u"sr_policies_dump"
+ cmd = u"sr_policies_v2_dump"
PapiSocketExecutor.dump_and_log(node, (cmd,))
@staticmethod
diff --git a/resources/libraries/python/SetupFramework.py b/resources/libraries/python/SetupFramework.py
index 6d1332c1b7..95ca8a7d51 100644
--- a/resources/libraries/python/SetupFramework.py
+++ b/resources/libraries/python/SetupFramework.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -14,8 +14,6 @@
"""This module exists to provide setup utilities for the framework on topology
nodes. All tasks required to be run before the actual tests are started is
supposed to end up here.
-
-TODO: Figure out how to export JSON from SSH outside main Robot thread.
"""
from os import environ, remove
@@ -108,7 +106,7 @@ def extract_tarball_at_node(tarball, node):
node, cmd,
message=f"Failed to extract {tarball} at node {node[u'type']} "
f"host {node[u'host']}, port {node[u'port']}",
- timeout=240, include_reason=True, export=False
+ timeout=600, include_reason=True
)
logger.console(
f"Extracting tarball to {con.REMOTE_FW_DIR} on {node[u'type']} "
@@ -137,7 +135,7 @@ def create_env_directory_at_node(node):
f"&& source env/bin/activate && ANSIBLE_SKIP_CONFLICT_CHECK=1 " \
f"pip3 install -r requirements.txt"
stdout, stderr = exec_cmd_no_error(
- node, cmd, timeout=300, include_reason=True, export=False,
+ node, cmd, timeout=300, include_reason=True,
message=f"Failed install at node {node[u'type']} host {node[u'host']}, "
f"port {node[u'port']}"
)
@@ -217,7 +215,7 @@ def delete_framework_dir(node):
node, f"sudo rm -rf {con.REMOTE_FW_DIR}",
message=f"Framework delete failed at node {node[u'type']} "
f"host {node[u'host']}, port {node[u'port']}",
- timeout=100, include_reason=True, export=False
+ timeout=100, include_reason=True,
)
logger.console(
f"Deleting framework directory on {node[u'type']} host {node[u'host']},"
@@ -261,9 +259,9 @@ def cleanup_node(node, results=None, logs=None):
class SetupFramework:
"""Setup suite run on topology nodes.
- Many VAT/CLI based tests need the scripts at remote hosts before executing
- them. This class packs the whole testing directory and copies it over
- to all nodes in topology under /tmp/
+ Some tests need the scripts at remote hosts before executing them.
+ This class packs the whole testing directory and copies it over
+ to all nodes in topology under /tmp/.
"""
@staticmethod
diff --git a/resources/libraries/python/TRexConfigGenerator.py b/resources/libraries/python/TRexConfigGenerator.py
new file mode 100644
index 0000000000..c50b42610c
--- /dev/null
+++ b/resources/libraries/python/TRexConfigGenerator.py
@@ -0,0 +1,301 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""TRex Configuration File Generator library."""
+
+import re
+import yaml
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.ssh import exec_cmd_no_error
+from resources.libraries.python.topology import NodeType, NodeSubTypeTG
+from resources.libraries.python.topology import Topology
+
+
+__all__ = ["TrexConfigGenerator", "TrexConfig"]
+
+def pci_dev_check(pci_dev):
+ """Check if provided PCI address is in correct format.
+
+ :param pci_dev: PCI address (expected format: xxxx:xx:xx.x).
+ :type pci_dev: str
+ :returns: True if PCI address is in correct format.
+ :rtype: bool
+ :raises ValueError: If PCI address is in incorrect format.
+ """
+ pattern = re.compile(
+ r"^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:[0-9A-Fa-f]{2}\.[0-9A-Fa-f]$"
+ )
+ if not re.match(pattern, pci_dev):
+ raise ValueError(
+ f"PCI address {pci_dev} is not in valid format xxxx:xx:xx.x"
+ )
+ return True
+
+
+class TrexConfigGenerator:
+ """TRex Startup Configuration File Generator."""
+
+ def __init__(self):
+ """Initialize library.
+ """
+ self._node = ""
+ self._node_key = ""
+ self._node_config = dict()
+ self._node_serialized_config = ""
+ self._startup_configuration_path = "/etc/trex_cfg.yaml"
+
+ def set_node(self, node, node_key=None):
+ """Set topology node.
+
+ :param node: Node to store configuration on.
+ :param node_key: Topology node key.
+ :type node: dict
+ :type node_key: str
+ :raises RuntimeError: If Node type is not TG and subtype is not TREX.
+ """
+ if node.get("type") is None:
+ msg = "Node type is not defined!"
+ elif node["type"] != NodeType.TG:
+ msg = f"Node type is {node['type']!r}, not a TG!"
+ elif node.get("subtype") is None:
+ msg = "TG subtype is not defined"
+ elif node["subtype"] != NodeSubTypeTG.TREX:
+ msg = f"TG subtype {node['subtype']!r} is not supported"
+ else:
+ self._node = node
+ self._node_key = node_key
+ return
+ raise RuntimeError(msg)
+
+ def get_serialized_config(self):
+ """Get serialized startup configuration in YAML format.
+
+ :returns: Startup configuration in YAML format.
+ :rtype: str
+ """
+ self.serialize_config(self._node_config)
+ return self._node_serialized_config
+
+ def serialize_config(self, obj):
+ """Serialize the startup configuration in YAML format.
+
+ :param obj: Python Object to print.
+ :type obj: Obj
+ """
+ self._node_serialized_config = yaml.dump([obj], default_style=None)
+
+ def add_config_item(self, config, value, path):
+ """Add startup configuration item.
+
+ :param config: Startup configuration of node.
+ :param value: Value to insert.
+ :param path: Path where to insert item.
+ :type config: dict
+ :type value: str
+ :type path: list
+ """
+ if len(path) == 1:
+ config[path[0]] = value
+ return
+ if path[0] not in config:
+ config[path[0]] = dict()
+ elif isinstance(config[path[0]], str):
+ config[path[0]] = dict() if config[path[0]] == "" \
+ else {config[path[0]]: ""}
+ self.add_config_item(config[path[0]], value, path[1:])
+
+ def add_version(self, value=2):
+ """Add config file version.
+
+ :param value: Version of configuration file.
+ :type value: int
+ """
+ path = ["version"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_c(self, value):
+ """Add core count.
+
+ :param value: Core count.
+ :type value: int
+ """
+ path = ["c"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_limit_memory(self, value):
+ """Add memory limit.
+
+ :param value: Memory limit.
+ :type value: str
+ """
+ path = ["limit_memory"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_interfaces(self, devices):
+ """Add PCI device configuration.
+
+ :param devices: PCI device(s) (format xxxx:xx:xx.x).
+ :type devices: list(str)
+ """
+ for device in devices:
+ pci_dev_check(device)
+
+ path = ["interfaces"]
+ self.add_config_item(self._node_config, devices, path)
+
+ def add_rx_desc(self, value):
+ """Add RX descriptors.
+
+ :param value: RX descriptors count.
+ :type value: int
+ """
+ path = ["rx_desc"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_tx_desc(self, value):
+ """Add TX descriptors.
+
+ :param value: TX descriptors count.
+ :type value: int
+ """
+ path = ["tx_desc"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_port_info(self, value):
+ """Add port information configuration.
+
+ :param value: Port information configuration.
+ :type value: list(dict)
+ """
+ path = ["port_info"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_platform_master_thread_id(self, value):
+ """Add platform master thread ID.
+
+ :param value: Master thread ID.
+ :type value: int
+ """
+ path = ["platform", "master_thread_id"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_platform_latency_thread_id(self, value):
+ """Add platform latency thread ID.
+
+ :param value: Latency thread ID.
+ :type value: int
+ """
+ path = ["platform", "latency_thread_id"]
+ self.add_config_item(self._node_config, value, path)
+
+ def add_platform_dual_if(self, value):
+ """Add platform dual interface configuration.
+
+ :param value: Dual interface configuration.
+ :type value: list(dict)
+ """
+ path = ["platform", "dual_if"]
+ self.add_config_item(self._node_config, value, path)
+
+ def write_config(self, path=None):
+ """Generate and write TRex startup configuration to file.
+
+ :param path: Override startup configuration path.
+ :type path: str
+ """
+ self.serialize_config(self._node_config)
+
+ if path is None:
+ path = self._startup_configuration_path
+
+ command = f"echo \"{self._node_serialized_config}\" | sudo tee {path}"
+ message = "Writing TRex startup configuration failed!"
+ exec_cmd_no_error(self._node, command, message=message)
+
+
+class TrexConfig:
+ """TRex Configuration Class.
+ """
+ @staticmethod
+ def add_startup_configuration(node, tg_topology):
+ """Apply TRex startup configuration.
+
+ :param node: TRex node in the topology.
+ :param tg_topology: Ordered TRex links.
+ :type node: dict
+ :type tg_topology: list(dict)
+ """
+ pci_addresses = list()
+ dual_if = list()
+ port_info = list()
+ master_thread_id = None
+ latency_thread_id = None
+ cores = None
+ sockets = list()
+
+ for idx, link in enumerate(tg_topology):
+ pci_addresses.append(
+ Topology().get_interface_pci_addr(node, link["interface"])
+ )
+ if len(tg_topology) > 2:
+ # Multiple dual_ifs must not share the cores.
+ tg_dtc = Constants.TREX_CORE_COUNT_MULTI
+ tg_dtc_offset = Constants.TREX_CORE_COUNT_MULTI * (idx // 2)
+ else:
+ # Single dual_if can share cores.
+ tg_dtc = Constants.TREX_CORE_COUNT
+ tg_dtc_offset = 0
+ master_thread_id, latency_thread_id, socket, threads = \
+ CpuUtils.get_affinity_trex(
+ node, link["interface"], tg_dtc=tg_dtc,
+ tg_dtc_offset=tg_dtc_offset
+ )
+ dual_if.append(dict(socket=socket, threads=threads))
+ cores = len(threads)
+ port_info.append(
+ dict(
+ src_mac=Topology().get_interface_mac(
+ node, link["interface"]
+ ),
+ dest_mac=link["dst_mac"]
+ )
+ )
+ sockets.append(socket)
+
+ limit_memory = f"{Constants.TREX_LIMIT_MEMORY}"
+ if len(tg_topology) <= 2 and 0 in sockets and 1 in sockets:
+ limit_memory = (
+ f"{Constants.TREX_LIMIT_MEMORY},{Constants.TREX_LIMIT_MEMORY}"
+ )
+ if len(tg_topology) > 2:
+ limit_memory = (
+ f"{Constants.TREX_LIMIT_MEMORY_MULTI}"
+ )
+
+ trex_config = TrexConfigGenerator()
+ trex_config.set_node(node)
+ trex_config.add_version()
+ trex_config.add_interfaces(pci_addresses)
+ trex_config.add_c(cores)
+ trex_config.add_limit_memory(limit_memory)
+ trex_config.add_port_info(port_info)
+ if Constants.TREX_RX_DESCRIPTORS_COUNT != 0:
+ trex_config.add_rx_desc(Constants.TREX_RX_DESCRIPTORS_COUNT)
+ if Constants.TREX_TX_DESCRIPTORS_COUNT != 0:
+            trex_config.add_tx_desc(Constants.TREX_TX_DESCRIPTORS_COUNT)
+ trex_config.add_platform_master_thread_id(int(master_thread_id))
+ trex_config.add_platform_latency_thread_id(int(latency_thread_id))
+ trex_config.add_platform_dual_if(dual_if)
+ trex_config.write_config()
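Outside of TrexConfig.add_startup_configuration, the generator can be exercised on its own. A minimal sketch that only serializes the config, so no node and no write_config are involved; PCI addresses, MACs and thread IDs are placeholders:

    from resources.libraries.python.TRexConfigGenerator import TrexConfigGenerator

    def build_minimal_trex_yaml():
        """Sketch: produce the YAML body without touching any node."""
        gen = TrexConfigGenerator()
        gen.add_version()                    # version: 2
        gen.add_interfaces(["0000:18:00.0", "0000:18:00.1"])
        gen.add_c(4)                         # worker core count
        gen.add_limit_memory("8192")
        gen.add_port_info([
            dict(src_mac="ba:dc:0f:fe:00:00", dest_mac="ba:dc:0f:fe:00:01"),
            dict(src_mac="ba:dc:0f:fe:00:01", dest_mac="ba:dc:0f:fe:00:00"),
        ])
        gen.add_platform_master_thread_id(0)
        gen.add_platform_latency_thread_id(1)
        gen.add_platform_dual_if([dict(socket=0, threads=[2, 3, 4, 5])])
        return gen.get_serialized_config()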
diff --git a/resources/libraries/python/Tap.py b/resources/libraries/python/Tap.py
index 7a987fc34b..7380344b72 100644
--- a/resources/libraries/python/Tap.py
+++ b/resources/libraries/python/Tap.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -64,7 +64,7 @@ class Tap:
:returns: Returns a interface index.
:rtype: int
"""
- cmd = u"tap_create_v2"
+ cmd = u"tap_create_v3"
args = dict(
id=Constants.BITWISE_NON_ZERO,
use_random_mac=bool(mac is None),
diff --git a/resources/libraries/python/TelemetryUtil.py b/resources/libraries/python/TelemetryUtil.py
index 2d4bb096c6..63d0bf60a7 100644
--- a/resources/libraries/python/TelemetryUtil.py
+++ b/resources/libraries/python/TelemetryUtil.py
@@ -13,129 +13,87 @@
"""Telemetry utility."""
-from robot.api import logger
-
+from resources.libraries.python.model.ExportResult import append_telemetry
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.OptionString import OptionString
-from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
+from resources.libraries.python.ssh import exec_cmd_no_error
from resources.libraries.python.topology import NodeType
-__all__ = [u"TelemetryUtil"]
+__all__ = ["TelemetryUtil"]
class TelemetryUtil:
"""Class contains methods for telemetry utility."""
@staticmethod
- def perf_stat(node, cpu_list=None, duration=1):
- """Get perf stat read for duration.
-
- :param node: Node in the topology.
- :param cpu_list: CPU List as a string separated by comma.
- :param duration: Measure time in seconds.
- :type node: dict
- :type cpu_list: str
- :type duration: int
- """
- if cpu_list:
- cpu_list = list(dict.fromkeys(cpu_list.split(u",")))
- cpu_list = ",".join(str(cpu) for cpu in cpu_list)
-
- cmd_opts = OptionString(prefix=u"--")
- cmd_opts.add(u"no-aggr")
- cmd_opts.add_with_value_if(
- u"cpu", cpu_list, cpu_list
- )
- cmd_opts.add_if(
- u"all-cpus", not(cpu_list)
- )
- cmd_opts.add_with_value_if(
- u"event", f"'{{{Constants.PERF_STAT_EVENTS}}}'",
- Constants.PERF_STAT_EVENTS
- )
- cmd_opts.add_with_value(
- u"interval-print", 1000
- )
- cmd_opts.add_with_value(
- u"field-separator", u"';'"
- )
-
- cmd_base = OptionString()
- cmd_base.add(f"perf stat")
- cmd_base.extend(cmd_opts)
- cmd_base.add(u"--")
- cmd_base.add_with_value(u"sleep", int(duration))
-
- exec_cmd(node, cmd_base, sudo=True)
-
- @staticmethod
- def perf_stat_on_all_duts(nodes, cpu_list=None, duration=1):
- """Get perf stat read for duration on all DUTs.
-
- :param nodes: Nodes in the topology.
- :param cpu_list: CPU List.
- :param duration: Measure time in seconds.
- :type nodes: dict
- :type cpu_list: str
- :type duration: int
- """
- for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
- TelemetryUtil.perf_stat(
- node, cpu_list=cpu_list, duration=duration
- )
-
- @staticmethod
- def run_telemetry(node, profile, hook=None):
- """Get telemetry stat read for duration.
+ def _run_telemetry(
+ node, profile, sid=None, spath=None, rate="", export=False):
+ """Get telemetry read on node.
:param node: Node in the topology.
:param profile: Telemetry configuration profile.
- :param hook: Process ID or socket path (optional).
+ :param sid: Socket ID used to describe recipient side of socket.
+ :param spath: Socket path.
+ :param rate: Telemetry load, unique within the test (optional).
+ :param export: If false, do not attempt JSON export (default false).
:type node: dict
:type profile: str
- :type hook: str
+ :type sid: str
+ :type spath: str
+ :type rate: str
+ :type export: bool
"""
- config = u""
+ config = ""
config += f"{Constants.REMOTE_FW_DIR}/"
config += f"{Constants.RESOURCES_TPL_TELEMETRY}/"
config += f"{profile}"
- cd_cmd = u""
+ cd_cmd = ""
cd_cmd += f"sh -c \"cd {Constants.REMOTE_FW_DIR}/"
cd_cmd += f"{Constants.RESOURCES_TOOLS}"
- bin_cmd = f"python3 -m telemetry --config {config} --hook {hook}\""
- hostname = node[u"host"]
-
+ if spath:
+ bin_cmd = f"python3 -m telemetry --config {config} --hook {spath}\""
+ else:
+ bin_cmd = f"python3 -m telemetry --config {config}\""
exec_cmd_no_error(node, f"{cd_cmd} && {bin_cmd}", sudo=True)
+
+ if not export:
+ return
+
+ hostname = exec_cmd_no_error(node, "hostname")[0].strip()
stdout, _ = exec_cmd_no_error(
- node, u"cat /tmp/metric.prom", sudo=True, log_stdout_err=False
- )
- logger.info(
- u"# TYPE target info\n"
- u"# HELP target Target metadata\n"
- f"target_info{{hostname=\"{hostname}\",hook=\"{hook}\"}} 1\n"
- f"{stdout}"
+ node, "cat /tmp/metric.prom", sudo=True, log_stdout_err=False
)
+ prefix = "{"
+ prefix += f"hostname=\"{hostname}\","
+ if sid:
+ prefix += f"hook=\"{sid}\","
+ prefix += f"rate=\"{rate}\","
+ for line in stdout.splitlines():
+ if line and not line.startswith("#"):
+ append_telemetry(
+ prefix.join(line.rsplit("{", 1)).replace("\"", "'")
+ )
- @staticmethod
- def run_telemetry_on_all_duts(nodes, profile):
- """Get telemetry stat read on all DUTs.
+ def run_telemetry_on_all_duts(self, nodes, profile, rate="", export=False):
+ """Get telemetry read on all DUTs.
:param nodes: Nodes in the topology.
:param profile: Telemetry configuration profile.
- :param hooks: Dict of Process IDs or socket paths (optional).
+ :param rate: Telemetry load, unique within the test (optional).
+ :param export: If false, do not attempt JSON export (default false).
:type nodes: dict
:type profile: str
- :type hooks: dict
+ :type rate: str
+ :type export: bool
"""
for node in nodes.values():
- if node[u"type"] == NodeType.DUT:
+ if node["type"] == NodeType.DUT:
try:
- for socket in node[u"sockets"][u"CLI"].values():
- TelemetryUtil.run_telemetry(
- node, profile=profile, hook=socket
+ for sid, spath in node["sockets"]["CLI"].items():
+ self._run_telemetry(
+ node, profile=profile, sid=sid, spath=spath,
+ rate=rate, export=export
)
except IndexError:
pass
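The export path splices hostname, hook and rate labels into every Prometheus line by re-joining on the last opening brace and swapping quote styles. A standalone illustration of that transform; the metric name and values are made up:

    prefix = "{hostname=\"dut1\",hook=\"cli0\",rate=\"ndr\","
    line = "calls_total{name=\"ip4-input\",state=\"active\"} 42"
    spliced = prefix.join(line.rsplit("{", 1)).replace("\"", "'")
    print(spliced)
    # calls_total{hostname='dut1',hook='cli0',rate='ndr',name='ip4-input',state='active'} 42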
diff --git a/resources/libraries/python/TestConfig.py b/resources/libraries/python/TestConfig.py
index 28c740e42e..eb093a4651 100644
--- a/resources/libraries/python/TestConfig.py
+++ b/resources/libraries/python/TestConfig.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -23,7 +23,6 @@ from resources.libraries.python.IPAddress import IPAddress
from resources.libraries.python.IPUtil import IPUtil
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import Topology
-from resources.libraries.python.VatExecutor import VatExecutor
class TestConfig:
@@ -117,38 +116,6 @@ class TestConfig:
src_ip_start = ip_address(src_ip_start)
dst_ip_start = ip_address(dst_ip_start)
- if vxlan_count > 10:
- commands = list()
- for i in range(0, vxlan_count):
- try:
- src_ip = src_ip_start + i * ip_step
- dst_ip = dst_ip_start + i * ip_step
- except AddressValueError:
- logger.warn(
- u"Can't do more iterations - IP address limit "
- u"has been reached."
- )
- vxlan_count = i
- break
- commands.append(
- f"sw_interface_add_del_address sw_if_index "
- f"{Topology.get_interface_sw_index(node, node_vxlan_if)} "
- f"{src_ip}/{128 if src_ip.version == 6 else 32}\n"
- )
- commands.append(
- f"vxlan_add_del_tunnel src {src_ip} dst {dst_ip} "
- f"vni {vni_start + i}\n"
- )
- commands.append(
- f"create_vlan_subif sw_if_index "
- f"{Topology.get_interface_sw_index(node, node_vlan_if)} "
- f"vlan {i + 1}\n"
- )
- VatExecutor().write_and_execute_script(
- node, u"/tmp/create_vxlan_interfaces.config", commands
- )
- return vxlan_count
-
cmd1 = u"sw_interface_add_del_address"
args1 = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, node_vxlan_if),
@@ -174,7 +141,7 @@ class TestConfig:
vlan_id=None
)
- with PapiSocketExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(0, vxlan_count):
try:
src_ip = src_ip_start + i * ip_step
@@ -198,9 +165,9 @@ class TestConfig:
args2[u"vni"] = int(vni_start) + i
args3[u"vlan_id"] = i + 1
history = bool(not 1 < i < vxlan_count - 1)
- papi_exec.add(cmd1, history=history, **args1).\
- add(cmd2, history=history, **args2).\
- add(cmd3, history=history, **args3)
+ papi_exec.add(cmd1, history=history, **args1)
+ papi_exec.add(cmd2, history=history, **args2)
+ papi_exec.add(cmd3, history=history, **args3)
papi_exec.get_replies()
return vxlan_count
@@ -220,50 +187,6 @@ class TestConfig:
:type node_vlan_if: str
"""
if_data = InterfaceUtil.vpp_get_interface_data(node)
- if vxlan_count > 10:
- commands = list()
- for i in range(0, vxlan_count):
- vxlan_subif_key = Topology.add_new_port(node, u"vxlan_tunnel")
- vxlan_subif_name = f"vxlan_tunnel{i}"
- founds = dict(vxlan=False, vlan=False)
- vxlan_subif_idx = None
- vlan_subif_key = Topology.add_new_port(node, u"vlan_subif")
- vlan_subif_name = \
- f"{Topology.get_interface_name(node, node_vlan_if)}.{i + 1}"
- vlan_idx = None
- for data in if_data:
- if_name = data[u"interface_name"]
- if not founds[u"vxlan"] and if_name == vxlan_subif_name:
- vxlan_subif_idx = data[u"sw_if_index"]
- founds[u"vxlan"] = True
- elif not founds[u"vlan"] and if_name == vlan_subif_name:
- vlan_idx = data[u"sw_if_index"]
- founds[u"vlan"] = True
- if founds[u"vxlan"] and founds[u"vlan"]:
- break
- Topology.update_interface_sw_if_index(
- node, vxlan_subif_key, vxlan_subif_idx)
- Topology.update_interface_name(
- node, vxlan_subif_key, vxlan_subif_name)
- commands.append(
- f"sw_interface_set_flags sw_if_index {vxlan_subif_idx} "
- f"admin-up link-up\n"
- )
- Topology.update_interface_sw_if_index(
- node, vlan_subif_key, vlan_idx
- )
- Topology.update_interface_name(
- node, vlan_subif_key, vlan_subif_name
- )
- commands.append(
- f"sw_interface_set_flags sw_if_index {vlan_idx} admin-up "
- f"link-up\n"
- )
- VatExecutor().write_and_execute_script(
- node, u"/tmp/put_subinterfaces_up.config", commands
- )
- return
-
cmd = u"sw_interface_set_flags"
args1 = dict(
sw_if_index=None,
@@ -274,7 +197,7 @@ class TestConfig:
flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
)
- with PapiSocketExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(0, vxlan_count):
vxlan_subif_key = Topology.add_new_port(node, u"vxlan_tunnel")
vxlan_subif_name = f"vxlan_tunnel{i}"
@@ -310,9 +233,8 @@ class TestConfig:
)
args2[u"sw_if_index"] = vlan_idx
history = bool(not 1 < i < vxlan_count - 1)
- papi_exec.add(cmd, history=history, **args1). \
- add(cmd, history=history, **args2)
- papi_exec.add(cmd, **args1).add(cmd, **args2)
+ papi_exec.add(cmd, history=history, **args1)
+ papi_exec.add(cmd, history=history, **args2)
papi_exec.get_replies()
@staticmethod
@@ -344,43 +266,6 @@ class TestConfig:
"""
dst_ip_start = ip_address(dst_ip_start)
- if vxlan_count > 1:
- idx_vxlan_if = Topology.get_interface_sw_index(node, node_vxlan_if)
- commands = list()
- for i in range(0, vxlan_count):
- dst_ip = dst_ip_start + i * ip_step
- commands.append(
- f"exec ip neighbor "
- f"{Topology.get_interface_name(node, node_vxlan_if)} "
- f"{dst_ip} "
- f"{Topology.get_interface_mac(op_node, op_node_if)} static "
- f"\n"
- )
- commands.append(
- f"ip_route_add_del "
- f"{dst_ip}/{128 if dst_ip.version == 6 else 32} count 1 "
- f"via {dst_ip} sw_if_index {idx_vxlan_if}\n"
- )
- sw_idx_vxlan = Topology.get_interface_sw_index(
- node, f"vxlan_tunnel{i + 1}"
- )
- commands.append(
- f"sw_interface_set_l2_bridge sw_if_index {sw_idx_vxlan} "
- f"bd_id {bd_id_start + i} shg 0 enable\n"
- )
- sw_idx_vlan = Topology.get_interface_sw_index(
- node, f"vlan_subif{i + 1}"
- )
- commands.append(
- f"sw_interface_set_l2_bridge sw_if_index {sw_idx_vlan} "
- f"bd_id {bd_id_start + i} shg 0 enable\n"
- )
- VatExecutor().write_and_execute_script(
- node, u"/tmp/configure_routes_and_bridge_domains.config",
- commands
- )
- return
-
cmd1 = u"ip_neighbor_add_del"
neighbor = dict(
sw_if_index=Topology.get_interface_sw_index(node, node_vxlan_if),
@@ -422,7 +307,7 @@ class TestConfig:
enable=1
)
- with PapiSocketExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node, is_async=True) as papi_exec:
for i in range(0, vxlan_count):
args1[u"neighbor"][u"ip_address"] = \
str(dst_ip_start + i * ip_step)
@@ -439,8 +324,9 @@ class TestConfig:
)
args4[u"bd_id"] = int(bd_id_start+i)
history = bool(not 1 < i < vxlan_count - 1)
- papi_exec.add(cmd1, history=history, **args1). \
- add(cmd2, history=history, **args2). \
- add(cmd3, history=history, **args3). \
- add(cmd3, history=history, **args4)
+ papi_exec.add(cmd1, history=history, **args1)
+ papi_exec.add(cmd2, history=history, **args2)
+ papi_exec.add(cmd3, history=history, **args3)
+ # Yes, args4 goes with cmd3; there is no cmd4.
+ papi_exec.add(cmd3, history=history, **args4)
papi_exec.get_replies()
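The hunks above drop the VAT script fallback in favour of a single asynchronous PAPI batch regardless of scale. A minimal sketch of that pattern, assuming the PapiSocketExecutor and InterfaceStatusFlags imports already used in this file (the helper name and argument values are illustrative, not part of the change):

    def set_ifaces_up_async(node, sw_if_indices):
        """Queue one sw_interface_set_flags call per interface, send as one batch."""
        cmd = "sw_interface_set_flags"
        flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
        with PapiSocketExecutor(node, is_async=True) as papi_exec:
            for i, sw_if_index in enumerate(sw_if_indices):
                # Keep PAPI history short: record only the first and last calls.
                history = bool(not 1 < i < len(sw_if_indices) - 1)
                papi_exec.add(
                    cmd, history=history, sw_if_index=sw_if_index, flags=flags
                )
            papi_exec.get_replies()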
diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py
index 2a28896e63..936cb3a06d 100644
--- a/resources/libraries/python/TrafficGenerator.py
+++ b/resources/libraries/python/TrafficGenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -16,21 +16,25 @@
import math
import time
+from typing import Callable, List, Optional, Union
+
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from .Constants import Constants
-from .CpuUtils import CpuUtils
from .DropRateSearch import DropRateSearch
-from .MLRsearch.AbstractMeasurer import AbstractMeasurer
-from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
-from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
+from .MLRsearch import (
+ AbstractMeasurer, Config, GoalResult, MeasurementResult,
+ MultipleLossRatioSearch, SearchGoal,
+)
from .PLRsearch.PLRsearch import PLRsearch
from .OptionString import OptionString
from .ssh import exec_cmd_no_error, exec_cmd
from .topology import NodeType
from .topology import NodeSubTypeTG
from .topology import Topology
+from .TRexConfigGenerator import TrexConfig
+from .DUTSetup import DUTSetup as DS
__all__ = [u"TGDropRateSearchImpl", u"TrafficGenerator", u"OptimizedSearch"]
@@ -128,22 +132,18 @@ class TrexMode:
STL = u"STL"
-# TODO: Pylint says too-many-instance-attributes.
class TrafficGenerator(AbstractMeasurer):
"""Traffic Generator."""
- # TODO: Remove "trex" from lines which could work with other TGs.
-
# Use one instance of TrafficGenerator for all tests in test suite
ROBOT_LIBRARY_SCOPE = u"TEST SUITE"
def __init__(self):
- # TODO: Separate into few dataclasses/dicts.
- # Pylint dislikes large unstructured state, and it is right.
self._node = None
self._mode = None
# TG interface order mapping
self._ifaces_reordered = False
+ self._ifaces = []
# Result holding fields, to be removed.
self._result = None
self._loss = None
@@ -178,8 +178,7 @@ class TrafficGenerator(AbstractMeasurer):
self.ramp_up_duration = None
self.state_timeout = None
# Transient data needed for async measurements.
- self._xstats = (None, None)
- # TODO: Rename "xstats" to something opaque, so T-Rex is not privileged?
+ self._xstats = []
@property
def node(self):
@@ -280,122 +279,83 @@ class TrafficGenerator(AbstractMeasurer):
message = u"Get T-Rex version failed!"
stdout, _ = exec_cmd_no_error(tg_node, command, message=message)
return stdout.strip()
- else:
- return "none"
+ return "none"
- # TODO: pylint disable=too-many-locals.
- def initialize_traffic_generator(
- self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
- tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None,
- tg_if2_dst_mac=None):
+ def initialize_traffic_generator(self, osi_layer, pfs=2):
"""TG initialization.
- TODO: Document why do we need (and how do we use) _ifaces_reordered.
-
- :param tg_node: Traffic generator node.
- :param tg_if1: TG - name of first interface.
- :param tg_if2: TG - name of second interface.
- :param tg_if1_adj_node: TG if1 adjecent node.
- :param tg_if1_adj_if: TG if1 adjecent interface.
- :param tg_if2_adj_node: TG if2 adjecent node.
- :param tg_if2_adj_if: TG if2 adjecent interface.
:param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
- :param tg_if1_dst_mac: Interface 1 destination MAC address.
- :param tg_if2_dst_mac: Interface 2 destination MAC address.
- :type tg_node: dict
- :type tg_if1: str
- :type tg_if2: str
- :type tg_if1_adj_node: dict
- :type tg_if1_adj_if: str
- :type tg_if2_adj_node: dict
- :type tg_if2_adj_if: str
+ :param pfs: Number of physical interfaces to configure.
:type osi_layer: str
- :type tg_if1_dst_mac: str
- :type tg_if2_dst_mac: str
- :returns: nothing
- :raises RuntimeError: In case of issue during initialization.
+ :type pfs: int
+ :raises ValueError: If OSI layer is unknown.
"""
- subtype = check_subtype(tg_node)
- if subtype == NodeSubTypeTG.TREX:
- self._node = tg_node
- self._mode = TrexMode.ASTF if osi_layer == u"L7" else TrexMode.STL
- if1 = dict()
- if2 = dict()
- if1[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if1)
- if2[u"pci"] = Topology().get_interface_pci_addr(self._node, tg_if2)
- if1[u"addr"] = Topology().get_interface_mac(self._node, tg_if1)
- if2[u"addr"] = Topology().get_interface_mac(self._node, tg_if2)
-
- if osi_layer == u"L2":
- if1[u"adj_addr"] = if2[u"addr"]
- if2[u"adj_addr"] = if1[u"addr"]
- elif osi_layer in (u"L3", u"L7"):
- if1[u"adj_addr"] = Topology().get_interface_mac(
- tg_if1_adj_node, tg_if1_adj_if
- )
- if2[u"adj_addr"] = Topology().get_interface_mac(
- tg_if2_adj_node, tg_if2_adj_if
- )
- else:
- raise ValueError(u"Unknown OSI layer!")
-
- # in case of switched environment we can override MAC addresses
- if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
- if1[u"adj_addr"] = tg_if1_dst_mac
- if2[u"adj_addr"] = tg_if2_dst_mac
-
- if min(if1[u"pci"], if2[u"pci"]) != if1[u"pci"]:
- if1, if2 = if2, if1
- self._ifaces_reordered = True
+ if osi_layer not in ("L2", "L3", "L7"):
+ raise ValueError("Unknown OSI layer!")
- master_thread_id, latency_thread_id, socket, threads = \
- CpuUtils.get_affinity_trex(
- self._node, tg_if1, tg_if2,
- tg_dtc=Constants.TREX_CORE_COUNT)
-
- if osi_layer in (u"L2", u"L3", u"L7"):
- exec_cmd_no_error(
- self._node,
- f"sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
- f"- version: 2\n"
- f" c: {len(threads)}\n"
- f" limit_memory: {Constants.TREX_LIMIT_MEMORY}\n"
- f" interfaces: [\"{if1[u'pci']}\",\"{if2[u'pci']}\"]\n"
- f" port_info:\n"
- f" - dest_mac: \'{if1[u'adj_addr']}\'\n"
- f" src_mac: \'{if1[u'addr']}\'\n"
- f" - dest_mac: \'{if2[u'adj_addr']}\'\n"
- f" src_mac: \'{if2[u'addr']}\'\n"
- f" platform :\n"
- f" master_thread_id: {master_thread_id}\n"
- f" latency_thread_id: {latency_thread_id}\n"
- f" dual_if:\n"
- f" - socket: {socket}\n"
- f" threads: {threads}\n"
- f"EOF'",
- sudo=True, message=u"T-Rex config generation!"
- )
+ topology = BuiltIn().get_variable_value("&{topology_info}")
+ self._node = topology["TG"]
+ subtype = check_subtype(self._node)
- if Constants.TREX_RX_DESCRIPTORS_COUNT != 0:
- exec_cmd_no_error(
- self._node,
- f"sh -c 'cat << EOF >> /etc/trex_cfg.yaml\n"
- f" rx_desc: {Constants.TREX_RX_DESCRIPTORS_COUNT}\n"
- f"EOF'",
- sudo=True, message=u"T-Rex rx_desc modification!"
+ if subtype == NodeSubTypeTG.TREX:
+ trex_topology = list()
+ self._mode = TrexMode.ASTF if osi_layer == "L7" else TrexMode.STL
+
+ for link in range(1, pfs, 2):
+ tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
+ tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
+ if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
+ ifl = BuiltIn().get_variable_value("${int}")
+ last = topology["duts_count"]
+ tg_if1_adj_addr = Topology().get_interface_mac(
+ topology["DUT1"],
+ BuiltIn().get_variable_value(
+ f"${{DUT1_{ifl}{link}}}[0]"
+ )
+ )
+ tg_if2_adj_addr = Topology().get_interface_mac(
+ topology[f"DUT{last}"],
+ BuiltIn().get_variable_value(
+ f"${{DUT{last}_{ifl}{link+1}}}[0]"
+ )
)
- if Constants.TREX_TX_DESCRIPTORS_COUNT != 0:
- exec_cmd_no_error(
- self._node,
- f"sh -c 'cat << EOF >> /etc/trex_cfg.yaml\n"
- f" tx_desc: {Constants.TREX_TX_DESCRIPTORS_COUNT}\n"
- f"EOF'",
- sudo=True, message=u"T-Rex tx_desc modification!"
+ if1_pci = topology[f"TG_pf{link}_pci"][0]
+ if2_pci = topology[f"TG_pf{link+1}_pci"][0]
+ if min(if1_pci, if2_pci) != if1_pci:
+ self._ifaces.append(str(link))
+ self._ifaces.append(str(link-1))
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link+1}"][0],
+ dst_mac=tg_if2_adj_addr
+ )
+ )
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link}"][0],
+ dst_mac=tg_if1_adj_addr
+ )
+ )
+ else:
+ self._ifaces.append(str(link-1))
+ self._ifaces.append(str(link))
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link}"][0],
+ dst_mac=tg_if1_adj_addr
+ )
+ )
+ trex_topology.append(
+ dict(
+ interface=topology[f"TG_pf{link+1}"][0],
+ dst_mac=tg_if2_adj_addr
+ )
)
- else:
- raise ValueError(u"Unknown OSI layer!")
+ TrexConfig.add_startup_configuration(
+ self._node, trex_topology
+ )
TrafficGenerator.startup_trex(
self._node, osi_layer, subtype=subtype
)
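For reference, the loop above hands TrexConfig.add_startup_configuration a flat list with one dict per TG physical function, each pair ordered so the lower PCI address comes first. A sketch with made-up interface names and MAC addresses:

    trex_topology = [
        dict(interface="enp94s0f0", dst_mac="3c:fd:fe:aa:bb:01"),  # TG pf1, peer MAC
        dict(interface="enp94s0f1", dst_mac="3c:fd:fe:aa:bb:00"),  # TG pf2, peer MAC
    ]
    TrexConfig.add_startup_configuration(tg_node, trex_topology)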
@@ -418,51 +378,41 @@ class TrafficGenerator(AbstractMeasurer):
if subtype == NodeSubTypeTG.TREX:
for _ in range(0, 3):
# Kill TRex only if it is already running.
- cmd = u"sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
+ cmd = "sh -c \"pgrep t-rex && pkill t-rex && sleep 3 || true\""
exec_cmd_no_error(
- tg_node, cmd, sudo=True, message=u"Kill TRex failed!"
+ tg_node, cmd, sudo=True, message="Kill TRex failed!"
)
# Prepare interfaces for TRex.
- mlx_ports = u""
- mlx_driver = u""
- itl_ports = u""
- for port in tg_node[u"interfaces"].values():
- if u"Mellanox" in port.get(u"model"):
- mlx_ports += f" {port.get(u'pci_address')}"
- mlx_driver = port.get(u"driver")
- if u"Intel" in port.get(u"model"):
- itl_ports += f" {port.get(u'pci_address')}"
-
- if itl_ports:
- cmd = (
- f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && ",
- f"./dpdk_nic_bind.py -u {itl_ports} || ",
- f"true\""
- )
- exec_cmd_no_error(
- tg_node, cmd, sudo=True,
- message=u"Unbind PCI ports from driver failed!"
- )
- if mlx_ports:
- cmd = (
- f"sh -c \"cd {Constants.TREX_INSTALL_DIR}/scripts/ && ",
- f"./dpdk_nic_bind.py -b {mlx_driver} {mlx_ports} || ",
- f"true\""
- )
- exec_cmd_no_error(
- tg_node, cmd, sudo=True,
- message=u"Bind PCI ports from driver failed!"
- )
+ tg_port_drv = Constants.TREX_PORT_DRIVER
+ mlx_driver = ""
+ for port in tg_node["interfaces"].values():
+ if "Mellanox" in port.get("model"):
+ mlx_driver = port.get("driver")
+ pci_addr = port.get("pci_address")
+ cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+ if cur_driver == mlx_driver:
+ pass
+ elif not cur_driver:
+ DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+ else:
+ DS.pci_driver_unbind(tg_node, pci_addr)
+ DS.pci_driver_bind(tg_node, pci_addr, mlx_driver)
+ else:
+ pci_addr = port.get("pci_address")
+ cur_driver = DS.get_pci_dev_driver(tg_node, pci_addr)
+ if cur_driver:
+ DS.pci_driver_unbind(tg_node, pci_addr)
+ DS.pci_driver_bind(tg_node, pci_addr, tg_port_drv)
# Start TRex.
cd_cmd = f"cd '{Constants.TREX_INSTALL_DIR}/scripts/'"
- trex_cmd = OptionString([u"nohup", u"./t-rex-64"])
- trex_cmd.add(u"-i")
- trex_cmd.add(u"--prefix $(hostname)")
- trex_cmd.add(u"--hdrh")
- trex_cmd.add(u"--no-scapy-server")
- trex_cmd.add_if(u"--astf", osi_layer == u"L7")
+ trex_cmd = OptionString(["nohup", "./t-rex-64"])
+ trex_cmd.add("-i")
+ trex_cmd.add("--prefix $(hostname)")
+ trex_cmd.add("--hdrh")
+ trex_cmd.add("--no-scapy-server")
+ trex_cmd.add_if("--astf", osi_layer == "L7")
# OptionString does not create double space if extra is empty.
trex_cmd.add(f"{Constants.TREX_EXTRA_CMDLINE}")
inner_command = f"{cd_cmd} && {trex_cmd} > /tmp/trex.log 2>&1 &"
@@ -470,33 +420,33 @@ class TrafficGenerator(AbstractMeasurer):
try:
exec_cmd_no_error(tg_node, cmd, sudo=True)
except RuntimeError:
- cmd = u"sh -c \"cat /tmp/trex.log\""
+ cmd = "sh -c \"cat /tmp/trex.log\""
exec_cmd_no_error(
tg_node, cmd, sudo=True,
- message=u"Get TRex logs failed!"
+ message="Get TRex logs failed!"
)
- raise RuntimeError(u"Start TRex failed!")
+ raise RuntimeError("Start TRex failed!")
# Test T-Rex API responsiveness.
cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
- if osi_layer in (u"L2", u"L3"):
- cmd += u"trex_stl_assert.py"
- elif osi_layer == u"L7":
- cmd += u"trex_astf_assert.py"
+ if osi_layer in ("L2", "L3"):
+ cmd += "trex_stl_assert.py"
+ elif osi_layer == "L7":
+ cmd += "trex_astf_assert.py"
else:
- raise ValueError(u"Unknown OSI layer!")
+ raise ValueError("Unknown OSI layer!")
try:
exec_cmd_no_error(
tg_node, cmd, sudo=True,
- message=u"T-Rex API is not responding!", retries=20
+ message="T-Rex API is not responding!", retries=20
)
except RuntimeError:
continue
return
# After max retries, T-Rex is still not responding to the API,
# so a critical error occurred.
- exec_cmd(tg_node, u"cat /tmp/trex.log", sudo=True)
- raise RuntimeError(u"Start T-Rex failed after multiple retries!")
+ exec_cmd(tg_node, "cat /tmp/trex.log", sudo=True)
+ raise RuntimeError("Start T-Rex failed after multiple retries!")
@staticmethod
def is_trex_running(node):
@@ -507,7 +457,7 @@ class TrafficGenerator(AbstractMeasurer):
:returns: True if T-Rex is running otherwise False.
:rtype: bool
"""
- ret, _, _ = exec_cmd(node, u"pgrep t-rex", sudo=True)
+ ret, _, _ = exec_cmd(node, "pgrep t-rex", sudo=True)
return bool(int(ret) == 0)
@staticmethod
@@ -540,17 +490,17 @@ class TrafficGenerator(AbstractMeasurer):
:type node: dict
:raises RuntimeError: If stop traffic script fails.
"""
- command_line = OptionString().add(u"python3")
+ command_line = OptionString().add("python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
command_line.add(f"'{dirname}/trex_astf_stop.py'")
- command_line.change_prefix(u"--")
- for index, value in enumerate(self._xstats):
+ command_line.add("--xstat")
+ for value in self._xstats:
if value is not None:
- value = value.replace(u"'", u"\"")
- command_line.add_equals(f"xstat{index}", f"'{value}'")
+ value = value.replace("'", "\"")
+ command_line.add(f"'{value}'")
stdout, _ = exec_cmd_no_error(
node, command_line,
- message=u"T-Rex ASTF runtime error!"
+ message="T-Rex ASTF runtime error!"
)
self._parse_traffic_results(stdout)
@@ -564,17 +514,17 @@ class TrafficGenerator(AbstractMeasurer):
:type node: dict
:raises RuntimeError: If stop traffic script fails.
"""
- command_line = OptionString().add(u"python3")
+ command_line = OptionString().add("python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
command_line.add(f"'{dirname}/trex_stl_stop.py'")
- command_line.change_prefix(u"--")
- for index, value in enumerate(self._xstats):
+ command_line.add("--xstat")
+ for value in self._xstats:
if value is not None:
- value = value.replace(u"'", u"\"")
- command_line.add_equals(f"xstat{index}", f"'{value}'")
+ value = value.replace("'", "\"")
+ command_line.add(f"'{value}'")
stdout, _ = exec_cmd_no_error(
node, command_line,
- message=u"T-Rex STL runtime error!"
+ message="T-Rex STL runtime error!"
)
self._parse_traffic_results(stdout)
@@ -582,7 +532,7 @@ class TrafficGenerator(AbstractMeasurer):
"""Stop all traffic on TG.
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
:raises ValueError: If TG traffic profile is not supported.
"""
subtype = check_subtype(self._node)
@@ -602,7 +552,7 @@ class TrafficGenerator(AbstractMeasurer):
"""Compute duration for profile driver.
The final result is influenced by transaction scale and duration limit.
- It is assumed a higher level function has already set those to self.
+ It is assumed a higher level function has already set those on self.
The duration argument is the target value from search point of view,
before the overrides are applied here.
@@ -679,8 +629,6 @@ class TrafficGenerator(AbstractMeasurer):
if not isinstance(duration, (float, int)):
duration = float(duration)
- # TODO: Refactor the code so duration is computed only once,
- # and both the initial and the computed durations are logged.
computed_duration, _ = self._compute_duration(duration, multiplier)
command_line = OptionString().add(u"python3")
@@ -724,7 +672,7 @@ class TrafficGenerator(AbstractMeasurer):
self._sent = None
self._loss = None
self._latency = None
- xstats = [None, None]
+ xstats = []
self._l7_data = dict()
self._l7_data[u"client"] = dict()
self._l7_data[u"client"][u"active_flows"] = None
@@ -757,10 +705,8 @@ class TrafficGenerator(AbstractMeasurer):
index = 0
for line in stdout.splitlines():
if f"Xstats snapshot {index}: " in line:
- xstats[index] = line[19:]
+ xstats.append(line[19:])
index += 1
- if index == 2:
- break
self._xstats = tuple(xstats)
else:
self._target_duration = duration
@@ -788,41 +734,36 @@ class TrafficGenerator(AbstractMeasurer):
:raises RuntimeError: In case of T-Rex driver issue.
"""
self.check_mode(TrexMode.STL)
- p_0, p_1 = (1, 0) if self._ifaces_reordered else (0, 1)
if not isinstance(duration, (float, int)):
duration = float(duration)
- # TODO: Refactor the code so duration is computed only once,
- # and both the initial and the computed durations are logged.
duration, _ = self._compute_duration(duration=duration, multiplier=rate)
- command_line = OptionString().add(u"python3")
+ command_line = OptionString().add("python3")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/tools/trex"
command_line.add(f"'{dirname}/trex_stl_profile.py'")
- command_line.change_prefix(u"--")
+ command_line.change_prefix("--")
dirname = f"{Constants.REMOTE_FW_DIR}/GPL/traffic_profiles/trex"
command_line.add_with_value(
- u"profile", f"'{dirname}/{self.traffic_profile}.py'"
+ "profile", f"'{dirname}/{self.traffic_profile}.py'"
)
- command_line.add_with_value(u"duration", f"{duration!r}")
- command_line.add_with_value(u"frame_size", self.frame_size)
- command_line.add_with_value(u"rate", f"{rate!r}")
- command_line.add_with_value(u"port_0", p_0)
- command_line.add_with_value(u"port_1", p_1)
+ command_line.add_with_value("duration", f"{duration!r}")
+ command_line.add_with_value("frame_size", self.frame_size)
+ command_line.add_with_value("rate", f"{rate!r}")
+ command_line.add_with_value("ports", " ".join(self._ifaces))
command_line.add_with_value(
- u"traffic_directions", self.traffic_directions
+ "traffic_directions", self.traffic_directions
)
- command_line.add_if(u"async_start", async_call)
- command_line.add_if(u"latency", self.use_latency)
- command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
- command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)
+ command_line.add_if("async_start", async_call)
+ command_line.add_if("latency", self.use_latency)
+ command_line.add_if("force", Constants.TREX_SEND_FORCE)
+ command_line.add_with_value("delay", Constants.PERF_TRIAL_STL_DELAY)
- # TODO: This is ugly. Handle parsing better.
self._start_time = time.monotonic()
- self._rate = float(rate[:-3]) if u"pps" in rate else float(rate)
+ self._rate = float(rate[:-3]) if "pps" in rate else float(rate)
stdout, _ = exec_cmd_no_error(
self._node, command_line, timeout=int(duration) + 60,
- message=u"T-Rex STL runtime error"
+ message="T-Rex STL runtime error"
)
if async_call:
@@ -834,14 +775,12 @@ class TrafficGenerator(AbstractMeasurer):
self._loss = None
self._latency = None
- xstats = [None, None]
+ xstats = []
index = 0
for line in stdout.splitlines():
if f"Xstats snapshot {index}: " in line:
- xstats[index] = line[19:]
+ xstats.append(line[19:])
index += 1
- if index == 2:
- break
self._xstats = tuple(xstats)
else:
self._target_duration = duration
@@ -931,7 +870,7 @@ class TrafficGenerator(AbstractMeasurer):
:type state_timeout: float
:type ramp_up_only: bool
:returns: TG results.
- :rtype: ReceiveRateMeasurement or None
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
self.set_rate_provider_defaults(
@@ -978,7 +917,7 @@ class TrafficGenerator(AbstractMeasurer):
:type rate: float
:type async_call: bool
:returns: TG results.
- :rtype: ReceiveRateMeasurement or None
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
subtype = check_subtype(self._node)
@@ -989,7 +928,6 @@ class TrafficGenerator(AbstractMeasurer):
)
elif u"trex-stl" in self.traffic_profile:
unit_rate_str = str(rate) + u"pps"
- # TODO: Suport transaction_scale et al?
self.trex_stl_start_remote_exec(
duration, unit_rate_str, async_call
)
@@ -1030,13 +968,12 @@ class TrafficGenerator(AbstractMeasurer):
:type async_call: bool
:type ramp_up_only: bool
:returns: TG results.
- :rtype: ReceiveRateMeasurement or None
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
complete = False
if self.ramp_up_rate:
# Figure out whether we need to insert a ramp-up trial.
- # TODO: Give up on async_call=True?
if ramp_up_only or self.ramp_up_start is None:
# We never ramped up yet (at least not in this test case).
ramp_up_needed = True
@@ -1086,7 +1023,7 @@ class TrafficGenerator(AbstractMeasurer):
trial_end = time.monotonic()
if self.ramp_up_rate:
# Optimization: No loss acts as a good ramp-up, if it was complete.
- if complete and result is not None and result.loss_count == 0:
+ if complete and result is not None and result.loss_ratio == 0.0:
logger.debug(u"Good trial acts as a ramp-up")
self.ramp_up_start = trial_start
self.ramp_up_stop = trial_end
@@ -1108,8 +1045,6 @@ class TrafficGenerator(AbstractMeasurer):
def fail_if_no_traffic_forwarded(self):
"""Fail if no traffic forwarded.
- TODO: Check number of passed transactions instead.
-
:returns: nothing
:raises Exception: If no traffic forwarded.
"""
@@ -1260,21 +1195,20 @@ class TrafficGenerator(AbstractMeasurer):
int(self._result.get(u"server_tcp_rx_bytes", 0))
def _get_measurement_result(self):
- """Return the result of last measurement as ReceiveRateMeasurement.
+ """Return the result of last measurement as MeasurementResult.
Separate function, as measurements can end either by time
or by explicit call, this is the common block at the end.
- The target_tr field of ReceiveRateMeasurement is in
+ The intended_load field of MeasurementResult is in
transactions per second. Transmit count and loss count units
depend on the transaction type. Usually they are in transactions
per second, or aggregated packets per second.
- TODO: Fail on running or already reported measurement.
-
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
"""
+ duration_with_overheads = time.monotonic() - self._start_time
try:
# Client duration seems to include a setup period
# where TRex does not send any packets yet.
@@ -1314,7 +1248,7 @@ class TrafficGenerator(AbstractMeasurer):
expected_attempt_count = max(expected_attempt_count, self._sent)
unsent = expected_attempt_count - self._sent
pass_count = self._received
- fail_count = expected_attempt_count - pass_count
+ loss_count = self._loss
elif self.transaction_type == u"udp_cps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit udp_cps.")
@@ -1323,7 +1257,7 @@ class TrafficGenerator(AbstractMeasurer):
expected_attempt_count = self.transaction_scale
unsent = expected_attempt_count - partial_attempt_count
pass_count = self._l7_data[u"client"][u"received"]
- fail_count = expected_attempt_count - pass_count
+ loss_count = partial_attempt_count - pass_count
elif self.transaction_type == u"tcp_cps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit tcp_cps.")
@@ -1336,14 +1270,14 @@ class TrafficGenerator(AbstractMeasurer):
# but we are testing NAT session so client/connects counts that
# (half connections from TCP point of view).
pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
- fail_count = expected_attempt_count - pass_count
+ loss_count = partial_attempt_count - pass_count
elif self.transaction_type == u"udp_pps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit udp_pps.")
partial_attempt_count = self._sent
expected_attempt_count = self.transaction_scale * self.ppta
unsent = expected_attempt_count - self._sent
- fail_count = self._loss + unsent
+ loss_count = self._loss
elif self.transaction_type == u"tcp_pps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit tcp_pps.")
@@ -1357,29 +1291,30 @@ class TrafficGenerator(AbstractMeasurer):
# Probability of retransmissions exactly cancelling
# packets unsent due to duration stretching is quite low.
unsent = abs(expected_attempt_count - self._sent)
- fail_count = self._loss + unsent
+ loss_count = self._loss
else:
raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
if unsent and isinstance(self._approximated_duration, float):
# Do not report unsent for "manual".
logger.debug(f"Unsent packets/transactions: {unsent}")
- if fail_count < 0 and not self.negative_loss:
- fail_count = 0
- measurement = ReceiveRateMeasurement(
- duration=target_duration,
- target_tr=transmit_rate,
- transmit_count=expected_attempt_count,
- loss_count=fail_count,
- approximated_duration=approximated_duration,
- partial_transmit_count=partial_attempt_count,
+ if loss_count < 0 and not self.negative_loss:
+ loss_count = 0
+ measurement = MeasurementResult(
+ intended_duration=target_duration,
+ intended_load=transmit_rate,
+ offered_count=partial_attempt_count,
+ loss_count=loss_count,
+ offered_duration=approximated_duration,
+ duration_with_overheads=duration_with_overheads,
+ intended_count=expected_attempt_count,
)
measurement.latency = self.get_latency_int()
return measurement
- def measure(self, duration, transmit_rate):
+ def measure(self, intended_duration, intended_load):
"""Run trial measurement, parse and return results.
- The input rate is for transactions. Stateles bidirectional traffic
+ The intended load is for transactions. Stateless bidirectional traffic
is understood as sequence of (asynchronous) transactions,
two packets each.
@@ -1387,35 +1322,32 @@ class TrafficGenerator(AbstractMeasurer):
the count either transactions or packets (aggregated over directions).
Optionally, this method sleeps if measurement finished before
- the time specified as duration.
+ the time specified as intended_duration (PLRsearch needs time for math).
- :param duration: Trial duration [s].
- :param transmit_rate: Target rate in transactions per second.
- :type duration: float
- :type transmit_rate: float
+ :param intended_duration: Trial duration [s].
+ :param intended_load: Target rate in transactions per second.
+ :type intended_duration: float
+ :type intended_load: float
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
:raises RuntimeError: If TG is not set or if node is not TG
or if subtype is not specified.
:raises NotImplementedError: If TG is not supported.
"""
- duration = float(duration)
+ intended_duration = float(intended_duration)
time_start = time.monotonic()
- time_stop = time_start + duration
+ time_stop = time_start + intended_duration
if self.resetter:
self.resetter()
result = self._send_traffic_on_tg_with_ramp_up(
- duration=duration,
- rate=transmit_rate,
+ duration=intended_duration,
+ rate=intended_load,
async_call=False,
)
logger.debug(f"trial measurement result: {result!r}")
# In PLRsearch, computation needs the specified time to complete.
if self.sleep_till_duration:
- sleeptime = time_stop - time.monotonic()
- if sleeptime > 0.0:
- # TODO: Sometimes we have time to do additional trials here,
- # adapt PLRsearch to accept all the results.
+ while (sleeptime := time_stop - time.monotonic()) > 0.0:
time.sleep(sleeptime)
return result
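A sketch of how a caller can drive the renamed measure() API and read the MeasurementResult fields constructed above; the load value and the logging line are illustrative only:

    tg = BuiltIn().get_library_instance(
        u"resources.libraries.python.TrafficGenerator"
    )
    result = tg.measure(intended_duration=1.0, intended_load=2e6)
    logger.debug(
        f"intended {result.intended_count}, offered {result.offered_count}, "
        f"lost {result.loss_count} in {result.intended_duration}s "
        f"at {result.intended_load} tps, latency {result.latency}"
    )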
@@ -1456,7 +1388,6 @@ class TrafficGenerator(AbstractMeasurer):
:param transaction_type: An identifier specifying which counters
and formulas to use when computing attempted and failed
transactions. Default: "packet".
- TODO: Does this also specify parsing for the measured duration?
:param duration_limit: Zero or maximum limit for computed (or given)
duration.
:param negative_loss: If false, negative loss is reported as zero loss.
@@ -1486,7 +1417,7 @@ class TrafficGenerator(AbstractMeasurer):
self.frame_size = frame_size
self.traffic_profile = str(traffic_profile)
self.resetter = resetter
- self.ppta = ppta
+ self.ppta = int(ppta)
self.traffic_directions = int(traffic_directions)
self.transaction_duration = float(transaction_duration)
self.transaction_scale = int(transaction_scale)
@@ -1508,29 +1439,30 @@ class OptimizedSearch:
"""
@staticmethod
- def perform_optimized_ndrpdr_search(
- frame_size,
- traffic_profile,
- minimum_transmit_rate,
- maximum_transmit_rate,
- packet_loss_ratio=0.005,
- final_relative_width=0.005,
- final_trial_duration=30.0,
- initial_trial_duration=1.0,
- number_of_intermediate_phases=2,
- timeout=1200.0,
- ppta=1,
- resetter=None,
- traffic_directions=2,
- transaction_duration=0.0,
- transaction_scale=0,
- transaction_type=u"packet",
- use_latency=False,
- ramp_up_rate=None,
- ramp_up_duration=None,
- state_timeout=240.0,
- expansion_coefficient=4.0,
- ):
+ def perform_mlr_search(
+ frame_size: Union[int, str],
+ traffic_profile: str,
+ min_load: float,
+ max_load: float,
+ loss_ratio: float = 0.005,
+ relative_width: float = 0.005,
+ initial_trial_duration: float = 1.0,
+ final_trial_duration: float = 1.0,
+ duration_sum: float = 21.0,
+ expansion_coefficient: int = 2,
+ preceding_targets: int = 2,
+ search_duration_max: float = 1200.0,
+ ppta: int = 1,
+ resetter: Optional[Callable[[], None]] = None,
+ traffic_directions: int = 2,
+ transaction_duration: float = 0.0,
+ transaction_scale: int = 0,
+ transaction_type: str = "packet",
+ use_latency: bool = False,
+ ramp_up_rate: float = 0.0,
+ ramp_up_duration: float = 0.0,
+ state_timeout: float = 240.0,
+ ) -> List[GoalResult]:
"""Setup initialized TG, perform optimized search, return intervals.
If transaction_scale is nonzero, all init and non-init trial durations
@@ -1542,18 +1474,20 @@ class OptimizedSearch:
:param frame_size: Frame size identifier or value [B].
:param traffic_profile: Module name as a traffic profile identifier.
See GPL/traffic_profiles/trex for implemented modules.
- :param minimum_transmit_rate: Minimal load in transactions per second.
- :param maximum_transmit_rate: Maximal load in transactions per second.
- :param packet_loss_ratio: Ratio of packets lost, for PDR [1].
- :param final_relative_width: Final lower bound transmit rate
+ :param min_load: Minimal load in transactions per second.
+ :param max_load: Maximal load in transactions per second.
+ :param loss_ratio: Ratio of packets lost, for PDR [1].
+ :param relative_width: Final lower bound intended load
cannot be more distant than this multiple of upper bound [1].
- :param final_trial_duration: Trial duration for the final phase [s].
:param initial_trial_duration: Trial duration for the initial phase
and also for the first intermediate phase [s].
- :param number_of_intermediate_phases: Number of intermediate phases
+ :param final_trial_duration: Trial duration for the final phase [s].
+ :param duration_sum: Max sum of duration for deciding [s].
+ :param expansion_coefficient: In external search multiply width by this.
+ :param preceding_targets: Number of intermediate phases
to perform before the final phase [1].
- :param timeout: The search will fail itself when not finished
- before this overall time [s].
+ :param search_duration_max: The search will fail itself
+ when not finished before this overall time [s].
:param ppta: Packets per transaction, aggregated over directions.
Needed for udp_pps which does not have a good transaction counter,
so we need to compute expected number of packets.
@@ -1572,17 +1506,18 @@ class OptimizedSearch:
:param ramp_up_rate: Rate to use in ramp-up trials [pps].
:param ramp_up_duration: Duration of ramp-up trials [s].
:param state_timeout: Time of life of DUT state [s].
- :param expansion_coefficient: In external search multiply width by this.
:type frame_size: str or int
:type traffic_profile: str
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
- :type packet_loss_ratio: float
- :type final_relative_width: float
- :type final_trial_duration: float
+ :type min_load: float
+ :type max_load: float
+ :type loss_ratio: float
+ :type relative_width: float
:type initial_trial_duration: float
- :type number_of_intermediate_phases: int
- :type timeout: float
+ :type final_trial_duration: float
+ :type duration_sum: float
+ :type expansion_coefficient: int
+ :type preceding_targets: int
+ :type search_duration_max: float
:type ppta: int
:type resetter: Optional[Callable[[], None]]
:type traffic_directions: int
@@ -1593,11 +1528,12 @@ class OptimizedSearch:
:type ramp_up_rate: float
:type ramp_up_duration: float
:type state_timeout: float
- :type expansion_coefficient: float
- :returns: Structure containing narrowed down NDR and PDR intervals
- and their measurements.
- :rtype: List[Receiverateinterval]
- :raises RuntimeError: If total duration is larger than timeout.
+ :returns: Goal result (based on unidirectional tps) for each goal.
+ The result contains both the offered load for the stat trial,
+ and the conditional throughput for display.
+ :rtype: List[GoalResult]
+ :raises RuntimeError: If search duration exceeds search_duration_max
+ or if min load becomes an upper bound for any search goal.
"""
# we need instance of TrafficGenerator instantiated by Robot Framework
# to be able to use trex_stl-*()
@@ -1605,13 +1541,12 @@ class OptimizedSearch:
u"resources.libraries.python.TrafficGenerator"
)
# Overrides for fixed transaction amount.
- # TODO: Move to robot code? We have two call sites, so this saves space,
- # even though this is surprising for log readers.
if transaction_scale:
initial_trial_duration = 1.0
final_trial_duration = 1.0
- number_of_intermediate_phases = 0
- timeout += transaction_scale * 3e-4
+ preceding_targets = 1
+ # TODO: Move the value to Constants.py?
+ search_duration_max += transaction_scale * 3e-4
tg_instance.set_rate_provider_defaults(
frame_size=frame_size,
traffic_profile=traffic_profile,
@@ -1627,34 +1562,43 @@ class OptimizedSearch:
ramp_up_duration=ramp_up_duration,
state_timeout=state_timeout,
)
- algorithm = MultipleLossRatioSearch(
- measurer=tg_instance,
- final_trial_duration=final_trial_duration,
- final_relative_width=final_relative_width,
- number_of_intermediate_phases=number_of_intermediate_phases,
- initial_trial_duration=initial_trial_duration,
- timeout=timeout,
- debug=logger.debug,
- expansion_coefficient=expansion_coefficient,
- )
- if packet_loss_ratio:
- packet_loss_ratios = [0.0, packet_loss_ratio]
+ if loss_ratio:
+ loss_ratios = [0.0, loss_ratio]
+ exceed_ratio = 0.5
else:
# Happens in reconf tests.
- packet_loss_ratios = [packet_loss_ratio]
- results = algorithm.narrow_down_intervals(
- min_rate=minimum_transmit_rate,
- max_rate=maximum_transmit_rate,
- packet_loss_ratios=packet_loss_ratios,
- )
- return results
+ loss_ratios = [0.0]
+ exceed_ratio = 0.0
+ goals = [
+ SearchGoal(
+ loss_ratio=loss_ratio,
+ exceed_ratio=exceed_ratio,
+ relative_width=relative_width,
+ initial_trial_duration=initial_trial_duration,
+ final_trial_duration=final_trial_duration,
+ duration_sum=duration_sum,
+ preceding_targets=preceding_targets,
+ expansion_coefficient=expansion_coefficient,
+ fail_fast=True,
+ )
+ for loss_ratio in loss_ratios
+ ]
+ config = Config()
+ config.goals = goals
+ config.min_load = min_load
+ config.max_load = max_load
+ config.search_duration_max = search_duration_max
+ config.warmup_duration = 1.0
+ algorithm = MultipleLossRatioSearch(config)
+ results = algorithm.search(measurer=tg_instance, debug=logger.debug)
+ return [results[goal] for goal in goals]
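A usage sketch for the renamed entry point; the profile name and load bounds are placeholders, and with the default nonzero loss_ratio the returned list pairs an NDR result (zero loss) with a PDR result:

    goal_results = OptimizedSearch.perform_mlr_search(
        frame_size=64,
        traffic_profile="trex-stl-ethip4-ip4src254",
        min_load=9001.0,
        max_load=14880952.0,
        loss_ratio=0.005,
        relative_width=0.005,
    )
    ndr_result, pdr_result = goal_results  # One GoalResult per SearchGoal.
    logger.debug(f"NDR: {ndr_result!r} PDR: {pdr_result!r}")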
@staticmethod
def perform_soak_search(
frame_size,
traffic_profile,
- minimum_transmit_rate,
- maximum_transmit_rate,
+ min_load,
+ max_load,
plr_target=1e-7,
tdpt=0.1,
initial_count=50,
@@ -1676,8 +1620,8 @@ class OptimizedSearch:
:param frame_size: Frame size identifier or value [B].
:param traffic_profile: Module name as a traffic profile identifier.
See GPL/traffic_profiles/trex for implemented modules.
- :param minimum_transmit_rate: Minimal load in transactions per second.
- :param maximum_transmit_rate: Maximal load in transactions per second.
+ :param min_load: Minimal load in transactions per second.
+ :param max_load: Maximal load in transactions per second.
:param plr_target: Ratio of packets lost to achieve [1].
:param tdpt: Trial duration per trial.
The algorithm linearly increases trial duration with trial number,
@@ -1711,8 +1655,8 @@ class OptimizedSearch:
:param state_timeout: Time of life of DUT state [s].
:type frame_size: str or int
:type traffic_profile: str
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
+ :type min_load: float
+ :type max_load: float
:type plr_target: float
:type initial_count: int
:type timeout: float
@@ -1734,11 +1678,7 @@ class OptimizedSearch:
u"resources.libraries.python.TrafficGenerator"
)
# Overrides for fixed transaction amount.
- # TODO: Move to robot code? We have a single call site
- # but MLRsearch has two and we want the two to be used similarly.
if transaction_scale:
- # TODO: What is a good value for max scale?
- # TODO: Scale the timeout with transaction scale.
timeout = 7200.0
tg_instance.set_rate_provider_defaults(
frame_size=frame_size,
@@ -1765,7 +1705,7 @@ class OptimizedSearch:
trace_enabled=trace_enabled,
)
result = algorithm.search(
- min_rate=minimum_transmit_rate,
- max_rate=maximum_transmit_rate,
+ min_rate=min_load,
+ max_rate=max_load,
)
return result
diff --git a/resources/libraries/python/VPPUtil.py b/resources/libraries/python/VPPUtil.py
index daeb568bda..1ede76cdd4 100644
--- a/resources/libraries/python/VPPUtil.py
+++ b/resources/libraries/python/VPPUtil.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -29,35 +29,6 @@ class VPPUtil:
"""General class for any VPP related methods/functions."""
@staticmethod
- def show_vpp_settings(node, *additional_cmds):
- """Print default VPP settings. In case others are needed, can be
- accepted as next parameters (each setting one parameter), preferably
- in form of a string.
-
- :param node: VPP node.
- :param additional_cmds: Additional commands that the vpp should print
- settings for.
- :type node: dict
- :type additional_cmds: tuple
- """
- def_setting_tb_displayed = {
- u"IPv6 FIB": u"ip6 fib",
- u"IPv4 FIB": u"ip fib",
- u"Interface IP": u"int addr",
- u"Interfaces": u"int",
- u"ARP": u"ip arp",
- u"Errors": u"err"
- }
-
- if additional_cmds:
- for cmd in additional_cmds:
- def_setting_tb_displayed[f"Custom Setting: {cmd}"] = cmd
-
- for _, cmd in def_setting_tb_displayed.items():
- command = f"vppctl sh {cmd}"
- exec_cmd_no_error(node, command, timeout=30, sudo=True)
-
- @staticmethod
def restart_vpp_service(node, node_key=None):
"""Restart VPP service on the specified topology node.
@@ -70,7 +41,14 @@ class VPPUtil:
"""
# Containers have a separate lifecycle, but better be safe.
PapiSocketExecutor.disconnect_all_sockets_by_node(node)
- DUTSetup.restart_service(node, Constants.VPP_UNIT)
+
+ VPPUtil.stop_vpp_service(node)
+ command = "/usr/bin/vpp -c /etc/vpp/startup.conf"
+ message = f"Node {node[u'host']} failed to start VPP!"
+ exec_cmd_no_error(
+ node, command, timeout=180, sudo=True, message=message
+ )
+
if node_key:
Topology.add_new_socket(
node, SocketType.CLI, node_key, Constants.SOCKCLI_PATH)
@@ -101,12 +79,19 @@ class VPPUtil:
:type node: dict
:type node_key: str
"""
- # Containers have a separate lifecycle, but better be safe.
PapiSocketExecutor.disconnect_all_sockets_by_node(node)
- DUTSetup.stop_service(node, Constants.VPP_UNIT)
+ command = "pkill -9 vpp; sleep 1"
+ exec_cmd(node, command, timeout=180, sudo=True)
+ command = (
+ "/bin/rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api"
+ )
+ exec_cmd(node, command, timeout=180, sudo=True)
+
if node_key:
- Topology.del_node_socket_id(node, SocketType.PAPI, node_key)
- Topology.del_node_socket_id(node, SocketType.STATS, node_key)
+ if Topology.get_node_sockets(node, socket_type=SocketType.PAPI):
+ Topology.del_node_socket_id(node, SocketType.PAPI, node_key)
+ if Topology.get_node_sockets(node, socket_type=SocketType.STATS):
+ Topology.del_node_socket_id(node, SocketType.STATS, node_key)
@staticmethod
def stop_vpp_service_on_all_duts(nodes):
@@ -120,6 +105,39 @@ class VPPUtil:
VPPUtil.stop_vpp_service(node, node_key)
@staticmethod
+ def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
+ """Install VPP on all DUT nodes.
+
+ :param nodes: Nodes in the topology.
+ :param vpp_pkg_dir: Path to directory where VPP packages are stored.
+ :type nodes: dict
+ :type vpp_pkg_dir: str
+ """
+ VPPUtil.stop_vpp_service_on_all_duts(nodes)
+ for node in nodes.values():
+ message = f"Failed to install VPP on host {node['host']}!"
+ if node["type"] == NodeType.DUT:
+ command = "mkdir -p /var/log/vpp/"
+ exec_cmd(node, command, sudo=True)
+
+ command = "ln -s /dev/null /etc/systemd/system/vpp.service"
+ exec_cmd(node, command, sudo=True)
+
+ command = "ln -s /dev/null /etc/sysctl.d/80-vpp.conf"
+ exec_cmd(node, command, sudo=True)
+
+ command = "apt-get purge -y '*vpp*' || true"
+ exec_cmd_no_error(node, command, timeout=120, sudo=True)
+
+ command = f"dpkg -i --force-all {vpp_pkg_dir}*.deb"
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
+
+ command = "dpkg -l | grep vpp"
+ exec_cmd_no_error(node, command, sudo=True)
+
+ @staticmethod
def verify_vpp_installed(node):
"""Verify that VPP is installed on the specified topology node.
@@ -410,3 +428,20 @@ class VPPUtil:
reply = papi_exec.add(cmd, **args).get_reply()
return reply[u"next_index"]
+
+ @staticmethod
+ def vpp_set_neighbor_limit_on_all_duts(nodes, count):
+ """VPP set neighbor count limit on all DUTs in the given topology.
+
+ :param nodes: Nodes in the topology.
+ :param count: Neighbor count limit to set.
+ :type nodes: dict
+ :type count: int
+ """
+ for node in nodes.values():
+ if node[u"type"] == NodeType.DUT:
+ cmd = f"set ip neighbor-config ip4 limit {count}"
+ PapiSocketExecutor.run_cli_cmd(node, cmd)
+
+ cmd = f"set ip neighbor-config ip6 limit {count}"
+ PapiSocketExecutor.run_cli_cmd(node, cmd)
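A usage sketch for the new keyword, assuming a standard nodes dict loaded from the topology file; the count value is illustrative:

    # Raise the IPv4 and IPv6 neighbor limits on every DUT before a scale test.
    VPPUtil.vpp_set_neighbor_limit_on_all_duts(nodes, count=50000)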
diff --git a/resources/libraries/python/VatExecutor.py b/resources/libraries/python/VatExecutor.py
deleted file mode 100644
index 63f46c8b6d..0000000000
--- a/resources/libraries/python/VatExecutor.py
+++ /dev/null
@@ -1,397 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""VAT executor library."""
-
-import json
-
-from os import remove
-
-from paramiko.ssh_exception import SSHException
-from robot.api import logger
-
-import resources.libraries.python.DUTSetup as PidLib
-
-from resources.libraries.python.Constants import Constants
-from resources.libraries.python.PapiHistory import PapiHistory
-from resources.libraries.python.ssh import SSH, SSHTimeout
-
-__all__ = [u"VatExecutor"]
-
-
-def cleanup_vat_json_output(json_output, vat_name=None):
- """Return VAT JSON output cleaned from VAT clutter.
-
- Clean up VAT JSON output from clutter like vat# prompts and such.
-
- :param json_output: Cluttered JSON output.
- :param vat_name: Name of the VAT script.
- :type json_output: JSON
- :type vat_name: str
- :returns: Cleaned up output JSON string.
- :rtype: JSON
- """
-
- retval = json_output
- clutter = [u"vat#", u"dump_interface_table error: Misc"]
- if vat_name:
- remote_file_path = f"{Constants.REMOTE_FW_DIR}/" \
- f"{Constants.RESOURCES_TPL_VAT}/{vat_name}"
- clutter.append(f"{remote_file_path}(2):")
- for garbage in clutter:
- retval = retval.replace(garbage, u"")
- return retval
-
-
-def get_vpp_pid(node):
- """Get PID of running VPP process.
-
- :param node: DUT node.
- :type node: dict
- :returns: PID of VPP process / List of PIDs if more VPP processes are
- running on the DUT node.
- :rtype: int or list
- """
- pid = PidLib.DUTSetup.get_pid(node, u"vpp")
- return pid
-
-
-class VatExecutor:
- """Contains methods for executing VAT commands on DUTs."""
- def __init__(self):
- self._stdout = None
- self._stderr = None
- self._ret_code = None
- self._script_name = None
-
- def execute_script(
- self, vat_name, node, timeout=120, json_out=True,
- copy_on_execute=False, history=True):
- """Execute VAT script on remote node, and store the result. There is an
- option to copy script from local host to remote host before execution.
- Path is defined automatically.
-
- :param vat_name: Name of the vat script file. Only the file name of
- the script is required, the resources path is prepended
- automatically.
- :param node: Node to execute the VAT script on.
- :param timeout: Seconds to allow the script to run.
- :param json_out: Require JSON output.
- :param copy_on_execute: If true, copy the file from local host to remote
- before executing.
- :param history: If true, add command to history.
- :type vat_name: str
- :type node: dict
- :type timeout: int
- :type json_out: bool
- :type copy_on_execute: bool
- :type history: bool
- :raises SSHException: If cannot open connection for VAT.
- :raises SSHTimeout: If VAT execution is timed out.
- :raises RuntimeError: If VAT script execution fails.
- """
- ssh = SSH()
- try:
- ssh.connect(node)
- except:
- raise SSHException(
- f"Cannot open SSH connection to execute VAT command(s) "
- f"from vat script {vat_name}"
- )
-
- if copy_on_execute:
- ssh.scp(vat_name, vat_name)
- remote_file_path = vat_name
- if history:
- with open(vat_name, u"rt") as vat_file:
- for line in vat_file:
- PapiHistory.add_to_papi_history(
- node, line.replace(u"\n", u""), papi=False
- )
- else:
- remote_file_path = f"{Constants.REMOTE_FW_DIR}/" \
- f"{Constants.RESOURCES_TPL_VAT}/{vat_name}"
-
- cmd = f"{Constants.VAT_BIN_NAME}" \
- f"{u' json' if json_out is True else u''} " \
- f"in {remote_file_path} script"
- try:
- ret_code, stdout, stderr = ssh.exec_command_sudo(
- cmd=cmd, timeout=timeout
- )
- except SSHTimeout:
- logger.error(f"VAT script execution timeout: {cmd}")
- raise
- except Exception:
- raise RuntimeError(f"VAT script execution failed: {cmd}")
-
- self._ret_code = ret_code
- self._stdout = stdout
- self._stderr = stderr
- self._script_name = vat_name
-
- def write_and_execute_script(
- self, node, tmp_fn, commands, timeout=300, json_out=False):
- """Write VAT commands to the script, copy it to node and execute it.
-
- :param node: VPP node.
- :param tmp_fn: Path to temporary file script.
- :param commands: VAT command list.
- :param timeout: Seconds to allow the script to run.
- :param json_out: Require JSON output.
- :type node: dict
- :type tmp_fn: str
- :type commands: list
- :type timeout: int
- :type json_out: bool
- """
- with open(tmp_fn, u"wt") as tmp_f:
- tmp_f.writelines(commands)
-
- self.execute_script(
- tmp_fn, node, timeout=timeout, json_out=json_out,
- copy_on_execute=True
- )
- remove(tmp_fn)
-
- def execute_script_json_out(self, vat_name, node, timeout=120):
- """Pass all arguments to 'execute_script' method, then cleanup returned
- json output.
-
- :param vat_name: Name of the vat script file. Only the file name of
- the script is required, the resources path is prepended
- automatically.
- :param node: Node to execute the VAT script on.
- :param timeout: Seconds to allow the script to run.
- :type vat_name: str
- :type node: dict
- :type timeout: int
- """
- self.execute_script(vat_name, node, timeout, json_out=True)
- self._stdout = cleanup_vat_json_output(self._stdout, vat_name=vat_name)
-
- def script_should_have_failed(self):
- """Read return code from last executed script and raise exception if the
- script didn't fail."""
- if self._ret_code is None:
- raise Exception(u"First execute the script!")
- if self._ret_code == 0:
- raise AssertionError(
- f"VAT Script execution passed, but failure was expected: "
- f"{self._script_name}"
- )
-
- def script_should_have_passed(self):
- """Read return code from last executed script and raise exception if the
- script failed."""
- if self._ret_code is None:
- raise Exception(u"First execute the script!")
- if self._ret_code != 0:
- raise AssertionError(
- f"VAT Script execution failed, but success was expected: "
- f"{self._script_name}"
- )
-
- def get_script_stdout(self):
- """Returns value of stdout from last executed script."""
- return self._stdout
-
- def get_script_stderr(self):
- """Returns value of stderr from last executed script."""
- return self._stderr
-
- @staticmethod
- def cmd_from_template(node, vat_template_file, json_param=True, **vat_args):
- """Execute VAT script on specified node. This method supports
- script templates with parameters.
-
- :param node: Node in topology on witch the script is executed.
- :param vat_template_file: Template file of VAT script.
- :param json_param: Require JSON mode.
- :param vat_args: Arguments to the template file.
- :returns: List of JSON objects returned by VAT.
- """
- with VatTerminal(node, json_param=json_param) as vat:
- return vat.vat_terminal_exec_cmd_from_template(
- vat_template_file, **vat_args
- )
-
-
-class VatTerminal:
- """VAT interactive terminal.
-
- :param node: Node to open VAT terminal on.
- :param json_param: Defines if outputs from VAT are in JSON format.
- Default is True.
- :type node: dict
- :type json_param: bool
-
- """
-
- __VAT_PROMPT = (u"vat# ", )
- __LINUX_PROMPT = (u":~# ", u":~$ ", u"~]$ ", u"~]# ")
-
- def __init__(self, node, json_param=True):
- json_text = u" json" if json_param else u""
- self.json = json_param
- self._node = node
- self._ssh = SSH()
- self._ssh.connect(self._node)
- try:
- self._tty = self._ssh.interactive_terminal_open()
- except Exception:
- raise RuntimeError(
- f"Cannot open interactive terminal on node "
- f"{self._node[u'host']}"
- )
-
- for _ in range(3):
- try:
- self._ssh.interactive_terminal_exec_command(
- self._tty, f"sudo -S {Constants.VAT_BIN_NAME}{json_text}",
- self.__VAT_PROMPT
- )
- except Exception:
- continue
- else:
- break
- else:
- vpp_pid = get_vpp_pid(self._node)
- if vpp_pid:
- if isinstance(vpp_pid, int):
- logger.trace(f"VPP running on node {self._node[u'host']}")
- else:
- logger.error(
- f"More instances of VPP running "
- f"on node {self._node[u'host']}."
- )
- else:
- logger.error(f"VPP not running on node {self._node[u'host']}.")
- raise RuntimeError(
- f"Failed to open VAT console on node {self._node[u'host']}"
- )
-
- self._exec_failure = False
- self.vat_stdout = None
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.vat_terminal_close()
-
- def vat_terminal_exec_cmd(self, cmd):
- """Execute command on the opened VAT terminal.
-
- :param cmd: Command to be executed.
-
- :returns: Command output in python representation of JSON format or
- None if not in JSON mode.
- """
- PapiHistory.add_to_papi_history(self._node, cmd, papi=False)
- logger.debug(f"Executing command in VAT terminal: {cmd}")
- try:
- out = self._ssh.interactive_terminal_exec_command(
- self._tty, cmd, self.__VAT_PROMPT
- )
- self.vat_stdout = out
- except Exception:
- self._exec_failure = True
- vpp_pid = get_vpp_pid(self._node)
- if vpp_pid:
- if isinstance(vpp_pid, int):
- msg = f"VPP running on node {self._node[u'host']} " \
- f"but VAT command {cmd} execution failed."
- else:
- msg = f"More instances of VPP running on node " \
- f"{self._node[u'host']}. VAT command {cmd} " \
- f"execution failed."
- else:
- msg = f"VPP not running on node {self._node[u'host']}. " \
- f"VAT command {cmd} execution failed."
- raise RuntimeError(msg)
-
- logger.debug(f"VAT output: {out}")
- if self.json:
- obj_start = out.find(u"{")
- obj_end = out.rfind(u"}")
- array_start = out.find(u"[")
- array_end = out.rfind(u"]")
-
- if obj_start == -1 and array_start == -1:
- raise RuntimeError(f"VAT command {cmd}: no JSON data.")
-
- if obj_start < array_start or array_start == -1:
- start = obj_start
- end = obj_end + 1
- else:
- start = array_start
- end = array_end + 1
- out = out[start:end]
- json_out = json.loads(out)
- return json_out
-
- return None
-
- def vat_terminal_close(self):
- """Close VAT terminal."""
- # interactive terminal is dead, we only need to close session
- if not self._exec_failure:
- try:
- self._ssh.interactive_terminal_exec_command(
- self._tty, u"quit", self.__LINUX_PROMPT
- )
- except Exception:
- vpp_pid = get_vpp_pid(self._node)
- if vpp_pid:
- if isinstance(vpp_pid, int):
- logger.trace(
- f"VPP running on node {self._node[u'host']}."
- )
- else:
- logger.error(
- f"More instances of VPP running "
- f"on node {self._node[u'host']}."
- )
- else:
- logger.error(
- f"VPP not running on node {self._node[u'host']}."
- )
- raise RuntimeError(
- f"Failed to close VAT console "
- f"on node {self._node[u'host']}"
- )
- try:
- self._ssh.interactive_terminal_close(self._tty)
- except Exception:
- raise RuntimeError(
- f"Cannot close interactive terminal "
- f"on node {self._node[u'host']}"
- )
-
- def vat_terminal_exec_cmd_from_template(self, vat_template_file, **args):
- """Execute VAT script from a file.
-
- :param vat_template_file: Template file name of a VAT script.
- :param args: Dictionary of parameters for VAT script.
- :returns: List of JSON objects returned by VAT.
- """
- file_path = f"{Constants.RESOURCES_TPL_VAT}/{vat_template_file}"
-
- with open(file_path, u"rt") as template_file:
- cmd_template = template_file.readlines()
- ret = list()
- for line_tmpl in cmd_template:
- vat_cmd = line_tmpl.format(**args)
- ret.append(self.vat_terminal_exec_cmd(vat_cmd.replace(u"\n", u"")))
- return ret
diff --git a/resources/libraries/python/VatJsonUtil.py b/resources/libraries/python/VatJsonUtil.py
deleted file mode 100644
index 3e956e790d..0000000000
--- a/resources/libraries/python/VatJsonUtil.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utilities to work with JSON data format from VAT."""
-
-from robot.api import logger
-
-from resources.libraries.python.parsers.JsonParser import JsonParser
-
-
-class VatJsonUtil:
- """Utilities to work with JSON data format from VAT."""
-
- @staticmethod
- def _convert_mac_to_number_list(mac_address):
- """Convert MAC address string to list of decimal numbers.
-
- Converts a ":" separated MAC address to decimal number list as used
- in JSON interface dump.
-
- :param mac_address: MAC address.
- :type mac_address: str
- :returns: List representation of MAC address.
- :rtype: list
- """
- list_mac = list()
- for num in mac_address.split(u":"):
- list_mac.append(int(num, 16))
- return list_mac
-
- @staticmethod
- def get_vpp_interface_by_mac(interfaces_list, mac_address):
- """Return interface dictionary from interface_list by MAC address.
-
- Extracts interface dictionary from all of the interfaces in interfaces
- list parsed from JSON according to mac_address of the interface.
-
- :param interfaces_list: Interfaces parsed from JSON.
- :param mac_address: MAC address of interface we are looking for.
- :type interfaces_list: dict
- :type mac_address: str
- :returns: Interface from JSON.
- :rtype: dict
- """
- interface_dict = dict()
- list_mac_address = VatJsonUtil._convert_mac_to_number_list(mac_address)
- logger.trace(
- f"MAC address {mac_address} converted to list {list_mac_address}."
- )
- for interface in interfaces_list:
- # TODO: create vat json integrity checking and move there
- if u"l2_address" not in interface:
- raise KeyError(
- u"key l2_address not found in interface dict."
- u"Probably input list is not parsed from correct VAT "
- u"json output."
- )
- if u"l2_address_length" not in interface:
- raise KeyError(
- u"key l2_address_length not found in interface "
- u"dict. Probably input list is not parsed from correct "
- u"VAT json output."
- )
- mac_from_json = interface[u"l2_address"][:6]
- if mac_from_json == list_mac_address:
- if interface[u"l2_address_length"] != 6:
- raise ValueError(u"l2_address_length value is not 6.")
- interface_dict = interface
- break
- return interface_dict
-
- @staticmethod
- def update_vpp_interface_data_from_json(node, interface_dump_json):
- """Update vpp node data in node__DICT from JSON interface dump.
-
- This method updates vpp interface names and sw if indexes according to
- interface MAC addresses found in interface_dump_json.
-
- :param node: Node dictionary.
- :param interface_dump_json: JSON output from dump_interface_list VAT
- command.
- :type node: dict
- :type interface_dump_json: str
- """
- interface_list = JsonParser().parse_data(interface_dump_json)
- for ifc in node[u"interfaces"].values():
- if_mac = ifc[u"mac_address"]
- interface_dict = VatJsonUtil.get_vpp_interface_by_mac(
- interface_list, if_mac
- )
- if not interface_dict:
- logger.trace(f"Interface {ifc} not found by MAC {if_mac}")
- ifc[u"vpp_sw_index"] = None
- continue
- ifc[u"name"] = interface_dict[u"interface_name"]
- ifc[u"vpp_sw_index"] = interface_dict[u"sw_if_index"]
- ifc[u"mtu"] = interface_dict[u"mtu"]
-
- @staticmethod
- def get_interface_sw_index_from_json(interface_dump_json, interface_name):
- """Get sw_if_index from given JSON output by interface name.
-
- :param interface_dump_json: JSON output from dump_interface_list VAT
- command.
- :param interface_name: Interface name.
- :type interface_dump_json: str
- :type interface_name: str
- :returns: SW interface index.
- :rtype: int
- :raises ValueError: If interface not found in interface_dump_json.
- """
- logger.trace(interface_dump_json)
- interface_list = JsonParser().parse_data(interface_dump_json)
- for interface in interface_list:
- try:
- if interface[u"interface_name"] == interface_name:
- index = interface[u"sw_if_index"]
- logger.debug(
- f"Interface with name {interface_name} "
- f"has sw_if_index {index}."
- )
- return index
- except KeyError:
- pass
- raise ValueError(f"Interface with name {interface_name} not found.")
-
- @staticmethod
- def get_interface_name_from_json(interface_dump_json, sw_if_index):
- """Get interface name from given JSON output by sw_if_index.
-
- :param interface_dump_json: JSON output from dump_interface_list VAT
- command.
- :param sw_if_index: SW interface index.
- :type interface_dump_json: str
- :type sw_if_index: int
- :returns: Interface name.
- :rtype: str
- :raises ValueError: If interface not found in interface_dump_json.
- """
- logger.trace(interface_dump_json)
- interface_list = JsonParser().parse_data(interface_dump_json)
- for interface in interface_list:
- try:
- if interface[u"sw_if_index"] == sw_if_index:
- interface_name = interface[u"interface_name"]
- logger.debug(
- f"Interface with sw_if_index {sw_if_index} "
- f"has name {interface_name}."
- )
- return interface_name
- except KeyError:
- pass
- raise ValueError(f"Interface with sw_if_index {sw_if_index} not found.")
-
- @staticmethod
- def get_interface_mac_from_json(interface_dump_json, sw_if_index):
- """Get interface MAC address from given JSON output by sw_if_index.
-
- :param interface_dump_json: JSON output from dump_interface_list VAT
- command.
- :param sw_if_index: SW interface index.
- :type interface_dump_json: str
- :type sw_if_index: int
- :returns: Interface MAC address.
- :rtype: str
- :raises ValueError: If interface not found in interface_dump_json.
- """
- logger.trace(interface_dump_json)
- interface_list = JsonParser().parse_data(interface_dump_json)
- for interface in interface_list:
- try:
- if interface[u"sw_if_index"] == sw_if_index:
- mac_from_json = interface[u"l2_address"][:6] \
- if u"l2_address" in list(interface.keys()) else u""
- mac_address = u":".join(
- f"{item:02x}" for item in mac_from_json
- )
- logger.debug(
- f"Interface with sw_if_index {sw_if_index} "
- f"has MAC address {mac_address}."
- )
- return mac_address
- except KeyError:
- pass
- raise ValueError(f"Interface with sw_if_index {sw_if_index} not found.")
-
- @staticmethod
- def verify_vat_retval(vat_out, exp_retval=0, err_msg=u"VAT cmd failed"):
- """Verify return value of VAT command.
-
- VAT command JSON output should be object (dict in python) or array. We
- are looking for something like this: { "retval": 0 }. Verification is
- skipped if VAT output does not contain return value element or root
- element is an array.
-
- :param vat_out: VAT command output in python representation of JSON.
- :param exp_retval: Expected return value (default 0).
- :err_msg: Message to be displayed in case of error (optional).
- :type vat_out: dict or list
- :type exp_retval: int
- :type err_msg: str
- :raises RuntimeError: If VAT command return value is incorrect.
- """
- if isinstance(vat_out, dict):
- retval = vat_out.get(u"retval")
- if retval is not None:
- if retval != exp_retval:
- raise RuntimeError(err_msg)
diff --git a/resources/libraries/python/VppApiCrc.py b/resources/libraries/python/VppApiCrc.py
index 0cb8c2b7e7..a8947a18cb 100644
--- a/resources/libraries/python/VppApiCrc.py
+++ b/resources/libraries/python/VppApiCrc.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -75,8 +75,9 @@ class VppApiCrcChecker:
Starts the same as _expected, but each time an encountered api,crc pair
fits the expectation, the pair is removed from all collections
- within this mapping. Ideally, the active mappings will become empty.
- If not, it is an error, VPP removed or renamed a message CSIT needs."""
+ within this mapping. It is fine if an API is missing
+ from some collections, as long as it is not missing from all collections
+ that remained in _expected."""
self._found = dict()
"""Mapping from API name to CRC string.
@@ -325,12 +326,15 @@ class VppApiCrcChecker:
if not report_missing:
return
missing = {name: mapp for name, mapp in self._missing.items() if mapp}
- if missing:
- missing_indented = json.dumps(
- missing, indent=1, sort_keys=True, separators=[u",", u":"])
- self.log_and_raise(
- f"API CRCs missing from .api.json:\n{missing_indented}"
- )
+ if set(missing.keys()) < set(self._expected.keys()):
+ # There is a collection where nothing is missing.
+ return
+ missing_indented = json.dumps(
+ missing, indent=1, sort_keys=True, separators=[u",", u":"]
+ )
+ self.log_and_raise(
+ f"API CRCs missing from .api.json:\n{missing_indented}"
+ )
def check_api_name(self, api_name):
"""Fail if the api_name has no, or different from known CRC associated.
@@ -375,8 +379,25 @@ class VppApiCrcChecker:
self.log_and_raise(
f"No active collection has API {api_name!r} with CRC {crc!r}"
)
- options = self._options[api_name]
+ options = self._options.get(api_name, None)
+ if not options:
+ # None means CSIT is attempting a new API on an old VPP build.
+ # If that is an issue, the API has been reported as missing already.
+ return
options.pop(u"vat_help", None)
if options:
self._reported[api_name] = crc
logger.console(f"{api_name} used but has options {options}")
+
+ def print_warnings(self):
+ """Call check_api_name for API names in surviving collections.
+
+ Useful for VPP CRC checking job.
+ The API name is only checked when it appears
+ in all surviving collections.
+ """
+ api_name_to_crc_maps = self._expected.values()
+ api_name_sets = (set(n2c.keys()) for n2c in api_name_to_crc_maps)
+ api_names = set.intersection(*api_name_sets)
+ for api_name in sorted(api_names):
+ self.check_api_name(api_name)
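Both relaxed checks above are plain set algebra over the per-collection API maps; a minimal sketch with made-up collection data (the real maps live in self._expected and self._missing):

    # Hypothetical surviving collections, mapping API name to CRC.
    expected = {
        "coll_a": {"show_version": "0xdeadbeef", "sw_interface_dump": "0x12345678"},
        "coll_b": {"show_version": "0xdeadbeef"},
    }
    missing = {"coll_a": {}, "coll_b": {"sw_interface_dump": "0x12345678"}}
    missing = {name: mapp for name, mapp in missing.items() if mapp}
    # An error is raised only when every collection has something missing.
    raise_error = not (set(missing.keys()) < set(expected.keys()))
    assert raise_error is False      # coll_a is complete, so the check passes
    # print_warnings only exercises names present in all surviving collections.
    common = set.intersection(*(set(n2c.keys()) for n2c in expected.values()))
    assert common == {"show_version"}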
diff --git a/resources/libraries/python/VppConfigGenerator.py b/resources/libraries/python/VppConfigGenerator.py
index e1830147d8..4191c0eed2 100644
--- a/resources/libraries/python/VppConfigGenerator.py
+++ b/resources/libraries/python/VppConfigGenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -21,7 +21,7 @@ from resources.libraries.python.topology import NodeType
from resources.libraries.python.topology import Topology
from resources.libraries.python.VPPUtil import VPPUtil
-__all__ = [u"VppConfigGenerator"]
+__all__ = ["VppConfigGenerator", "VppInitConfig"]
def pci_dev_check(pci_dev):
@@ -49,19 +49,17 @@ class VppConfigGenerator:
def __init__(self):
"""Initialize library."""
# VPP Node to apply configuration on
- self._node = u""
+ self._node = ""
# Topology node key
- self._node_key = u""
+ self._node_key = ""
# VPP Configuration
self._nodeconfig = dict()
# Serialized VPP Configuration
- self._vpp_config = u""
+ self._vpp_config = ""
# VPP Service name
- self._vpp_service_name = u"vpp"
- # VPP Logfile location
- self._vpp_logfile = u"/tmp/vpe.log"
+ self._vpp_service_name = "vpp"
# VPP Startup config location
- self._vpp_startup_conf = u"/etc/vpp/startup.conf"
+ self._vpp_startup_conf = "/etc/vpp/startup.conf"
def set_node(self, node, node_key=None):
"""Set DUT node.
@@ -72,9 +70,9 @@ class VppConfigGenerator:
:type node_key: str
:raises RuntimeError: If Node type is not DUT.
"""
- if node[u"type"] != NodeType.DUT:
+ if node["type"] != NodeType.DUT:
raise RuntimeError(
- u"Startup config can only be applied to DUTnode."
+ "Startup config can only be applied to DUTnode."
)
self._node = node
self._node_key = node_key
@@ -104,8 +102,8 @@ class VppConfigGenerator:
if path[0] not in config:
config[path[0]] = dict()
elif isinstance(config[path[0]], str):
- config[path[0]] = dict() if config[path[0]] == u"" \
- else {config[path[0]]: u""}
+ config[path[0]] = dict() if config[path[0]] == "" \
+ else {config[path[0]]: ""}
self.add_config_item(config[path[0]], value, path[1:])
def dump_config(self, obj, level=-1):
@@ -117,7 +115,7 @@ class VppConfigGenerator:
:type level: int
:returns: nothing
"""
- indent = u" "
+ indent = " "
if level >= 0:
self._vpp_config += f"{level * indent}{{\n"
if isinstance(obj, dict):
@@ -133,58 +131,56 @@ class VppConfigGenerator:
if level >= 0:
self._vpp_config += f"{level * indent}}}\n"
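add_config_item and dump_config together render a nested dict into startup.conf stanzas; a standalone approximation of that rendering (empty values are emitted bare here for readability):

    def dump(obj, level=-1, indent="  "):
        """Render a nested config dict roughly the way dump_config does."""
        text = f"{level * indent}{{\n" if level >= 0 else ""
        for key, val in obj.items():
            if isinstance(val, dict):
                text += f"{(level + 1) * indent}{key}\n" + dump(val, level + 1, indent)
            elif val == "":
                text += f"{(level + 1) * indent}{key}\n"
            else:
                text += f"{(level + 1) * indent}{key} {val}\n"
        return text + (f"{level * indent}}}\n" if level >= 0 else "")

    print(dump({"unix": {"log": "/var/log/vpp/vpp.log", "cli-no-pager": ""}}))
    # unix
    # {
    #   log /var/log/vpp/vpp.log
    #   cli-no-pager
    # }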
- def add_unix_log(self, value=None):
+ def add_unix_log(self, value="/var/log/vpp/vpp.log"):
"""Add UNIX log configuration.
:param value: Log file.
:type value: str
"""
- path = [u"unix", u"log"]
- if value is None:
- value = self._vpp_logfile
+ path = ["unix", "log"]
self.add_config_item(self._nodeconfig, value, path)
- def add_unix_cli_listen(self, value=u"/run/vpp/cli.sock"):
+ def add_unix_cli_listen(self, value="/run/vpp/cli.sock"):
"""Add UNIX cli-listen configuration.
:param value: CLI listen address and port or path to CLI socket.
:type value: str
"""
- path = [u"unix", u"cli-listen"]
+ path = ["unix", "cli-listen"]
self.add_config_item(self._nodeconfig, value, path)
def add_unix_cli_no_pager(self):
"""Add UNIX cli-no-pager configuration."""
- path = [u"unix", u"cli-no-pager"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["unix", "cli-no-pager"]
+ self.add_config_item(self._nodeconfig, "", path)
- def add_unix_gid(self, value=u"vpp"):
+ def add_unix_gid(self, value="vpp"):
"""Add UNIX gid configuration.
:param value: Gid.
:type value: str
"""
- path = [u"unix", u"gid"]
+ path = ["unix", "gid"]
self.add_config_item(self._nodeconfig, value, path)
def add_unix_nodaemon(self):
"""Add UNIX nodaemon configuration."""
- path = [u"unix", u"nodaemon"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["unix", "nodaemon"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_unix_coredump(self):
"""Add UNIX full-coredump configuration."""
- path = [u"unix", u"full-coredump"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["unix", "full-coredump"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_unix_exec(self, value):
"""Add UNIX exec configuration."""
- path = [u"unix", u"exec"]
+ path = ["unix", "exec"]
self.add_config_item(self._nodeconfig, value, path)
def add_socksvr(self, socket=Constants.SOCKSVR_PATH):
"""Add socksvr configuration."""
- path = [u"socksvr", u"socket-name"]
+ path = ["socksvr", "socket-name"]
self.add_config_item(self._nodeconfig, socket, path)
def add_graph_node_variant(self, variant=Constants.GRAPH_NODE_VARIANT):
@@ -193,39 +189,48 @@ class VppConfigGenerator:
:param value: Graph node variant default value.
:type value: str
"""
- if variant == u"":
+ if variant == "":
return
- variant_list = [u"hsw", u"skx", u"icl"]
+ variant_list = ["hsw", "skx", "icl"]
if variant not in variant_list:
raise ValueError("Invalid graph node variant value")
- path = [u"node", u"default", u"variant"]
+ path = ["node", "default", "variant"]
self.add_config_item(self._nodeconfig, variant, path)
- def add_api_segment_gid(self, value=u"vpp"):
- """Add API-SEGMENT gid configuration.
+ def add_api_segment_gid(self, value="vpp"):
+ """Add api-segment gid configuration.
:param value: Gid.
:type value: str
"""
- path = [u"api-segment", u"gid"]
+ path = ["api-segment", "gid"]
self.add_config_item(self._nodeconfig, value, path)
def add_api_segment_global_size(self, value):
- """Add API-SEGMENT global-size configuration.
+ """Add api-segment global-size configuration.
:param value: Global size.
:type value: str
"""
- path = [u"api-segment", u"global-size"]
+ path = ["api-segment", "global-size"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_api_segment_prefix(self, value="vpp"):
+ """Add api-segment prefix configuration.
+
+ :param value: Prefix.
+ :type value: str
+ """
+ path = ["api-segment", "prefix"]
self.add_config_item(self._nodeconfig, value, path)
def add_api_segment_api_size(self, value):
- """Add API-SEGMENT api-size configuration.
+ """Add api-segment api-size configuration.
:param value: API size.
:type value: str
"""
- path = [u"api-segment", u"api-size"]
+ path = ["api-segment", "api-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_buffers_per_numa(self, value):
@@ -234,7 +239,7 @@ class VppConfigGenerator:
:param value: Number of buffers allocated.
:type value: int
"""
- path = [u"buffers", u"buffers-per-numa"]
+ path = ["buffers", "buffers-per-numa"]
self.add_config_item(self._nodeconfig, value, path)
def add_buffers_default_data_size(self, value):
@@ -243,7 +248,7 @@ class VppConfigGenerator:
:param value: Buffers data-size allocated.
:type value: int
"""
- path = [u"buffers", u"default data-size"]
+ path = ["buffers", "default data-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_dev(self, *devices):
@@ -254,21 +259,26 @@ class VppConfigGenerator:
"""
for device in devices:
if pci_dev_check(device):
- path = [u"dpdk", f"dev {device}"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["dpdk", f"dev {device}"]
+ self.add_config_item(self._nodeconfig, "", path)
- def add_dpdk_cryptodev(self, count):
+ def add_dpdk_cryptodev(self, count, num_rx_queues=1):
"""Add DPDK Crypto PCI device configuration.
:param count: Number of HW crypto devices to add.
+ :param num_rx_queues: Number of RX queues per QAT interface.
:type count: int
- """
- cryptodev = Topology.get_cryptodev(self._node)
- for i in range(count):
- cryptodev_config = re.sub(r"\d.\d$", f"1.{str(i)}", cryptodev)
- path = [u"dpdk", f"dev {cryptodev_config}"]
- self.add_config_item(self._nodeconfig, u"", path)
- self.add_dpdk_uio_driver(u"vfio-pci")
+ :type num_rx_queues: int
+ """
+ cryptodevs = Topology.get_cryptodev(self._node)
+ for device in cryptodevs.values():
+ for i in range(int(count/len(cryptodevs))):
+ numvfs = device["numvfs"]
+ computed = f"{(i+1)//numvfs}.{(i+1)%numvfs}"
+ addr = re.sub(r"\d.\d$", computed, device["pci_address"])
+ path = ["dpdk", f"dev {addr}", "num-rx-queues"]
+ self.add_config_item(self._nodeconfig, num_rx_queues, path)
+ self.add_dpdk_uio_driver("vfio-pci")
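The VF address computation above walks PCI function numbers relative to the physical function; a sketch with one hypothetical QAT entry shaped like the values Topology.get_cryptodev() is expected to return:

    import re

    device = {"pci_address": "0000:3d:00.0", "numvfs": 8}   # invented example entry
    addrs = []
    for i in range(4):
        computed = f"{(i + 1) // device['numvfs']}.{(i + 1) % device['numvfs']}"
        addrs.append(re.sub(r"\d.\d$", computed, device["pci_address"]))
    # The first VFs of the PF end up on the following function numbers.
    assert addrs == ["0000:3d:00.1", "0000:3d:00.2", "0000:3d:00.3", "0000:3d:00.4"]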
def add_dpdk_sw_cryptodev(self, sw_pmd_type, socket_id, count):
"""Add DPDK SW Crypto device configuration.
@@ -283,8 +293,8 @@ class VppConfigGenerator:
for _ in range(count):
cryptodev_config = f"vdev cryptodev_{sw_pmd_type}_pmd," \
f"socket_id={str(socket_id)}"
- path = [u"dpdk", cryptodev_config]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["dpdk", cryptodev_config]
+ self.add_config_item(self._nodeconfig, "", path)
def add_dpdk_dev_default_rxq(self, value):
"""Add DPDK dev default rxq configuration.
@@ -292,7 +302,7 @@ class VppConfigGenerator:
:param value: Default number of rxqs.
:type value: str
"""
- path = [u"dpdk", u"dev default", u"num-rx-queues"]
+ path = ["dpdk", "dev default", "num-rx-queues"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_dev_default_txq(self, value):
@@ -301,7 +311,7 @@ class VppConfigGenerator:
:param value: Default number of txqs.
:type value: str
"""
- path = [u"dpdk", u"dev default", u"num-tx-queues"]
+ path = ["dpdk", "dev default", "num-tx-queues"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_dev_default_rxd(self, value):
@@ -310,7 +320,7 @@ class VppConfigGenerator:
:param value: Default number of rxds.
:type value: str
"""
- path = [u"dpdk", u"dev default", u"num-rx-desc"]
+ path = ["dpdk", "dev default", "num-rx-desc"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_dev_default_txd(self, value):
@@ -319,22 +329,27 @@ class VppConfigGenerator:
:param value: Default number of txds.
:type value: str
"""
- path = [u"dpdk", u"dev default", u"num-tx-desc"]
+ path = ["dpdk", "dev default", "num-tx-desc"]
self.add_config_item(self._nodeconfig, value, path)
+ def add_dpdk_dev_default_tso(self):
+ """Add DPDK dev default tso configuration."""
+ path = ["dpdk", "dev default", "tso"]
+ self.add_config_item(self._nodeconfig, "on", path)
+
def add_dpdk_log_level(self, value):
"""Add DPDK log-level configuration.
:param value: Log level.
:type value: str
"""
- path = [u"dpdk", u"log-level"]
+ path = ["dpdk", "log-level"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_no_pci(self):
"""Add DPDK no-pci."""
- path = [u"dpdk", u"no-pci"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["dpdk", "no-pci"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_dpdk_uio_driver(self, value=None):
"""Add DPDK uio-driver configuration.
@@ -346,7 +361,7 @@ class VppConfigGenerator:
"""
if value is None:
value = Topology.get_uio_driver(self._node)
- path = [u"dpdk", u"uio-driver"]
+ path = ["dpdk", "uio-driver"]
self.add_config_item(self._nodeconfig, value, path)
def add_dpdk_max_simd_bitwidth(self, variant=Constants.GRAPH_NODE_VARIANT):
@@ -355,23 +370,28 @@ class VppConfigGenerator:
:param value: Graph node variant default value.
:type value: str
"""
- if variant == u"icl":
+ if variant == "icl":
value = 512
- elif variant in [u"skx", u"hsw"]:
+ elif variant in ["skx", "hsw"]:
value = 256
else:
return
- path = [u"dpdk", u"max-simd-bitwidth"]
+ path = ["dpdk", "max-simd-bitwidth"]
self.add_config_item(self._nodeconfig, value, path)
+ def add_dpdk_enable_tcp_udp_checksum(self):
+ """Add DPDK enable-tcp-udp-checksum configuration."""
+ path = ["dpdk", "enable-tcp-udp-checksum"]
+ self.add_config_item(self._nodeconfig, "", path)
+
def add_cpu_main_core(self, value):
"""Add CPU main core configuration.
:param value: Main core option.
:type value: str
"""
- path = [u"cpu", u"main-core"]
+ path = ["cpu", "main-core"]
self.add_config_item(self._nodeconfig, value, path)
def add_cpu_corelist_workers(self, value):
@@ -380,7 +400,7 @@ class VppConfigGenerator:
:param value: Corelist-workers option.
:type value: str
"""
- path = [u"cpu", u"corelist-workers"]
+ path = ["cpu", "corelist-workers"]
self.add_config_item(self._nodeconfig, value, path)
def add_main_heap_size(self, value):
@@ -389,7 +409,7 @@ class VppConfigGenerator:
:param value: Amount of heap.
:type value: str
"""
- path = [u"memory", u"main-heap-size"]
+ path = ["memory", "main-heap-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_main_heap_page_size(self, value):
@@ -398,7 +418,7 @@ class VppConfigGenerator:
:param value: Heap page size.
:type value: str
"""
- path = [u"memory", u"main-heap-page-size"]
+ path = ["memory", "main-heap-page-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_default_hugepage_size(self, value=Constants.DEFAULT_HUGEPAGE_SIZE):
@@ -407,13 +427,13 @@ class VppConfigGenerator:
:param value: Hugepage size.
:type value: str
"""
- path = [u"memory", u"default-hugepage-size"]
+ path = ["memory", "default-hugepage-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_api_trace(self):
"""Add API trace configuration."""
- path = [u"api-trace", u"on"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["api-trace", "on"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_ip6_hash_buckets(self, value):
"""Add IP6 hash buckets configuration.
@@ -421,7 +441,7 @@ class VppConfigGenerator:
:param value: Number of IP6 hash buckets.
:type value: str
"""
- path = [u"ip6", u"hash-buckets"]
+ path = ["ip6", "hash-buckets"]
self.add_config_item(self._nodeconfig, value, path)
def add_ip6_heap_size(self, value):
@@ -430,13 +450,53 @@ class VppConfigGenerator:
:param value: IP6 Heapsize amount.
:type value: str
"""
- path = [u"ip6", u"heap-size"]
+ path = ["ip6", "heap-size"]
self.add_config_item(self._nodeconfig, value, path)
- def add_spd_flow_cache_ipv4_outbound(self):
- """Add SPD flow cache for IP4 outbound traffic"""
- path = [u"ipsec", u"ipv4-outbound-spd-flow-cache"]
- self.add_config_item(self._nodeconfig, "on", path)
+ def add_ipsec_spd_flow_cache_ipv4_inbound(self, value):
+ """Add IPsec spd flow cache for IP4 inbound.
+
+ :param value: "on" to enable spd flow cache.
+ :type value: str
+ """
+ path = ["ipsec", "ipv4-inbound-spd-flow-cache"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ipsec_spd_flow_cache_ipv4_outbound(self, value):
+ """Add IPsec spd flow cache for IP4 outbound.
+
+ :param value: "on" to enable spd flow cache.
+ :type value: str
+ """
+ path = ["ipsec", "ipv4-outbound-spd-flow-cache"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ipsec_spd_fast_path_ipv4_inbound(self, value):
+ """Add IPsec spd fast path for IP4 inbound.
+
+ :param value: "on" to enable spd fast path.
+ :type value: str
+ """
+ path = ["ipsec", "ipv4-inbound-spd-fast-path"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ipsec_spd_fast_path_ipv4_outbound(self, value):
+ """Add IPsec spd fast path for IP4 outbound.
+
+ :param value: "on" to enable spd fast path.
+ :type value: str
+ """
+ path = ["ipsec", "ipv4-outbound-spd-fast-path"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ipsec_spd_fast_path_num_buckets(self, value):
+ """Add num buckets for IPsec spd fast path.
+
+ :param value: Number of buckets.
+ :type value: int
+ """
+ path = ["ipsec", "spd-fast-path-num-buckets"]
+ self.add_config_item(self._nodeconfig, value, path)
def add_statseg_size(self, value):
"""Add Stats Heap Size configuration.
@@ -444,7 +504,7 @@ class VppConfigGenerator:
:param value: Stats heapsize amount.
:type value: str
"""
- path = [u"statseg", u"size"]
+ path = ["statseg", "size"]
self.add_config_item(self._nodeconfig, value, path)
def add_statseg_page_size(self, value):
@@ -453,7 +513,7 @@ class VppConfigGenerator:
:param value: Stats segment page size.
:type value: str
"""
- path = [u"statseg", u"page-size"]
+ path = ["statseg", "page-size"]
self.add_config_item(self._nodeconfig, value, path)
def add_statseg_per_node_counters(self, value):
@@ -462,7 +522,7 @@ class VppConfigGenerator:
:param value: "on" to switch the counters on.
:type value: str
"""
- path = [u"statseg", u"per-node-counters"]
+ path = ["statseg", "per-node-counters"]
self.add_config_item(self._nodeconfig, value, path)
def add_plugin(self, state, *plugins):
@@ -474,27 +534,27 @@ class VppConfigGenerator:
:type plugins: list
"""
for plugin in plugins:
- path = [u"plugins", f"plugin {plugin}", state]
- self.add_config_item(self._nodeconfig, u" ", path)
+ path = ["plugins", f"plugin {plugin}", state]
+ self.add_config_item(self._nodeconfig, " ", path)
def add_dpdk_no_multi_seg(self):
"""Add DPDK no-multi-seg configuration."""
- path = [u"dpdk", u"no-multi-seg"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["dpdk", "no-multi-seg"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_dpdk_no_tx_checksum_offload(self):
"""Add DPDK no-tx-checksum-offload configuration."""
- path = [u"dpdk", u"no-tx-checksum-offload"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["dpdk", "no-tx-checksum-offload"]
+ self.add_config_item(self._nodeconfig, "", path)
- def add_nat(self, value=u"deterministic"):
+ def add_nat(self, value="deterministic"):
"""Add NAT mode configuration.
:param value: NAT mode.
:type value: str
"""
- path = [u"nat", value]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["nat", value]
+ self.add_config_item(self._nodeconfig, "", path)
def add_nat_max_translations_per_thread(self, value):
"""Add NAT max. translations per thread number configuration.
@@ -502,21 +562,21 @@ class VppConfigGenerator:
:param value: Maximum number of translations per thread.
:type value: str
"""
- path = [u"nat", u"max translations per thread"]
+ path = ["nat", "max translations per thread"]
self.add_config_item(self._nodeconfig, value, path)
def add_nsim_poll_main_thread(self):
"""Add NSIM poll-main-thread configuration."""
- path = [u"nsim", u"poll-main-thread"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["nsim", "poll-main-thread"]
+ self.add_config_item(self._nodeconfig, "", path)
- def add_tcp_congestion_control_algorithm(self, value=u"cubic"):
+ def add_tcp_congestion_control_algorithm(self, value="cubic"):
"""Add TCP congestion control algorithm.
:param value: The congestion control algorithm to use. Example: cubic
:type value: str
"""
- path = [u"tcp", u"cc-algo"]
+ path = ["tcp", "cc-algo"]
self.add_config_item(self._nodeconfig, value, path)
def add_tcp_preallocated_connections(self, value):
@@ -525,7 +585,7 @@ class VppConfigGenerator:
:param value: The number of pre-allocated connections.
:type value: int
"""
- path = [u"tcp", u"preallocated-connections"]
+ path = ["tcp", "preallocated-connections"]
self.add_config_item(self._nodeconfig, value, path)
def add_tcp_preallocated_half_open_connections(self, value):
@@ -534,18 +594,28 @@ class VppConfigGenerator:
:param value: The number of pre-allocated half open connections.
:type value: int
"""
- path = [u"tcp", u"preallocated-half-open-connections"]
+ path = ["tcp", "preallocated-half-open-connections"]
self.add_config_item(self._nodeconfig, value, path)
+ def add_tcp_tso(self):
+ """Add TCP tso configuration."""
+ path = ["tcp", "tso"]
+ self.add_config_item(self._nodeconfig, "", path)
+
def add_session_enable(self):
"""Add session enable."""
- path = [u"session", u"enable"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["session", "enable"]
+ self.add_config_item(self._nodeconfig, "", path)
+
+ def add_session_app_socket_api(self):
+ """Use session app socket api."""
+ path = ["session", "use-app-socket-api"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_session_event_queues_memfd_segment(self):
"""Add session event queue memfd segment."""
- path = [u"session", u"evt_qs_memfd_seg"]
- self.add_config_item(self._nodeconfig, u"", path)
+ path = ["session", "evt_qs_memfd_seg"]
+ self.add_config_item(self._nodeconfig, "", path)
def add_session_event_queue_length(self, value):
"""Add session event queue length.
@@ -553,7 +623,7 @@ class VppConfigGenerator:
:param value: Session event queue length.
:type value: int
"""
- path = [u"session", u"event-queue-length"]
+ path = ["session", "event-queue-length"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_event_queues_segment_size(self, value):
@@ -562,7 +632,7 @@ class VppConfigGenerator:
:param value: Session event queue segment size.
:type value: str
"""
- path = [u"session", u"evt_qs_seg_size"]
+ path = ["session", "evt_qs_seg_size"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_preallocated_sessions(self, value):
@@ -571,7 +641,7 @@ class VppConfigGenerator:
:param value: Number of pre-allocated sessions.
:type value: int
"""
- path = [u"session", u"preallocated-sessions"]
+ path = ["session", "preallocated-sessions"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_v4_session_table_buckets(self, value):
@@ -580,7 +650,7 @@ class VppConfigGenerator:
:param value: Number of v4 session table buckets.
:type value: int
"""
- path = [u"session", u"v4-session-table-buckets"]
+ path = ["session", "v4-session-table-buckets"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_v4_session_table_memory(self, value):
@@ -589,7 +659,7 @@ class VppConfigGenerator:
:param value: Size of v4 session table memory.
:type value: str
"""
- path = [u"session", u"v4-session-table-memory"]
+ path = ["session", "v4-session-table-memory"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_v4_halfopen_table_buckets(self, value):
@@ -598,7 +668,7 @@ class VppConfigGenerator:
:param value: Number of v4 halfopen table buckets.
:type value: int
"""
- path = [u"session", u"v4-halfopen-table-buckets"]
+ path = ["session", "v4-halfopen-table-buckets"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_v4_halfopen_table_memory(self, value):
@@ -607,7 +677,7 @@ class VppConfigGenerator:
:param value: Size of v4 halfopen table memory.
:type value: str
"""
- path = [u"session", u"v4-halfopen-table-memory"]
+ path = ["session", "v4-halfopen-table-memory"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_local_endpoints_table_buckets(self, value):
@@ -616,7 +686,7 @@ class VppConfigGenerator:
:param value: Number of local endpoints table buckets.
:type value: int
"""
- path = [u"session", u"local-endpoints-table-buckets"]
+ path = ["session", "local-endpoints-table-buckets"]
self.add_config_item(self._nodeconfig, value, path)
def add_session_local_endpoints_table_memory(self, value):
@@ -625,7 +695,31 @@ class VppConfigGenerator:
:param value: Size of local endpoints table memory.
:type value: str
"""
- path = [u"session", u"local-endpoints-table-memory"]
+ path = ["session", "local-endpoints-table-memory"]
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_session_use_dma(self):
+ """Add session use-dma configuration."""
+ path = ["session", "use-dma"]
+ self.add_config_item(self._nodeconfig, "", path)
+
+ def add_dma_dev(self, devices):
+ """Add DMA devices configuration.
+
+ :param devices: DMA devices or work queues.
+ :type devices: list
+ """
+ for device in devices:
+ path = ["dsa", f"dev {device}"]
+ self.add_config_item(self._nodeconfig, "", path)
+
+ def add_logging_default_syslog_log_level(self, value="debug"):
+ """Add default logging level for syslog.
+
+ :param value: Log level.
+ :type value: str
+ """
+ path = ["logging", "default-syslog-log-level"]
self.add_config_item(self._nodeconfig, value, path)
def write_config(self, filename=None):
@@ -644,7 +738,7 @@ class VppConfigGenerator:
cmd = f"echo \"{self._vpp_config}\" | sudo tee {filename}"
exec_cmd_no_error(
- self._node, cmd, message=u"Writing config file failed!"
+ self._node, cmd, message="Writing config file failed!"
)
def apply_config(self, filename=None, verify_vpp=True):
@@ -663,3 +757,40 @@ class VppConfigGenerator:
VPPUtil.restart_vpp_service(self._node, self._node_key)
if verify_vpp:
VPPUtil.verify_vpp(self._node)
+
+
+class VppInitConfig:
+ """VPP Initial Configuration."""
+ @staticmethod
+ def init_vpp_startup_configuration_on_all_duts(nodes):
+ """Apply initial VPP startup configuration on all DUTs.
+
+ :param nodes: Nodes in the topology.
+ :type nodes: dict
+ """
+ huge_size = Constants.DEFAULT_HUGEPAGE_SIZE
+ for node in nodes.values():
+ if node["type"] == NodeType.DUT:
+ vpp_config = VppConfigGenerator()
+ vpp_config.set_node(node)
+ vpp_config.add_unix_log()
+ vpp_config.add_unix_cli_listen()
+ vpp_config.add_unix_cli_no_pager()
+ vpp_config.add_unix_gid()
+ vpp_config.add_unix_coredump()
+ vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
+ vpp_config.add_main_heap_size("2G")
+ vpp_config.add_main_heap_page_size(huge_size)
+ vpp_config.add_default_hugepage_size(huge_size)
+ vpp_config.add_statseg_size("2G")
+ vpp_config.add_statseg_page_size(huge_size)
+ vpp_config.add_statseg_per_node_counters("on")
+ vpp_config.add_plugin("disable", "default")
+ vpp_config.add_plugin("enable", "dpdk_plugin.so")
+ vpp_config.add_dpdk_dev(
+ *[node["interfaces"][interface].get("pci_address") \
+ for interface in node["interfaces"]]
+ )
+ vpp_config.add_ip6_hash_buckets(2000000)
+ vpp_config.add_ip6_heap_size("4G")
+ vpp_config.apply_config()
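A typical invocation of the new class takes the topology nodes dict; the fragment below only builds a hypothetical dict shape (values invented), while the commented call needs live DUTs reachable over SSH:

    nodes = {
        "DUT1": {
            "type": "DUT",                   # assumed equal to NodeType.DUT
            "host": "10.0.0.1",
            "interfaces": {"port1": {"pci_address": "0000:3b:00.0"}},
        },
        "TG1": {"type": "TG", "host": "10.0.0.2", "interfaces": {}},
    }
    # VppInitConfig.init_vpp_startup_configuration_on_all_duts(nodes)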
diff --git a/resources/libraries/python/WireGuardUtil.py b/resources/libraries/python/WireGuardUtil.py
index d8d2396164..6e6237e7e7 100644
--- a/resources/libraries/python/WireGuardUtil.py
+++ b/resources/libraries/python/WireGuardUtil.py
@@ -68,7 +68,7 @@ class WireGuardUtil:
:param node: VPP node to add config on.
:param listen_port: WireGuard interface listen port.
- :param wg_src: WireGuard srouce IPv4.
+ :param wg_src: WireGuard source IPv4.
:param private_key: WireGuard interface private key
:type node: dict
:type listen_port: int
@@ -118,12 +118,9 @@ class WireGuardUtil:
:type keepalive_time: int
"""
endpoint_ip = ip_address(endpoint_ip)
- wg_name = InterfaceUtil.vpp_get_interface_name(
- node, sw_if_index=interface
- )
cmd = u"wireguard_peer_add"
- err_msg = f"Failed to add wireguard interface" \
- f"{wg_name} peer on host {node[u'host']}"
+ err_msg = f"Failed to add peer of wireguard interface " \
+ f"{interface} on host {node[u'host']}"
args = dict(
peer=dict(
public_key=peer_pubkey,
@@ -139,6 +136,23 @@ class WireGuardUtil:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
+ def vpp_wireguard_set_async_mode(node, async_enable=1):
+ """Set wireguard async mode on or off.
+
+ :param node: VPP node to set wireguard async mode.
+ :param async_enable: Async mode on or off.
+ :type node: dict
+ :type async_enable: int
+ """
+ cmd = u"wg_set_async_mode"
+ err_msg = f"Failed to set wireguard async mode on host {node[u'host']}"
+ args = dict(
+ async_enable=async_enable
+ )
+ with PapiSocketExecutor(node) as papi_exec:
+ papi_exec.add(cmd, **args).get_reply(err_msg)
+
+ @staticmethod
def _wireguard_create_tunnel_interface_on_dut(
node, if1_key, if2_mac_addr, src_ip, peer_endpoint_ip,
peer_allowed_ips, peer_n_allowed_ips, dut_wg_ip, port,
@@ -162,11 +176,11 @@ class WireGuardUtil:
:type nodes: dict
:type if1_key: str
:type if2_mac_addr: str
- :type src_ip: src
- :type peer_endpoint_ip: src
+ :type src_ip: str
+ :type peer_endpoint_ip: str
:type peer_allowed_ips: list
:type peer_n_allowed_ips: int
- :type dut_wg_ip: src
+ :type dut_wg_ip: str
:type port: int
:type keepalive_time: int
:type dut_private_key: bytes
@@ -213,10 +227,10 @@ class WireGuardUtil:
)
@staticmethod
- def vpp_wireguard_create_tunnel_interface_on_duts(
+ def vpp_wireguard_create_tunnel_interfaces_on_duts(
nodes, if1_key, if2_key, if1_ip_addr, if2_ip_addr,
if1_mac_addr, if2_mac_addr, wg_if1_ip_addr, wg_if2_ip_addr,
- n_allowed_ips, port, keepalive_time, raddr_ip1, raddr_ip2):
+ n_tunnels, port, keepalive_time, raddr_ip1, raddr_ip2):
"""Create WireGuard tunnel interfaces between two VPP nodes.
:param nodes: VPP nodes to create tunnel interfaces.
@@ -229,8 +243,7 @@ class WireGuardUtil:
:param if2_mac_addr: VPP node2 interface mac address.
:param wg_if1_ip_addr: VPP node 1 WireGuard interface IPv4 address.
:param wg_if2_ip_addr: VPP node 2 WireGuard interface IPv4 address.
- :param allowed_ips: WireGuard interface allowed ip list.
- :param n_allowed_ips: Number of allowed ips.
+ :param n_tunnels: Number of wireguard tunnels.
:param port: WireGuard interface listen port or
Peer interface destination port.
:param keepalive_time: WireGuard persistent keepalive time.
@@ -247,32 +260,39 @@ class WireGuardUtil:
:type if2_mac_addr: str
:type wg_if1_ip_addr: str
:type wg_if2_ip_addr: str
- :type allowed_ips: str
- :type n_allowed_ips: int
+ :type n_tunnels: int
:type port: int
:type keepalive_time: int
:type raddr_ip1: str
:type raddr_ip2: str
"""
- dut1_privatekey, dut1_pubkey = \
- WireGuardUtil.generate_wireguard_privatekey_and_pubkey()
- dut2_privatekey, dut2_pubkey = \
- WireGuardUtil.generate_wireguard_privatekey_and_pubkey()
- raddr_ip1 = ip_address(raddr_ip1)
- raddr_ip2 = ip_address(raddr_ip2)
- dut1_allowed_ips = \
- [IPUtil.create_prefix_object(raddr_ip2, 24),]
- dut2_allowed_ips = \
- [IPUtil.create_prefix_object(raddr_ip1, 24),]
- #Configure WireGuard interface on DUT1
- WireGuardUtil._wireguard_create_tunnel_interface_on_dut(
- nodes[u'DUT1'], if1_key, if2_mac_addr, if1_ip_addr, if2_ip_addr,
- dut1_allowed_ips, n_allowed_ips, wg_if1_ip_addr, port,
- keepalive_time, dut1_privatekey, dut2_pubkey
- )
- #Configure WireGuard interface on DUT2
- WireGuardUtil._wireguard_create_tunnel_interface_on_dut(
- nodes[u'DUT2'], if2_key, if1_mac_addr, if2_ip_addr, if1_ip_addr,
- dut2_allowed_ips, n_allowed_ips, wg_if2_ip_addr, port,
- keepalive_time, dut2_privatekey, dut1_pubkey
- )
+ for i in range(n_tunnels):
+ if1_ipaddr = str(ip_address(if1_ip_addr) + i*256)
+ if2_ipaddr = str(ip_address(if2_ip_addr) + i*256)
+ wg_if1_ipaddr = str(ip_address(wg_if1_ip_addr) + i*256)
+ wg_if2_ipaddr = str(ip_address(wg_if2_ip_addr) + i*256)
+
+ allowed_ipaddr1 = ip_address(raddr_ip1) + i*256
+ allowed_ipaddr2 = ip_address(raddr_ip2) + i*256
+ dut1_allowed_ips = \
+ [IPUtil.create_prefix_object(allowed_ipaddr2, 24),]
+ dut2_allowed_ips = \
+ [IPUtil.create_prefix_object(allowed_ipaddr1, 24),]
+
+ dut1_privatekey, dut1_pubkey = \
+ WireGuardUtil.generate_wireguard_privatekey_and_pubkey()
+ dut2_privatekey, dut2_pubkey = \
+ WireGuardUtil.generate_wireguard_privatekey_and_pubkey()
+
+ #Configure WireGuard interface on DUT1
+ WireGuardUtil._wireguard_create_tunnel_interface_on_dut(
+ nodes[u'DUT1'], if1_key, if2_mac_addr, if1_ipaddr, if2_ipaddr,
+ dut1_allowed_ips, 1, wg_if1_ipaddr, port,
+ keepalive_time, dut1_privatekey, dut2_pubkey
+ )
+ #Configure WireGuard interface on DUT2
+ WireGuardUtil._wireguard_create_tunnel_interface_on_dut(
+ nodes[u'DUT2'], if2_key, if1_mac_addr, if2_ipaddr, if1_ipaddr,
+ dut2_allowed_ips, 1, wg_if2_ipaddr, port,
+ keepalive_time, dut2_privatekey, dut1_pubkey
+ )
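The per-tunnel loop above steps each address by one /24 per tunnel index; ipaddress arithmetic makes the progression explicit (illustrative addresses):

    from ipaddress import ip_address

    if1_ip_addr, raddr_ip2 = "192.168.10.1", "10.0.0.0"
    for i in range(3):
        if1_ipaddr = str(ip_address(if1_ip_addr) + i * 256)    # tunnel source IP
        allowed_ipaddr2 = ip_address(raddr_ip2) + i * 256       # allowed-IPs base
        print(if1_ipaddr, f"{allowed_ipaddr2}/24")
    # 192.168.10.1 10.0.0.0/24
    # 192.168.11.1 10.0.1.0/24
    # 192.168.12.1 10.0.2.0/24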
diff --git a/resources/libraries/python/autogen/Regenerator.py b/resources/libraries/python/autogen/Regenerator.py
index 4474996ef1..8d593fecca 100644
--- a/resources/libraries/python/autogen/Regenerator.py
+++ b/resources/libraries/python/autogen/Regenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -150,16 +150,17 @@ def filter_and_edit_kwargs_for_astf(suite_id, kwargs):
return kwargs
-def add_default_testcases(testcase, iface, suite_id, file_out, tc_kwargs_list):
+def add_default_testcases(
+ testcase, nic_code, suite_id, file_out, tc_kwargs_list):
"""Add default testcases to file.
:param testcase: Testcase class.
- :param iface: Interface.
+ :param nic_code: NIC code.
:param suite_id: Suite ID.
:param file_out: File to write testcases to.
:param tc_kwargs_list: Key-value pairs used to construct testcases.
:type testcase: Testcase
- :type iface: str
+ :type nic_code: str
:type suite_id: str
:type file_out: file
:type tc_kwargs_list: dict
@@ -169,29 +170,20 @@ def add_default_testcases(testcase, iface, suite_id, file_out, tc_kwargs_list):
kwargs = copy.deepcopy(kwas)
# TODO: Is there a better way to disable some combinations?
emit = True
- if kwargs[u"frame_size"] == 9000:
- if u"vic1227" in iface:
- # Not supported in HW.
- emit = False
- if u"vic1385" in iface:
- # Not supported in HW.
- emit = False
- if u"-16vm2t-" in suite_id or u"-16dcr2t-" in suite_id:
- if kwargs[u"phy_cores"] > 3:
- # CSIT lab only has 28 (physical) core processors,
- # so these test would fail when attempting to assign cores.
- emit = False
- if u"-24vm1t-" in suite_id or u"-24dcr1t-" in suite_id:
- if kwargs[u"phy_cores"] > 3:
- # CSIT lab only has 28 (physical) core processors,
- # so these test would fail when attempting to assign cores.
- emit = False
+ core_scale = Constants.NIC_CODE_TO_CORESCALE[nic_code]
if u"soak" in suite_id:
# Soak test take too long, do not risk other than tc01.
if kwargs[u"phy_cores"] != 1:
emit = False
- if kwargs[u"frame_size"] not in MIN_FRAME_SIZE_VALUES:
- emit = False
+ if u"reassembly" in suite_id:
+ if kwargs[u"frame_size"] != 1518:
+ emit = False
+ else:
+ if kwargs[u"frame_size"] not in MIN_FRAME_SIZE_VALUES:
+ emit = False
+
+ kwargs.update({'phy_cores': kwas['phy_cores']*core_scale})
+
kwargs = filter_and_edit_kwargs_for_astf(suite_id, kwargs)
if emit and kwargs is not None:
file_out.write(testcase.generate(**kwargs))
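The per-NIC core scaling introduced above is a plain multiplication of the template phy_cores; a sketch with invented scale factors (the real ones live in Constants.NIC_CODE_TO_CORESCALE):

    nic_code_to_corescale = {"10ge2p1x710": 1, "hypothetical_nic_code": 2}
    kwas = {"frame_size": 64, "phy_cores": 2}
    for nic_code, core_scale in nic_code_to_corescale.items():
        kwargs = dict(kwas)
        kwargs.update({"phy_cores": kwas["phy_cores"] * core_scale})
        print(nic_code, kwargs["phy_cores"])   # 2 cores unscaled, 4 on the scaled NIC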
@@ -257,75 +249,75 @@ def write_default_files(in_filename, in_prolog, kwargs_list):
"""
for suite_type in Constants.PERF_TYPE_TO_KEYWORD:
tmp_filename = replace_defensively(
- in_filename, u"ndrpdr", suite_type, 1,
- u"File name should contain suite type once.", in_filename
+ in_filename, "ndrpdr", suite_type, 1,
+ "File name should contain suite type once.", in_filename
)
tmp_prolog = replace_defensively(
- in_prolog, u"ndrpdr".upper(), suite_type.upper(), 1,
- u"Suite type should appear once in uppercase (as tag).",
+ in_prolog, "ndrpdr".upper(), suite_type.upper(), 1,
+ "Suite type should appear once in uppercase (as tag).",
in_filename
)
tmp_prolog = replace_defensively(
tmp_prolog,
- u"Find NDR and PDR intervals using optimized search",
+ "Find NDR and PDR intervals using optimized search",
Constants.PERF_TYPE_TO_KEYWORD[suite_type], 1,
- u"Main search keyword should appear once in suite.",
+ "Main search keyword should appear once in suite.",
in_filename
)
tmp_prolog = replace_defensively(
tmp_prolog,
- Constants.PERF_TYPE_TO_SUITE_DOC_VER[u"ndrpdr"],
+ Constants.PERF_TYPE_TO_SUITE_DOC_VER["ndrpdr"],
Constants.PERF_TYPE_TO_SUITE_DOC_VER[suite_type],
- 1, u"Exact suite type doc not found.", in_filename
+ 1, "Exact suite type doc not found.", in_filename
)
tmp_prolog = replace_defensively(
tmp_prolog,
- Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[u"ndrpdr"],
+ Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER["ndrpdr"],
Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[suite_type],
- 1, u"Exact template type doc not found.", in_filename
+ 1, "Exact template type doc not found.", in_filename
)
_, suite_id, _ = get_iface_and_suite_ids(tmp_filename)
testcase = Testcase.default(suite_id)
- for nic_name in Constants.NIC_NAME_TO_CODE:
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
tmp2_filename = replace_defensively(
- tmp_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[nic_name], 1,
- u"File name should contain NIC code once.", in_filename
+ tmp_filename, "10ge2p1x710", nic_code, 1,
+ "File name should contain NIC code once.", in_filename
)
tmp2_prolog = replace_defensively(
- tmp_prolog, u"Intel-X710", nic_name, 2,
- u"NIC name should appear twice (tag and variable).",
+ tmp_prolog, "Intel-X710", nic_name, 2,
+ "NIC name should appear twice (tag and variable).",
in_filename
)
- if tmp2_prolog.count(u"HW_") == 2:
+ if tmp2_prolog.count("HW_") == 2:
# TODO CSIT-1481: Crypto HW should be read
# from topology file instead.
if nic_name in Constants.NIC_NAME_TO_CRYPTO_HW:
tmp2_prolog = replace_defensively(
- tmp2_prolog, u"HW_DH895xcc",
+ tmp2_prolog, "HW_DH895xcc",
Constants.NIC_NAME_TO_CRYPTO_HW[nic_name], 1,
- u"HW crypto name should appear.", in_filename
+ "HW crypto name should appear.", in_filename
)
iface, old_suite_id, old_suite_tag = get_iface_and_suite_ids(
tmp2_filename
)
- if u"DPDK" in in_prolog:
+ if "DPDK" in in_prolog:
for driver in Constants.DPDK_NIC_NAME_TO_DRIVER[nic_name]:
out_filename = replace_defensively(
tmp2_filename, old_suite_id,
Constants.DPDK_NIC_DRIVER_TO_SUITE_PREFIX[driver] \
+ old_suite_id,
- 1, u"Error adding driver prefix.", in_filename
+ 1, "Error adding driver prefix.", in_filename
)
out_prolog = replace_defensively(
- tmp2_prolog, u"vfio-pci", driver, 1,
- u"Driver name should appear once.", in_filename
+ tmp2_prolog, "vfio-pci", driver, 1,
+ "Driver name should appear once.", in_filename
)
out_prolog = replace_defensively(
out_prolog,
- Constants.DPDK_NIC_DRIVER_TO_TAG[u"vfio-pci"],
+ Constants.DPDK_NIC_DRIVER_TO_TAG["vfio-pci"],
Constants.DPDK_NIC_DRIVER_TO_TAG[driver], 1,
- u"Driver tag should appear once.", in_filename
+ "Driver tag should appear once.", in_filename
)
iface, suite_id, suite_tag = get_iface_and_suite_ids(
out_filename
@@ -340,36 +332,41 @@ def write_default_files(in_filename, in_prolog, kwargs_list):
check_suite_tag(suite_tag, out_prolog)
# TODO: Reorder loops so suite_id is finalized sooner.
testcase = Testcase.default(suite_id)
- with open(out_filename, u"wt") as file_out:
+ with open(out_filename, "wt") as file_out:
file_out.write(out_prolog)
add_default_testcases(
- testcase, iface, suite_id, file_out, kwargs_list
+ testcase, nic_code, suite_id, file_out, kwargs_list
)
continue
for driver in Constants.NIC_NAME_TO_DRIVER[nic_name]:
out_filename = replace_defensively(
tmp2_filename, old_suite_id,
Constants.NIC_DRIVER_TO_SUITE_PREFIX[driver] + old_suite_id,
- 1, u"Error adding driver prefix.", in_filename
+ 1, "Error adding driver prefix.", in_filename
)
out_prolog = replace_defensively(
- tmp2_prolog, u"vfio-pci", driver, 1,
- u"Driver name should appear once.", in_filename
+ tmp2_prolog, "vfio-pci", driver, 1,
+ "Driver name should appear once.", in_filename
)
out_prolog = replace_defensively(
- out_prolog, Constants.NIC_DRIVER_TO_TAG[u"vfio-pci"],
+ out_prolog, Constants.NIC_DRIVER_TO_TAG["vfio-pci"],
Constants.NIC_DRIVER_TO_TAG[driver], 1,
- u"Driver tag should appear once.", in_filename
+ "Driver tag should appear once.", in_filename
)
out_prolog = replace_defensively(
- out_prolog, Constants.NIC_DRIVER_TO_PLUGINS[u"vfio-pci"],
+ out_prolog, Constants.NIC_DRIVER_TO_PLUGINS["vfio-pci"],
Constants.NIC_DRIVER_TO_PLUGINS[driver], 1,
- u"Driver plugin should appear once.", in_filename
+ "Driver plugin should appear once.", in_filename
)
out_prolog = replace_defensively(
- out_prolog, Constants.NIC_DRIVER_TO_VFS[u"vfio-pci"],
+ out_prolog, Constants.NIC_DRIVER_TO_VFS["vfio-pci"],
Constants.NIC_DRIVER_TO_VFS[driver], 1,
- u"NIC VFs argument should appear once.", in_filename
+ "NIC VFs argument should appear once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_CODE_TO_PFS["10ge2p1x710"],
+ Constants.NIC_CODE_TO_PFS[nic_code], 1,
+ "NIC PFs argument should appear once.", in_filename
)
iface, suite_id, suite_tag = get_iface_and_suite_ids(
out_filename
@@ -384,10 +381,10 @@ def write_default_files(in_filename, in_prolog, kwargs_list):
check_suite_tag(suite_tag, out_prolog)
# TODO: Reorder loops so suite_id is finalized sooner.
testcase = Testcase.default(suite_id)
- with open(out_filename, u"wt") as file_out:
+ with open(out_filename, "wt") as file_out:
file_out.write(out_prolog)
add_default_testcases(
- testcase, iface, suite_id, file_out, kwargs_list
+ testcase, nic_code, suite_id, file_out, kwargs_list
)
@@ -407,10 +404,10 @@ def write_reconf_files(in_filename, in_prolog, kwargs_list):
"""
_, suite_id, _ = get_iface_and_suite_ids(in_filename)
testcase = Testcase.default(suite_id)
- for nic_name in Constants.NIC_NAME_TO_CODE:
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
tmp_filename = replace_defensively(
- in_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[nic_name], 1,
+ in_filename, u"10ge2p1x710", nic_code, 1,
u"File name should contain NIC code once.", in_filename
)
tmp_prolog = replace_defensively(
@@ -455,6 +452,11 @@ def write_reconf_files(in_filename, in_prolog, kwargs_list):
Constants.NIC_DRIVER_TO_VFS[driver], 1,
u"NIC VFs argument should appear once.", in_filename
)
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_CODE_TO_PFS["10ge2p1x710"],
+ Constants.NIC_CODE_TO_PFS[nic_code], 1,
+ "NIC PFs argument should appear once.", in_filename
+ )
iface, suite_id, suite_tag = get_iface_and_suite_ids(out_filename)
out_prolog = replace_defensively(
out_prolog, old_suite_tag, suite_tag, 1,
@@ -483,10 +485,10 @@ def write_tcp_files(in_filename, in_prolog, kwargs_list):
# TODO: Generate rps from cps? There are subtle differences.
_, suite_id, suite_tag = get_iface_and_suite_ids(in_filename)
testcase = Testcase.tcp(suite_id)
- for nic_name in Constants.NIC_NAME_TO_CODE:
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
tmp_filename = replace_defensively(
- in_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[nic_name], 1,
+ in_filename, u"10ge2p1x710", nic_code, 1,
u"File name should contain NIC code once.", in_filename
)
tmp_prolog = replace_defensively(
@@ -522,6 +524,11 @@ def write_tcp_files(in_filename, in_prolog, kwargs_list):
Constants.NIC_DRIVER_TO_VFS[driver], 1,
u"NIC VFs argument should appear once.", in_filename
)
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_CODE_TO_PFS["10ge2p1x710"],
+ Constants.NIC_CODE_TO_PFS[nic_code], 1,
+ "NIC PFs argument should appear once.", in_filename
+ )
iface, suite_id, suite_tag = get_iface_and_suite_ids(out_filename)
out_prolog = replace_defensively(
out_prolog, old_suite_tag, suite_tag, 1,
@@ -546,10 +553,10 @@ def write_iperf3_files(in_filename, in_prolog, kwargs_list):
"""
_, suite_id, suite_tag = get_iface_and_suite_ids(in_filename)
testcase = Testcase.iperf3(suite_id)
- for nic_name in Constants.NIC_NAME_TO_CODE:
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
out_filename = replace_defensively(
- in_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[nic_name], 1,
+ in_filename, u"10ge2p1x710", nic_code, 1,
u"File name should contain NIC code once.", in_filename
)
out_prolog = replace_defensively(
@@ -604,10 +611,10 @@ def write_trex_files(in_filename, in_prolog, kwargs_list):
)
_, suite_id, suite_tag = get_iface_and_suite_ids(tmp_filename)
testcase = Testcase.trex(suite_id)
- for nic_name in Constants.NIC_NAME_TO_CODE:
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
out_filename = replace_defensively(
- tmp_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[nic_name], 1,
+ tmp_filename, u"10ge2p1x710", nic_code, 1,
u"File name should contain NIC code once.", in_filename
)
out_prolog = replace_defensively(
@@ -638,10 +645,10 @@ def write_device_files(in_filename, in_prolog, kwargs_list):
)
_, suite_id, _ = get_iface_and_suite_ids(tmp_filename)
testcase = Testcase.default(suite_id)
- for nic_name in Constants.NIC_NAME_TO_CODE:
+ for nic_code in Constants.NIC_CODE_TO_NAME:
+ nic_name = Constants.NIC_CODE_TO_NAME[nic_code]
tmp2_filename = replace_defensively(
- tmp_filename, u"10ge2p1x710",
- Constants.NIC_NAME_TO_CODE[nic_name], 1,
+ tmp_filename, u"10ge2p1x710", nic_code, 1,
u"File name should contain NIC code once.", in_filename
)
tmp2_prolog = replace_defensively(
@@ -677,6 +684,11 @@ def write_device_files(in_filename, in_prolog, kwargs_list):
Constants.NIC_DRIVER_TO_VFS[driver], 1,
u"NIC VFs argument should appear once.", in_filename
)
+ out_prolog = replace_defensively(
+ out_prolog, Constants.NIC_CODE_TO_PFS["10ge2p1x710"],
+ Constants.NIC_CODE_TO_PFS[nic_code], 1,
+ "NIC PFs argument should appear once.", in_filename
+ )
iface, suite_id, suite_tag = get_iface_and_suite_ids(
out_filename
)
diff --git a/resources/libraries/python/jumpavg/__init__.py b/resources/libraries/python/jumpavg/__init__.py
index 4fa696c538..7f63b5ee39 100644
--- a/resources/libraries/python/jumpavg/__init__.py
+++ b/resources/libraries/python/jumpavg/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -15,8 +15,8 @@
__init__ file for "jumpavg" Python package.
"""
-from .AvgStdevStats import AvgStdevStats
-from .BitCountingStats import BitCountingStats
-from .BitCountingGroup import BitCountingGroup
-from .BitCountingGroupList import BitCountingGroupList
+from .avg_stdev_stats import AvgStdevStats
+from .bit_counting_stats import BitCountingStats
+from .bit_counting_group import BitCountingGroup
+from .bit_counting_group_list import BitCountingGroupList
from .classify import classify
diff --git a/resources/libraries/python/jumpavg/AvgStdevStats.py b/resources/libraries/python/jumpavg/avg_stdev_stats.py
index 4720c10f3d..c21c50c8f8 100644
--- a/resources/libraries/python/jumpavg/AvgStdevStats.py
+++ b/resources/libraries/python/jumpavg/avg_stdev_stats.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,9 +13,12 @@
"""Module holding AvgStdevStats class."""
+import dataclasses
import math
+import typing
+@dataclasses.dataclass
class AvgStdevStats:
"""Class for statistics which include average and stdev of a group.
@@ -25,45 +28,18 @@ class AvgStdevStats:
Instances are only statistics, the data itself is stored elsewhere.
"""
- def __init__(self, size=0, avg=0.0, stdev=0.0):
- """Construct the stats object by storing the values needed.
-
- Each value has to be numeric.
- The values are not sanitized depending on size, wrong initialization
- can cause delayed math errors.
-
- :param size: Number of values participating in this group.
- :param avg: Population average of the participating sample values.
- :param stdev: Population standard deviation of the sample values.
- :type size: int
- :type avg: float
- :type stdev: float
- """
- self.size = size
- self.avg = avg
- self.stdev = stdev
-
- def __str__(self):
- """Return string with human readable description of the group.
-
- :returns: Readable description.
- :rtype: str
- """
- return f"size={self.size} avg={self.avg} stdev={self.stdev}"
-
- def __repr__(self):
- """Return string executable as Python constructor call.
-
- :returns: Executable constructor call.
- :rtype: str
- """
- return (
- f"AvgStdevStats(size={self.size!r},avg={self.avg!r}"
- f",stdev={self.stdev!r})"
- )
+ size: int = 0
+ """Number of scalar values (samples) participating in this group."""
+ avg: float = 0.0
+ """Population average of the participating sample values."""
+ stdev: float = 0.0
+ """Population standard deviation of the sample values."""
@classmethod
- def for_runs(cls, runs):
+ def for_runs(
+ cls,
+ runs: typing.Iterable[typing.Union[float, "AvgStdevStats"]],
+ ) -> "AvgStdevStats":
"""Return new stats instance describing the sequence of runs.
If you want to append data to existing stats object,
@@ -72,8 +48,8 @@ class AvgStdevStats:
Instead of a verb, "for" is used to start this method name,
to signify the result contains less information than the input data.
- Here, Run is a hypothetical abstract class, an union of float and cls.
- Defining that as a real abstract class in Python 2 is too much hassle.
+        Here, run is a hypothetical abstract class, a union of float and cls.
+ Defining that as a real abstract class in Python is too much hassle.
:param runs: Sequence of data to describe by the new metadata.
:type runs: Iterable[Union[float, cls]]
@@ -97,6 +73,8 @@ class AvgStdevStats:
run_size = run.size
run_avg = run.avg
run_stdev = run.stdev
+ if run_size < 1:
+ continue
old_total_size = total_size
delta = run_avg - total_avg
total_size += run_size
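
A minimal usage sketch of the reworked statistics combination above, assuming the CSIT repository layout (the sample values are invented, not taken from this patch):

    from resources.libraries.python.jumpavg import AvgStdevStats

    # Floats and already-computed stats can be mixed, matching the
    # float-or-stats union the docstring describes.
    partial = AvgStdevStats.for_runs([2.0, 4.0])       # size=2, avg=3.0
    combined = AvgStdevStats.for_runs([partial, 6.0])  # size=3, avg=4.0
    print(combined)  # dataclass-generated repr replaces the removed __str__/__repr__
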
diff --git a/resources/libraries/python/jumpavg/BitCountingGroup.py b/resources/libraries/python/jumpavg/bit_counting_group.py
index f1bdc502fd..22c9337532 100644
--- a/resources/libraries/python/jumpavg/BitCountingGroup.py
+++ b/resources/libraries/python/jumpavg/bit_counting_group.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,14 +13,16 @@
"""Module holding BitCountingGroup class."""
-import copy
+import collections
+import dataclasses
+import typing
-from .AvgStdevStats import AvgStdevStats
-from .BitCountingStats import BitCountingStats
+from .avg_stdev_stats import AvgStdevStats
+from .bit_counting_stats import BitCountingStats
-class BitCountingGroup:
- # TODO: Inherit from collections.abc.Sequence in Python 3.
+@dataclasses.dataclass
+class BitCountingGroup(collections.abc.Sequence):
"""Group of runs which tracks bit count in an efficient manner.
This class contains methods that mutate the internal state,
@@ -38,74 +40,63 @@ class BitCountingGroup:
a method to add a single run in an efficient manner is provided.
"""
- def __init__(self, run_list=None, stats=None, bits=None,
- max_value=None, prev_avg=None, comment="unknown"):
- """Set the internal state and partially the stats.
-
- A "group" stands for an Iterable of runs, where "run" is either
- a float value, or a stats-like object (only size, avg and stdev
- are accessed). Run is a hypothetical abstract class,
- defining it in Python 2 is too much hassle.
-
- Only a copy of the run list argument value is stored in the instance,
- so it is not a problem if the value object is mutated afterwards.
+ run_list: typing.List[typing.Union[float, AvgStdevStats]]
+ """List of run to compose into this group.
+ The init call takes ownership of the list,
+ so the caller should clone it to avoid unexpected muations."""
+ max_value: float
+ """Maximal sample value to expect."""
+ unit: float = 1.0
+ """Typical resolution of the values"""
+ comment: str = "normal"
+ """Any string giving more info, e.g. "regression"."""
+ prev_avg: typing.Optional[float] = None
+ """Average of the previous group, if any."""
+ stats: AvgStdevStats = None
+ """Stats object used for computing bits.
+ Almost always recomputed, except when non-None in init."""
+ cached_bits: typing.Optional[float] = None
+ """Cached value of information content.
+    Set to None on edit, recomputed lazily when needed."""
+
+ def __post_init__(self):
+ """Recompute stats is None.
It is not verified whether the user provided values are valid,
e.g. whether the stats and bits values reflect the runs.
-
- :param run_list: List of run to compose into this group. Default: empty.
- :param stats: Stats object used for computing bits.
- :param bits: Cached value of information content.
- :param max_value: Maximal sample value to be used for computing.
- :param prev_avg: Average of the previous group, affects bits.
- :param comment: Any string giving more info, e.g. "regression".
- :type run_list: Iterable[Run]
- :type stats: Optional[AvgStdevStats]
- :type bits: Optional[float]
- :type max_value: float
- :type prev_avg: Optional[float]
- :type comment: str
"""
- self.run_list = copy.deepcopy(run_list) if run_list else list()
- self.stats = stats
- self.cached_bits = bits
- self.max_value = max_value
- self.prev_avg = prev_avg
- self.comment = comment
if self.stats is None:
- self.stats = AvgStdevStats.for_runs(self.run_list)
-
- def __str__(self):
- """Return string with human readable description of the group.
+ self.stats = AvgStdevStats.for_runs(runs=self.run_list)
- :returns: Readable description.
- :rtype: str
- """
- return f"stats={self.stats} bits={self.cached_bits}"
+ @property
+ def bits(self) -> float:
+ """Return overall bit content of the group list.
- def __repr__(self):
- """Return string executable as Python constructor call.
+ If not cached, compute from stats and cache.
- :returns: Executable constructor call.
- :rtype: str
+ :returns: The overall information content in bits.
+ :rtype: float
"""
- return (
- f"BitCountingGroup(run_list={self.run_list!r},stats={self.stats!r}"
- f",bits={self.cached_bits!r},max_value={self.max_value!r}"
- f",prev_avg={self.prev_avg!r},comment={self.comment!r})"
- )
+ if self.cached_bits is None:
+ self.cached_bits = BitCountingStats.for_runs_and_params(
+ runs=[self.stats],
+ max_value=self.max_value,
+ unit=self.unit,
+ prev_avg=self.prev_avg,
+ ).bits
+ return self.cached_bits
- def __getitem__(self, index):
+ def __getitem__(self, index: int) -> typing.Union[float, AvgStdevStats]:
"""Return the run at the index.
:param index: Index of the run to return.
:type index: int
:returns: The run at the index.
- :rtype: Run
+ :rtype: typing.Union[float, AvgStdevStats]
"""
return self.run_list[index]
- def __len__(self):
+ def __len__(self) -> int:
"""Return the number of runs in the group.
:returns: The Length of run_list.
@@ -113,39 +104,36 @@ class BitCountingGroup:
"""
return len(self.run_list)
- def copy(self):
+ def copy(self) -> "BitCountingGroup":
"""Return a new instance with copied internal state.
+ Stats are preserved to avoid re-computation.
+ As both float and AvgStdevStats are effectively immutable,
+ only a shallow copy of the runs list is performed.
+
:returns: The copied instance.
:rtype: BitCountingGroup
"""
stats = AvgStdevStats.for_runs([self.stats])
return self.__class__(
- run_list=self.run_list, stats=stats, bits=self.cached_bits,
- max_value=self.max_value, prev_avg=self.prev_avg,
- comment=self.comment)
-
- @property
- def bits(self):
- """Return overall bit content of the group list.
-
- If not cached, compute from stats and cache.
-
- :returns: The overall information content in bits.
- :rtype: float
- """
- if self.cached_bits is None:
- self.cached_bits = BitCountingStats.for_runs(
- [self.stats], self.max_value, self.prev_avg).bits
- return self.cached_bits
+ run_list=list(self.run_list),
+ stats=stats,
+ cached_bits=self.cached_bits,
+ max_value=self.max_value,
+ unit=self.unit,
+ prev_avg=self.prev_avg,
+ comment=self.comment,
+ )
- def append(self, run):
+ def append(
+ self, run: typing.Union[float, AvgStdevStats]
+ ) -> "BitCountingGroup":
"""Mutate to add the new run, return self.
Stats are updated, but old bits value is deleted from cache.
:param run: The run value to add to the group.
- :type value: Run
+ :type value: typing.Union[float, AvgStdevStats]
:returns: The updated self.
:rtype: BitCountingGroup
"""
@@ -154,7 +142,9 @@ class BitCountingGroup:
self.cached_bits = None
return self
- def extend(self, runs):
+ def extend(
+ self, runs: typing.Iterable[typing.Union[float, AvgStdevStats]]
+ ) -> "BitCountingGroup":
"""Mutate to add the new runs, return self.
This saves a small amount of computation
@@ -163,7 +153,7 @@ class BitCountingGroup:
Stats are updated, but old bits value is deleted from cache.
:param runs: The runs to add to the group.
- :type value: Iterable[Run]
+ :type value: typing.Iterable[typing.Union[float, AvgStdevStats]]
:returns: The updated self.
:rtype: BitCountingGroup
"""
diff --git a/resources/libraries/python/jumpavg/BitCountingGroupList.py b/resources/libraries/python/jumpavg/bit_counting_group_list.py
index 6a1c86baf2..e4d33b53a2 100644
--- a/resources/libraries/python/jumpavg/BitCountingGroupList.py
+++ b/resources/libraries/python/jumpavg/bit_counting_group_list.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,13 +13,16 @@
"""Module holding BitCountingGroupList class."""
-import copy
+import collections
+import dataclasses
+import typing
-from .BitCountingGroup import BitCountingGroup
+from .avg_stdev_stats import AvgStdevStats # Just for type hints.
+from .bit_counting_group import BitCountingGroup
-class BitCountingGroupList:
- # TODO: Inherit from collections.abc.Sequence in Python 3.
+@dataclasses.dataclass
+class BitCountingGroupList(collections.abc.Sequence):
"""List of data groups which tracks overall bit count.
The Sequence-like access is related to the list of groups,
@@ -41,55 +44,29 @@ class BitCountingGroupList:
recalculations if the bit count is not needed.
"""
- def __init__(self, group_list=None, bits_except_last=0.0, max_value=None):
- """Set the internal state without any calculations.
-
- The group list argument is copied deeply, so it is not a problem
- if the value object is mutated afterwards.
+ max_value: float
+ """Maximal sample value to base bits computation on."""
+ unit: float = 1.0
+ """Typical resolution of the values."""
+ group_list: typing.List[BitCountingGroup] = None
+ """List of groups to compose this group list.
+ Init also accepts None standing for an empty list.
+ This class takes ownership of the list,
+ so caller of init should clone their copy to avoid unexpected mutations.
+ """
+ bits_except_last: float = 0.0
+ """Partial sum of all but one group bits."""
- A "group" stands for an Iterable of runs, where "run" is either
- a float value, or a stats-like object (only size, avg and stdev
- are accessed). Run is a hypothetical abstract class,
- defining it in Python 2 is too much hassle.
+ def __post_init__(self):
+ """Turn possible None into an empty list.
It is not verified whether the user provided values are valid,
- e.g. whether the cached bits values make sense.
-
- The max_value is required and immutable,
- it is recommended the callers find their maximum beforehand.
-
- :param group_list: List of groups to compose this group list (or empty).
- :param bits_except_last: Partial sum of all but one group bits.
- :param max_value: Maximal sample value to base bits computation on.
- :type group_list: Iterable[BitCountingGroup]
- :type bits_except_last: float
- :type max_value: float
- """
- self.group_list = copy.deepcopy(group_list) if group_list else list()
- self.bits_except_last = bits_except_last
- self.max_value = max_value
-
- def __str__(self):
- """Return string with human readable description of the group list.
-
- :returns: Readable description.
- :rtype: str
+ e.g. whether the cached bits values (and bits_except_last) make sense.
"""
- return f"group_list={self.group_list} bits={self.bits}"
-
- def __repr__(self):
- """Return string executable as Python constructor call.
+ if self.group_list is None:
+ self.group_list = []
- :returns: Executable constructor call.
- :rtype: str
- """
- return (
- f"BitCountingGroupList(group_list={self.group_list!r}"
- f",bits_except_last={self.bits_except_last!r}"
- f",max_value={self.max_value!r})"
- )
-
- def __getitem__(self, index):
+ def __getitem__(self, index: int) -> BitCountingGroup:
"""Return the group at the index.
:param index: Index of the group to return.
@@ -99,7 +76,7 @@ class BitCountingGroupList:
"""
return self.group_list[index]
- def __len__(self):
+ def __len__(self) -> int:
"""Return the length of the group list.
:returns: The Length of group_list.
@@ -107,19 +84,46 @@ class BitCountingGroupList:
"""
return len(self.group_list)
- def copy(self):
+ def copy(self) -> "BitCountingGroupList":
"""Return a new instance with copied internal state.
:returns: The copied instance.
:rtype: BitCountingGroupList
"""
return self.__class__(
- group_list=self.group_list, bits_except_last=self.bits_except_last,
- max_value=self.max_value
+ max_value=self.max_value,
+ unit=self.unit,
+ group_list=[group.copy() for group in self.group_list],
+ bits_except_last=self.bits_except_last,
+ )
+
+ def copy_fast(self) -> "BitCountingGroupList":
+ """Return a new instance with minimaly copied internal state.
+
+ The assumption here is that only the last group will ever be mutated
+ (in self, probably never in the return value),
+ so all the previous groups can be "copied by reference".
+
+ :returns: The copied instance.
+ :rtype: BitCountingGroupList
+ """
+ group_list = list(self.group_list)
+ if group_list:
+ group_list[-1] = group_list[-1].copy()
+ # Further speedup is possible by keeping the last group
+ # as a singly linked (from end) list,
+ # but for CSIT sample sizes, copy of whole Python list is faster.
+ # TODO: Implement linked list as an option
+ # for users with many samples.
+ return self.__class__(
+ max_value=self.max_value,
+ unit=self.unit,
+ group_list=group_list,
+ bits_except_last=self.bits_except_last,
)
@property
- def bits(self):
+ def bits(self) -> float:
"""Return overall bit content of the group list.
:returns: The overall information content in bits.
@@ -130,12 +134,17 @@ class BitCountingGroupList:
# TODO: Is it worth to cache the overall result?
return self.bits_except_last + self.group_list[-1].bits
- def append_group_of_runs(self, runs):
+ def append_group_of_runs(
+ self,
+ runs: typing.Union[
+ BitCountingGroup, typing.List[typing.Union[float, AvgStdevStats]]
+ ],
+ ) -> "BitCountingGroupList":
"""Mutate to add a new group based on the runs, return self.
- The argument is copied before adding to the group list,
- so further edits do not affect the grup list.
- The argument can also be a group, only runs from it are used.
+ The list argument is NOT copied before adding to the group list,
+        so further edits MAY affect the group list.
+ The list from BitCountingGroup is shallow copied though.
:param runs: Runs to form the next group to be appended to self.
:type runs: Union[Iterable[Run], BitCountingGroup]
@@ -147,16 +156,23 @@ class BitCountingGroupList:
# It is faster to avoid stats recalculation.
new_group = runs.copy()
new_group.max_value = self.max_value
+ # Unit is common.
new_group.prev_avg = prev_avg
new_group.cached_bits = None
else:
new_group = BitCountingGroup(
- run_list=runs, max_value=self.max_value, prev_avg=prev_avg)
+ run_list=runs,
+ max_value=self.max_value,
+ unit=self.unit,
+ prev_avg=prev_avg,
+ )
self.bits_except_last = self.bits
self.group_list.append(new_group)
return self
- def append_run_to_to_last_group(self, run):
+ def append_run_to_to_last_group(
+ self, run: typing.Union[float, AvgStdevStats]
+ ) -> "BitCountingGroupList":
"""Mutate to add new run at the end of the last group.
Basically a one-liner, only returning group list instead of last group.
@@ -170,7 +186,9 @@ class BitCountingGroupList:
self.group_list[-1].append(run)
return self
- def extend_runs_to_last_group(self, runs):
+ def extend_runs_to_last_group(
+ self, runs: typing.Iterable[typing.Union[float, AvgStdevStats]]
+ ) -> "BitCountingGroupList":
"""Mutate to add new runs to the end of the last group.
A faster alternative to appending runs one by one in a loop.
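
To illustrate the difference between the two copy methods, a small sketch assuming (as the docstring states) that only the last group will be mutated afterwards:

    from resources.libraries.python.jumpavg import BitCountingGroupList

    glist = BitCountingGroupList(max_value=10.0)
    glist.append_group_of_runs([1.0, 2.0]).append_group_of_runs([5.0])
    full = glist.copy()       # every group is copied
    fast = glist.copy_fast()  # earlier groups shared, only the last one copied
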
diff --git a/resources/libraries/python/jumpavg/BitCountingStats.py b/resources/libraries/python/jumpavg/bit_counting_stats.py
index 7b5e659214..3d1cb8aef0 100644
--- a/resources/libraries/python/jumpavg/BitCountingStats.py
+++ b/resources/libraries/python/jumpavg/bit_counting_stats.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,11 +13,14 @@
"""Module holding BitCountingStats class."""
+import dataclasses
import math
+import typing
-from .AvgStdevStats import AvgStdevStats
+from .avg_stdev_stats import AvgStdevStats
+@dataclasses.dataclass
class BitCountingStats(AvgStdevStats):
"""Class for statistics which include information content of a group.
@@ -33,11 +36,22 @@ class BitCountingStats(AvgStdevStats):
Only for_runs method calls the parent implementation, without using super().
"""
- def __init__(
- self, size=0, avg=None, stdev=0.0, max_value=None, prev_avg=None):
- """Construct the stats object by computing from the values needed.
+ max_value: float = None
+ """Maximal sample value (real or estimated).
+ Default value is there just for argument ordering reasons,
+ leaving None leads to exceptions."""
+ unit: float = 1.0
+ """Typical resolution of the values."""
+ prev_avg: typing.Optional[float] = None
+ """Population average of the previous group (if any)."""
+ bits: float = None
+ """The computed information content of the group.
+    It is formally an argument to the init function, just to keep the repr
+    string a valid call, but the init value is ignored and always recomputed.
+ """
- The values are not sanitized, faulty callers can cause math errors.
+ def __post_init__(self):
+ """Construct the stats object by computing from the values needed.
The None values are allowed for stats for zero size data,
but such stats can report arbitrary avg and max_value.
@@ -54,91 +68,60 @@ class BitCountingStats(AvgStdevStats):
(but not with floating point mechanic).
The hope is the difference will have
no real impact on the classification procedure.
-
- :param size: Number of values participating in this group.
- :param avg: Population average of the participating sample values.
- :param stdev: Population standard deviation of the sample values.
- :param max_value: Maximal expected value.
- TODO: This might be more optimal,
- but max-invariant algorithm will be nicer.
- :param prev_avg: Population average of the previous group.
- If None, no previous average is taken into account.
- If not None, the given previous average is used to discourage
- consecutive groups with similar averages
- (opposite triangle distribution is assumed).
- :type avg: float
- :type size: int
- :type stdev: float
- :type max_value: Union[float, NoneType]
- :type prev_avg: Union[float, NoneType]
"""
- self.avg = avg
- self.size = size
- self.stdev = stdev
- self.max_value = max_value
- self.prev_avg = prev_avg
# Zero size should in principle have non-zero bits (coding zero size),
# but zero allows users to add empty groups without affecting bits.
self.bits = 0.0
if self.size < 1:
return
- if avg is None:
- raise ValueError(f"Avg is None: {self!r}")
- if max_value is None or max_value <= 0.0:
+ if self.max_value <= 0.0:
raise ValueError(f"Invalid max value: {self!r}")
+ max_value = self.max_value / self.unit
+ avg = self.avg / self.unit
# Length of the sequence must be also counted in bits,
# otherwise the message would not be decodable.
# Model: probability of k samples is 1/k - 1/(k+1) == 1/k/(k+1)
# This is compatible with zero size leading to zero bits.
- self.bits += math.log(size * (size + 1), 2)
- if prev_avg is None:
+ self.bits += math.log(self.size * (self.size + 1), 2)
+ if self.prev_avg is None:
# Avg is considered to be uniformly distributed
# from zero to max_value.
- self.bits += math.log(max_value + 1.0, 2)
+ self.bits += math.log(max_value + 1, 2)
else:
# Opposite triangle distribution with minimum.
- self.bits += math.log(
- max_value * (max_value + 1) / (abs(avg - prev_avg) + 1), 2)
+ prev_avg = self.prev_avg / self.unit
+ norm = prev_avg * prev_avg
+ norm -= (prev_avg - 1) * max_value
+ norm += max_value * max_value / 2
+ self.bits -= math.log((abs(avg - prev_avg) + 1) / norm, 2)
if self.size < 2:
return
- # Stdev is considered to be uniformly distributed
- # from zero to max_value. That is quite a bad expectation,
- # but resilient to negative samples etc.
- self.bits += math.log(max_value + 1.0, 2)
+ stdev = self.stdev / self.unit
+ # Stdev can be anything between zero and max value.
+ # For size==2, sphere surface is 2 points regardless of radius,
+ # we need to penalize large stdev already when encoding the stdev.
+ # The simplest way is to use the same distribution as with size...
+ self.bits += math.log((stdev + 1) * (stdev + 2), 2)
+ # .. just with added normalization from the max value cut-off.
+ self.bits += math.log(1 - 1 / (max_value + 2), 2)
# Now we know the samples lie on sphere in size-1 dimensions.
# So it is (size-2)-sphere, with radius^2 == stdev^2 * size.
# https://en.wikipedia.org/wiki/N-sphere
- sphere_area_ln = math.log(2) + math.log(math.pi) * ((size - 1) / 2.0)
- sphere_area_ln -= math.lgamma((size - 1) / 2.0)
- sphere_area_ln += math.log(stdev + 1.0) * (size - 2)
- sphere_area_ln += math.log(size) * ((size - 2) / 2.0)
+ sphere_area_ln = math.log(2)
+ sphere_area_ln += math.log(math.pi) * ((self.size - 1) / 2)
+ sphere_area_ln -= math.lgamma((self.size - 1) / 2)
+ sphere_area_ln += math.log(stdev + 1) * (self.size - 2)
+ sphere_area_ln += math.log(self.size) * ((self.size - 2) / 2)
self.bits += sphere_area_ln / math.log(2)
- def __str__(self):
- """Return string with human readable description of the group.
-
- :returns: Readable description.
- :rtype: str
- """
- return (
- f"size={self.size} avg={self.avg} stdev={self.stdev}"
- f" bits={self.bits}"
- )
-
- def __repr__(self):
- """Return string executable as Python constructor call.
-
- :returns: Executable constructor call.
- :rtype: str
- """
- return (
- f"BitCountingStats(size={self.size!r},avg={self.avg!r}"
- f",stdev={self.stdev!r},max_value={self.max_value!r}"
- f",prev_avg={self.prev_avg!r})"
- )
-
@classmethod
- def for_runs(cls, runs, max_value=None, prev_avg=None):
+ def for_runs_and_params(
+ cls,
+ runs: typing.Iterable[typing.Union[float, AvgStdevStats]],
+ max_value: float,
+ unit: float = 1.0,
+ prev_avg: typing.Optional[float] = None,
+ ):
"""Return new stats instance describing the sequence of runs.
If you want to append data to existing stats object,
@@ -156,14 +139,22 @@ class BitCountingStats(AvgStdevStats):
:param runs: Sequence of data to describe by the new metadata.
:param max_value: Maximal expected value.
+ :param unit: Typical resolution of the values.
:param prev_avg: Population average of the previous group, if any.
:type runs: Iterable[Union[float, AvgStdevStats]]
:type max_value: Union[float, NoneType]
+ :type unit: float
:type prev_avg: Union[float, NoneType]
:returns: The new stats instance.
:rtype: cls
"""
asd = AvgStdevStats.for_runs(runs)
- ret_obj = cls(size=asd.size, avg=asd.avg, stdev=asd.stdev,
- max_value=max_value, prev_avg=prev_avg)
+ ret_obj = cls(
+ size=asd.size,
+ avg=asd.avg,
+ stdev=asd.stdev,
+ max_value=max_value,
+ unit=unit,
+ prev_avg=prev_avg,
+ )
return ret_obj
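
A hedged usage sketch of the renamed factory method, with invented sample values and the resolution hint passed via the new unit parameter:

    from resources.libraries.python.jumpavg import BitCountingStats

    stats = BitCountingStats.for_runs_and_params(
        runs=[9.8, 10.1, 10.0], max_value=20.0, unit=0.1
    )
    print(stats.bits)  # information content used when comparing partitions
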
diff --git a/resources/libraries/python/jumpavg/classify.py b/resources/libraries/python/jumpavg/classify.py
index 252c71e8d5..cc3cdcceed 100644
--- a/resources/libraries/python/jumpavg/classify.py
+++ b/resources/libraries/python/jumpavg/classify.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,32 +13,54 @@
"""Module holding the classify function
-Classification os one of primary purposes of this package.
+Classification is one of the primary purposes of this package.
Minimal message length principle is used
for grouping results into the list of groups,
assuming each group is a population of different Gaussian distribution.
"""
-from .AvgStdevStats import AvgStdevStats
-from .BitCountingGroupList import BitCountingGroupList
+from typing import Iterable, Optional, Union
+from .avg_stdev_stats import AvgStdevStats
+from .bit_counting_group_list import BitCountingGroupList
-def classify(values):
+
+def classify(
+ values: Iterable[Union[float, Iterable[float]]],
+ unit: Optional[float] = None,
+ sbps: Optional[float] = None,
+) -> BitCountingGroupList:
"""Return the values in groups of optimal bit count.
Here, a value is either a float, or an iterable of floats.
Such iterables represent an indivisible sequence of floats.
+ Int is accepted anywhere instead of float.
Internally, such sequence is replaced by AvgStdevStats
after maximal value is found.
+ If the values are smaller than expected (below one unit),
+    the underlying assumptions break down and the classification is wrong.
+ Use the "unit" parameter to hint at what the input resolution is.
+
+ If the correct value of unit is not known beforehand,
+ the argument "sbps" (Significant Bits Per Sample) can be used
+ to set unit such that maximal sample value is this many ones in binary.
+ If neither "unit" nor "sbps" are given, "sbps" of 12 is used by default.
+
:param values: Sequence of runs to classify.
+ :param unit: Typical resolution of the values.
+        Zero and None mean no unit is given.
+    :param sbps: Significant Bits Per Sample. None or zero means 12.
+        If unit is not set, this is used to compute unit from max sample value.
:type values: Iterable[Union[float, Iterable[float]]]
+ :type unit: Optional[float]
+ :type sbps: Optional[float]
:returns: Classified group list.
:rtype: BitCountingGroupList
"""
- processed_values = list()
+ processed_values = []
max_value = 0.0
for value in values:
if isinstance(value, (float, int)):
@@ -50,27 +72,27 @@ def classify(values):
if subvalue > max_value:
max_value = subvalue
processed_values.append(AvgStdevStats.for_runs(value))
- open_at = list()
- closed_before = [BitCountingGroupList(max_value=max_value)]
- for index, value in enumerate(processed_values):
- newly_open = closed_before[index].copy()
- newly_open.append_group_of_runs([value])
- open_at.append(newly_open)
- record_group_list = newly_open
- for previous_index, old_open in enumerate(open_at[:index]):
- new_open = old_open.copy().append_run_to_to_last_group(value)
- open_at[previous_index] = new_open
- if new_open.bits < record_group_list.bits:
- record_group_list = new_open
- closed_before.append(record_group_list)
- partition = closed_before[-1]
- previous_average = partition[0].stats.avg
- for group in partition:
- if group.stats.avg == previous_average:
- group.comment = u"normal"
- elif group.stats.avg < previous_average:
- group.comment = u"regression"
+ if not unit:
+ if not sbps:
+ sbps = 12.0
+ max_in_units = pow(2.0, sbps + 1.0) - 1.0
+ unit = max_value / max_in_units
+ # Glist means group list (BitCountingGroupList).
+ open_glists = []
+ record_glist = BitCountingGroupList(max_value=max_value, unit=unit)
+ for value in processed_values:
+ new_open_glist = record_glist.copy_fast().append_group_of_runs([value])
+ record_glist = new_open_glist
+ for old_open_glist in open_glists:
+ old_open_glist.append_run_to_to_last_group(value)
+ if old_open_glist.bits < record_glist.bits:
+ record_glist = old_open_glist
+ open_glists.append(new_open_glist)
+ previous_average = record_glist[0].stats.avg
+ for group in record_glist:
+ if group.stats.avg < previous_average:
+ group.comment = "regression"
elif group.stats.avg > previous_average:
- group.comment = u"progression"
+ group.comment = "progression"
previous_average = group.stats.avg
- return partition
+ return record_glist
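
When no unit is given, the rewritten classify derives it from sbps: with the default sbps of 12, max_in_units is 2**13 - 1 = 8191, so unit = max_value / 8191. A minimal call sketch with invented samples:

    from resources.libraries.python.jumpavg import classify

    samples = [10.1, 10.2, 9.9, 14.8, 15.1, 15.0]
    glist = classify(samples)  # unit derived from the default sbps of 12
    for group in glist:
        print(group.comment, group.stats.avg, group.stats.stdev)
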
diff --git a/resources/libraries/python/model/ExportJson.py b/resources/libraries/python/model/ExportJson.py
new file mode 100644
index 0000000000..3f923d6d0e
--- /dev/null
+++ b/resources/libraries/python/model/ExportJson.py
@@ -0,0 +1,395 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module tracking json in-memory data and saving it to files.
+
+Each test case, suite setup (hierarchical) and teardown has its own file pair.
+
+Validation is performed for output files with available JSON schema.
+Validation is performed in data deserialized from disk,
+as serialization might have introduced subtle errors.
+"""
+
+import datetime
+import os.path
+
+from binascii import b2a_base64
+from dateutil.parser import parse
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+from zlib import compress
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.jumpavg import AvgStdevStats
+from resources.libraries.python.model.ExportResult import (
+ export_dut_type_and_version, export_tg_type_and_version
+)
+from resources.libraries.python.model.MemDump import write_output
+from resources.libraries.python.model.validate import (
+ get_validators, validate
+)
+
+
+class ExportJson():
+ """Class handling the json data setting and export."""
+
+ ROBOT_LIBRARY_SCOPE = "GLOBAL"
+
+ def __init__(self):
+ """Declare required fields, cache output dir.
+
+ Also memorize schema validator instances.
+ """
+ self.output_dir = BuiltIn().get_variable_value("\\${OUTPUT_DIR}", ".")
+ self.file_path = None
+ self.data = None
+ self.validators = get_validators()
+
+ def _detect_test_type(self):
+ """Return test_type, as inferred from robot test tags.
+
+ :returns: The inferred test type value.
+ :rtype: str
+        :raises RuntimeError: If the test tags do not contain expected values.
+ """
+ tags = self.data["tags"]
+ # First 5 options are specific for VPP tests.
+ if "DEVICETEST" in tags:
+ test_type = "device"
+ elif "LDP_NGINX" in tags:
+ test_type = "hoststack"
+ elif "HOSTSTACK" in tags:
+ test_type = "hoststack"
+ elif "GSO_TRUE" in tags or "GSO_FALSE" in tags:
+ test_type = "mrr"
+ elif "RECONF" in tags:
+ test_type = "reconf"
+ # The remaining 3 options could also apply to DPDK and TRex tests.
+ elif "SOAK" in tags:
+ test_type = "soak"
+ elif "NDRPDR" in tags:
+ test_type = "ndrpdr"
+ elif "MRR" in tags:
+ test_type = "mrr"
+ else:
+ raise RuntimeError(f"Unable to infer test type from tags: {tags}")
+ return test_type
+
+ def export_pending_data(self):
+ """Write the accumulated data to disk.
+
+ Create missing directories.
+ Reset both file path and data to avoid writing multiple times.
+
+ Functions which finalize content for given file are calling this,
+ so make sure each test and non-empty suite setup or teardown
+ is calling this as their last keyword.
+
+ If no file path is set, do not write anything,
+        as that is the failsafe behavior when called from an unexpected place.
+        Also do not write anything when the EXPORT_JSON constant is false.
+
+ Regardless of whether data was written, it is cleared.
+ """
+ if not Constants.EXPORT_JSON or not self.file_path:
+ self.data = None
+ self.file_path = None
+ return
+ new_file_path = write_output(self.file_path, self.data)
+ # Data is going to be cleared (as a sign that export succeeded),
+ # so this is the last chance to detect if it was for a test case.
+ is_testcase = "result" in self.data
+ self.data = None
+ # Validation for output goes here when ready.
+ self.file_path = None
+ if is_testcase:
+ validate(new_file_path, self.validators["tc_info"])
+
+ def warn_on_bad_export(self):
+ """If bad state is detected, log a warning and clean up state."""
+ if self.file_path is not None or self.data is not None:
+ logger.warn(f"Previous export not clean, path {self.file_path}")
+ self.data = None
+ self.file_path = None
+
+ def start_suite_setup_export(self):
+ """Set new file path, initialize data for the suite setup.
+
+ This has to be called explicitly at start of suite setup,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ "%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value("\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(" ", "_")
+ suite_path_part = os.path.join(*suite_id.split("."))
+ output_dir = self.output_dir
+ self.file_path = os.path.join(
+ output_dir, suite_path_part, "setup.info.json"
+ )
+ self.data = dict()
+ self.data["version"] = Constants.MODEL_VERSION
+ self.data["start_time"] = start_time
+ self.data["suite_name"] = suite_name
+ self.data["suite_documentation"] = BuiltIn().get_variable_value(
+ "\\${SUITE_DOCUMENTATION}"
+ )
+ # "end_time" and "duration" are added on flush.
+ self.data["hosts"] = set()
+ self.data["telemetry"] = list()
+
+ def start_test_export(self):
+ """Set new file path, initialize data to minimal tree for the test case.
+
+ It is assumed Robot variables DUT_TYPE and DUT_VERSION
+ are already set (in suite setup) to correct values.
+
+ This function has to be called explicitly at the start of test setup,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite and test.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ "%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value("\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(" ", "_")
+ suite_path_part = os.path.join(*suite_id.split("."))
+ test_name = BuiltIn().get_variable_value("\\${TEST_NAME}")
+ self.file_path = os.path.join(
+ self.output_dir, suite_path_part,
+ test_name.lower().replace(" ", "_") + ".info.json"
+ )
+ self.data = dict()
+ self.data["version"] = Constants.MODEL_VERSION
+ self.data["start_time"] = start_time
+ self.data["suite_name"] = suite_name
+ self.data["test_name"] = test_name
+ test_doc = BuiltIn().get_variable_value("\\${TEST_DOCUMENTATION}", "")
+ self.data["test_documentation"] = test_doc
+ # "test_type" is added on flush.
+ # "tags" is detected and added on flush.
+ # "end_time" and "duration" is added on flush.
+ # Robot status and message are added on flush.
+ self.data["result"] = dict(type="unknown")
+ self.data["hosts"] = BuiltIn().get_variable_value("\\${hosts}")
+ self.data["telemetry"] = list()
+ export_dut_type_and_version()
+ export_tg_type_and_version()
+
+ def start_suite_teardown_export(self):
+ """Set new file path, initialize data for the suite teardown.
+
+ This has to be called explicitly at start of suite teardown,
+ otherwise Robot likes to postpone initialization
+ until first call by a data-adding keyword.
+
+ File path is set based on suite.
+ """
+ self.warn_on_bad_export()
+ start_time = datetime.datetime.utcnow().strftime(
+ "%Y-%m-%dT%H:%M:%S.%fZ"
+ )
+ suite_name = BuiltIn().get_variable_value("\\${SUITE_NAME}")
+ suite_id = suite_name.lower().replace(" ", "_")
+ suite_path_part = os.path.join(*suite_id.split("."))
+ self.file_path = os.path.join(
+ self.output_dir, suite_path_part, "teardown.info.json"
+ )
+ self.data = dict()
+ self.data["version"] = Constants.MODEL_VERSION
+ self.data["start_time"] = start_time
+ self.data["suite_name"] = suite_name
+ # "end_time" and "duration" is added on flush.
+ self.data["hosts"] = BuiltIn().get_variable_value("\\${hosts}")
+ self.data["telemetry"] = list()
+
+ def finalize_suite_setup_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of suite setup.
+ The write is done at next start (or at the end of global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+ self.data["hosts"] = BuiltIn().get_variable_value("\\${hosts}")
+ self.data["end_time"] = end_time
+ self.export_pending_data()
+
+ def finalize_test_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be at the end of test teardown, as the implementation
+ reads various Robot variables, some of them only available at teardown.
+
+ The write is done at next start (or at the end of global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+ message = BuiltIn().get_variable_value("\\${TEST_MESSAGE}")
+ test_tags = BuiltIn().get_variable_value("\\${TEST_TAGS}")
+ self.data["end_time"] = end_time
+ start_float = parse(self.data["start_time"]).timestamp()
+ end_float = parse(self.data["end_time"]).timestamp()
+ self.data["duration"] = end_float - start_float
+ self.data["tags"] = list(test_tags)
+ self.data["message"] = message
+ self.process_passed()
+ self.process_test_name()
+ self.process_results()
+ self.export_pending_data()
+
+ def finalize_suite_teardown_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of suite teardown
+ (but before the explicit write in the global suite teardown).
+ The write is done at next start (or explicitly for global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+ self.data["end_time"] = end_time
+ self.export_pending_data()
+
+ def process_test_name(self):
+ """Replace raw test name with short and long test name and set
+ test_type.
+
+ Perform in-place edits on the data dictionary.
+ Remove raw suite_name and test_name, they are not published.
+ Return early if the data is not for test case.
+ Insert test ID and long and short test name into the data.
+ Besides suite_name and test_name, also test tags are read.
+
+ Short test name is basically a suite tag, but with NIC driver prefix,
+ if the NIC driver used is not the default one (drv_vfio_pci for VPP
+ tests).
+
+ Long test name has the following form:
+ {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
+ Lookup in test tags is needed to get the threads value.
+ The threads_and_cores part may be empty, e.g. for TRex tests.
+
+ Test ID has form {suite_name}.{test_name} where the two names come from
+        Robot variables, converted to lower case with spaces replaced by
+        underscores.
+
+ Test type is set in an internal function.
+
+ :raises RuntimeError: If the data does not contain expected values.
+ """
+ suite_part = self.data.pop("suite_name").lower().replace(" ", "_")
+ if "test_name" not in self.data:
+ # There will be no test_id, provide suite_id instead.
+ self.data["suite_id"] = suite_part
+ return
+ test_part = self.data.pop("test_name").lower().replace(" ", "_")
+ self.data["test_id"] = f"{suite_part}.{test_part}"
+ tags = self.data["tags"]
+ # Test name does not contain thread count.
+ subparts = test_part.split("-")
+ if any("tg" in s for s in subparts) and subparts[1] == "":
+ # Physical core count not detected, assume it is a TRex test.
+ if "--" not in test_part:
+ raise RuntimeError(f"Invalid TG test name for: {subparts}")
+ short_name = test_part.split("--", 1)[1]
+ else:
+ short_name = "-".join(subparts[2:])
+ # Add threads to test_part.
+ core_part = subparts[1]
+ tag = list(filter(lambda t: subparts[1].upper() in t, tags))[0]
+ test_part = test_part.replace(f"-{core_part}-", f"-{tag.lower()}-")
+ # For long name we need NIC model, which is only in suite name.
+ last_suite_part = suite_part.split(".")[-1]
+ # Short name happens to be the suffix we want to ignore.
+ prefix_part = last_suite_part.split(short_name)[0]
+ # Also remove the trailing dash.
+ prefix_part = prefix_part[:-1]
+ # Throw away possible link prefix such as "1n1l-".
+ nic_code = prefix_part.split("-", 1)[-1]
+ nic_short = Constants.NIC_CODE_TO_SHORT_NAME[nic_code]
+ long_name = f"{nic_short}-{test_part}"
+ # Set test type.
+ test_type = self._detect_test_type()
+ self.data["test_type"] = test_type
+ # Remove trailing test type from names (if present).
+ short_name = short_name.split(f"-{test_type}")[0]
+ long_name = long_name.split(f"-{test_type}")[0]
+ # Store names.
+ self.data["test_name_short"] = short_name
+ self.data["test_name_long"] = long_name
+
+ def process_passed(self):
+ """Process the test status information as boolean.
+
+ Boolean is used to make post processing more efficient.
+ In case the test status is PASS, we will truncate the test message.
+ """
+ status = BuiltIn().get_variable_value("\\${TEST_STATUS}")
+ if status is not None:
+ self.data["passed"] = (status == "PASS")
+ if self.data["passed"]:
+ # Also truncate success test messages.
+ self.data["message"] = ""
+
+ def process_results(self):
+ """Process measured results.
+
+ Results are used to avoid future post processing, making it more
+ efficient to consume.
+ """
+ if self.data["telemetry"]:
+ telemetry_encode = "\n".join(self.data["telemetry"]).encode()
+ telemetry_compress = compress(telemetry_encode, level=9)
+ telemetry_base64 = b2a_base64(telemetry_compress, newline=False)
+ self.data["telemetry"] = [telemetry_base64.decode()]
+ if "result" not in self.data:
+ return
+ result_node = self.data["result"]
+ result_type = result_node["type"]
+ if result_type == "unknown":
+ # Device or something else not supported.
+ return
+
+ # Compute avg and stdev for mrr (rate and bandwidth).
+ if result_type == "mrr":
+ for node_name in ("rate", "bandwidth"):
+ node = result_node["receive_rate"].get(node_name, None)
+ if node is not None:
+ stats = AvgStdevStats.for_runs(node["values"])
+ node["avg"] = stats.avg
+ node["stdev"] = stats.stdev
+ return
+
+ # Multiple processing steps for ndrpdr.
+ if result_type != "ndrpdr":
+ return
+ # Filter out invalid latencies.
+ for which_key in ("latency_forward", "latency_reverse"):
+ if which_key not in result_node:
+ # Probably just an unidir test.
+ continue
+ for load in ("pdr_0", "pdr_10", "pdr_50", "pdr_90"):
+ if result_node[which_key][load]["max"] <= 0:
+ # One invalid number is enough to remove all loads.
+ break
+ else:
+ # No break means all numbers are ok, nothing to do here.
+ continue
+ # Break happened, something is invalid, remove all loads.
+ result_node.pop(which_key)
+ return
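
The telemetry handling in process_results can be reproduced in isolation; the sketch below performs the same zlib-plus-base64 round trip on invented telemetry lines:

    from binascii import a2b_base64, b2a_base64
    from zlib import compress, decompress

    telemetry = ['calls{name="ip4-lookup"} 123', 'calls{name="ip4-rewrite"} 122']
    joined = "\n".join(telemetry).encode()
    encoded = b2a_base64(compress(joined, level=9), newline=False).decode()
    # A consumer reverses the steps to recover the original lines.
    restored = decompress(a2b_base64(encoded)).decode().split("\n")
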
diff --git a/resources/libraries/python/model/ExportLog.py b/resources/libraries/python/model/ExportLog.py
deleted file mode 100644
index e02eef63c5..0000000000
--- a/resources/libraries/python/model/ExportLog.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module with keywords that publish metric and other log events.
-"""
-
-import datetime
-
-from resources.libraries.python.model.util import get_export_data
-
-
-def export_ssh_command(host, port, command):
- """Add a log item about SSH command execution starting.
-
- The log item is present only in raw output.
- Result arrives in a separate log item.
- Log level is always DEBUG.
-
- The command is stored as "data" (not "msg") as in some cases
- the command can be too long to act as a message.
-
- The host is added to the info set of hosts.
-
- :param host: Node "host" attribute, usually its IPv4 address.
- :param port: SSH port number to use when connecting to the host.
- :param command: Serialized bash command to execute.
- :type host: str
- :type port: int
- :type command: str
- """
- timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- data = get_export_data()
- ssh_record = dict(
- source_type=u"host,port",
- source_id=dict(host=host, port=port),
- msg_type=u"ssh_command",
- log_level=u"DEBUG",
- timestamp=timestamp,
- msg="",
- data=str(command),
- )
- data[u"hosts"].add(host)
- data[u"log"].append(ssh_record)
-
-
-def export_ssh_result(host, port, code, stdout, stderr, duration):
- """Add a log item about ssh execution result.
-
- Only for raw output log.
-
- There is no easy way to pair with the corresponding command,
- but usually there is only one SSH session for given host and port.
- The duration value may give a hint if that is not the case.
-
- Message is empty, data has fields "rc", "stdout", "stderr" and "duration".
- Log level is always DEBUG.
-
- The host is NOT added to the info set of hosts, as each result
- comes after a command.
-
- TODO: Do not require duration, find preceding ssh command in log.
- Reason: Pylint complains about too many arguments.
- Alternative: Define type for SSH endopoint (and use that instead host+port).
-
- :param host: Node "host" attribute, usually its IPv4 address.
- :param port: SSH port number to use when connecting to the host.
- :param code: Bash return code, e.g. 0 for success.
- :param stdout: Captured standard output of the command execution.
- :param stderr: Captured error output of the command execution.
- :param duration: How long has the command been executing, in seconds.
- :type host: str
- :type port: int
- :type code: int
- :type stdout: str
- :type stderr: str
- :type duration: float
- """
- timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- data = get_export_data()
- ssh_record = dict(
- source_type=u"host,port",
- source_id=dict(host=host, port=port),
- msg_type=u"ssh_result",
- log_level=u"DEBUG",
- timestamp=timestamp,
- msg=u"",
- data=dict(
- rc=int(code),
- stdout=str(stdout),
- stderr=str(stderr),
- duration=float(duration),
- ),
- )
- data[u"log"].append(ssh_record)
-
-
-def export_ssh_timeout(host, port, stdout, stderr, duration):
- """Add a log item about ssh execution timing out.
-
- Only for debug log.
-
- There is no easy way to pair with the corresponding command,
- but usually there is only one SSH session for given host and port.
-
- Message is empty, data has fields "stdout", "stderr" and "duration".
- The duration value may give a hint if that is not the case.
- Log level is always DEBUG.
-
- The host is NOT added to the info set of hosts, as each timeout
- comes after a command.
-
- :param host: Node "host" attribute, usually its IPv4 address.
- :param port: SSH port number to use when connecting to the host.
- :param stdout: Captured standard output of the command execution so far.
- :param stderr: Captured error output of the command execution so far.
- :param duration: How long has the command been executing, in seconds.
- :type host: str
- :type port: int
- :type stdout: str
- :type stderr: str
- :type duration: float
- """
- timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- data = get_export_data()
- ssh_record = dict(
- source_type=u"host,port",
- source_id=dict(host=host, port=port),
- msg_type=u"ssh_timeout",
- log_level=u"DEBUG",
- timestamp=timestamp,
- msg=u"",
- data=dict(
- stdout=str(stdout),
- stderr=str(stderr),
- duration=float(duration),
- ),
- )
- data[u"log"].append(ssh_record)
diff --git a/resources/libraries/python/model/ExportResult.py b/resources/libraries/python/model/ExportResult.py
index 16c6b89fb3..f155848913 100644
--- a/resources/libraries/python/model/ExportResult.py
+++ b/resources/libraries/python/model/ExportResult.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -18,7 +18,7 @@ from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.model.util import descend, get_export_data
-def export_dut_type_and_version(dut_type=u"unknown", dut_version=u"unknown"):
+def export_dut_type_and_version(dut_type="unknown", dut_version="unknown"):
"""Export the arguments as dut type and version.
Robot tends to convert "none" into None, hence the unusual default values.
@@ -32,32 +32,32 @@ def export_dut_type_and_version(dut_type=u"unknown", dut_version=u"unknown"):
:type dut_version: Optional[str]
:raises RuntimeError: If value is neither in argument nor robot variable.
"""
- if dut_type == u"unknown":
- dut_type = BuiltIn().get_variable_value(u"\\${DUT_TYPE}", u"unknown")
- if dut_type == u"unknown":
- raise RuntimeError(u"Dut type not provided.")
+ if dut_type == "unknown":
+ dut_type = BuiltIn().get_variable_value("\\${DUT_TYPE}", "unknown")
+ if dut_type == "unknown":
+ raise RuntimeError("Dut type not provided.")
else:
# We want to set a variable in higher level suite setup
# to be available to test setup several levels lower.
BuiltIn().set_suite_variable(
- u"\\${DUT_TYPE}", dut_type, u"children=True"
+ "\\${DUT_TYPE}", dut_type, "children=True"
)
- if dut_version == u"unknown":
+ if dut_version == "unknown":
dut_version = BuiltIn().get_variable_value(
- u"\\${DUT_VERSION}", u"unknown"
+ "\\${DUT_VERSION}", "unknown"
)
- if dut_type == u"unknown":
- raise RuntimeError(u"Dut version not provided.")
+ if dut_type == "unknown":
+ raise RuntimeError("Dut version not provided.")
else:
BuiltIn().set_suite_variable(
- u"\\${DUT_VERSION}", dut_version, u"children=True"
+ "\\${DUT_VERSION}", dut_version, "children=True"
)
data = get_export_data()
- data[u"dut_type"] = dut_type.lower()
- data[u"dut_version"] = dut_version
+ data["dut_type"] = dut_type.lower()
+ data["dut_version"] = dut_version
-def export_tg_type_and_version(tg_type=u"unknown", tg_version=u"unknown"):
+def export_tg_type_and_version(tg_type="unknown", tg_version="unknown"):
"""Export the arguments as tg type and version.
Robot tends to convert "none" into None, hence the unusual default values.
@@ -71,50 +71,57 @@ def export_tg_type_and_version(tg_type=u"unknown", tg_version=u"unknown"):
:type tg_version: Optional[str]
:raises RuntimeError: If value is neither in argument nor robot variable.
"""
- if tg_type == u"unknown":
- tg_type = BuiltIn().get_variable_value(u"\\${TG_TYPE}", u"unknown")
- if tg_type == u"unknown":
- raise RuntimeError(u"TG type not provided.")
+ if tg_type == "unknown":
+ tg_type = BuiltIn().get_variable_value("\\${TG_TYPE}", "unknown")
+ if tg_type == "unknown":
+ raise RuntimeError("TG type not provided!")
else:
# We want to set a variable in higher level suite setup
# to be available to test setup several levels lower.
BuiltIn().set_suite_variable(
- u"\\${TG_TYPE}", tg_type, u"children=True"
+ "\\${TG_TYPE}", tg_type, "children=True"
)
- if tg_version == u"unknown":
+ if tg_version == "unknown":
tg_version = BuiltIn().get_variable_value(
- u"\\${TG_VERSION}", u"unknown"
+ "\\${TG_VERSION}", "unknown"
)
- if tg_type == u"unknown":
- raise RuntimeError(u"TG version not provided.")
+ if tg_type == "unknown":
+ raise RuntimeError("TG version not provided!")
else:
BuiltIn().set_suite_variable(
- u"\\${TG_VERSION}", tg_version, u"children=True"
+ "\\${TG_VERSION}", tg_version, "children=True"
)
data = get_export_data()
- data[u"tg_type"] = tg_type.lower()
- data[u"tg_version"] = tg_version
+ data["tg_type"] = tg_type.lower()
+ data["tg_version"] = tg_version
-def append_mrr_value(mrr_value, unit):
+def append_mrr_value(mrr_value, mrr_unit, bandwidth_value=None,
+ bandwidth_unit="bps"):
"""Store mrr value to proper place so it is dumped into json.
The value is appended only when unit is not empty.
:param mrr_value: Forwarding rate from MRR trial.
- :param unit: Unit of measurement for the rate.
+ :param mrr_unit: Unit of measurement for the rate.
+ :param bandwidth_value: The same value recomputed into L1 bits per second.
:type mrr_value: float
- :type unit: str
+ :type mrr_unit: str
+ :type bandwidth_value: Optional[float]
+ :type bandwidth_unit: Optional[str]
"""
- if not unit:
+ if not mrr_unit:
return
data = get_export_data()
- data[u"result"][u"type"] = u"mrr"
- rate_node = descend(descend(data[u"result"], u"receive_rate"), "rate")
- rate_node[u"unit"] = str(unit)
- values_list = descend(rate_node, u"values", list)
- values_list.append(float(mrr_value))
- # TODO: Fill in the bandwidth part for pps?
+ data["result"]["type"] = "mrr"
+
+ for node_val, node_unit, node_name in ((mrr_value, mrr_unit, "rate"),
+ (bandwidth_value, bandwidth_unit, "bandwidth")):
+ if node_val is not None:
+ node = descend(descend(data["result"], "receive_rate"), node_name)
+ node["unit"] = str(node_unit)
+ values_list = descend(node, "values", list)
+ values_list.append(float(node_val))
def export_search_bound(text, value, unit, bandwidth=None):
@@ -140,18 +147,17 @@ def export_search_bound(text, value, unit, bandwidth=None):
"""
value = float(value)
text = str(text).lower()
- result_type = u"soak" if u"plrsearch" in text else u"ndrpdr"
- upper_or_lower = u"upper" if u"upper" in text else u"lower"
- ndr_or_pdr = u"ndr" if u"ndr" in text else u"pdr"
+ result_type = "soak" if "plrsearch" in text else "ndrpdr"
+ upper_or_lower = "upper" if "upper" in text else "lower"
+ ndr_or_pdr = "ndr" if "ndr" in text else "pdr"
- data = get_export_data()
- result_node = data[u"result"]
- result_node[u"type"] = result_type
+ result_node = get_export_data()["result"]
+ result_node["type"] = result_type
rate_item = dict(rate=dict(value=value, unit=unit))
if bandwidth:
- rate_item[u"bandwidth"] = dict(value=float(bandwidth), unit=u"bps")
- if result_type == u"soak":
- descend(result_node, u"critical_rate")[upper_or_lower] = rate_item
+ rate_item["bandwidth"] = dict(value=float(bandwidth), unit="bps")
+ if result_type == "soak":
+ descend(result_node, "critical_rate")[upper_or_lower] = rate_item
return
descend(result_node, ndr_or_pdr)[upper_or_lower] = rate_item
@@ -171,14 +177,14 @@ def _add_latency(result_node, percent, whichward, latency_string):
:type whichward: str
:latency_string: str
"""
- l_min, l_avg, l_max, l_hdrh = latency_string.split(u"/", 3)
+ l_min, l_avg, l_max, l_hdrh = latency_string.split("/", 3)
whichward_node = descend(result_node, f"latency_{whichward}")
percent_node = descend(whichward_node, f"pdr_{percent}")
- percent_node[u"min"] = int(l_min)
- percent_node[u"avg"] = int(l_avg)
- percent_node[u"max"] = int(l_max)
- percent_node[u"hdrh"] = l_hdrh
- percent_node[u"unit"] = u"us"
+ percent_node["min"] = int(l_min)
+ percent_node["avg"] = int(l_avg)
+ percent_node["max"] = int(l_max)
+ percent_node["hdrh"] = l_hdrh
+ percent_node["unit"] = "us"
def export_ndrpdr_latency(text, latency):
@@ -197,17 +203,114 @@ def export_ndrpdr_latency(text, latency):
:type text: str
:type latency: 1-tuple or 2-tuple of str
"""
- data = get_export_data()
- result_node = data[u"result"]
+ result_node = get_export_data()["result"]
percent = 0
- if u"90" in text:
+ if "90" in text:
percent = 90
- elif u"50" in text:
+ elif "50" in text:
percent = 50
- elif u"10" in text:
+ elif "10" in text:
percent = 10
- _add_latency(result_node, percent, u"forward", latency[0])
+ _add_latency(result_node, percent, "forward", latency[0])
# Else TRex does not support latency measurement for this traffic profile.
if len(latency) < 2:
return
- _add_latency(result_node, percent, u"reverse", latency[1])
+ _add_latency(result_node, percent, "reverse", latency[1])
+
+
+def export_reconf_result(packet_rate, packet_loss, bandwidth):
+ """Export the RECONF type results.
+
+ Result type is set to reconf.
+
+ :param packet_rate: Aggregate offered load in packets per second.
+ :param packet_loss: How many of the packets were dropped or unsent.
+ :param bandwidth: The offered load recomputed into L1 bits per second.
+ :type packet_rate: float
+ :type packet_loss: int
+ :type bandwidth: float
+ """
+ result_node = get_export_data()["result"]
+ result_node["type"] = "reconf"
+
+ time_loss = int(packet_loss) / float(packet_rate)
+ result_node["aggregate_rate"] = dict(
+ bandwidth=dict(
+ unit="bps",
+ value=float(bandwidth)
+ ),
+ rate=dict(
+ unit="pps",
+ value=float(packet_rate)
+ )
+ )
+ result_node["loss"] = dict(
+ packet=dict(
+ unit="packets",
+ value=int(packet_loss)
+ ),
+ time=dict(
+ unit="s",
+ value=time_loss
+ )
+ )
+
+
+def export_hoststack_results(
+ bandwidth, rate=None, rate_unit=None, latency=None,
+ failed_requests=None, completed_requests=None, retransmits=None,
+ duration=None
+):
+ """Export the HOSTSTACK type results.
+
+ Result type is set to hoststack.
+
+ :param bandwidth: Measured transfer rate using bps as a unit.
+ :param rate: Resulting rate measured by the test. [Optional]
+ :param rate_unit: CPS or RPS. [Optional]
+    :param latency: Measured latency. [Optional]
+ :param failed_requests: Number of failed requests. [Optional]
+ :param completed_requests: Number of completed requests. [Optional]
+ :param retransmits: Retransmitted TCP packets. [Optional]
+ :param duration: Measurement duration. [Optional]
+ :type bandwidth: float
+ :type rate: float
+ :type rate_unit: str
+ :type latency: float
+ :type failed_requests: int
+ :type completed_requests: int
+ :type retransmits: int
+ :type duration: float
+ """
+ result_node = get_export_data()["result"]
+ result_node["type"] = "hoststack"
+
+ result_node["bandwidth"] = dict(unit="bps", value=bandwidth)
+ if rate is not None:
+ result_node["rate"] = \
+ dict(unit=rate_unit, value=rate)
+ if latency is not None:
+ result_node["latency"] = \
+ dict(unit="ms", value=latency)
+ if failed_requests is not None:
+ result_node["failed_requests"] = \
+ dict(unit="requests", value=failed_requests)
+ if completed_requests is not None:
+ result_node["completed_requests"] = \
+ dict(unit="requests", value=completed_requests)
+ if retransmits is not None:
+ result_node["retransmits"] = \
+ dict(unit="packets", value=retransmits)
+ if duration is not None:
+ result_node["duration"] = \
+ dict(unit="s", value=duration)
+
+
+def append_telemetry(telemetry_item):
+ """Append telemetry entry to proper place so it is dumped into json.
+
+ :param telemetry_item: Telemetry entry.
+ :type telemetry_item: str
+ """
+ data = get_export_data()
+ data["telemetry"].append(telemetry_item)
diff --git a/resources/libraries/python/model/mem2raw.py b/resources/libraries/python/model/MemDump.py
index 543ee935e2..b391569286 100644
--- a/resources/libraries/python/model/mem2raw.py
+++ b/resources/libraries/python/model/MemDump.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -11,17 +11,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Module for converting in-memory data into raw JSON output.
+"""Module for converting in-memory data into JSON output.
-CSIT and VPP PAPI are using custom data types
-that are not directly serializable into JSON.
+CSIT and VPP PAPI use custom data types that are not directly serializable
+into JSON.
-Thus, before writing the raw outpt onto disk,
-the data is recursively converted to equivalent serializable types,
-in extreme cases replaced by string representation.
+Thus, before writing the output onto disk, the data is recursively converted to
+equivalent serializable types, in extreme cases replaced by string
+representation.
-Validation is outside the scope of this module,
-as it should use the JSON data read from disk.
+Validation is outside the scope of this module, as it should use the JSON data
+read from disk.
"""
import json
@@ -29,6 +29,7 @@ import os
from collections.abc import Iterable, Mapping, Set
from enum import IntFlag
+from dateutil.parser import parse
def _pre_serialize_recursive(data):
@@ -107,7 +108,7 @@ def _pre_serialize_root(data):
to make it more human friendly.
We are moving "version" to the top,
followed by start time and end time,
- and various long fields (such as "log") to the bottom.
+ and various long fields to the bottom.
Some edits are done in-place, do not trust the argument value after calling.
@@ -122,24 +123,72 @@ def _pre_serialize_root(data):
if not isinstance(data, dict):
raise RuntimeError(f"Root data object needs to be a dict: {data!r}")
data = _pre_serialize_recursive(data)
- log = data.pop(u"log")
new_data = dict(version=data.pop(u"version"))
new_data[u"start_time"] = data.pop(u"start_time")
new_data[u"end_time"] = data.pop(u"end_time")
new_data.update(data)
- new_data[u"log"] = log
return new_data
-def write_raw_output(raw_file_path, raw_data):
+def _merge_into_suite_info_file(teardown_path):
+ """Move setup and teardown data into a singe file, remove old files.
+
+ The caller has to confirm the argument is correct, e.g. ending in
+ "/teardown.info.json".
+
+ :param teardown_path: Local filesystem path to teardown file.
+ :type teardown_path: str
+ :returns: Local filesystem path to newly created suite file.
+ :rtype: str
+ """
+ # Manual right replace: https://stackoverflow.com/a/9943875
+ setup_path = u"setup".join(teardown_path.rsplit(u"teardown", 1))
+ with open(teardown_path, u"rt", encoding="utf-8") as file_in:
+ teardown_data = json.load(file_in)
+ # Transforming setup data into suite data.
+ with open(setup_path, u"rt", encoding="utf-8") as file_in:
+ suite_data = json.load(file_in)
+
+ end_time = teardown_data[u"end_time"]
+ suite_data[u"end_time"] = end_time
+ start_float = parse(suite_data[u"start_time"]).timestamp()
+ end_float = parse(suite_data[u"end_time"]).timestamp()
+ suite_data[u"duration"] = end_float - start_float
+ setup_telemetry = suite_data.pop(u"telemetry")
+ suite_data[u"setup_telemetry"] = setup_telemetry
+ suite_data[u"teardown_telemetry"] = teardown_data[u"telemetry"]
+
+ suite_path = u"suite".join(teardown_path.rsplit(u"teardown", 1))
+ with open(suite_path, u"wt", encoding="utf-8") as file_out:
+ json.dump(suite_data, file_out, indent=1)
+ # We moved everything useful from temporary setup/teardown info files.
+ os.remove(setup_path)
+ os.remove(teardown_path)
+
+ return suite_path
+
+
+def write_output(file_path, data):
"""Prepare data for serialization and dump into a file.
Ancestor directories are created if needed.
- :param to_raw_path: Local filesystem path, including the file name.
- :type to_raw_path: str
+ :param file_path: Local filesystem path, including the file name.
+ :param data: Root data to make serializable, dictized when applicable.
+ :type file_path: str
+ :type data: dict
+ :returns: Local filesystem path of the written file; for a teardown file
+ this is the path of the merged suite file.
+ :rtype: str
"""
- raw_data = _pre_serialize_root(raw_data)
- os.makedirs(os.path.dirname(raw_file_path), exist_ok=True)
- with open(raw_file_path, u"wt", encoding="utf-8") as file_out:
- json.dump(raw_data, file_out, indent=1)
+ data = _pre_serialize_root(data)
+
+ # Let's move telemetry to the end.
+ telemetry = data.pop(u"telemetry")
+ data[u"telemetry"] = telemetry
+
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ with open(file_path, u"wt", encoding="utf-8") as file_out:
+ json.dump(data, file_out, indent=1)
+
+ if file_path.endswith(u"/teardown.info.json"):
+ file_path = _merge_into_suite_info_file(file_path)
+
+ return file_path
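
A minimal usage sketch of the new write_output flow. The path and field values are made up, and it assumes the matching setup.info.json was written earlier into the same directory.

from resources.libraries.python.model.MemDump import write_output

data = {
    "version": "1.0.0",
    "start_time": "2023-01-01T00:00:00.000000Z",
    "end_time": "2023-01-01T00:10:00.000000Z",
    "telemetry": [],
}
# Writing a teardown file triggers the merge into suite.info.json;
# the returned path points at the merged suite file.
suite_path = write_output("output/tests/suite_a/teardown.info.json", data)
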
diff --git a/resources/libraries/python/model/export_json.py b/resources/libraries/python/model/export_json.py
deleted file mode 100644
index 840c49fa70..0000000000
--- a/resources/libraries/python/model/export_json.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module tracking json in-memory data and saving it to files.
-
-The current implementation tracks data for raw output,
-and info output is created from raw output on disk (see raw2info module).
-Raw file contains all log items but no derived quantities,
-info file contains only important log items but also derived quantities.
-The overlap between two files is big.
-
-Each test case, suite setup (hierarchical) and teardown has its own file pair.
-
-Validation is performed for output files with available JSON schema.
-Validation is performed in data deserialized from disk,
-as serialization might have introduced subtle errors.
-"""
-
-import datetime
-import os.path
-
-from robot.api import logger
-from robot.libraries.BuiltIn import BuiltIn
-
-from resources.libraries.python.Constants import Constants
-from resources.libraries.python.model.ExportResult import (
- export_dut_type_and_version, export_tg_type_and_version
-)
-from resources.libraries.python.model.mem2raw import write_raw_output
-from resources.libraries.python.model.raw2info import convert_content_to_info
-from resources.libraries.python.model.validate import (get_validators, validate)
-
-
-class export_json():
- """Class handling the json data setting and export."""
-
- ROBOT_LIBRARY_SCOPE = u"GLOBAL"
-
- def __init__(self):
- """Declare required fields, cache output dir.
-
- Also memorize schema validator instances.
- """
- self.output_dir = BuiltIn().get_variable_value(u"\\${OUTPUT_DIR}", ".")
- self.raw_file_path = None
- self.raw_data = None
- self.validators = get_validators()
-
- def export_pending_data(self):
- """Write the accumulated data to disk.
-
- Create missing directories.
- Reset both file path and data to avoid writing multiple times.
-
- Functions which finalize content for given file are calling this,
- so make sure each test and non-empty suite setup or teardown
- is calling this as their last keyword.
-
- If no file path is set, do not write anything,
- as that is the failsafe behavior when caller from unexpected place.
- Aso do not write anything when EXPORT_JSON constant is false.
-
- Regardless of whether data was written, it is cleared.
- """
- if not Constants.EXPORT_JSON or not self.raw_file_path:
- self.raw_data = None
- self.raw_file_path = None
- return
- write_raw_output(self.raw_file_path, self.raw_data)
- # Raw data is going to be cleared (as a sign that raw export succeeded),
- # so this is the last chance to detect if it was for a test case.
- is_testcase = u"result" in self.raw_data
- self.raw_data = None
- # Validation for raw output goes here when ready.
- info_file_path = convert_content_to_info(self.raw_file_path)
- self.raw_file_path = None
- # If "result" is missing from info content,
- # it could be a bug in conversion from raw test case content,
- # so instead of that we use the flag detected earlier.
- if is_testcase:
- validate(info_file_path, self.validators[u"tc_info"])
-
- def warn_on_bad_export(self):
- """If bad state is detected, log a warning and clean up state."""
- if self.raw_file_path is not None or self.raw_data is not None:
- logger.warn(f"Previous export not clean, path {self.raw_file_path}")
- self.raw_data = None
- self.raw_file_path = None
-
- def start_suite_setup_export(self):
- """Set new file path, initialize data for the suite setup.
-
- This has to be called explicitly at start of suite setup,
- otherwise Robot likes to postpone initialization
- until first call by a data-adding keyword.
-
- File path is set based on suite.
- """
- self.warn_on_bad_export()
- start_time = datetime.datetime.utcnow().strftime(
- u"%Y-%m-%dT%H:%M:%S.%fZ"
- )
- suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
- suite_id = suite_name.lower().replace(u" ", u"_")
- suite_path_part = os.path.join(*suite_id.split(u"."))
- output_dir = self.output_dir
- self.raw_file_path = os.path.join(
- output_dir, suite_path_part, u"setup.raw.json"
- )
- self.raw_data = dict()
- self.raw_data[u"version"] = Constants.MODEL_VERSION
- self.raw_data[u"start_time"] = start_time
- self.raw_data[u"suite_name"] = suite_name
- self.raw_data[u"suite_documentation"] = BuiltIn().get_variable_value(
- u"\\${SUITE_DOCUMENTATION}"
- )
- # "end_time" and "duration" is added on flush.
- self.raw_data[u"hosts"] = set()
- self.raw_data[u"log"] = list()
-
- def start_test_export(self):
- """Set new file path, initialize data to minimal tree for the test case.
-
- It is assumed Robot variables DUT_TYPE and DUT_VERSION
- are already set (in suite setup) to correct values.
-
- This function has to be called explicitly at the start of test setup,
- otherwise Robot likes to postpone initialization
- until first call by a data-adding keyword.
-
- File path is set based on suite and test.
- """
- self.warn_on_bad_export()
- start_time = datetime.datetime.utcnow().strftime(
- u"%Y-%m-%dT%H:%M:%S.%fZ"
- )
- suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
- suite_id = suite_name.lower().replace(u" ", u"_")
- suite_path_part = os.path.join(*suite_id.split(u"."))
- test_name = BuiltIn().get_variable_value(u"\\${TEST_NAME}")
- self.raw_file_path = os.path.join(
- self.output_dir, suite_path_part,
- test_name.lower().replace(u" ", u"_") + u".raw.json"
- )
- self.raw_data = dict()
- self.raw_data[u"version"] = Constants.MODEL_VERSION
- self.raw_data[u"start_time"] = start_time
- self.raw_data[u"suite_name"] = suite_name
- self.raw_data[u"test_name"] = test_name
- test_doc = BuiltIn().get_variable_value(u"\\${TEST_DOCUMENTATION}", u"")
- self.raw_data[u"test_documentation"] = test_doc
- # "test_type" is added when converting to info.
- # "tags" is detected and added on flush.
- # "end_time" and "duration" is added on flush.
- # Robot status and message are added on flush.
- self.raw_data[u"result"] = dict(type=u"unknown")
- self.raw_data[u"hosts"] = set()
- self.raw_data[u"log"] = list()
- export_dut_type_and_version()
- export_tg_type_and_version()
-
- def start_suite_teardown_export(self):
- """Set new file path, initialize data for the suite teardown.
-
- This has to be called explicitly at start of suite teardown,
- otherwise Robot likes to postpone initialization
- until first call by a data-adding keyword.
-
- File path is set based on suite.
- """
- self.warn_on_bad_export()
- start_time = datetime.datetime.utcnow().strftime(
- u"%Y-%m-%dT%H:%M:%S.%fZ"
- )
- suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
- suite_id = suite_name.lower().replace(u" ", u"_")
- suite_path_part = os.path.join(*suite_id.split(u"."))
- self.raw_file_path = os.path.join(
- self.output_dir, suite_path_part, u"teardown.raw.json"
- )
- self.raw_data = dict()
- self.raw_data[u"version"] = Constants.MODEL_VERSION
- self.raw_data[u"start_time"] = start_time
- self.raw_data[u"suite_name"] = suite_name
- # "end_time" and "duration" is added on flush.
- self.raw_data[u"hosts"] = set()
- self.raw_data[u"log"] = list()
-
- def finalize_suite_setup_export(self):
- """Add the missing fields to data. Do not write yet.
-
- Should be run at the end of suite setup.
- The write is done at next start (or at the end of global teardown).
- """
- end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- self.raw_data[u"end_time"] = end_time
- self.export_pending_data()
-
- def finalize_test_export(self):
- """Add the missing fields to data. Do not write yet.
-
- Should be at the end of test teardown, as the implementation
- reads various Robot variables, some of them only available at teardown.
-
- The write is done at next start (or at the end of global teardown).
- """
- end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- message = BuiltIn().get_variable_value(u"\\${TEST_MESSAGE}")
- status = BuiltIn().get_variable_value(u"\\${TEST_STATUS}")
- test_tags = BuiltIn().get_variable_value(u"\\${TEST_TAGS}")
- self.raw_data[u"end_time"] = end_time
- self.raw_data[u"tags"] = list(test_tags)
- self.raw_data[u"status"] = status
- self.raw_data[u"message"] = message
- self.export_pending_data()
-
- def finalize_suite_teardown_export(self):
- """Add the missing fields to data. Do not write yet.
-
- Should be run at the end of suite teardown
- (but before the explicit write in the global suite teardown).
- The write is done at next start (or explicitly for global teardown).
- """
- end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
- self.raw_data[u"end_time"] = end_time
- self.export_pending_data()
diff --git a/resources/libraries/python/model/parse.py b/resources/libraries/python/model/parse.py
new file mode 100644
index 0000000000..1e0aebfe18
--- /dev/null
+++ b/resources/libraries/python/model/parse.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Library for parsing results from JSON back to python objects.
+
+This is useful for vpp-csit jobs like per-patch performance verify.
+Such jobs invoke robot multiple times, each time on a different build.
+Each robot invocation may execute several test cases.
+How exactly are the results compared depends on the job type,
+but extracting just the main results from jsons (file trees) is a common task,
+so it is placed into this library.
+
+As such, the code in this file does not directly interact
+with the code in other files in this directory
+(result comparison is done outside robot invocation),
+but all files share common assumptions about json structure.
+
+The function here expects a particular tree created on a filesystem by
+a bootstrap script, including test results
+exported as json files according to a current model schema.
+This module extracts the results (according to result type)
+and joins them into a mapping from test IDs to lists of floats.
+Also, the result is cached into a results.json file,
+so each tree is parsed only once.
+
+The cached result does not depend on tree placement,
+so the bootstrap script may move and copy trees around
+before or after parsing.
+"""
+
+import json
+import os
+import pathlib
+
+from typing import Dict, List
+
+
+def parse(dirpath: str, fake_value: float = 1.0) -> Dict[str, List[float]]:
+ """Look for test jsons, extract scalar results.
+
+ Files other than .json are skipped, as are jsons without test_id.
+ If the test failed, four fake values are used as a fake result.
+
+ Units are ignored, as both parent and current are tested
+ with the same CSIT code so the unit should be identical.
+
+ The test results are sorted by test_id,
+ as the filesystem order is not deterministic enough.
+
+ The result is also cached as results.json file.
+
+ :param dirpath: Path to the directory tree to examine.
+ :param fake_value: Fake value to use for test cases that failed.
+ :type dirpath: str
+ :type fake_value: float
+ :returns: Mapping from test IDs to list of measured values.
+ :rtype: Dict[str, List[float]]
+ :raises RuntimeError: On duplicate test ID or unknown test type.
+ """
+ if not pathlib.Path(dirpath).is_dir():
+ # This happens when per-patch runs out of iterations.
+ return {}
+ resultpath = pathlib.Path(f"{dirpath}/results.json")
+ if resultpath.is_file():
+ with open(resultpath, "rt", encoding="utf8") as file_in:
+ return json.load(file_in)
+ results = {}
+ for root, _, files in os.walk(dirpath):
+ for filename in files:
+ if not filename.endswith(".json"):
+ continue
+ filepath = os.path.join(root, filename)
+ with open(filepath, "rt", encoding="utf8") as file_in:
+ data = json.load(file_in)
+ if "test_id" not in data:
+ continue
+ name = data["test_id"]
+ if name in results:
+ raise RuntimeError(f"Duplicate: {name}")
+ if not data["passed"]:
+ results[name] = [fake_value] * 4
+ continue
+ result_object = data["result"]
+ result_type = result_object["type"]
+ if result_type == "mrr":
+ results[name] = result_object["receive_rate"]["rate"]["values"]
+ elif result_type == "ndrpdr":
+ results[name] = [result_object["pdr"]["lower"]["rate"]["value"]]
+ elif result_type == "soak":
+ results[name] = [
+ result_object["critical_rate"]["lower"]["rate"]["value"]
+ ]
+ elif result_type == "reconf":
+ results[name] = [result_object["loss"]["time"]["value"]]
+ elif result_type == "hoststack":
+ results[name] = [result_object["bandwidth"]["value"]]
+ else:
+ raise RuntimeError(f"Unknown result type: {result_type}")
+ results = {test_id: results[test_id] for test_id in sorted(results)}
+ with open(resultpath, "wt", encoding="utf8") as file_out:
+ json.dump(results, file_out, indent=1, separators=(", ", ": "))
+ return results
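
A hedged sketch of how a per-patch job might consume parse(). The directory names are assumptions, and the actual comparison logic lives outside the Robot invocation, as the module docstring explains.

from resources.libraries.python.model.parse import parse

parent_results = parse("archive/csit_parent")
current_results = parse("archive/csit_current")
for test_id, current_values in sorted(current_results.items()):
    parent_values = parent_results.get(test_id, [])
    # Both are lists of floats; jumpavg or similar decides the verdict.
    print(test_id, parent_values, current_values)
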
diff --git a/resources/libraries/python/model/raw2info.py b/resources/libraries/python/model/raw2info.py
deleted file mode 100644
index bd7d0e3cf1..0000000000
--- a/resources/libraries/python/model/raw2info.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module facilitating conversion from raw outputs into info outputs."""
-
-import copy
-import json
-import os
-
-import dateutil.parser
-
-from resources.libraries.python.Constants import Constants
-from resources.libraries.python.jumpavg.AvgStdevStats import AvgStdevStats
-
-
-def _raw_to_info_path(raw_path):
- """Compute path for info output corresponding to given raw output.
-
- :param raw_path: Local filesystem path to read raw JSON data from.
- :type raw_path: str
- :returns: Local filesystem path to write info JSON content to.
- :rtype: str
- :raises RuntimeError: If the input path does not meet all expectations.
- """
- raw_extension = u".raw.json"
- tmp_parts = raw_path.split(raw_extension)
- if len(tmp_parts) != 2 or tmp_parts[1] != u"":
- raise RuntimeError(f"Not good extension {raw_extension}: {raw_path}")
- info_path = tmp_parts[0] + u".info.json"
- return info_path
-
-
-def _process_test_name(data):
- """Replace raw test name with short and long test name and set test_type.
-
- Perform in-place edits on the data dictionary.
- Remove raw suite_name and test_name, they are not part of info schema.
- Return early if the data is not for test case.
- Inserttest ID and long and short test name into the data.
- Besides suite_name and test_name, also test tags are read.
-
- Short test name is basically a suite tag, but with NIC driver prefix,
- if the NIC driver used is not the default one (drv_vfio_pci for VPP tests).
-
- Long test name has the following form:
- {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
- Lookup in test tags is needed to get the threads value.
- The threads_and_cores part may be empty, e.g. for TRex tests.
-
- Test ID has form {suite_name}.{test_name} where the two names come from
- Robot variables, converted to lower case and spaces replaces by undescores.
-
- Test type is set in an internal function.
-
- :param data: Raw data, perhaps some fields converted into info data already.
- :type data: dict
- :raises RuntimeError: If the raw data does not contain expected values.
- """
- suite_part = data.pop(u"suite_name").lower().replace(u" ", u"_")
- if u"test_name" not in data:
- # There will be no test_id, provide suite_id instead.
- data[u"suite_id"] = suite_part
- return
- test_part = data.pop(u"test_name").lower().replace(u" ", u"_")
- data[u"test_id"] = f"{suite_part}.{test_part}"
- tags = data[u"tags"]
- # Test name does not contain thread count.
- subparts = test_part.split(u"c-", 1)
- if len(subparts) < 2 or subparts[0][-2:-1] != u"-":
- # Physical core count not detected, assume it is a TRex test.
- if u"--" not in test_part:
- raise RuntimeError(f"Cores not found for {subparts}")
- short_name = test_part.split(u"--", 1)[1]
- else:
- short_name = subparts[1]
- # Add threads to test_part.
- core_part = subparts[0][-1] + u"c"
- for tag in tags:
- tag = tag.lower()
- if len(tag) == 4 and core_part == tag[2:] and tag[1] == u"t":
- test_part = test_part.replace(f"-{core_part}-", f"-{tag}-")
- break
- else:
- raise RuntimeError(f"Threads not found for {test_part} tags {tags}")
- # For long name we need NIC model, which is only in suite name.
- last_suite_part = suite_part.split(u".")[-1]
- # Short name happens to be the suffix we want to ignore.
- prefix_part = last_suite_part.split(short_name)[0]
- # Also remove the trailing dash.
- prefix_part = prefix_part[:-1]
- # Throw away possible link prefix such as "1n1l-".
- nic_code = prefix_part.split(u"-", 1)[-1]
- nic_short = Constants.NIC_CODE_TO_SHORT_NAME[nic_code]
- long_name = f"{nic_short}-{test_part}"
- # Set test type.
- test_type = _detect_test_type(data)
- data[u"test_type"] = test_type
- # Remove trailing test type from names (if present).
- short_name = short_name.split(f"-{test_type}")[0]
- long_name = long_name.split(f"-{test_type}")[0]
- # Store names.
- data[u"test_name_short"] = short_name
- data[u"test_name_long"] = long_name
-
-
-def _detect_test_type(data):
- """Return test_type, as inferred from robot test tags.
-
- :param data: Raw data, perhaps some fields converted into info data already.
- :type data: dict
- :returns: The inferred test type value.
- :rtype: str
- :raises RuntimeError: If the test tags does not contain expected values.
- """
- tags = data[u"tags"]
- # First 5 options are specific for VPP tests.
- if u"DEVICETEST" in tags:
- test_type = u"device"
- elif u"LDP_NGINX" in tags:
- test_type = u"vsap"
- elif u"HOSTSTACK" in tags:
- test_type = u"hoststack"
- elif u"GSO_TRUE" in tags or u"GSO_FALSE" in tags:
- test_type = u"gso"
- elif u"RECONF" in tags:
- test_type = u"reconf"
- # The remaining 3 options could also apply to DPDK and TRex tests.
- elif u"SOAK" in tags:
- test_type = u"soak"
- elif u"NDRPDR" in tags:
- test_type = u"ndrpdr"
- elif u"MRR" in tags:
- test_type = u"mrr"
- else:
- raise RuntimeError(f"Unable to infer test type from tags: {tags}")
- return test_type
-
-
-def _convert_to_info_in_memory(data):
- """Perform all changes needed for processing of data, return new data.
-
- Data is assumed to be valid for raw schema, so no exceptions are expected.
- The original argument object is not edited,
- a new copy is created for edits and returned,
- because there is no easy way to sort keys in-place.
-
- :param data: The whole composite object to filter and enhance.
- :type data: dict
- :returns: New object with the edited content.
- :rtype: dict
- """
- data = copy.deepcopy(data)
-
- # Drop any SSH log items.
- data[u"log"] = list()
-
- # Duration is computed for every file.
- start_float = dateutil.parser.parse(data[u"start_time"]).timestamp()
- end_float = dateutil.parser.parse(data[u"end_time"]).timestamp()
- data[u"duration"] = end_float - start_float
-
- # Reorder impotant fields to the top.
- sorted_data = dict(version=data.pop(u"version"))
- sorted_data[u"duration"] = data.pop(u"duration")
- sorted_data[u"start_time"] = data.pop(u"start_time")
- sorted_data[u"end_time"] = data.pop(u"end_time")
- sorted_data.update(data)
- data = sorted_data
- # TODO: Do we care about the order of subsequently added fields?
-
- # Convert status into a boolean.
- status = data.pop(u"status", None)
- if status is not None:
- data[u"passed"] = (status == u"PASS")
- if data[u"passed"]:
- # Also truncate success test messages.
- data[u"message"] = u""
-
- # Replace raw names with processed ones, set test_id and test_type.
- _process_test_name(data)
-
- # The rest is only relevant for test case outputs.
- if u"result" not in data:
- return data
- result_node = data[u"result"]
- result_type = result_node[u"type"]
- if result_type == u"unknown":
- # Device or something else not supported.
- return data
-
- # More processing depending on result type. TODO: Separate functions?
-
- # Compute avg and stdev for mrr.
- if result_type == u"mrr":
- rate_node = result_node[u"receive_rate"][u"rate"]
- stats = AvgStdevStats.for_runs(rate_node[u"values"])
- rate_node[u"avg"] = stats.avg
- rate_node[u"stdev"] = stats.stdev
-
- # Multiple processing steps for ndrpdr.
- if result_type != u"ndrpdr":
- return data
- # Filter out invalid latencies.
- for which_key in (u"latency_forward", u"latency_reverse"):
- if which_key not in result_node:
- # Probably just an unidir test.
- continue
- for load in (u"pdr_0", u"pdr_10", u"pdr_50", u"pdr_90"):
- if result_node[which_key][load][u"max"] <= 0:
- # One invalid number is enough to remove all loads.
- break
- else:
- # No break means all numbers are ok, nothing to do here.
- continue
- # Break happened, something is invalid, remove all loads.
- result_node.pop(which_key)
-
- return data
-
-
-def _merge_into_suite_info_file(teardown_info_path):
- """Move setup and teardown data into a singe file, remove old files.
-
- The caller has to confirm the argument is correct, e.g. ending in
- "/teardown.info.json".
-
- :param teardown_info_path: Local filesystem path to teardown info file.
- :type teardown_info_path: str
- :returns: Local filesystem path to newly created suite info file.
- :rtype: str
- """
- # Manual right replace: https://stackoverflow.com/a/9943875
- setup_info_path = u"setup".join(teardown_info_path.rsplit(u"teardown", 1))
- with open(teardown_info_path, u"rt", encoding="utf-8") as file_in:
- teardown_data = json.load(file_in)
- # Transforming setup data into suite data.
- with open(setup_info_path, u"rt", encoding="utf-8") as file_in:
- suite_data = json.load(file_in)
-
- end_time = teardown_data[u"end_time"]
- suite_data[u"end_time"] = end_time
- start_float = dateutil.parser.parse(suite_data[u"start_time"]).timestamp()
- end_float = dateutil.parser.parse(suite_data[u"end_time"]).timestamp()
- suite_data[u"duration"] = end_float - start_float
- setup_log = suite_data.pop(u"log")
- suite_data[u"setup_log"] = setup_log
- suite_data[u"teardown_log"] = teardown_data[u"log"]
-
- suite_info_path = u"suite".join(teardown_info_path.rsplit(u"teardown", 1))
- with open(suite_info_path, u"wt", encoding="utf-8") as file_out:
- json.dump(suite_data, file_out, indent=1)
- # We moved everything useful from temporary setup/teardown info files.
- os.remove(setup_info_path)
- os.remove(teardown_info_path)
-
- return suite_info_path
-
-
-def convert_content_to_info(from_raw_path):
- """Read raw output, perform filtering, add derivatves, write info output.
-
- Directory path is created if missing.
-
- When processing teardown, create also suite output using setup info.
-
- :param from_raw_path: Local filesystem path to read raw JSON data from.
- :type from_raw_path: str
- :returns: Local filesystem path to written info JSON file.
- :rtype: str
- :raises RuntimeError: If path or content do not match expectations.
- """
- to_info_path = _raw_to_info_path(from_raw_path)
- with open(from_raw_path, u"rt", encoding="utf-8") as file_in:
- data = json.load(file_in)
-
- data = _convert_to_info_in_memory(data)
-
- with open(to_info_path, u"wt", encoding="utf-8") as file_out:
- json.dump(data, file_out, indent=1)
- if to_info_path.endswith(u"/teardown.info.json"):
- to_info_path = _merge_into_suite_info_file(to_info_path)
- # TODO: Return both paths for validation?
-
- return to_info_path
diff --git a/resources/libraries/python/model/util.py b/resources/libraries/python/model/util.py
index 879f1f28b1..db2ef14bbb 100644
--- a/resources/libraries/python/model/util.py
+++ b/resources/libraries/python/model/util.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -52,7 +52,7 @@ def descend(parent_node, key, default_factory=None):
def get_export_data():
- """Return raw_data member of export_json library instance.
+ """Return data member of ExportJson library instance.
This assumes the data has been initialized already.
Return None if Robot is not running.
@@ -62,8 +62,8 @@ def get_export_data():
:raises AttributeError: If library is not imported yet.
"""
instance = BuiltIn().get_library_instance(
- u"resources.libraries.python.model.export_json"
+ u"resources.libraries.python.model.ExportJson"
)
if instance is None:
return None
- return instance.raw_data
+ return instance.data
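
A hedged illustration (not part of the diff) of the pattern the exporter keywords follow, combining get_export_data and descend. It only works while Robot is running with the ExportJson library imported, and the rate value is made up.

from resources.libraries.python.model.util import descend, get_export_data

result_node = get_export_data()["result"]
# Create (or reuse) the "ndr" sub-dict and store a lower-bound rate item.
descend(result_node, "ndr")["lower"] = dict(
    rate=dict(unit="pps", value=1.2e6)
)
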
diff --git a/resources/libraries/python/model/validate.py b/resources/libraries/python/model/validate.py
index c441936ac8..85c4b993c9 100644
--- a/resources/libraries/python/model/validate.py
+++ b/resources/libraries/python/model/validate.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -19,27 +19,7 @@ Structure will probably change when we start validating more file types.
import json
import jsonschema
-
-
-def _get_validator(schema_path):
- """Contruct validator with format checking enabled.
-
- Load json schema from disk.
- Perform validation against meta-schema before returning.
-
- :param schema_path: Local filesystem path to .json file storing the schema.
- :type schema_path: str
- :returns: Instantiated validator class instance.
- :rtype: jsonschema.validators.Validator
- :raises RuntimeError: If the schema is not valid according its meta-schema.
- """
- with open(schema_path, u"rt", encoding="utf-8") as file_in:
- schema = json.load(file_in)
- validator_class = jsonschema.validators.validator_for(schema)
- validator_class.check_schema(schema)
- fmt_checker = jsonschema.FormatChecker()
- validator = validator_class(schema, format_checker=fmt_checker)
- return validator
+import yaml
def get_validators():
@@ -51,9 +31,17 @@ def get_validators():
:rtype: Mapping[str, jsonschema.validators.Validator]
:raises RuntimeError: If schemas are not readable or not valid.
"""
- relative_path = u"docs/model/current/schema/test_case.info.schema.json"
+ relative_path = "resources/model_schema/test_case.schema.yaml"
# Robot is always started when CWD is CSIT_DIR.
- validator = _get_validator(relative_path)
+ with open(relative_path, "rt", encoding="utf-8") as file_in:
+ schema = json.loads(
+ json.dumps(yaml.safe_load(file_in.read()), indent=2)
+ )
+ validator_class = jsonschema.validators.validator_for(schema)
+ validator_class.check_schema(schema)
+ fmt_checker = jsonschema.FormatChecker()
+ validator = validator_class(schema, format_checker=fmt_checker)
+
return dict(tc_info=validator)
@@ -64,10 +52,11 @@ def validate(file_path, validator):
:param validator: Validator instance to use for validation.
:type file_path: str
:type validator: jsonschema.validators.Validator
- :raises RuntimeError: If schema validation fails.
+ :raises ValidationError: If schema validation fails.
"""
- with open(file_path, u"rt", encoding="utf-8") as file_in:
+ with open(file_path, "rt", encoding="utf-8") as file_in:
instance = json.load(file_in)
error = jsonschema.exceptions.best_match(validator.iter_errors(instance))
if error is not None:
+ print(json.dumps(instance, indent=4))
raise error
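
A short usage sketch of the reworked validation; the file path is a made-up example.

from resources.libraries.python.model.validate import get_validators, validate

validators = get_validators()
# Prints the offending instance and raises jsonschema's ValidationError
# if the exported file does not match the YAML-defined schema.
validate("output/tests/suite_a/test_b.json", validators["tc_info"])
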
diff --git a/resources/libraries/python/parsers/JsonParser.py b/resources/libraries/python/parsers/JsonParser.py
deleted file mode 100644
index c7a28bc1e4..0000000000
--- a/resources/libraries/python/parsers/JsonParser.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Used to parse JSON files or JSON data strings to dictionaries"""
-
-import json
-
-from io import open
-
-
-class JsonParser:
- """Parses JSON data string or files containing JSON data strings"""
- def __init__(self):
- pass
-
- @staticmethod
- def parse_data(json_data):
- """Return list parsed from JSON data string.
-
- Translates JSON data into list of values/dictionaries/lists.
-
- :param json_data: Data in JSON format.
- :type json_data: str
- :returns: JSON data parsed as python list.
- :rtype: list
- """
- parsed_data = json.loads(json_data)
- return parsed_data
-
- @staticmethod
- def parse_file(json_file):
- """Return list parsed from file containing JSON string.
-
- Translates JSON data found in file into list of
- values/dictionaries/lists.
-
- :param json_file: File with JSON type data.
- :type json_file: str
- :returns: JSON data parsed as python list.
- :rtype: list
- """
- input_data = open(json_file, u"rt").read()
- parsed_data = JsonParser.parse_data(input_data)
- return parsed_data
diff --git a/resources/libraries/python/ssh.py b/resources/libraries/python/ssh.py
index e47272f4db..437b1ad3e6 100644
--- a/resources/libraries/python/ssh.py
+++ b/resources/libraries/python/ssh.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -25,9 +25,6 @@ from robot.api import logger
from scp import SCPClient, SCPException
from resources.libraries.python.OptionString import OptionString
-from resources.libraries.python.model.ExportLog import (
- export_ssh_command, export_ssh_result, export_ssh_timeout
-)
__all__ = [
u"exec_cmd", u"exec_cmd_no_error", u"SSH", u"SSHTimeout", u"scp_node"
@@ -145,7 +142,7 @@ class SSH:
f"Reconnecting peer done: {node[u'host']}, {node[u'port']}"
)
- def exec_command(self, cmd, timeout=10, log_stdout_err=True, export=True):
+ def exec_command(self, cmd, timeout=10, log_stdout_err=True):
"""Execute SSH command on a new channel on the connected Node.
:param cmd: Command to run on the Node.
@@ -154,12 +151,10 @@ class SSH:
:param log_stdout_err: If True, stdout and stderr are logged. stdout
and stderr are logged also if the return code is not zero
independently of the value of log_stdout_err.
- :param export: If false, do not attempt JSON export.
Needed for calls outside Robot (e.g. from reservation script).
:type cmd: str or OptionString
:type timeout: int
:type log_stdout_err: bool
- :type export: bool
:returns: return_code, stdout, stderr
:rtype: tuple(int, str, str)
:raises SSHTimeout: If command is not finished in timeout time.
@@ -180,8 +175,6 @@ class SSH:
logger.trace(f"exec_command on {peer} with timeout {timeout}: {cmd}")
- if export:
- export_ssh_command(self._node[u"host"], self._node[u"port"], cmd)
start = monotonic()
chan.exec_command(cmd)
while not chan.exit_status_ready() and timeout is not None:
@@ -197,14 +190,6 @@ class SSH:
duration = monotonic() - start
if duration > timeout:
- if export:
- export_ssh_timeout(
- host=self._node[u"host"],
- port=self._node[u"port"],
- stdout=stdout,
- stderr=stderr,
- duration=duration,
- )
raise SSHTimeout(
f"Timeout exception during execution of command: {cmd}\n"
f"Current contents of stdout buffer: "
@@ -237,33 +222,21 @@ class SSH:
logger.trace(
f"return STDERR {stderr}"
)
- if export:
- export_ssh_result(
- host=self._node[u"host"],
- port=self._node[u"port"],
- code=return_code,
- stdout=stdout,
- stderr=stderr,
- duration=duration,
- )
return return_code, stdout, stderr
def exec_command_sudo(
- self, cmd, cmd_input=None, timeout=30, log_stdout_err=True,
- export=True):
+ self, cmd, cmd_input=None, timeout=30, log_stdout_err=True):
"""Execute SSH command with sudo on a new channel on the connected Node.
:param cmd: Command to be executed.
:param cmd_input: Input redirected to the command.
:param timeout: Timeout.
:param log_stdout_err: If True, stdout and stderr are logged.
- :param export: If false, do not attempt JSON export.
Needed for calls outside Robot (e.g. from reservation script).
:type cmd: str
:type cmd_input: str
:type timeout: int
:type log_stdout_err: bool
- :type export: bool
:returns: return_code, stdout, stderr
:rtype: tuple(int, str, str)
@@ -284,7 +257,7 @@ class SSH:
else:
command = f"sudo -E -S {cmd} <<< \"{cmd_input}\""
return self.exec_command(
- command, timeout, log_stdout_err=log_stdout_err, export=export
+ command, timeout, log_stdout_err=log_stdout_err
)
def exec_command_lxc(
@@ -442,7 +415,7 @@ class SSH:
def exec_cmd(
node, cmd, timeout=600, sudo=False, disconnect=False,
- log_stdout_err=True, export=True
+ log_stdout_err=True
):
"""Convenience function to ssh/exec/return rc, out & err.
@@ -456,7 +429,6 @@ def exec_cmd(
:param log_stdout_err: If True, stdout and stderr are logged. stdout
and stderr are logged also if the return code is not zero
independently of the value of log_stdout_err.
- :param export: If false, do not attempt JSON export.
Needed for calls outside Robot (e.g. from reservation script).
:type node: dict
:type cmd: str or OptionString
@@ -464,7 +436,6 @@ def exec_cmd(
:type sudo: bool
:type disconnect: bool
:type log_stdout_err: bool
- :type export: bool
:returns: RC, Stdout, Stderr.
:rtype: Tuple[int, str, str]
"""
@@ -486,13 +457,11 @@ def exec_cmd(
try:
if not sudo:
ret_code, stdout, stderr = ssh.exec_command(
- cmd, timeout=timeout, log_stdout_err=log_stdout_err,
- export=export
+ cmd, timeout=timeout, log_stdout_err=log_stdout_err
)
else:
ret_code, stdout, stderr = ssh.exec_command_sudo(
- cmd, timeout=timeout, log_stdout_err=log_stdout_err,
- export=export
+ cmd, timeout=timeout, log_stdout_err=log_stdout_err
)
except SSHException as err:
logger.error(repr(err))
@@ -506,7 +475,7 @@ def exec_cmd(
def exec_cmd_no_error(
node, cmd, timeout=600, sudo=False, message=None, disconnect=False,
- retries=0, include_reason=False, log_stdout_err=True, export=True
+ retries=0, include_reason=False, log_stdout_err=True
):
"""Convenience function to ssh/exec/return out & err.
@@ -526,7 +495,6 @@ def exec_cmd_no_error(
:param log_stdout_err: If True, stdout and stderr are logged. stdout
and stderr are logged also if the return code is not zero
independently of the value of log_stdout_err.
- :param export: If false, do not attempt JSON export.
Needed for calls outside Robot thread (e.g. parallel framework setup).
:type node: dict
:type cmd: str or OptionString
@@ -537,7 +505,6 @@ def exec_cmd_no_error(
:type retries: int
:type include_reason: bool
:type log_stdout_err: bool
- :type export: bool
:returns: Stdout, Stderr.
:rtype: tuple(str, str)
:raises RuntimeError: If bash return code is not 0.
@@ -545,7 +512,7 @@ def exec_cmd_no_error(
for _ in range(retries + 1):
ret_code, stdout, stderr = exec_cmd(
node, cmd, timeout=timeout, sudo=sudo, disconnect=disconnect,
- log_stdout_err=log_stdout_err, export=export
+ log_stdout_err=log_stdout_err
)
if ret_code == 0:
break
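
With the export hooks removed, the convenience functions keep their remaining parameters. A hedged sketch of the simplified calls follows; the node dict is abbreviated, as a real topology node carries more fields.

from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error

node = {"host": "10.0.0.1", "port": 22, "username": "testuser"}
ret_code, stdout, stderr = exec_cmd(node, "uname -a", timeout=60)
stdout, stderr = exec_cmd_no_error(
    node, "vppctl show version", sudo=True, message="show version failed"
)
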
diff --git a/resources/libraries/python/topology.py b/resources/libraries/python/topology.py
index 454692807c..22ed3666c3 100644
--- a/resources/libraries/python/topology.py
+++ b/resources/libraries/python/topology.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -766,7 +766,9 @@ class Topology:
# find link
for node_data in nodes_info.values():
# skip self
- if node_data[u"host"] == node[u"host"]:
+ l_hash = node_data[u"host"] + str(node_data[u"port"])
+ r_hash = node[u"host"] + str(node[u"port"])
+ if l_hash == r_hash:
continue
for if_key, if_val \
in node_data[u"interfaces"].items():
@@ -1085,6 +1087,19 @@ class Topology:
except KeyError:
return None
+ @staticmethod
+ def get_bus(node):
+ """Return bus configuration of the node.
+
+ :param node: Node created from topology.
+ :type node: dict
+ :returns: bus configuration string.
+ :rtype: str
+ """
+ try:
+ return node[u"bus"]
+ except KeyError:
+ return None
+
@staticmethod
def get_uio_driver(node):
"""Return uio-driver configuration of the node.