path: root/resources/libraries
author    Vratko Polak <vrpolak@cisco.com>  2023-10-17 16:31:35 +0200
committer Vratko Polak <vrpolak@cisco.com>  2023-10-18 08:10:06 +0000
commit    e5dbe10d9599b9a53fa07e6fadfaf427ba6d69e3 (patch)
tree      147b7972bea35a093f6644e63c5f1fb4e4b2c9a0 /resources/libraries
parent    c6dfb6c09c5dafd1d522f96b4b86c5ec5efc1c83 (diff)
feat(MLRsearch): MLRsearch v7
Replaces MLRv2, suitable for "big bang" upgrade across CSIT.

PyPI metadata updated only partially (full edits will come separately).
Pylint wants less complexity, but the differences are only minor.

+ Use the same (new CSIT) defaults everywhere, also in Python library.
+ Update also PLRsearch to use the new result class.
+ Make upper bound optional in UTI.
+ Fix ASTF approximate duration detection.
+ Do not keep approximated_receive_rate (for MRR) in result structure.

Change-Id: I03406f32d5c93f56b527cb3f93791b61955dfd74
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
Diffstat (limited to 'resources/libraries')
-rw-r--r-- resources/libraries/python/MLRsearch/AbstractMeasurer.py | 32
-rw-r--r-- resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py | 48
-rw-r--r-- resources/libraries/python/MLRsearch/MeasurementDatabase.py | 157
-rw-r--r-- resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py | 485
-rw-r--r-- resources/libraries/python/MLRsearch/PerDurationDatabase.py | 123
-rw-r--r-- resources/libraries/python/MLRsearch/ProgressState.py | 60
-rw-r--r-- resources/libraries/python/MLRsearch/ReceiveRateInterval.py | 74
-rw-r--r-- resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py | 125
-rw-r--r-- resources/libraries/python/MLRsearch/WidthArithmetics.py | 137
-rw-r--r-- resources/libraries/python/MLRsearch/__init__.py | 16
-rw-r--r-- resources/libraries/python/MLRsearch/candidate.py | 153
-rw-r--r-- resources/libraries/python/MLRsearch/config.py | 179
-rw-r--r-- resources/libraries/python/MLRsearch/dataclass/__init__.py | 19
-rw-r--r-- resources/libraries/python/MLRsearch/dataclass/dc_property.py | 173
-rw-r--r-- resources/libraries/python/MLRsearch/dataclass/field.py | 44
-rw-r--r-- resources/libraries/python/MLRsearch/discrete_interval.py | 140
-rw-r--r-- resources/libraries/python/MLRsearch/discrete_load.py | 316
-rw-r--r-- resources/libraries/python/MLRsearch/discrete_result.py | 76
-rw-r--r-- resources/libraries/python/MLRsearch/discrete_width.py | 197
-rw-r--r-- resources/libraries/python/MLRsearch/expander.py | 102
-rw-r--r-- resources/libraries/python/MLRsearch/global_width.py | 70
-rw-r--r-- resources/libraries/python/MLRsearch/limit_handler.py | 198
-rw-r--r-- resources/libraries/python/MLRsearch/load_rounding.py | 205
-rw-r--r-- resources/libraries/python/MLRsearch/load_stats.py | 112
-rw-r--r-- resources/libraries/python/MLRsearch/measurement_database.py | 126
-rw-r--r-- resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py | 314
-rw-r--r-- resources/libraries/python/MLRsearch/pep3140/__init__.py | 24
-rw-r--r-- resources/libraries/python/MLRsearch/pep3140/classes.py | 34
-rw-r--r-- resources/libraries/python/MLRsearch/relevant_bounds.py | 56
-rw-r--r-- resources/libraries/python/MLRsearch/search_goal.py | 117
-rw-r--r-- resources/libraries/python/MLRsearch/search_goal_tuple.py | 60
-rw-r--r-- resources/libraries/python/MLRsearch/selector.py | 183
-rw-r--r-- resources/libraries/python/MLRsearch/strategy/__init__.py | 35
-rw-r--r-- resources/libraries/python/MLRsearch/strategy/base.py | 132
-rw-r--r-- resources/libraries/python/MLRsearch/strategy/bisect.py | 193
-rw-r--r-- resources/libraries/python/MLRsearch/strategy/extend_hi.py | 76
-rw-r--r-- resources/libraries/python/MLRsearch/strategy/extend_lo.py | 76
-rw-r--r-- resources/libraries/python/MLRsearch/strategy/halve.py | 83
-rw-r--r-- resources/libraries/python/MLRsearch/strategy/refine_hi.py | 55
-rw-r--r-- resources/libraries/python/MLRsearch/strategy/refine_lo.py | 53
-rw-r--r-- resources/libraries/python/MLRsearch/target_scaling.py | 103
-rw-r--r-- resources/libraries/python/MLRsearch/target_spec.py | 95
-rw-r--r-- resources/libraries/python/MLRsearch/target_stat.py | 117
-rw-r--r-- resources/libraries/python/MLRsearch/trial_measurement/__init__.py | 19
-rw-r--r-- resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py | 55
-rw-r--r-- resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py | 161
-rw-r--r-- resources/libraries/python/MLRsearch/trimmed_stat.py | 78
-rw-r--r-- resources/libraries/python/PLRsearch/PLRsearch.py | 28
-rw-r--r-- resources/libraries/python/TrafficGenerator.py | 242
-rw-r--r-- resources/libraries/robot/performance/performance_display.robot | 92
-rw-r--r-- resources/libraries/robot/performance/performance_utils.robot | 59
51 files changed, 4444 insertions, 1463 deletions
diff --git a/resources/libraries/python/MLRsearch/AbstractMeasurer.py b/resources/libraries/python/MLRsearch/AbstractMeasurer.py
deleted file mode 100644
index da66b4e174..0000000000
--- a/resources/libraries/python/MLRsearch/AbstractMeasurer.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining AbstractMeasurer class."""
-
-from abc import ABCMeta, abstractmethod
-
-
-class AbstractMeasurer(metaclass=ABCMeta):
- """Abstract class defining common API for measurement providers."""
-
- @abstractmethod
- def measure(self, duration, transmit_rate):
- """Perform trial measurement and return the result.
-
- :param duration: Trial duration [s].
- :param transmit_rate: Target transmit rate [tps].
- :type duration: float
- :type transmit_rate: float
- :returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement.ReceiveRateMeasurement
- """
diff --git a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py b/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py
deleted file mode 100644
index cca48ef798..0000000000
--- a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining AbstractSearchAlgorithm class."""
-
-from abc import ABCMeta, abstractmethod
-
-
-class AbstractSearchAlgorithm(metaclass=ABCMeta):
- """Abstract class defining common API for search algorithms."""
-
- def __init__(self, measurer):
- """Store the rate provider.
-
- :param measurer: Object able to perform trial or composite measurements.
- :type measurer: AbstractMeasurer.AbstractMeasurer
- """
- self.measurer = measurer
-
- @abstractmethod
- def narrow_down_intervals(
- self, min_rate, max_rate, packet_loss_ratios):
- """Perform measurements to narrow down intervals, return them.
-
- :param min_rate: Minimal target transmit rate [tps].
- Usually, tests are set to fail if search reaches this or below.
- :param max_rate: Maximal target transmit rate [tps].
- Usually computed from line rate and various other limits,
- to prevent failures or duration stretching in Traffic Generator.
- :param packet_loss_ratios: Ratios of packet loss to search for,
- e.g. [0.0, 0.005] for NDR and PDR.
- :type min_rate: float
- :type max_rate: float
- :type packet_loss_ratios: Iterable[float]
- :returns: Structure containing narrowed down intervals
- and their measurements.
- :rtype: List[ReceiveRateInterval.ReceiveRateInterval]
- """
diff --git a/resources/libraries/python/MLRsearch/MeasurementDatabase.py b/resources/libraries/python/MLRsearch/MeasurementDatabase.py
deleted file mode 100644
index 2f601d6260..0000000000
--- a/resources/libraries/python/MLRsearch/MeasurementDatabase.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining MeasurementDatabase class."""
-
-from .ReceiveRateInterval import ReceiveRateInterval
-from .PerDurationDatabase import PerDurationDatabase
-
-
-class MeasurementDatabase:
- """A structure holding measurement results.
-
- The implementation uses a dict from duration values
- to PerDurationDatabase instances.
-
- Several utility methods are added, accomplishing tasks useful for MLRsearch.
-
- This class contains the "find tightest bounds" parts of logic required
- by MLRsearch. One exception is lack of any special handling for maximal
- or minimal rates.
- """
-
- def __init__(self, measurements):
- """Store measurement results in per-duration databases.
-
- TODO: Move processing to a factory method,
- keep constructor only to store (presumably valid) values.
-
- If the measurements argument is a dict,
- the constructor assumes it contains the processed databases.
-
- :param measurements: The measurement results to store.
- :type measurements: Iterable[ReceiveRateMeasurement]
- """
- if isinstance(measurements, dict):
- self.data_for_duration = measurements
- else:
- self.data_for_duration = dict()
- # TODO: There is overlap with add() code. Worth extracting?
- for measurement in measurements:
- duration = measurement.duration
- if duration in self.data_for_duration:
- self.data_for_duration[duration].add(measurement)
- else:
- self.data_for_duration[duration] = PerDurationDatabase(
- duration, [measurement]
- )
- durations = sorted(self.data_for_duration.keys())
- self.current_duration = durations[-1] if durations else None
- self.previous_duration = durations[-2] if len(durations) > 1 else None
-
- def __repr__(self):
- """Return string executable to get equivalent instance.
-
- :returns: Code to construct equivalent instance.
- :rtype: str
- """
- return f"MeasurementDatabase(measurements={self.data_for_duration!r})"
-
- def set_current_duration(self, duration):
- """Remember what MLRsearch considers the current duration.
-
- Setting the same duration is allowed, setting smaller is not allowed.
-
- :param duration: Target trial duration of current phase, in seconds.
- :type duration: float
- :raises ValueError: If the duration is smaller than previous.
- """
- if duration < self.current_duration:
- raise ValueError(
- f"Duration {duration} shorter than current duration"
- f" {self.current_duration}"
- )
- if duration > self.current_duration:
- self.previous_duration = self.current_duration
- self.current_duration = duration
- self.data_for_duration[duration] = PerDurationDatabase(
- duration, list()
- )
- # Else no-op.
-
- def add(self, measurement):
- """Add a measurement. Duration has to match the set one.
-
- :param measurement: Measurement result to add to the database.
- :type measurement: ReceiveRateMeasurement
- """
- duration = measurement.duration
- if duration != self.current_duration:
- raise ValueError(
- f"{measurement!r} duration different than"
- f" {self.current_duration}"
- )
- self.data_for_duration[duration].add(measurement)
-
- def get_bounds(self, ratio):
- """Return 6 bounds: lower/upper, current/previous, tightest/second.
-
- Second tightest bounds are only returned for current duration.
- None instead of a measurement if there is no measurement of that type.
-
- The result contains bounds in this order:
- 1. Tightest lower bound for current duration.
- 2. Tightest upper bound for current duration.
- 3. Tightest lower bound for previous duration.
- 4. Tightest upper bound for previous duration.
- 5. Second tightest lower bound for current duration.
- 6. Second tightest upper bound for current duration.
-
- :param ratio: Target ratio, valid has to be lower or equal.
- :type ratio: float
- :returns: Measurements acting as various bounds.
- :rtype: 6-tuple of Optional[ReceiveRateMeasurement]
- """
- cur_lo1, cur_hi1, pre_lo, pre_hi, cur_lo2, cur_hi2 = [None] * 6
- duration = self.current_duration
- if duration is not None:
- data = self.data_for_duration[duration]
- cur_lo1, cur_hi1, cur_lo2, cur_hi2 = data.get_valid_bounds(ratio)
- duration = self.previous_duration
- if duration is not None:
- data = self.data_for_duration[duration]
- pre_lo, pre_hi, _, _ = data.get_valid_bounds(ratio)
- return cur_lo1, cur_hi1, pre_lo, pre_hi, cur_lo2, cur_hi2
-
- def get_results(self, ratio_list):
- """Return list of intervals for given ratios, from current duration.
-
- Attempt to construct valid intervals. If a valid bound is missing,
- use smallest/biggest target_tr for lower/upper bound.
- This can result in degenerate intervals.
-
- :param ratio_list: Ratios to create intervals for.
- :type ratio_list: Iterable[float]
- :returns: List of intervals.
- :rtype: List[ReceiveRateInterval]
- """
- ret_list = list()
- current_data = self.data_for_duration[self.current_duration]
- for ratio in ratio_list:
- lower_bound, upper_bound, _, _, _, _ = self.get_bounds(ratio)
- if lower_bound is None:
- lower_bound = current_data.measurements[0]
- if upper_bound is None:
- upper_bound = current_data.measurements[-1]
- ret_list.append(ReceiveRateInterval(lower_bound, upper_bound))
- return ret_list
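The "find tightest bounds" logic described in the class docstring can be illustrated without the deleted classes. The following standalone sketch uses made-up (rate, loss ratio) pairs and picks the tightest lower and upper bound for one target loss ratio.

# Standalone sketch of tightest-bound selection for one loss ratio goal.
trials = [(1e6, 0.0), (2e6, 0.0), (3e6, 0.002), (4e6, 0.01)]  # (rate, loss ratio)
target_ratio = 0.005

lower_rates = [rate for rate, ratio in trials if ratio <= target_ratio]
upper_rates = [rate for rate, ratio in trials if ratio > target_ratio]
tightest_lower = max(lower_rates) if lower_rates else None  # 3000000.0
tightest_upper = min(upper_rates) if upper_rates else None  # 4000000.0
print(tightest_lower, tightest_upper)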
diff --git a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py b/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py
deleted file mode 100644
index 0e6c8cfa58..0000000000
--- a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py
+++ /dev/null
@@ -1,485 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining MultipleLossRatioSearch class."""
-
-import logging
-import math
-import time
-
-from .MeasurementDatabase import MeasurementDatabase
-from .ProgressState import ProgressState
-from .ReceiveRateInterval import ReceiveRateInterval
-from .WidthArithmetics import (
- multiply_relative_width,
- step_down,
- step_up,
- multiple_step_down,
- multiple_step_up,
- half_step_up,
-)
-
-
-class MultipleLossRatioSearch:
- """Optimized binary search algorithm for finding bounds for multiple ratios.
-
- This is unofficially a subclass of AbstractSearchAlgorithm,
- but constructor signature is different.
-
- Traditional binary search algorithm needs initial interval
- (lower and upper bound), and returns final interval after bisecting
- (until some exit condition is met).
- The exit condition is usually related to the interval width,
- (upper bound value minus lower bound value).
-
- The optimized algorithm contains several improvements
- aimed to reduce overall search time.
-
- One improvement is searching for multiple intervals at once.
- The intervals differ by the target loss ratio. Lower bound
- has to have equal or smaller loss ratio, upper bound has to have larger.
-
- Next improvement is that the initial interval does not need to be valid.
- Imagine initial interval (10, 11) where loss at 11 is smaller
- than the searched ratio.
- The algorithm will try (11, 13) interval next, and if 13 is still smaller,
- (13, 17) and so on, doubling width until the upper bound is valid.
- The part when interval expands is called external search,
- the part when interval is bisected is called internal search.
-
- Next improvement is that trial measurements at small trial duration
- can be used to find a reasonable interval for full trial duration search.
- This results in more trials performed, but smaller overall duration
- in general.
-
- Next improvement is bisecting in logarithmic quantities,
- so that exit criteria can be independent of measurement units.
-
- Next improvement is basing the initial interval on receive rates.
-
- Final improvement is exiting early if the minimal value
- is not a valid lower bound.
-
- The complete search consists of several phases,
- each phase performing several trial measurements.
- Initial phase creates initial interval based on receive rates
- at maximum rate and at maximum receive rate (MRR).
- Final phase and preceding intermediate phases are performing
- external and internal search steps,
- each resulting interval is the starting point for the next phase.
- The resulting intervals of the final phase are the result of the whole algorithm.
-
- Each non-initial phase uses its own trial duration.
- Any non-initial phase stops searching (for all ratios independently)
- when minimum is not a valid lower bound (at current duration),
- or all of the following is true:
- Both bounds are valid, bounds are measured at the current phase
- trial duration, interval width is less than the width goal
- for current phase.
-
- TODO: Review and update this docstring according to rst docs.
- """
-
- def __init__(
- self, measurer, final_relative_width=0.005,
- final_trial_duration=30.0, initial_trial_duration=1.0,
- number_of_intermediate_phases=2, timeout=600.0, debug=None,
- expansion_coefficient=2.0):
- """Store the measurer object and additional arguments.
-
- :param measurer: Rate provider to use by this search object.
- :param final_relative_width: Final lower bound transmit rate
- cannot be more distant than this multiple of upper bound [1].
- :param final_trial_duration: Trial duration for the final phase [s].
- :param initial_trial_duration: Trial duration for the initial phase
- and also for the first intermediate phase [s].
- :param number_of_intermediate_phases: Number of intermediate phases
- to perform before the final phase [1].
- :param timeout: The search will fail itself when not finished
- before this overall time [s].
- :param debug: Callable to use instead of logging.debug().
- :param expansion_coefficient: External search multiplies width by this.
- :type measurer: AbstractMeasurer.AbstractMeasurer
- :type final_relative_width: float
- :type final_trial_duration: float
- :type initial_trial_duration: float
- :type number_of_intermediate_phases: int
- :type timeout: float
- :type debug: Optional[Callable[[str], None]]
- :type expansion_coefficient: float
- """
- self.measurer = measurer
- self.final_trial_duration = float(final_trial_duration)
- self.final_relative_width = float(final_relative_width)
- self.number_of_intermediate_phases = int(number_of_intermediate_phases)
- self.initial_trial_duration = float(initial_trial_duration)
- self.timeout = float(timeout)
- self.state = None
- self.debug = logging.debug if debug is None else debug
- self.expansion_coefficient = float(expansion_coefficient)
-
- def narrow_down_intervals(self, min_rate, max_rate, packet_loss_ratios):
- """Perform initial phase, create state object, proceed with next phases.
-
- The current implementation requires the ratios to be unique and sorted.
- Also non-empty.
-
- :param min_rate: Minimal target transmit rate [tps].
- :param max_rate: Maximal target transmit rate [tps].
- :param packet_loss_ratios: Target ratios of packet loss to locate.
- :type min_rate: float
- :type max_rate: float
- :type packet_loss_ratios: Iterable[float]
- :returns: Structure containing narrowed down intervals
- and their measurements.
- :rtype: List[ReceiveRateInterval]
- :raises RuntimeError: If total duration is larger than timeout.
- Or if ratios list is (empty or) not sorted or unique.
- """
- min_rate = float(min_rate)
- max_rate = float(max_rate)
- packet_loss_ratios = [float(ratio) for ratio in packet_loss_ratios]
- if len(packet_loss_ratios) < 1:
- raise RuntimeError(u"At least one ratio is required!")
- if packet_loss_ratios != sorted(set(packet_loss_ratios)):
- raise RuntimeError(u"Input ratios have to be sorted and unique!")
- measurements = list()
- self.debug(f"First measurement at max rate: {max_rate}")
- measured = self.measurer.measure(
- duration=self.initial_trial_duration,
- transmit_rate=max_rate,
- )
- measurements.append(measured)
- initial_width_goal = self.final_relative_width
- for _ in range(self.number_of_intermediate_phases):
- initial_width_goal = multiply_relative_width(
- initial_width_goal, 2.0
- )
- max_lo = step_down(max_rate, initial_width_goal)
- mrr = max(min_rate, min(max_lo, measured.relative_receive_rate))
- self.debug(f"Second measurement at mrr: {mrr}")
- measured = self.measurer.measure(
- duration=self.initial_trial_duration,
- transmit_rate=mrr,
- )
- measurements.append(measured)
- # Attempt to get narrower width.
- if measured.loss_ratio > packet_loss_ratios[0]:
- max_lo = step_down(mrr, initial_width_goal)
- mrr2 = min(max_lo, measured.relative_receive_rate)
- else:
- mrr2 = step_up(mrr, initial_width_goal)
- if min_rate < mrr2 < max_rate:
- self.debug(f"Third measurement at mrr2: {mrr2}")
- measured = self.measurer.measure(
- duration=self.initial_trial_duration,
- transmit_rate=mrr2,
- )
- measurements.append(measured)
- # If mrr2 > mrr and mrr2 got zero loss,
- # it is better to do external search from mrr2 up.
- # To prevent bisection between mrr2 and max_rate,
- # we simply remove the max_rate measurement.
- # Similar logic applies to higher loss ratio goals.
- # Overall, with mrr2 measurement done, we never need
- # the first measurement done at max rate.
- measurements = measurements[1:]
- database = MeasurementDatabase(measurements)
- stop_time = time.monotonic() + self.timeout
- self.state = ProgressState(
- database, self.number_of_intermediate_phases,
- self.final_trial_duration, self.final_relative_width,
- packet_loss_ratios, min_rate, max_rate, stop_time
- )
- self.ndrpdr()
- return self.state.database.get_results(ratio_list=packet_loss_ratios)
-
- def ndrpdr(self):
- """Perform trials for this phase. State is updated in-place.
-
- Recursion to smaller durations is performed (if not performed yet).
-
- :raises RuntimeError: If total duration is larger than timeout.
- """
- state = self.state
- if state.phases > 0:
- # We need to finish preceding intermediate phases first.
- saved_phases = state.phases
- state.phases -= 1
- # Preceding phases have shorter duration.
- saved_duration = state.duration
- duration_multiplier = state.duration / self.initial_trial_duration
- phase_exponent = float(state.phases) / saved_phases
- state.duration = self.initial_trial_duration * math.pow(
- duration_multiplier, phase_exponent
- )
- # Shorter durations do not need that narrow widths.
- saved_width = state.width_goal
- state.width_goal = multiply_relative_width(saved_width, 2.0)
- # Recurse.
- self.ndrpdr()
- # Restore the state for current phase.
- state.width_goal = saved_width
- state.duration = saved_duration
- state.phases = saved_phases # Not needed, but just in case.
- self.debug(
- f"Starting phase with {state.duration} duration"
- f" and {state.width_goal} relative width goal."
- )
- failing_fast = False
- database = state.database
- database.set_current_duration(state.duration)
- while time.monotonic() < state.stop_time:
- for index, ratio in enumerate(state.packet_loss_ratios):
- new_tr = self._select_for_ratio(ratio)
- if new_tr is None:
- # Either this ratio is fine, or min rate got invalid result.
- # If fine, we will continue to handle next ratio.
- if index > 0:
- # First ratio passed, all next have a valid lower bound.
- continue
- lower_bound, _, _, _, _, _ = database.get_bounds(ratio)
- if lower_bound is None:
- failing_fast = True
- self.debug(u"No valid lower bound for this iteration.")
- break
- # First ratio is fine.
- continue
- # We have transmit rate to measure at.
- # We do not check duration versus stop_time here,
- # as some measurers can be unpredictably faster
- # than what duration suggests.
- measurement = self.measurer.measure(
- duration=state.duration,
- transmit_rate=new_tr,
- )
- database.add(measurement)
- # Restart ratio handling on updated database.
- break
- else:
- # No ratio needs measuring, we are done with this phase.
- self.debug(u"Phase done.")
- break
- # We have broken out of the for loop.
- if failing_fast:
- # Abort the while loop early.
- break
- # Not failing fast but database got updated, restart the while loop.
- else:
- # Time is up.
- raise RuntimeError(u"Optimized search takes too long.")
- # Min rate is not valid, but returning what we have
- # so next duration can recover.
-
- @staticmethod
- def improves(new_bound, lower_bound, upper_bound):
- """Return whether new bound improves upon old bounds.
-
- To improve, new_bound has to be not None,
- and between the old bounds (where the bound is not None).
-
- This piece of logic is commonly used, when we know old bounds
- from a primary source (e.g. current duration database)
- and new bound from a secondary source (e.g. previous duration database).
- Having a function allows "if improves(..):" construction to save space.
-
- :param new_bound: The bound we consider applying.
- :param lower_bound: Known bound, new_bound has to be higher to apply.
- :param upper_bound: Known bound, new_bound has to be lower to apply.
- :type new_bound: Optional[ReceiveRateMeasurement]
- :type lower_bound: Optional[ReceiveRateMeasurement]
- :type upper_bound: Optional[ReceiveRateMeasurement]
- :returns: Whether we can apply the new bound.
- :rtype: bool
- """
- if new_bound is None:
- return False
- if lower_bound is not None:
- if new_bound.target_tr <= lower_bound.target_tr:
- return False
- if upper_bound is not None:
- if new_bound.target_tr >= upper_bound.target_tr:
- return False
- return True
-
- def _select_for_ratio(self, ratio):
- """Return None or new target_tr to measure at.
-
- Returning None means either we have narrow enough valid interval
- for this ratio, or we are hitting min rate and should fail early.
-
- :param ratio: Loss ratio to ensure narrow valid bounds for.
- :type ratio: float
- :returns: The next target transmit rate to measure at.
- :rtype: Optional[float]
- :raises RuntimeError: If database inconsistency is detected.
- """
- state = self.state
- data = state.database
- bounds = data.get_bounds(ratio)
- cur_lo1, cur_hi1, pre_lo, pre_hi, cur_lo2, cur_hi2 = bounds
- pre_lo_improves = self.improves(pre_lo, cur_lo1, cur_hi1)
- pre_hi_improves = self.improves(pre_hi, cur_lo1, cur_hi1)
- # TODO: Detect also the other case for initial bisect, see below.
- if pre_lo_improves and pre_hi_improves:
- # We allowed larger width for previous phase
- # as single bisect here guarantees only one re-measurement.
- new_tr = self._bisect(pre_lo, pre_hi)
- if new_tr is not None:
- self.debug(f"Initial bisect for {ratio}, tr: {new_tr}")
- return new_tr
- if pre_lo_improves:
- new_tr = pre_lo.target_tr
- self.debug(f"Re-measuring lower bound for {ratio}, tr: {new_tr}")
- return new_tr
- if pre_hi_improves:
- # This can also happen when we did not do initial bisect
- # for this ratio yet, but the previous duration lower bound
- # for this ratio got already re-measured as previous duration
- # upper bound for previous ratio.
- new_tr = pre_hi.target_tr
- self.debug(f"Re-measuring upper bound for {ratio}, tr: {new_tr}")
- return new_tr
- if cur_lo1 is None and cur_hi1 is None:
- raise RuntimeError(u"No results found in databases!")
- if cur_lo1 is None:
- # Upper bound exists (cur_hi1).
- # We already tried previous lower bound.
- # So, we want to extend down.
- new_tr = self._extend_down(
- cur_hi1, cur_hi2, pre_hi, second_needed=False
- )
- self.debug(
- f"Extending down for {ratio}:"
- f" old {cur_hi1.target_tr} new {new_tr}"
- )
- return new_tr
- if cur_hi1 is None:
- # Lower bound exists (cur_lo1).
- # We already tried previous upper bound.
- # So, we want to extend up.
- new_tr = self._extend_up(cur_lo1, cur_lo2, pre_lo)
- self.debug(
- f"Extending up for {ratio}:"
- f" old {cur_lo1.target_tr} new {new_tr}"
- )
- return new_tr
- # Both bounds exist (cur_lo1 and cur_hi1).
- # cur_lo1 might have been selected for this ratio (we are bisecting)
- # or for previous ratio (we are extending down for this ratio).
- # Compute both estimates and choose the higher value.
- bisected_tr = self._bisect(cur_lo1, cur_hi1)
- extended_tr = self._extend_down(
- cur_hi1, cur_hi2, pre_hi, second_needed=True
- )
- # Only if both are not None we need to decide.
- if bisected_tr and extended_tr and extended_tr > bisected_tr:
- self.debug(
- f"Extending down for {ratio}:"
- f" old {cur_hi1.target_tr} new {extended_tr}"
- )
- new_tr = extended_tr
- else:
- self.debug(
- f"Bisecting for {ratio}: lower {cur_lo1.target_tr},"
- f" upper {cur_hi1.target_tr}, new {bisected_tr}"
- )
- new_tr = bisected_tr
- return new_tr
-
- def _extend_down(self, cur_hi1, cur_hi2, pre_hi, second_needed=False):
- """Return extended width below, or None if hitting min rate.
-
- If no second tightest (nor previous) upper bound is available,
- the behavior is governed by second_needed argument.
- If true, return None. If false, start from width goal.
- This is useful, as if a bisect is possible,
- we want to give it a chance.
-
- :param cur_hi1: Tightest upper bound for current duration. Has to exist.
- :param cur_hi2: Second tightest current upper bound, may not exist.
- :param pre_hi: Tightest upper bound, previous duration, may not exist.
- :param second_needed: Whether second tightest bound is required.
- :type cur_hi1: ReceiveRateMeasurement
- :type cur_hi2: Optional[ReceiveRateMeasurement]
- :type pre_hi: Optional[ReceiveRateMeasurement]
- :type second_needed: bool
- :returns: The next target transmit rate to measure at.
- :rtype: Optional[float]
- """
- state = self.state
- old_tr = cur_hi1.target_tr
- if state.min_rate >= old_tr:
- self.debug(u"Extend down hits min rate.")
- return None
- next_bound = cur_hi2
- if self.improves(pre_hi, cur_hi1, cur_hi2):
- next_bound = pre_hi
- if next_bound is None and second_needed:
- return None
- old_width = state.width_goal
- if next_bound is not None:
- old_width = ReceiveRateInterval(cur_hi1, next_bound).rel_tr_width
- old_width = max(old_width, state.width_goal)
- new_tr = multiple_step_down(
- old_tr, old_width, self.expansion_coefficient
- )
- new_tr = max(new_tr, state.min_rate)
- return new_tr
-
- def _extend_up(self, cur_lo1, cur_lo2, pre_lo):
- """Return extended width above, or None if hitting max rate.
-
- :param cur_lo1: Tightest lower bound for current duration. Has to exist.
- :param cur_lo2: Second tightest current lower bound, may not exist.
- :param pre_lo: Tightest lower bound, previous duration, may not exist.
- :type cur_lo1: ReceiveRateMeasurement
- :type cur_lo2: Optional[ReceiveRateMeasurement]
- :type pre_lo: Optional[ReceiveRateMeasurement]
- :returns: The next target transmit rate to measure at.
- :rtype: Optional[float]
- """
- state = self.state
- old_tr = cur_lo1.target_tr
- if state.max_rate <= old_tr:
- self.debug(u"Extend up hits max rate.")
- return None
- next_bound = cur_lo2
- if self.improves(pre_lo, cur_lo2, cur_lo1):
- next_bound = pre_lo
- old_width = state.width_goal
- if next_bound is not None:
- old_width = ReceiveRateInterval(cur_lo1, next_bound).rel_tr_width
- old_width = max(old_width, state.width_goal)
- new_tr = multiple_step_up(old_tr, old_width, self.expansion_coefficient)
- new_tr = min(new_tr, state.max_rate)
- return new_tr
-
- def _bisect(self, lower_bound, upper_bound):
- """Return middle rate or None if width is narrow enough.
-
- :param lower_bound: Measurement to use as a lower bound. Has to exist.
- :param upper_bound: Measurement to use as an upper bound. Has to exist.
- :type lower_bound: ReceiveRateMeasurement
- :type upper_bound: ReceiveRateMeasurement
- :returns: The next target transmit rate to measure at.
- :rtype: Optional[float]
- :raises RuntimeError: If database inconsistency is detected.
- """
- state = self.state
- width = ReceiveRateInterval(lower_bound, upper_bound).rel_tr_width
- if width <= state.width_goal:
- self.debug(u"No more bisects needed.")
- return None
- new_tr = half_step_up(lower_bound.target_tr, width, state.width_goal)
- return new_tr
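The external/internal search described in the class docstring can be sketched for a single loss ratio over a monotone loss function. The loss model, the rates, and the helper names below are made up for illustration; the special handling of min/max rate and multiple ratios is omitted.

# Single-ratio sketch of external (expand) and internal (bisect) search.
def loss_ratio_at(rate):
    """Made-up monotone loss model: loss appears above 3.2 Mpps."""
    return max(0.0, (rate - 3.2e6) / rate)

def narrow_down(target_ratio, min_rate, max_rate, final_rel_width=0.005):
    lower, upper = min_rate, min_rate * 1.1
    ratio = upper / lower
    # External search: keep doubling the interval width (in logarithmic space)
    # until the upper bound shows more loss than the target ratio.
    while loss_ratio_at(upper) <= target_ratio and upper < max_rate:
        ratio *= ratio
        lower, upper = upper, min(upper * ratio, max_rate)
    # Internal search: bisect at the geometric mean until narrow enough.
    while (upper - lower) / upper > final_rel_width:
        middle = (lower * upper) ** 0.5
        if loss_ratio_at(middle) <= target_ratio:
            lower = middle
        else:
            upper = middle
    return lower, upper

print(narrow_down(target_ratio=0.0, min_rate=1e5, max_rate=1e7))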
diff --git a/resources/libraries/python/MLRsearch/PerDurationDatabase.py b/resources/libraries/python/MLRsearch/PerDurationDatabase.py
deleted file mode 100644
index afdf48614b..0000000000
--- a/resources/libraries/python/MLRsearch/PerDurationDatabase.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining PerDurationDatabase class."""
-
-import copy
-
-
-class PerDurationDatabase:
- """List-like structure holding measurement results for one duration.
-
- This is a building block for MeasurementDatabase.
-
- This class holds measurements for a single target duration value only,
- so the logic is quite simple.
-
- Several utility methods are added, accomplishing tasks useful for MLRsearch
- (to be called by MeasurementDatabase).
- """
-
- def __init__(self, duration, measurements):
- """Store (deep copy of) measurement results and normalize them.
-
- The results have to have the corresponding target duration,
- and there should be no duplicate target_tr values.
- Empty iterable (zero measurements) is an acceptable input.
-
- :param duration: All measurements have to have this target duration [s].
- :param measurements: The measurement results to store.
- :type duration: float
- :type measurements: Iterable[ReceiveRateMeasurement]
- :raises ValueError: If duration does not match or if TR duplicity.
- """
- self.duration = duration
- self.measurements = [copy.deepcopy(meas) for meas in measurements]
- self._normalize()
-
- def __repr__(self):
- """Return string executable to get equivalent instance.
-
- :returns: Code to construct equivalent instance.
- :rtype: str
- """
- return (
- u"PerDurationDatabase("
- f"duration={self.duration!r},"
- f"measurements={self.measurements!r})"
- )
-
- def _normalize(self):
- """Sort by target_tr, fail on detecting duplicate target_tr.
-
- Also set effective loss ratios.
-
- :raises ValueError: If duration does not match or if TR duplicity.
- """
- measurements = self.measurements
- measurements.sort(key=lambda measurement: measurement.target_tr)
- # Detect duplicated TRs.
- previous_tr = None
- for measurement in measurements:
- current_tr = measurement.target_tr
- if current_tr == previous_tr:
- raise ValueError(
- u"Transmit rate conflict:"
- f" {measurement!r} {previous_tr!r}"
- )
- previous_tr = current_tr
- # Update effective ratios.
- ratio_previous = None
- for measurement in measurements:
- if ratio_previous is None:
- ratio_previous = measurement.loss_ratio
- measurement.effective_loss_ratio = ratio_previous
- continue
- ratio_previous = max(ratio_previous, measurement.loss_ratio)
- measurement.effective_loss_ratio = ratio_previous
-
- def add(self, measurement):
- """Add measurement and normalize.
-
- :param measurement: Measurement result to add to the database.
- :type measurement: ReceiveRateMeasurement
- """
- # TODO: We should deepcopy either everywhere or nowhere.
- self.measurements.append(measurement)
- self._normalize()
-
- def get_valid_bounds(self, ratio):
- """Return None or a valid measurement for two tightest bounds.
-
- The validity of a measurement to act as a bound is determined
- by comparing the argument ratio with measurement's effective loss ratio.
-
- Both lower and upper bounds are returned, both tightest and second
- tightest. If some value is not available, None is returned instead.
-
- :param ratio: Target ratio, valid has to be lower or equal.
- :type ratio: float
- :returns: Tightest lower bound, tightest upper bound,
- second tightest lower bound, second tightest upper bound.
- :rtype: 4-tuple of Optional[ReceiveRateMeasurement]
- """
- lower_1, upper_1, lower_2, upper_2 = None, None, None, None
- for measurement in self.measurements:
- if measurement.effective_loss_ratio > ratio:
- if upper_1 is None:
- upper_1 = measurement
- continue
- upper_2 = measurement
- break
- lower_1, lower_2 = measurement, lower_1
- return lower_1, upper_1, lower_2, upper_2
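The effective loss ratio update in _normalize above is a running maximum over measurements sorted by target transmit rate. A standalone illustration with made-up ratios (non-negative, so starting the maximum at 0.0 is equivalent):

# Effective loss ratio as a running maximum over rate-sorted measurements.
loss_ratios_sorted_by_rate = [0.0, 0.002, 0.001, 0.01]
effective_ratios = []
running_max = 0.0
for ratio in loss_ratios_sorted_by_rate:
    running_max = max(running_max, ratio)
    effective_ratios.append(running_max)
print(effective_ratios)  # [0.0, 0.002, 0.002, 0.01]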
diff --git a/resources/libraries/python/MLRsearch/ProgressState.py b/resources/libraries/python/MLRsearch/ProgressState.py
deleted file mode 100644
index 3610638990..0000000000
--- a/resources/libraries/python/MLRsearch/ProgressState.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ProgressState class."""
-
-
-class ProgressState:
- """Structure containing data to be passed around in recursion.
-
- This is basically a private class of MultipleLossRatioSearch,
- but keeping it in a separate file makes things more readable.
- """
-
- def __init__(
- self, database, phases, duration, width_goal, packet_loss_ratios,
- min_rate, max_rate, stop_time):
- """Convert and store the argument values.
-
- Also initialize the stored width for external search.
-
- :param database: Structure containing measured results.
- :param phases: How many intermediate phases to perform
- before the current one.
- :param duration: Trial duration to use in the current phase [s].
- :param width_goal: The goal relative width for the current phase.
- :param packet_loss_ratios: List of ratios for the current search.
- :param min_rate: Minimal target transmit rate available
- for the current search [tps].
- :param max_rate: Maximal target transmit rate available
- for the current search [tps].
- :param stop_time: Monotonic time [s] when we should fail on timeout.
- :type database: MeasurementDatabase
- :type phases: int
- :type duration: float
- :type width_goal: float
- :type packet_loss_ratios: Iterable[float]
- :type min_rate: float
- :type max_rate: float
- :type stop_time: float
- """
- self.database = database
- self.phases = int(phases)
- self.duration = float(duration)
- self.width_goal = float(width_goal)
- self.packet_loss_ratios = [
- float(ratio) for ratio in packet_loss_ratios
- ]
- self.min_rate = float(min_rate)
- self.max_rate = float(max_rate)
- self.stop_time = float(stop_time)
diff --git a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py b/resources/libraries/python/MLRsearch/ReceiveRateInterval.py
deleted file mode 100644
index 993561e396..0000000000
--- a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ReceiveRateInterval class."""
-
-import math
-
-
-class ReceiveRateInterval:
- """Structure defining two Rr measurements, and their relation."""
-
- def __init__(self, measured_low, measured_high):
- """Store the bound measurements and call sort.
-
- :param measured_low: Measurement for the lower bound.
- :param measured_high: Measurement for the upper bound.
- :type measured_low: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type measured_high: ReceiveRateMeasurement.ReceiveRateMeasurement
- """
- self.measured_low = measured_low
- self.measured_high = measured_high
- # Declare secondary quantities to appease pylint.
- self.abs_tr_width = None
- """Absolute width of target transmit rate. Upper minus lower."""
- self.rel_tr_width = None
- """Relative width of target transmit rate. Absolute divided by upper."""
- self.sort()
-
- def sort(self):
- """Sort bounds by target Tr, compute secondary quantities."""
- if self.measured_low.target_tr > self.measured_high.target_tr:
- self.measured_low, self.measured_high = (
- self.measured_high, self.measured_low
- )
- self.abs_tr_width = (
- self.measured_high.target_tr - self.measured_low.target_tr
- )
- self.rel_tr_width = self.abs_tr_width / self.measured_high.target_tr
-
- def __str__(self):
- """Return string as half-open interval."""
- return f"[{self.measured_low!s};{self.measured_high!s})"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"ReceiveRateInterval(measured_low={self.measured_low!r}," \
- f"measured_high={self.measured_high!r})"
-
- def width_in_goals(self, relative_width_goal):
- """Return float value.
-
- Relative width goal is some (negative) value on logarithmic scale.
- Current relative width is another logarithmic value.
- Return the latter divided by the former.
- This is useful when investigating how surprising widths came to be.
-
- :param relative_width_goal: Upper bound times this is the goal
- difference between upper bound and lower bound.
- :type relative_width_goal: float
- :returns: Current width as logarithmic multiple of goal width [1].
- :rtype: float
- """
- return math.log(1.0 - self.rel_tr_width) / math.log(
- 1.0 - relative_width_goal)
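A quick numeric check of rel_tr_width and width_in_goals from the class above, using arbitrary bound rates and the 0.005 relative width goal that is the default in MultipleLossRatioSearch:

# Numeric check of the interval width formulas above (arbitrary rates).
import math

low_tr, high_tr = 3.0e6, 3.3e6
abs_tr_width = high_tr - low_tr                       # 300000.0
rel_tr_width = abs_tr_width / high_tr                 # ~0.0909
goal = 0.005
width_in_goals = math.log(1.0 - rel_tr_width) / math.log(1.0 - goal)
print(rel_tr_width, width_in_goals)                   # ~0.0909 ~19.0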
diff --git a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py b/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py
deleted file mode 100644
index c52934530e..0000000000
--- a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ReceiveRateMeasurement class."""
-
-
-class ReceiveRateMeasurement:
- """Structure defining the result of single Rr measurement."""
-
- def __init__(
- self, duration, target_tr, transmit_count, loss_count,
- approximated_duration=0.0, partial_transmit_count=0,
- effective_loss_ratio=None):
- """Constructor, normalize primary and compute secondary quantities.
-
- If approximated_duration is nonzero, it is stored.
- If approximated_duration is zero, duration value is stored.
- Either way, additional secondary quantities are computed
- from the stored value.
-
- If there is zero transmit_count, ratios are set to zero.
-
- In some cases, traffic generator does not attempt all the needed
- transactions. In that case, nonzero partial_transmit_count
- holds (an estimate of) count of the actually attempted transactions.
- This is used to populate some secondary quantities.
-
- TODO: Use None instead of zero?
-
- Field effective_loss_ratio is specific for use in MLRsearch,
- where measurements with lower loss ratio at higher target_tr
- cannot be relied upon if there is a measurement with higher loss ratio
- at lower target_tr. In this case, the higher loss ratio from
- other measurement is stored as effective loss ratio in this measurement.
- If None, the computed loss ratio of this measurement is used.
- If not None, the computed ratio can still be applied if it is larger.
-
- :param duration: Measurement duration [s].
- :param target_tr: Target transmit rate [pps].
- If bidirectional traffic is measured, this is bidirectional rate.
- :param transmit_count: Number of packets transmitted [1].
- :param loss_count: Number of packets transmitted but not received [1].
- :param approximated_duration: Estimate of the actual time of the trial.
- :param partial_transmit_count: Estimate count of actually attempted
- transactions.
- :param effective_loss_ratio: None or highest loss ratio so far.
- :type duration: float
- :type target_tr: float
- :type transmit_count: int
- :type loss_count: int
- :type approximated_duration: float
- :type partial_transmit_count: int
- """
- self.duration = float(duration)
- self.target_tr = float(target_tr)
- self.transmit_count = int(transmit_count)
- self.loss_count = int(loss_count)
- self.receive_count = transmit_count - loss_count
- self.transmit_rate = transmit_count / self.duration
- self.loss_rate = loss_count / self.duration
- self.receive_rate = self.receive_count / self.duration
- self.loss_ratio = (
- float(self.loss_count) / self.transmit_count
- if self.transmit_count > 0 else 1.0
- )
- self.effective_loss_ratio = self.loss_ratio
- if effective_loss_ratio is not None:
- if effective_loss_ratio > self.loss_ratio:
- self.effective_loss_ratio = float(effective_loss_ratio)
- self.receive_ratio = (
- float(self.receive_count) / self.transmit_count
- if self.transmit_count > 0 else 0.0
- )
- self.approximated_duration = (
- float(approximated_duration) if approximated_duration
- else self.duration
- )
- self.approximated_receive_rate = (
- self.receive_count / self.approximated_duration
- if self.approximated_duration > 0.0 else 0.0
- )
- # If the traffic generator is unreliable and sends less packets,
- # the absolute receive rate might be too low for next target.
- self.partial_transmit_count = (
- int(partial_transmit_count) if partial_transmit_count
- else self.transmit_count
- )
- self.partial_receive_ratio = (
- float(self.receive_count) / self.partial_transmit_count
- if self.partial_transmit_count > 0 else 0.0
- )
- self.partial_receive_rate = (
- self.target_tr * self.partial_receive_ratio
- )
- # We use relative packet ratios in order to support cases
- # where target_tr is in transactions per second,
- # but there are multiple packets per transaction.
- self.relative_receive_rate = (
- self.target_tr * self.receive_count / self.transmit_count
- )
-
- def __str__(self):
- """Return string reporting input and loss ratio."""
- return f"d={self.duration!s},Tr={self.target_tr!s}," \
- f"Df={self.loss_ratio!s}"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"ReceiveRateMeasurement(duration={self.duration!r}," \
- f"target_tr={self.target_tr!r}," \
- f"transmit_count={self.transmit_count!r}," \
- f"loss_count={self.loss_count!r}," \
- f"approximated_duration={self.approximated_duration!r}," \
- f"partial_transmit_count={self.partial_transmit_count!r}," \
- f"effective_loss_ratio={self.effective_loss_ratio!r})"
diff --git a/resources/libraries/python/MLRsearch/WidthArithmetics.py b/resources/libraries/python/MLRsearch/WidthArithmetics.py
deleted file mode 100644
index 21316c5441..0000000000
--- a/resources/libraries/python/MLRsearch/WidthArithmetics.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining utility functions for manipulating intervals."""
-
-import math
-
-
-ROUNDING_CONSTANT = 0.999999
-
-def multiply_relative_width(relative_width, coefficient):
- """Return relative width corresponding to multiplied logarithmic width.
-
- The multiplication happens in logarithmic space,
- so the resulting relative width is always less than 1.
-
- :param relative_width: The base relative width to multiply.
- :param coefficient: Multiply by this in logarithmic space.
- :type relative_width: float
- :type coefficient: float
- :returns: The relative width of multiplied logarithmic size.
- :rtype: float
- """
- old_log_width = math.log(1.0 - relative_width)
- # Slight decrease to prevent rounding errors from prolonging the search.
- # TODO: Make the nines configurable.
- new_log_width = old_log_width * coefficient * ROUNDING_CONSTANT
- return 1.0 - math.exp(new_log_width)
-
-def halve_relative_width(relative_width, goal_width):
- """Return relative width corresponding to half logarithmic width.
-
- The logic attempts to save some halvings in future by performing
- uneven split. If rounding error risk is detected,
- even split is used.
-
- :param relative_width: The base relative width to halve.
- :param goal_width: Width goal for final phase.
- :type relative_width: float
- :type goal_width: float
- :returns: The relative width of half logarithmic size.
- :rtype: float
- """
- fallback_width = 1.0 - math.sqrt(1.0 - relative_width)
- # Wig means Width In Goals.
- wig = math.log(1.0 - relative_width) / math.log(1.0 - goal_width)
- cwig = 2.0 * math.ceil(wig / 2.0)
- fwig = 2.0 * math.ceil(wig * ROUNDING_CONSTANT / 2.0)
- if wig <= 2.0 or cwig != fwig:
- # Avoid too uneven splits.
- return fallback_width
- coefficient = cwig / 2
- new_width = multiply_relative_width(goal_width, coefficient)
- return new_width
-
-def step_down(current_bound, relative_width):
- """Return rate of logarithmic width below.
-
- :param current_bound: The current target transmit rate to move [pps].
- :param relative_width: The base relative width to use.
- :type current_bound: float
- :type relative_width: float
- :returns: Transmit rate smaller by relative width [pps].
- :rtype: float
- """
- return current_bound * (1.0 - relative_width)
-
-def step_up(current_bound, relative_width):
- """Return rate of logarithmic width above.
-
- :param current_bound: The current target transmit rate to move [pps].
- :param relative_width: The base relative width to use.
- :type current_bound: float
- :type relative_width: float
- :returns: Transmit rate larger by relative width [pps].
- :rtype: float
- """
- return current_bound / (1.0 - relative_width)
-
-def multiple_step_down(current_bound, relative_width, coefficient):
- """Return rate of multiplied logarithmic width below.
-
- The multiplication happens in logarithmic space,
- so the resulting applied relative width is always less than 1.
-
- :param relative_width: The base relative width to double.
- :param current_bound: The current target transmit rate to move [pps].
- :param coefficient: Multiply by this in logarithmic space.
- :type relative_width: float
- :type current_bound: float
- :type coefficient: float
- :returns: Transmit rate smaller by logarithmically multiplied width [pps].
- :rtype: float
- """
- new_width = multiply_relative_width(relative_width, coefficient)
- return step_down(current_bound, new_width)
-
-def multiple_step_up(current_bound, relative_width, coefficient):
- """Return rate of double logarithmic width above.
-
- The multiplication happens in logarithmic space,
- so the resulting applied relative width is always less than 1.
-
- :param current_bound: The current target transmit rate to move [pps].
- :param relative_width: The base relative width to double.
- :param coefficient: Multiply by this in logarithmic space.
- :type current_bound: float
- :type relative_width: float
- :type coefficient: float
- :returns: Transmit rate larger by logarithmically multiplied width [pps].
- :rtype: float
- """
- new_width = multiply_relative_width(relative_width, coefficient)
- return step_up(current_bound, new_width)
-
-def half_step_up(current_bound, relative_width, goal_width):
- """Return rate of half logarithmic width above.
-
- :param relative_width: The base relative width to halve.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type current_bound: float
- :returns: Transmit rate larger by logarithmically half width [pps].
- :rtype: float
- """
- new_width = halve_relative_width(relative_width, goal_width)
- return step_up(current_bound, new_width)
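The logarithmic-space width arithmetic above can be checked numerically. This sketch reimplements multiply_relative_width without the rounding constant, purely for clarity, and applies the step_down/step_up formulas from the deleted code.

# Numeric illustration of logarithmic width arithmetic (rounding constant omitted).
import math

def multiply_relative_width(relative_width, coefficient):
    return 1.0 - math.exp(math.log(1.0 - relative_width) * coefficient)

width = 0.005
print(multiply_relative_width(width, 2.0))   # ~0.009975, slightly less than 0.010
rate = 1.0e6
print(rate * (1.0 - width))                  # step_down: 995000.0
print(rate / (1.0 - width))                  # step_up:   ~1005025.13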
diff --git a/resources/libraries/python/MLRsearch/__init__.py b/resources/libraries/python/MLRsearch/__init__.py
index 35ef812179..349a9a96b3 100644
--- a/resources/libraries/python/MLRsearch/__init__.py
+++ b/resources/libraries/python/MLRsearch/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -14,3 +14,17 @@
"""
__init__ file for Python package "MLRsearch".
"""
+
+# TODO: Move submodules to separate modules.
+# Not obvious how to do that from PyPI point of view
+# without affecting the current CSIT global "resources" package root.
+# Probably it can be done by specifying multiple directories
+# in PYTHONPATH used throughout CSIT.
+
+# Import user-facing (API) stuff, so users do not need to know submodules.
+from .config import Config
+from .search_goal import SearchGoal
+from .multiple_loss_ratio_search import MultipleLossRatioSearch
+from .pep3140 import Pep3140Dict
+from .trial_measurement import AbstractMeasurer, MeasurementResult
+from .trimmed_stat import TrimmedStat
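After this patch, the package re-exports the classes below as its public API. The import path assumes the package is installed from PyPI under the name MLRsearch; inside CSIT the same module lives under resources.libraries.python.MLRsearch. Constructor signatures come from submodules not fully shown in this diff, so only the importable names are exercised here.

# Public API surface re-exported by the updated __init__.py (names only).
from MLRsearch import (
    AbstractMeasurer,
    Config,
    MeasurementResult,
    MultipleLossRatioSearch,
    Pep3140Dict,
    SearchGoal,
    TrimmedStat,
)

print([cls.__name__ for cls in (Config, SearchGoal, MultipleLossRatioSearch)])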
diff --git a/resources/libraries/python/MLRsearch/candidate.py b/resources/libraries/python/MLRsearch/candidate.py
new file mode 100644
index 0000000000..16bbe60bae
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/candidate.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Candidate class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from functools import total_ordering
+from typing import Optional
+
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .selector import Selector
+
+
+@total_ordering
+@dataclass(frozen=True)
+class Candidate:
+ """Class describing next trial inputs, as nominated by a selector.
+
+ As each selector is notified by the controller when its nominated load
+ becomes the winner, a reference to the selector is also included here.
+
+ The rest of the code focuses on defining the ordering between candidates.
+ When two instances are compared, the lesser has higher priority
+ for choosing which trial is actually performed next.
+
+ As Python implicitly converts values to bool in many places
+ (e.g. in "if" statement), any instance is called "truthy" if it converts
+ to True, and "falsy" if it converts to False.
+ To make such places nice and readable, __bool__ method is implemented
+ in a way that a candidate instance is falsy if its load is None.
+ As a falsy candidate never gets measured,
+ other fields of a falsy instance are irrelevant.
+ """
+
+ load: Optional[DiscreteLoad] = None
+ """Measure at this intended load. None if no load nominated by selector."""
+ duration: float = None
+ """Trial duration as chosen by the selector."""
+ width: Optional[DiscreteWidth] = None
+ """Set the global width to this when this candidate becomes the winner."""
+ selector: Selector = None
+ """Reference to the selector instance which nominated this candidate."""
+
+ def __str__(self) -> str:
+ """Convert trial inputs into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"d={self.duration},l={self.load}"
+
+ def __eq__(self, other: Candidate) -> bool:
+ """Return wheter self is identical to the other candidate.
+
+ This is defined only so the total ordering wrapper can work.
+ In reality, MLRsearch shall never test equivalence,
+ so we save space by just raising RuntimeError if this is ever called.
+
+ :param other: The other instance to compare to.
+ :type other: Candidate
+ :returns: True if the instances are equivalent.
+ :rtype: bool
+ :raises RuntimeError: Always, to prevent unintended usage.
+ """
+ raise RuntimeError("Candidate equality comparison shall not be needed.")
+
+ def __lt__(self, other: Candidate) -> bool:
+ """Return whether self should be measured before other.
+
+ In the decreasing order of importance:
+ Non-None load is preferred.
+ Self is less than other when both loads are None.
+ Lower intended load is preferred.
+ Longer trial duration is preferred.
+ Non-None width is preferred.
+ Larger width is preferred.
+ Self is preferred.
+
+ The logic comes from the desire to save time and to be conservative.
+
+ :param other: The other instance to compare to.
+ :type other: Candidate
+ :returns: True if self should be measured sooner.
+ :rtype: bool
+ """
+ if not self.load:
+ if other.load:
+ return False
+ return True
+ if not other.load:
+ return True
+ if self.load < other.load:
+ return True
+ if self.load > other.load:
+ return False
+ if self.duration > other.duration:
+ return True
+ if self.duration < other.duration:
+ return False
+ if not self.width:
+ if other.width:
+ return False
+ return True
+ if not other.width:
+ return True
+ return self.width >= other.width
+
+ def __bool__(self) -> bool:
+ """Does this candidate choose to perform any trial measurement?
+
+ :returns: True if yes, it does choose to perform.
+ :rtype: bool
+ """
+ return bool(self.load)
+
+ @staticmethod
+ def nomination_from(selector: Selector) -> Candidate:
+ """Call nominate on selector, wrap into Candidate instance to return.
+
+ To avoid a dependency cycle, candidate depends on selector (not vice versa),
+ therefore the selector cannot know how to wrap its nomination
+ into a full candidate instance.
+ This factory method finishes the wrapping.
+
+ :param selector: Selector to call.
+ :type selector: Selector
+ :returns: Newly created Candidate instance with nominated trial inputs.
+ :rtype: Candidate
+ """
+ load, duration, width = selector.nominate()
+ return Candidate(
+ load=load,
+ duration=duration,
+ width=width,
+ selector=selector,
+ )
+
+ def won(self) -> None:
+ """Inform selector its candidate became a winner."""
+ self.selector.won(self.load)
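The ordering above is what lets a controller pick the next trial with a plain min(). A minimal usage sketch follows; the "selectors" iterable and the measurement step are hypothetical stand-ins, not part of this patch:

    # Sketch only: "selectors" is an iterable of Selector instances
    # created elsewhere by the controller.
    candidates = [Candidate.nomination_from(selector) for selector in selectors]
    winner = min(candidates)  # __lt__ above decides measurement priority.
    if winner:                # A falsy winner nominated no load at all.
        # ... measure at winner.load for winner.duration (pseudo-step) ...
        winner.won()          # Notify the nominating selector.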
diff --git a/resources/libraries/python/MLRsearch/config.py b/resources/libraries/python/MLRsearch/config.py
new file mode 100644
index 0000000000..7aa8ed75a8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/config.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Config class."""
+
+from collections.abc import Iterable
+from dataclasses import dataclass
+from typing import Optional
+
+from .dataclass import DataclassProperty
+from .search_goal import SearchGoal
+from .search_goal_tuple import SearchGoalTuple
+
+
+@dataclass
+class Config:
+ """Structure containing several static config items.
+
+ The main MLRsearch algorithm uses multiple customizable values.
+ Pylint complains if the values appear as long argument lists
+ or multiple local variables.
+
+ This class offers a storage for values which do not contain
+ internally mutable state and are set at an unknown time
+ before the search starts. This way users can override only some values,
+ and do it over multiple calls.
+ All "official" user inputs are contained here.
+
+ Properties are defined to enforce the requirements on allowed values.
+ All fields have default values, so instances can be created without any.
+ It is still recommended to set all values after instantiation,
+ as the defaults may change in the next version.
+
+ As some relations between values of different fields are required,
+ users must take care to set them in the correct order.
+
+ For example, min_load has to be set to a value smaller
+ than the current value of max_load.
+ """
+
+ # Externally visible "fields" (but in fact redefined as properties).
+ goals: SearchGoalTuple = SearchGoalTuple((SearchGoal(),))
+ """Container holding search goals."""
+ min_load: float = 9001.0
+ """Each trial measurement must have intended load at least this [tps]."""
+ max_load: float = 1e9
+ """Each trial measurement must have intended load at most this [tps]."""
+ search_duration_max: float = 1200.0
+ """The search will end as a failure this long [s] after it is started."""
+ warmup_duration: float = 1.0
+ """If specified, one trial at max load and this duration is performed
+ before the usual search starts. None converts to zero and means no warmup.
+ The results of that one trial are ignored."""
+
+ @DataclassProperty
+ def goals(self) -> SearchGoalTuple:
+ """Return the reference to the current container of goals.
+
+ :returns: The current container instance.
+ :rtype: SearchGoalTuple
+ """
+ return self._goals
+
+ @goals.setter
+ def goals(self, goals: Iterable[SearchGoal]) -> None:
+ """Create and store the goal container.
+
+ :param goals: Search goals to add to the container to store.
+ :type goals: Iterable[SearchGoal]
+ :raises ValueError: If there are no goals.
+ :raises TypeError: If any of the goals is not a SearchGoal.
+ """
+ self._goals = SearchGoalTuple(goals)
+
+ @DataclassProperty
+ def min_load(self) -> float:
+ """Getter for min load, no logic here.
+
+ :returns: Currently set minimal intended load [tps].
+ :rtype: float
+ """
+ return self._min_load
+
+ @min_load.setter
+ def min_load(self, load: float) -> None:
+ """Set min load after converting type and checking value.
+
+ :param load: Minimal intended load [tps] to set.
+ :type load: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ load = float(load)
+ if load <= 0.0:
+ raise ValueError(f"Min load {load} must be positive.")
+ # At the time init is first called, _max_load is not set yet.
+ if hasattr(self, "_max_load") and load >= self.max_load:
+ raise ValueError(f"Min load {load} must be smaller.")
+ self._min_load = load
+
+ @DataclassProperty
+ def max_load(self) -> float:
+ """Getter for max load, no logic here.
+
+ :returns: Currently set maximal intended load [tps].
+ :rtype: float
+ """
+ return self._max_load
+
+ @max_load.setter
+ def max_load(self, load: float) -> None:
+ """Set max load after converting type and checking value.
+
+ :param load: Maximal intended load [tps] to set.
+ :type load: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ load = float(load)
+ if load <= self.min_load:
+ raise ValueError(f"Max load {load} must be bigger.")
+ self._max_load = load
+
+ @DataclassProperty
+ def search_duration_max(self) -> float:
+ """Getter for max search duration, no logic here.
+
+ :returns: Currently set max search duration [s].
+ :rtype: float
+ """
+ return self._search_duration_max
+
+ @search_duration_max.setter
+ def search_duration_max(self, duration: float) -> None:
+ """Set max search duration after converting and checking value.
+
+ :param duration: Search duration maximum [s] to set.
+ :type duration: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ duration = float(duration)
+ if duration <= 0.0:
+ raise ValueError(f"Search duration max too small: {duration}")
+ self._search_duration_max = duration
+
+ @DataclassProperty
+ def warmup_duration(self) -> float:
+ """Getter for warmup duration, no logic here.
+
+ :returns: Currently set warmup duration [s].
+ :rtype: float
+ """
+ return self._warmup_duration
+
+ @warmup_duration.setter
+ def warmup_duration(self, duration: Optional[float]) -> None:
+ """Set warmup duration after converting and checking value.
+
+ Zero duration is treated as None, meaning no warmup trial.
+
+ :param duration: Warmup duration [s] to set.
+ :type duration: Optional[float]
+ :raises ValueError: If the argument is found invalid.
+ """
+ if duration:
+ duration = float(duration)
+ if duration < 0.0:
+ raise ValueError(f"Warmup duration too small: {duration}")
+ else:
+ duration = 0.0
+ self._warmup_duration = duration
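Because the setters validate against the current values of other fields, assignment order matters. A minimal sketch with illustrative numbers only (here both loads end up above the default max, so max_load must be raised first):

    config = Config()                # defaults: min 9001.0, max 1e9 [tps]
    config.max_load = 4.0e9          # raise max first ...
    config.min_load = 2.0e9          # ... so this min stays below the new max
    config.search_duration_max = 600.0
    config.warmup_duration = 0.0     # zero (or None) disables the warmup trial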
diff --git a/resources/libraries/python/MLRsearch/dataclass/__init__.py b/resources/libraries/python/MLRsearch/dataclass/__init__.py
new file mode 100644
index 0000000000..e546b090c9
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "dataclass".
+"""
+
+from .dc_property import DataclassProperty
+from .field import secondary_field
diff --git a/resources/libraries/python/MLRsearch/dataclass/dc_property.py b/resources/libraries/python/MLRsearch/dataclass/dc_property.py
new file mode 100644
index 0000000000..7f3b49aeb8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/dc_property.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DataclassProperty class.
+
+The main issue that needs support is dataclasses with properties
+(including setters) and with (immutable) default values.
+
+First, this explains how property ends up passed as default constructor value:
+https://florimond.dev/en/posts/2018/10/
+/reconciling-dataclasses-and-properties-in-python/
+TL;DR: By the time __init__ is generated, original class variable (type hint)
+is replaced by property (method definition).
+
+Second, there are ways to deal with that:
+https://stackoverflow.com/a/61480946
+TL;DR: It relies on the underscored field being replaced by the value.
+
+But that does not work for field which use default_factory (or no default)
+(the underscored class field is deleted instead).
+So another way is needed to cover those cases,
+ideally without the need to define both original and underscored field.
+
+This implementation relies on the fact that decorators are executed
+while the class fields still exist, and the decorated function
+knows its own name, so the decorator can get the value stored in
+the class field and store it as an additional attribute of the getter function.
+Then for setter, the property contains the getter (as an unbound function),
+so it can access the additional attribute to get the value.
+
+This approach circumvents the precautions dataclasses take to prevent mishaps
+when a single mutable object is shared between multiple instances.
+So it is up to setters to create an appropriate copy of the default object
+if the default value is mutable.
+
+The default value cannot be MISSING nor Field nor DataclassProperty,
+otherwise the intended logic breaks.
+"""
+
+from __future__ import annotations
+
+from dataclasses import Field, MISSING
+from functools import wraps
+from inspect import stack
+from typing import Callable, Optional, TypeVar, Union
+
+
+Self = TypeVar("Self")
+"""Type for the dataclass instances being created using properties."""
+Value = TypeVar("Value")
+"""Type for the value the property (getter, setter) handles."""
+
+
+def _calling_scope_variable(name: str) -> Value:
+ """Get a variable from a higher scope.
+
+ This feels dirty, but without this the syntactic sugar
+ would not be sweet enough.
+
+ The implementation is copied from https://stackoverflow.com/a/14694234
+ with the difference of raising RuntimeError (instead of returning None)
+ if no variable of that name is found in any of the scopes.
+
+ :param name: Name of the variable to access.
+ :type name: str
+ :returns: The value of the found variable.
+ :rtype: Value
+ :raises RuntimeError: If the variable is not found in any calling scope.
+ """
+ frame = stack()[1][0]
+ while name not in frame.f_locals:
+ frame = frame.f_back
+ if frame is None:
+ raise RuntimeError(f"Field {name} value not found.")
+ return frame.f_locals[name]
+
+
+class DataclassProperty(property):
+ """Subclass of property, handles default values for dataclass fields.
+
+ If a dataclass field does not specify a default value (nor default_factory),
+ this is not needed, and in fact it will not work (so use built-in property).
+
+ This implementation seamlessly finds and inserts the default value
+ (can be mutable) into a new attribute of the getter function.
+ Before calling a setter function in init (recognized by type),
+ the default value is retrieved and passed transparently to the setter.
+ It is the responsibility of the setter to appropriately clone the value,
+ in order to prevent multiple instances sharing the same mutable value.
+ """
+
+ def __init__(
+ self,
+ fget: Optional[Callable[[], Value]] = None,
+ fset: Optional[Callable[[Self, Value], None]] = None,
+ fdel: Optional[Callable[[], None]] = None,
+ doc: Optional[str] = None,
+ ):
+ """Find and store the default value, construct the property.
+
+ See this for how the superclass property works:
+ https://docs.python.org/3/howto/descriptor.html#properties
+
+ :param fget: Getter (unbound) function to use, if any.
+ :param fset: Setter (unbound) function to use, if any.
+ :param fdel: Deleter (unbound) function to use, if any.
+ :param doc: Docstring to display when examining the property.
+ :type fget: Optional[Callable[[Self], Value]]
+ :type fset: Optional[Callable[[Self, Value], None]]
+ :type fdel: Optional[Callable[[Self], None]]
+ :type doc: Optional[str]
+ """
+ variable_found = _calling_scope_variable(fget.__name__)
+ if not isinstance(variable_found, DataclassProperty):
+ if isinstance(variable_found, Field):
+ if variable_found.default is not MISSING:
+ fget.default_value = variable_found.default
+ # Else do not store any default value.
+ else:
+ fget.default_value = variable_found
+ # Else this is the second time init is called (when setting setter),
+ # in which case the default is already stored into fget.
+ super().__init__(fget=fget, fset=fset, fdel=fdel, doc=doc)
+
+ def setter(
+ self,
+ fset: Optional[Callable[[Self, Value], None]],
+ ) -> DataclassProperty:
+ """Return new instance with a wrapped setter function set.
+
+ If the argument is None, call superclass method.
+
+ The wrapped function recognizes when it is called in init
+ (by the fact the value argument is of type DataclassProperty)
+ and in that case it extracts the stored default and passes that
+ to the user-defined setter function.
+
+ :param fset: Setter function to wrap and apply.
+ :type fset: Optional[Callable[[Self, Value], None]]
+ :returns: New property instance with correct setter function set.
+ :rtype: DataclassProperty
+ """
+ if fset is None:
+ return super().setter(fset)
+
+ @wraps(fset)
+ def wrapped(sel_: Self, val: Union[Value, DataclassProperty]) -> None:
+ """Extract default from getter if needed, call the user setter.
+
+ The sel_ parameter is listed explicitly, to signify
+ this is an unbound function, not a bound method yet.
+
+ :param sel_: Instance of dataclass (not of DataclassProperty)
+ to set the value on.
+ :param val: Set this value, or the default value stored there.
+ :type sel_: Self
+ :type val: Union[Value, DataclassProperty]
+ """
+ if isinstance(val, DataclassProperty):
+ val = val.fget.default_value
+ fset(sel_, val)
+
+ return super().setter(wrapped)
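As a rough illustration of the pattern this enables, here is a standalone sketch (not code from this patch; the import path is an assumption and depends on how the package is placed on PYTHONPATH). It mirrors how Config uses the decorator: a field with an immutable default, redefined as a validating property.

    from dataclasses import dataclass
    from MLRsearch.dataclass import DataclassProperty  # assumed import path

    @dataclass
    class Sample:
        ratio: float = 0.5  # Default value, later routed through the setter.

        @DataclassProperty
        def ratio(self) -> float:
            return self._ratio

        @ratio.setter
        def ratio(self, value: float) -> None:
            value = float(value)
            if not 0.0 < value < 1.0:
                raise ValueError(f"Ratio {value} must be between 0 and 1.")
            self._ratio = value

    print(Sample().ratio)      # 0.5, the default passed through the setter.
    print(Sample(0.25).ratio)  # 0.25, an explicit value validated as usual.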
diff --git a/resources/libraries/python/MLRsearch/dataclass/field.py b/resources/libraries/python/MLRsearch/dataclass/field.py
new file mode 100644
index 0000000000..55d9d0879f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/field.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining secondary_field function.
+
+Just a shorthand for a frequently repeated expression.
+
+The main point is that this dataclass field is not used in init.
+Maybe it is a derived value of a frozen dataclass.
+Maybe it is a cache to help avoiding repeated computation.
+Maybe it is a temporary value stored in one method and read in another method.
+In any case, the caller does not need to know it is here,
+so it is excluded from repr, hashing, ordering and similar.
+"""
+
+from dataclasses import Field, field
+
+
+def secondary_field() -> Field:
+ """Return newly created Field with non-default arguments
+
+ In practice, it seems to be fine to reuse the resulting Field instance
+ when defining multiple dataclass fields,
+ but we keep this as a function to improve readability.
+
+ :returns: A new Field instance useful for secondary fields.
+ :rtype: Field
+ """
+ return field(
+ default=None,
+ init=False,
+ repr=False,
+ compare=False,
+ )
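The intended use is for derived fields, as in the classes below. A small sketch (standalone, with the same assumed import path as above):

    from dataclasses import dataclass
    from MLRsearch.dataclass import secondary_field  # assumed import path

    @dataclass
    class Rectangle:
        width: float
        height: float
        # Derived value: excluded from __init__, repr and comparisons.
        area: float = secondary_field()

        def __post_init__(self) -> None:
            self.area = self.width * self.height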
diff --git a/resources/libraries/python/MLRsearch/discrete_interval.py b/resources/libraries/python/MLRsearch/discrete_interval.py
new file mode 100644
index 0000000000..0a3bf443a8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_interval.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteInterval class."""
+
+from dataclasses import dataclass
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+
+
+# TODO: Can this be frozen?
+@dataclass
+class DiscreteInterval:
+ """Interval class with more computations available.
+
+ Along with the discrete form of width,
+ an MLRsearch-specific way of halving the interval is also included.
+
+ The two primary field values do not have to be valid relevant bounds,
+ but at the end of the search, they usually are.
+
+ The load values must be round.
+ """
+
+ lower_bound: DiscreteLoad
+ """Value for the lower intended load (or load stats or similar)."""
+ upper_bound: DiscreteLoad
+ """Value for the higher intended load (or load stats or similar)."""
+ # Primary fields above, derived below.
+ discrete_width: DiscreteWidth = secondary_field()
+ """Discrete width between intended loads (upper_bound minus lower_bound)."""
+
+ def __post_init__(self) -> None:
+ """Sort bounds by intended load, compute secondary quantities.
+
+ :raises RuntimeError: If a result used non-rounded load.
+ """
+ if not self.lower_bound.is_round:
+ raise RuntimeError(f"Non-round lower bound: {self.lower_bound!r}")
+ if not self.upper_bound.is_round:
+ raise RuntimeError(f"Non-round upper bound: {self.upper_bound!r}")
+ if self.lower_bound > self.upper_bound:
+ tmp = self.lower_bound
+ self.lower_bound = self.upper_bound
+ self.upper_bound = tmp
+ self.discrete_width = self.upper_bound - self.lower_bound
+
+ def __str__(self) -> str:
+ """Convert to a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"lower_bound=({self.lower_bound}),upper_bound=({self.upper_bound})"
+ )
+
+ # TODO: Use "target" instad of "goal" in argument and variable names.
+
+ def width_in_goals(self, goal: DiscreteWidth) -> float:
+ """Return relative width as a multiple of the given goal (int form).
+
+ Integer forms are used for computation, safe as loads are rounded.
+ The result is a float, as self int may not be divisible by goal int.
+
+ :param goal: A relative width amount to be used as a unit.
+ :type goal: DiscreteWidth
+ :returns: Self width in multiples of (integer form of) goal width.
+ :rtype: float
+ """
+ return int(self.discrete_width) / int(goal)
+
+ def middle(self, goal: DiscreteWidth) -> DiscreteLoad:
+ """Return new intended load (discrete form) in the middle.
+
+ All calculations are based on int forms.
+
+ One of the halves is rounded to a power-of-two multiple of the goal.
+ The power that leads to the most even split is used.
+ The lower width is the smaller one (if the split is not exactly even).
+
+ This approach prefers lower loads (to remain conservative) and can save
+ some measurements (when all middle measurements have high loss).
+ Note that when competing with external search from above,
+ that search is already likely to produce widths that are
+ power-of-two multiples of the target width.
+
+ If the interval width is one goal (or less), RuntimeError is raised.
+ If the interval width is between one and two goals (not including),
+ a more even split is attempted (using half the goal value).
+
+ :param goal: Target width goal to use for uneven halving.
+ :type goal: DiscreteWidth
+ :returns: New load to use for bisecting.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ int_self, int_goal = int(self.discrete_width), int(goal)
+ if int_self <= int_goal:
+ raise RuntimeError(f"Do not halve small enough interval: {self!r}")
+ if int_self == 2 * int_goal:
+ # Even split, return here simplifies the while loop below.
+ return self.lower_bound + goal
+ if int_self < 2 * int_goal:
+ # This can only happen when int_goal >= 2.
+ # In this case, we do not have a good enough split at this width goal,
+ # but maybe this is not the final target, so we can attempt
+ # a split at half the width goal.
+ if not int_goal % 2:
+ return self.middle(goal=goal.half_rounded_down())
+ # Odd int_goal, so this must be the last phase. Do an even split.
+ lo_width = self.discrete_width.half_rounded_down()
+ return self.lower_bound + lo_width
+ hi_width = goal
+ lo_width = self.discrete_width - hi_width
+ # We know lo_width > hi_width because we did not do the even split.
+ while 1:
+ hi2_width = hi_width * 2
+ lo2_width = self.discrete_width - hi2_width
+ if lo2_width <= hi2_width:
+ break
+ hi_width, lo_width = hi2_width, lo2_width
+ # Which of the two options is more even? Product decides.
+ if int(hi_width) * int(lo_width) > int(hi2_width) * int(lo2_width):
+ # Previous attempt was more even, but hi_width was the smaller one.
+ lo2_width = hi_width
+ # Else lo2_width is more even and no larger than hi2_width.
+ return self.lower_bound + lo2_width
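As a worked example of the logic above (in units of the goal width g): an interval 6g wide is split into a 2g lower half and a 4g upper half, both power-of-two multiples of g; an interval 5g wide is split into 2g and 3g, so only the lower half is a power-of-two multiple, and it is the smaller one, as promised by the docstring.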
diff --git a/resources/libraries/python/MLRsearch/discrete_load.py b/resources/libraries/python/MLRsearch/discrete_load.py
new file mode 100644
index 0000000000..a75b4acf96
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_load.py
@@ -0,0 +1,316 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteLoad class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from functools import total_ordering
+from typing import Callable, Optional, Union
+
+from .load_rounding import LoadRounding
+from .discrete_width import DiscreteWidth
+
+
+@total_ordering
+@dataclass
+class DiscreteLoad:
+ """Structure to store load value together with its rounded integer form.
+
+ LoadRounding instance is needed to enable conversion between two forms.
+ Conversion methods and factories are added for convenience.
+
+ In general, the float form is allowed to differ from conversion from int.
+
+ Comparisons are supported, acting on the float load component.
+ Additive operations are supported, acting on int form.
+ Multiplication by a float constant is supported, acting on float form.
+
+ As for all user defined classes by default, all instances are truthy.
+ That is useful when dealing with Optional values, as None is falsy.
+
+ This dataclass is effectively frozen, but cannot be marked as such
+ as that would prevent LoadStats from being its subclass.
+ """
+
+ # For most debugs, rounding in repr just takes space.
+ rounding: LoadRounding = field(repr=False, compare=False)
+ """Rounding instance to use for conversion."""
+ float_load: float = None
+ """Float form of intended load [tps], usable for measurer."""
+ int_load: int = field(compare=False, default=None)
+ """Integer form, usable for exact computations."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, compute missing information.
+
+ At this point, it is allowed for float load to differ from
+ conversion from int load. MLRsearch should round explicitly later,
+ based on its additional information.
+
+ :raises RuntimeError: If both init arguments are None.
+ """
+ if self.float_load is None and self.int_load is None:
+ raise RuntimeError("Float or int value is needed.")
+ if self.float_load is None:
+ self.int_load = int(self.int_load)
+ self.float_load = self.rounding.int2float(self.int_load)
+ else:
+ self.float_load = float(self.float_load)
+ self.int_load = self.rounding.float2int(self.float_load)
+
+ def __str__(self) -> str:
+ """Convert to a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"int_load={int(self)}"
+
+ # Explicit comparison operators.
+ # Those generated with dataclass order=True do not allow subclass instances.
+
+ def __eq__(self, other: Optional[DiscreteLoad]) -> bool:
+ """Return whether the other instance has the same float form.
+
+ None is effectively considered to be an unequal instance.
+
+ :param other: Other instance to compare to, or None.
+ :type other: Optional[DiscreteLoad]
+ :returns: True only if float forms are exactly equal.
+ :rtype: bool
+ """
+ if other is None:
+ return False
+ return float(self) == float(other)
+
+ def __lt__(self, other: DiscreteLoad) -> bool:
+ """Return whether self has smaller float form than the other instance.
+
+ None is not supported, as MLRsearch does not need that
+ (so when None appears we want to raise).
+
+ :param other: Other instance to compare to.
+ :type other: DiscreteLoad
+ :returns: True only if the float form of self is strictly smaller.
+ :rtype: bool
+ """
+ return float(self) < float(other)
+
+ def __hash__(self) -> int:
+ """Return a hash based on the float value.
+
+ With this, the instance can be used as if it was immutable and hashable,
+ e.g. it can be a key in a dict.
+
+ :returns: Hash value for this instance.
+ :rtype: int
+ """
+ return hash(float(self))
+
+ @property
+ def is_round(self) -> bool:
+ """Return whether float load matches converted int load.
+
+ :returns: False if float load is not rounded.
+ :rtype: bool
+ """
+ expected = self.rounding.int2float(self.int_load)
+ return expected == self.float_load
+
+ def __int__(self) -> int:
+ """Return the int value.
+
+ :returns: The int field value.
+ :rtype: int
+ """
+ return self.int_load
+
+ def __float__(self) -> float:
+ """Return the float value.
+
+ :returns: The float field value [tps].
+ :rtype: float
+ """
+ return self.float_load
+
+ @staticmethod
+ def int_conver(rounding: LoadRounding) -> Callable[[int], DiscreteLoad]:
+ """Return a factory that turns an int load into a discrete load.
+
+ :param rounding: Rounding instance needed.
+ :type rounding: LoadRounding
+ :returns: Factory to use when converting from int.
+ :rtype: Callable[[int], DiscreteLoad]
+ """
+
+ def factory_int(int_load: int) -> DiscreteLoad:
+ """Use rounding and int load to create discrete load.
+
+ :param int_load: Intended load in integer form.
+ :type int_load: int
+ :returns: New discrete load instance matching the int load.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=rounding, int_load=int_load)
+
+ return factory_int
+
+ @staticmethod
+ def float_conver(rounding: LoadRounding) -> Callable[[float], DiscreteLoad]:
+ """Return a factory that turns a float load into a discrete load.
+
+ :param rounding: Rounding instance needed.
+ :type rounding: LoadRounding
+ :returns: Factory to use when converting from float.
+ :rtype: Callable[[float], DiscreteLoad]
+ """
+
+ def factory_float(float_load: float) -> DiscreteLoad:
+ """Use rounding instance and float load to create discrete load.
+
+ The float form is not rounded yet.
+
+ :param float_load: Intended load in float form [tps].
+ :type float_load: float
+ :returns: New discrete load instance matching the float load.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=rounding, float_load=float_load)
+
+ return factory_float
+
+ def rounded_down(self) -> DiscreteLoad:
+ """Create and return new instance with float form matching int.
+
+ :returns: New instance with same int form and float form rounded down.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=self.rounding, int_load=int(self))
+
+ def hashable(self) -> DiscreteLoad:
+ """Return new equivalent instance.
+
+ This is mainly useful for conversion from unhashable subclasses,
+ such as LoadStats.
+ Rounding instance (reference) is copied from self.
+
+ :returns: New instance with values based on float form of self.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=self.rounding, float_load=float(self))
+
+ def __add__(self, width: DiscreteWidth) -> DiscreteLoad:
+ """Return newly constructed instance with width added to int load.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding two loads by mistake
+ (or adding int to load and similar).
+
+ :param width: Value to add to int load.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {width!r}")
+ return DiscreteLoad(
+ rounding=self.rounding,
+ int_load=self.int_load + int(width),
+ )
+
+ def __sub__(
+ self, other: Union[DiscreteWidth, DiscreteLoad]
+ ) -> Union[DiscreteLoad, DiscreteWidth]:
+ """Return result based on the argument type.
+
+ Load minus load is width, load minus width is load.
+ This allows the same operator to support both operations.
+
+ Rounding instance (reference) is copied from self.
+
+ :param other: Value to subtract from int load.
+ :type other: Union[DiscreteWidth, DiscreteLoad]
+ :returns: Resulting width or load.
+ :rtype: Union[DiscreteLoad, DiscreteWidth]
+ :raises RuntimeError: If the argument type is not supported.
+ """
+ if isinstance(other, DiscreteWidth):
+ return self._minus_width(other)
+ if isinstance(other, DiscreteLoad):
+ return self._minus_load(other)
+ raise RuntimeError(f"Unsupported type {other!r}")
+
+ def _minus_width(self, width: DiscreteWidth) -> DiscreteLoad:
+ """Return newly constructed instance, width subtracted from int load.
+
+ Rounding instance (reference) is copied from self.
+
+ :param width: Value to subtract from int load.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(
+ rounding=self.rounding,
+ int_load=self.int_load - int(width),
+ )
+
+ def _minus_load(self, other: DiscreteLoad) -> DiscreteWidth:
+ """Return newly constructed width instance, difference of int loads.
+
+ Rounding instance (reference) is copied from self.
+
+ :param other: Value to subtract from int load.
+ :type other: DiscreteLoad
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ """
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_load - int(other),
+ )
+
+ def __mul__(self, coefficient: float) -> DiscreteLoad:
+ """Return newly constructed instance, float load multiplied by argument.
+
+ Rounding instance (reference) is copied from self.
+
+ :param coefficient: Value to multiply float load with.
+ :type coefficient: float
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If argument is unsupported.
+ """
+ if not isinstance(coefficient, float):
+ raise RuntimeError(f"Not float: {coefficient!r}")
+ if coefficient <= 0.0:
+ raise RuntimeError(f"Not positive: {coefficient!r}")
+ return DiscreteLoad(
+ rounding=self.rounding,
+ float_load=self.float_load * coefficient,
+ )
+
+ def __truediv__(self, coefficient: float) -> DiscreteLoad:
+ """Call multiplication with inverse argument.
+
+ :param coefficient: Value to divide float load with.
+ :type coefficient: float
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If argument is unsupported.
+ """
+ return self * (1.0 / coefficient)
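A small sketch of the arithmetic conventions above. Here "rounding" stands for a LoadRounding instance constructed elsewhere (its constructor lives in load_rounding.py, not shown in this hunk), and the numeric values are illustrative, assumed to lie between that instance's min and max loads:

    from_int = DiscreteLoad.int_conver(rounding)
    from_float = DiscreteLoad.float_conver(rounding)
    lower = from_int(100)
    upper = from_float(2.5e6).rounded_down()  # Snap float form to int form.
    # Assuming 2.5e6 rounds to an int form above 100:
    width = upper - lower                     # Load minus load is a width.
    again = lower + width                     # Load plus width is a load.
    # Equality compares float forms, so "again" equals "upper" here.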
diff --git a/resources/libraries/python/MLRsearch/discrete_result.py b/resources/libraries/python/MLRsearch/discrete_result.py
new file mode 100644
index 0000000000..882b6081c6
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_result.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteResult class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .discrete_load import DiscreteLoad
+from .trial_measurement import MeasurementResult
+
+
+@dataclass
+class DiscreteResult(MeasurementResult):
+ """A measurement result where intended load is also given as discrete load.
+
+ The discrete load has to be round and has to match the intended load.
+ """
+
+ # Must have default as superclass has fields with default values.
+ discrete_load: DiscreteLoad = None
+ """Intended load [tps]; discrete, round and equal to intended load."""
+
+ def __post_init__(self) -> None:
+ """Call super, verify intended and discrete loads are the same.
+
+ :raises TypeError: If discrete load is not DiscreteLoad.
+ :raises ValueError: If the discrete load is not round.
+ :raises ValueError: If the load does not match intended load.
+ """
+ super().__post_init__()
+ if not isinstance(self.discrete_load, DiscreteLoad):
+ raise TypeError(f"Not a discrete load: {self.discrete_load!r}")
+ if not self.discrete_load.is_round:
+ raise ValueError(f"Discrete load not round: {self.discrete_load!r}")
+ if float(self.discrete_load) != self.intended_load:
+ raise ValueError(f"Load mismatch: {self!r}")
+
+ @staticmethod
+ def with_load(
+ result: MeasurementResult, load: DiscreteLoad
+ ) -> DiscreteResult:
+ """Return result with added load.
+
+ :param result: A result, possibly without discrete load.
+ :param load: Discrete load to add.
+ :type result: MeasurementResult
+ :type load: DiscreteLoad
+ :returns: Equivalent result with matching discrete load.
+ :rtype: DiscreteResult
+ :raises TypeError: If discrete load is not DiscreteLoad.
+ :raises ValueError: If the discrete load is not round.
+ :raises ValueError: If the load does not match intended load.
+ """
+ return DiscreteResult(
+ intended_duration=result.intended_duration,
+ intended_load=result.intended_load,
+ offered_count=result.offered_count,
+ loss_count=result.loss_count,
+ forwarding_count=result.forwarding_count,
+ offered_duration=result.offered_duration,
+ duration_with_overheads=result.duration_with_overheads,
+ intended_count=result.intended_count,
+ discrete_load=load,
+ )
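Typical construction goes through the factory above, e.g. (sketch only; "result" is a MeasurementResult obtained from a measurer and "load" is the round DiscreteLoad whose float form was used as that trial's intended load):

    discrete_result = DiscreteResult.with_load(result=result, load=load)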
diff --git a/resources/libraries/python/MLRsearch/discrete_width.py b/resources/libraries/python/MLRsearch/discrete_width.py
new file mode 100644
index 0000000000..8a4845a83f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_width.py
@@ -0,0 +1,197 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteWidth class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+
+from .load_rounding import LoadRounding
+
+
+# TODO: Make properly frozen.
+@dataclass(order=True)
+class DiscreteWidth:
+ """Structure to store float width together with its rounded integer form.
+
+ The width does not have to be positive, i.e. the computed integer width
+ does not have to be larger than zero.
+
+ LoadRounding instance is needed to enable conversion between two forms.
+
+ Conversion and arithmetic methods are added for convenience.
+ Division and non-integer multiplication are intentionally not supported,
+ as MLRsearch should not seek unround widths when round ones are available.
+
+ The instance is effectively immutable, but not hashable as it refers
+ to the rounding instance, which is implemented as mutable
+ (although the mutations are not visible).
+ """
+
+ # For most debugs, rounding in repr just takes space.
+ rounding: LoadRounding = field(repr=False, compare=False)
+ """Rounding instance to use for conversion."""
+ float_width: float = None
+ """Relative width of float intended load.
+ This is treated as a constructor argument, and does not need to match
+ the int width. Int width is computed to be no wider than this."""
+ int_width: int = field(compare=False, default=None)
+ """Integer form, difference of integer loads.
+ This is the primary quantity used by most computations."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, compute missing information.
+
+ At this point, it is allowed for float width to be slightly larger
+ than the implied int width.
+
+ If both forms are specified, the float form is taken as primary
+ (thus the integer form is recomputed to match).
+
+ :raises RuntimeError: If both init arguments are None.
+ """
+ if self.float_width is None and self.int_width is None:
+ raise RuntimeError("Float or int value is needed.")
+ if self.float_width is None:
+ self.int_width = int(self.int_width)
+ min_load = self.rounding.int2float(0)
+ increased_load = self.rounding.int2float(self.int_width)
+ self.float_width = (increased_load - min_load) / increased_load
+ return
+ self.float_width = float(self.float_width)
+ min_load = self.rounding.int2float(0)
+ increased_load = min_load / (1.0 - self.float_width)
+ int_load = self.rounding.float2int(increased_load)
+ verify_load = self.rounding.int2float(int_load)
+ if verify_load > increased_load:
+ int_load -= 1
+ self.int_width = int_load
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"int_width={int(self)}"
+
+ def __int__(self) -> int:
+ """Return the integer form.
+
+ :returns: The int field value.
+ :rtype: int
+ """
+ return self.int_width
+
+ def __float__(self) -> float:
+ """Return the float form.
+
+ :returns: The float field value.
+ :rtype: float
+ """
+ return self.float_width
+
+ def __hash__(self) -> int:
+ """Return a hash based on the float value.
+
+ With this, the instance can be used as if it was immutable and hashable,
+ e.g. it can be a key in a dict.
+
+ :returns: Hash value for this instance.
+ :rtype: int
+ """
+ return hash(float(self))
+
+ def rounded_down(self) -> DiscreteWidth:
+ """Create and return new instance with float form matching int.
+
+ :returns: New instance with same int form and float form rounded down.
+ :rtype: DiscreteWidth
+ """
+ return DiscreteWidth(rounding=self.rounding, int_width=int(self))
+
+ def __add__(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return newly constructed instance with int widths added.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding something unsupported.
+
+ :param width: Value to add to int width.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {width!r}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width + int(width),
+ )
+
+ def __sub__(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return newly constructed instance with int widths subtracted.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding something unsupported.
+ Non-positive results are disallowed by constructor.
+
+ :param width: Value to subtract from int width.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {type(width)}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width - int(width),
+ )
+
+ def __mul__(self, coefficient: int) -> DiscreteWidth:
+ """Construct new instance with int value multiplied.
+
+ Rounding instance (reference) is copied from self.
+
+ :param coefficient: Constant to multiply int width with.
+ :type coefficient: int
+ :returns: New instance with multiplied int width.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: If argument value does not meet requirements.
+ """
+ if not isinstance(coefficient, int):
+ raise RuntimeError(f"Coefficient not int: {coefficient!r}")
+ if coefficient < 1:
+ raise RuntimeError(f"Coefficient not positive: {coefficient!r}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width * coefficient,
+ )
+
+ def half_rounded_down(self) -> DiscreteWidth:
+ """Contruct new instance of half the integer width.
+
+ If the current integer width is odd, round the half width down.
+
+ :returns: New instance with half int width.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: If the resulting integer width is not positive.
+ """
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width // 2,
+ )
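These operations are what interval halving and external-search expansion build on. A short sketch, again with an assumed LoadRounding instance "rounding" constructed elsewhere:

    step = DiscreteWidth(rounding=rounding, int_width=1)  # One rounding step.
    wider = step * 8                       # Integer multiplication only.
    narrower = wider.half_rounded_down()   # int_width 8 -> 4.
    total = wider + narrower               # int_width 12.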
diff --git a/resources/libraries/python/MLRsearch/expander.py b/resources/libraries/python/MLRsearch/expander.py
new file mode 100644
index 0000000000..0e6800477e
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/expander.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetExpander class."""
+
+
+from dataclasses import dataclass, field
+from typing import Callable, Optional
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .global_width import GlobalWidth
+from .limit_handler import LimitHandler
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TargetedExpander:
+ """Utility class to track expanding width during external search.
+
+ One instance per selector, but the global current width is also taken into consideration.
+
+ Generally, many strategies may limit next_width immediately,
+ but next_width expands only after measurement
+ when external search fails to find its bound (global width is also bumped).
+ See strategy classes for specific details on external and internal search.
+ """
+
+ target: TargetSpec
+ """The target this strategy is focusing on."""
+ global_width: GlobalWidth
+ """Reference to the global width tracking instance."""
+ initial_lower_load: Optional[DiscreteLoad]
+ """Smaller of the two loads distinguished at instance creation.
+ Can be None if initial upper bound is the min load."""
+ initial_upper_load: Optional[DiscreteLoad]
+ """Larger of the two loads distinguished at instance creation.
+ Can be None if initial lower bound is the max load."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the class used to avoid too narrow intervals."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+ # Primary above, derived below.
+ next_width: DiscreteWidth = secondary_field()
+ """This will be used in next search step if no strategy intervenes."""
+
+ def __post_init__(self) -> None:
+ """Prepare next width."""
+ self.next_width = self.target.discrete_width
+ if self.initial_lower_load and self.initial_upper_load:
+ interval_width = self.initial_upper_load - self.initial_lower_load
+ self.next_width = max(self.next_width, interval_width)
+ self.expand(bump_global=False)
+
+ def expand(self, bump_global: bool = True) -> None:
+ """Multiply next width by expansion coefficient.
+
+ The global current width should be bumped when external search
+ is done but load is not the bound we were looking for.
+
+ For global width shrinking, set the field directly.
+
+ :param bump_global: False if called from limit or post init.
+ :type bump_global: bool
+ """
+ self.next_width *= self.target.expansion_coefficient
+ if bump_global:
+ self.global_width.width = self.next_width
+
+ def get_width(self) -> DiscreteWidth:
+ """Return next width corrected by global current width.
+
+ :returns: The width to use, see GlobalWidth.
+ :rtype: DiscreteWidth
+ """
+ return self.global_width.or_larger(self.next_width)
+
+ def limit(self, last_width: DiscreteWidth) -> None:
+ """Decrease the prepared next width.
+
+ This is called by other strategies when bounds are getting narrower.
+
+ Global current width is not updated yet,
+ as the other strategy may not end up becoming the winner
+ and we want to avoid interfering with other selector strategies.
+
+ :param last_width: As applied by other strategy, smaller of two halves.
+ :type last_width: DiscreteWidth
+ """
+ self.next_width = max(last_width, self.target.discrete_width)
+ self.expand(bump_global=False)
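A rough sketch of the calling pattern described above; "expander" is a TargetedExpander owned by one selector, the measurement steps are elided, and "last_width" comes from whichever strategy narrowed the interval:

    width = expander.get_width()  # Local next width, or global width if larger.
    # ... external search measures at a load this far from the previous bound ...
    expander.expand()             # Bound not found: widen and bump global width.
    # ... or, when another strategy narrowed the interval:
    expander.limit(last_width)    # Restart expansion from the narrower width.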
diff --git a/resources/libraries/python/MLRsearch/global_width.py b/resources/libraries/python/MLRsearch/global_width.py
new file mode 100644
index 0000000000..6f7df8b894
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/global_width.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining GlobalWidth class."""
+
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+
+
+@dataclass
+class GlobalWidth:
+ """Primarily used to synchronize external search steps across selectors.
+
+ The full name is global current width, but that is too long for identifiers.
+
+ While each selector tracks its "local" (per goal) width using expander,
+ it is important we do not interleave upper external search for two goals.
+ That is why all selector instances refer to a singleton instance of this.
+
+ In general, this value remains constant when main loop iterates over
+ selectors and when selector iterates over strategies.
+ After the winner is measured, this width is set to the winner's width value,
+ and for some strategies that width is expanded when external search says so.
+
+ The two methods alone would hardly justify creating a new class;
+ the main reason is having a name for type hints
+ that distinguishes this from various other "width" and "current" values.
+ """
+
+ width: DiscreteWidth
+ """Minimum width to apply at next external search step."""
+ # TODO: Add a setter, so it is easier to add debug logging.
+
+ @staticmethod
+ def from_loads(load0: DiscreteLoad, load1: DiscreteLoad) -> GlobalWidth:
+ """Initialize the value based on two loads from initial trials.
+
+ :param load0: Lower (or equal) load from the two most recent trials.
+ :param load1: Higher (or equal) load from the two most recent trials.
+ :type load0: DiscreteLoad
+ :type load1: DiscreteLoad
+ :returns: Newly created instance with computed width.
+ :rtype: GlobalWidth
+ """
+ return GlobalWidth(load1 - load0)
+
+ def or_larger(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return width from argument or self, whichever is larger.
+
+ :param width: A selector (strategy) asks if this width is large enough.
+ :type width: DiscreteWidth
+ :returns: Argument or current width.
+ :rtype: DiscreteWidth
+ """
+ return width if width > self.width else self.width
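In use (sketch only; load0 and load1 are the two initial-trial loads, local_width is a selector's own next width, and winner_width is the width of the winning candidate):

    global_width = GlobalWidth.from_loads(load0, load1)
    width_to_use = global_width.or_larger(local_width)
    # After the winner trial, the controller stores the winner's width:
    global_width.width = winner_width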
diff --git a/resources/libraries/python/MLRsearch/limit_handler.py b/resources/libraries/python/MLRsearch/limit_handler.py
new file mode 100644
index 0000000000..5919f398f3
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/limit_handler.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LimitHandler class."""
+
+from dataclasses import dataclass
+from typing import Callable, Optional
+
+from .dataclass import secondary_field
+from .discrete_interval import DiscreteInterval
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .load_rounding import LoadRounding
+
+
+@dataclass
+class LimitHandler:
+ """Encapsulated methods for logic around handling limits.
+
+ In multiple places within MLRsearch code, an intended load value
+ is only useful if it is far enough from possible known values.
+ All such places can be served with the handle method
+ with appropriate arguments.
+ """
+
+ rounding: LoadRounding
+ """Rounding instance to use."""
+ debug: Callable[[str], None]
+ """Injectable logging function."""
+ # The two fields below are derived, extracted from rounding as a shortcut.
+ min_load: DiscreteLoad = secondary_field()
+ """Minimal load, as prescribed by Config."""
+ max_load: DiscreteLoad = secondary_field()
+ """Maximal load, as prescribed by Config."""
+
+ def __post_init__(self) -> None:
+ """Initialize derived quantities."""
+ from_float = DiscreteLoad.float_conver(rounding=self.rounding)
+ self.min_load = from_float(self.rounding.min_load)
+ self.max_load = from_float(self.rounding.max_load)
+
+ def handle(
+ self,
+ load: DiscreteLoad,
+ width: DiscreteWidth,
+ clo: Optional[DiscreteLoad],
+ chi: Optional[DiscreteLoad],
+ ) -> Optional[DiscreteLoad]:
+ """Return new intended load after considering limits and bounds.
+
+ Not only do we want to avoid measuring outside the min-max interval,
+ we also want to avoid measuring too close to known limits and bounds.
+ We either round or return None, depending on hints from bound loads.
+
+ When rounding away from hard limits, we may end up being
+ too close to an already measured bound.
+ In this case, pick a midpoint between the bound and the limit.
+
+ The last two arguments are just loads (not full measurement results)
+ to allow callers to exclude some loads without measuring them.
+ As a convenience, full results are also supported,
+ so that callers do not need to care about None when extracting load.
+
+ :param load: Intended load candidate, initial or from a load selector.
+ :param width: Relative width goal, considered narrow enough for now.
+ :param clo: Intended load of current relevant lower bound.
+ :param chi: Intended load of current relevant upper bound.
+ :type load: DiscreteLoad
+ :type width: DiscreteWidth
+ :type clo: Optional[DiscreteLoad]
+ :type chi: Optional[DiscreteLoad]
+ :return: Adjusted load to measure at, or None if narrow enough already.
+ :rtype: Optional[DiscreteLoad]
+ :raises RuntimeError: If unsupported corner case is detected.
+ """
+ if not load:
+ raise RuntimeError("Got None load to handle.")
+ load = load.rounded_down()
+ min_load, max_load = self.min_load, self.max_load
+ if clo and not clo.is_round:
+ raise RuntimeError(f"Clo {clo} should have been round.")
+ if chi and not chi.is_round:
+ raise RuntimeError(f"Chi {chi} should have been round.")
+ if not clo and not chi:
+ load = self._handle_load_with_excludes(
+ load, width, min_load, max_load, min_ex=False, max_ex=False
+ )
+ # The "return load" lines are separate from load computation,
+ # so that logging can be added more easily when debugging.
+ return load
+ if chi and not clo:
+ if chi <= min_load:
+ # Expected when hitting the min load.
+ return None
+ if load >= chi:
+                # This can happen when mrr2 forwarding rate is rounded to mrr2.
+ return None
+ load = self._handle_load_with_excludes(
+ load, width, min_load, chi, min_ex=False, max_ex=True
+ )
+ return load
+ if clo and not chi:
+ if clo >= max_load:
+ raise RuntimeError("Lower load expected.")
+ if load <= clo:
+ raise RuntimeError("Higher load expected.")
+ load = self._handle_load_with_excludes(
+ load, width, clo, max_load, min_ex=True, max_ex=False
+ )
+ return load
+ # We have both clo and chi defined.
+ if not clo < load < chi:
+ # Happens when bisect compares with bounded extend.
+ return None
+ load = self._handle_load_with_excludes(
+ load, width, clo, chi, min_ex=True, max_ex=True
+ )
+ return load
+
+ def _handle_load_with_excludes(
+ self,
+ load: DiscreteLoad,
+ width: DiscreteWidth,
+ minimum: DiscreteLoad,
+ maximum: DiscreteLoad,
+ min_ex: bool,
+ max_ex: bool,
+ ) -> Optional[DiscreteLoad]:
+ """Adjust load if too close to limits, respecting exclusions.
+
+ This is a reusable block.
+ Limits may come from previous bounds or from hard load limits.
+        When coming from bounds, rounding to the bound value is not allowed.
+        When coming from hard limits, rounding to the limit value
+        is allowed in general (controlled by the corresponding _ex flag).
+
+ :param load: The candidate intended load before accounting for limits.
+ :param width: Relative width of area around the limits to avoid.
+ :param minimum: The lower limit to round around.
+ :param maximum: The upper limit to round around.
+ :param min_ex: If false, rounding to the minimum is allowed.
+ :param max_ex: If false, rounding to the maximum is allowed.
+ :type load: DiscreteLoad
+ :type width: DiscreteWidth
+ :type minimum: DiscreteLoad
+ :type maximum: DiscreteLoad
+ :type min_ex: bool
+ :type max_ex: bool
+ :returns: Adjusted load value, or None if narrow enough.
+ :rtype: Optional[DiscreteLoad]
+ :raises RuntimeError: If internal inconsistency is detected.
+ """
+ if not minimum <= load <= maximum:
+ raise RuntimeError(
+ "Internal error: load outside limits:"
+ f" load {load} min {minimum} max {maximum}"
+ )
+ max_width = maximum - minimum
+ if width >= max_width:
+ self.debug("Warning: Handling called with wide width.")
+ if not min_ex:
+ self.debug("Minimum not excluded, rounding to it.")
+ return minimum
+ if not max_ex:
+ self.debug("Maximum not excluded, rounding to it.")
+ return maximum
+ self.debug("Both limits excluded, narrow enough.")
+ return None
+ soft_min = minimum + width
+ soft_max = maximum - width
+ if soft_min > soft_max:
+ self.debug("Whole interval is less than two goals.")
+ middle = DiscreteInterval(minimum, maximum).middle(width)
+ soft_min = soft_max = middle
+ if load < soft_min:
+ if min_ex:
+ self.debug("Min excluded, rounding to soft min.")
+ return soft_min
+ self.debug("Min not excluded, rounding to minimum.")
+ return minimum
+ if load > soft_max:
+ if max_ex:
+ self.debug("Max excluded, rounding to soft max.")
+ return soft_max
+ self.debug("Max not excluded, rounding to maximum.")
+ return maximum
+ # Far enough from all limits, no additional adjustment is needed.
+ return load
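
The branching in _handle_load_with_excludes above is easier to follow as a float-only sketch. The snippet below is an editorial illustration (hypothetical names, plain floats instead of DiscreteLoad/DiscreteWidth, no logarithmic arithmetic), not code from this change:

    from typing import Optional

    def clamp_load(
        load: float, width: float, minimum: float, maximum: float,
        min_ex: bool, max_ex: bool,
    ) -> Optional[float]:
        """Return the adjusted load, or None when the interval is narrow enough."""
        if width >= maximum - minimum:
            if not min_ex:
                return minimum  # Rounding to a hard limit is allowed.
            if not max_ex:
                return maximum
            return None  # Both ends are excluded bounds, nothing left to measure.
        soft_min, soft_max = minimum + width, maximum - width
        if soft_min > soft_max:
            # Whole interval is narrower than two goal widths, pick a midpoint.
            soft_min = soft_max = (minimum + maximum) / 2
        if load < soft_min:
            return soft_min if min_ex else minimum
        if load > soft_max:
            return soft_max if max_ex else maximum
        return load  # Far enough from both limits.

    # A candidate too close to an excluded lower bound gets pushed up by one width.
    print(clamp_load(10_100.0, 500.0, 10_000.0, 20_000.0, True, True))  # 10500.0
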
diff --git a/resources/libraries/python/MLRsearch/load_rounding.py b/resources/libraries/python/MLRsearch/load_rounding.py
new file mode 100644
index 0000000000..0ac4487be9
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/load_rounding.py
@@ -0,0 +1,205 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadRounding class."""
+
+import math
+
+from dataclasses import dataclass
+from typing import List, Tuple
+
+from .dataclass import secondary_field
+
+
+@dataclass
+class LoadRounding:
+ """Class encapsulating stateful utilities that round intended load values.
+
+ For MLRsearch algorithm logic to be correct, it is important that
+ interval width expansion and narrowing are exactly reversible,
+ which is not true in general for floating point number arithmetics.
+
+ This class offers conversion to and from an integer quantity.
+ Operations in the integer realm are guaranteed to be reversible,
+ so the only risk is when converting between float and integer realm.
+
+ Which relative width corresponds to the unit integer
+ is computed in initialization from width goals,
+ striking a balance between memory requirements and precision.
+
+    There are two quality knobs. One restricts how far
+    an integer can be from the exact float value.
+    The other restricts how close it can be. The latter makes sure that,
+    even with unpredictable rounding errors during the conversion,
+    the converted integer value is never bigger than the intended float value,
+    so the intervals returned from MLRsearch always
+    meet the relative width goal.
+
+ An instance of this class is mutable only in the sense it contains
+ a growing cache of previously computed values.
+ """
+
+ # TODO: Hide the cache and present as frozen hashable object.
+
+ min_load: float
+ """Minimal intended load [tps] to support, must be positive."""
+ max_load: float
+ """Maximal intended load [tps] to support, must be bigger than min load."""
+ float_goals: Tuple[float]
+ """Relative width goals to approximate, each must be positive
+ and smaller than one. Deduplicated and sorted in post init."""
+ quality_lower: float = 0.99
+ """Minimal multiple of each goal to be achievable."""
+ quality_upper: float = 0.999999
+ """Maximal multiple of each goal to be achievable."""
+ # Primary fields above, computed fields below.
+ max_int_load: int = secondary_field()
+ """Integer for max load (min load int is zero)."""
+ _int2load: List[Tuple[int, float]] = secondary_field()
+ """Known int values (sorted) and their float equivalents."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, perform checks, initialize conversion structures.
+
+ :raises RuntimeError: If a requirement is not met.
+ """
+ self.min_load = float(self.min_load)
+ self.max_load = float(self.max_load)
+ if not 0.0 < self.min_load < self.max_load:
+ raise RuntimeError("Load limits not supported: {self}")
+ self.quality_lower = float(self.quality_lower)
+ self.quality_upper = float(self.quality_upper)
+ if not 0.0 < self.quality_lower < self.quality_upper < 1.0:
+ raise RuntimeError("Qualities not supported: {self}")
+ goals = []
+ for goal in self.float_goals:
+ goal = float(goal)
+ if not 0.0 < goal < 1.0:
+ raise RuntimeError(f"Goal width {goal} is not supported.")
+ goals.append(goal)
+ self.float_goals = tuple(sorted(set(goals)))
+ self.max_int_load = self._find_ints()
+ self._int2load = []
+ self._int2load.append((0, self.min_load))
+ self._int2load.append((self.max_int_load, self.max_load))
+
+ def _find_ints(self) -> int:
+ """Find and return value for max_int_load.
+
+ Separated out of post init, as this is less conversion and checking,
+ and more math and searching.
+
+        A naive implementation would start with 1 and keep increasing by 1
+        until all goals are within quality limits.
+        The actual implementation is smarter with the increment,
+        so it is expected to find the resulting value somewhat faster.
+
+ :returns: Value to be stored as max_int_load.
+ :rtype: int
+ """
+ minmax_log_width = math.log(self.max_load) - math.log(self.min_load)
+ log_goals = [-math.log1p(-goal) for goal in self.float_goals]
+ candidate = 1
+ while 1:
+ log_width_unit = minmax_log_width / candidate
+ # Fallback to increment by one if rounding errors make tries bad.
+ next_tries = [candidate + 1]
+ acceptable = True
+ for log_goal in log_goals:
+ units = log_goal / log_width_unit
+ int_units = math.floor(units)
+ quality = int_units / units
+ if not self.quality_lower <= quality <= self.quality_upper:
+ acceptable = False
+ target = (int_units + 1) / self.quality_upper
+ next_try = (target / units) * candidate
+ next_tries.append(next_try)
+ # Else quality acceptable, not bumping the candidate.
+ if acceptable:
+ return candidate
+ candidate = int(math.ceil(max(next_tries)))
+
+ def int2float(self, int_load: int) -> float:
+ """Convert from int to float tps load. Expand internal table as needed.
+
+ Too low or too high ints result in min or max load respectively.
+
+ :param int_load: Integer quantity to turn back into float load.
+ :type int_load: int
+ :returns: Converted load in tps.
+ :rtype: float
+ :raises RuntimeError: If internal inconsistency is detected.
+ """
+ if int_load <= 0:
+ return self.min_load
+ if int_load >= self.max_int_load:
+ return self.max_load
+ lo_index, hi_index = 0, len(self._int2load)
+ lo_int, hi_int = 0, self.max_int_load
+ lo_load, hi_load = self.min_load, self.max_load
+ while hi_int - lo_int >= 2:
+ mid_index = (hi_index + lo_index + 1) // 2
+ if mid_index >= hi_index:
+ mid_int = (hi_int + lo_int) // 2
+ log_coeff = math.log(hi_load) - math.log(lo_load)
+ log_coeff *= (mid_int - lo_int) / (hi_int - lo_int)
+ mid_load = lo_load * math.exp(log_coeff)
+ self._int2load.insert(mid_index, (mid_int, mid_load))
+ hi_index += 1
+ mid_int, mid_load = self._int2load[mid_index]
+ if mid_int < int_load:
+ lo_index, lo_int, lo_load = mid_index, mid_int, mid_load
+ continue
+ if mid_int > int_load:
+ hi_index, hi_int, hi_load = mid_index, mid_int, mid_load
+ continue
+ return mid_load
+ raise RuntimeError("Bisect in int2float failed.")
+
+ def float2int(self, float_load: float) -> int:
+ """Convert and round from tps load to int. Maybe expand internal table.
+
+ Too low or too high load result in zero or max int respectively.
+
+ Result value is rounded down to an integer.
+
+ :param float_load: Tps quantity to convert into int.
+ :type float_load: float
+ :returns: Converted integer value suitable for halving.
+ :rtype: int
+ """
+ if float_load <= self.min_load:
+ return 0
+ if float_load >= self.max_load:
+ return self.max_int_load
+ lo_index, hi_index = 0, len(self._int2load)
+ lo_int, hi_int = 0, self.max_int_load
+ lo_load, hi_load = self.min_load, self.max_load
+ while hi_int - lo_int >= 2:
+ mid_index = (hi_index + lo_index + 1) // 2
+ if mid_index >= hi_index:
+ mid_int = (hi_int + lo_int) // 2
+ log_coeff = math.log(hi_load) - math.log(lo_load)
+ log_coeff *= (mid_int - lo_int) / (hi_int - lo_int)
+ mid_load = lo_load * math.exp(log_coeff)
+ self._int2load.insert(mid_index, (mid_int, mid_load))
+ hi_index += 1
+ mid_int, mid_load = self._int2load[mid_index]
+ if mid_load < float_load:
+ lo_index, lo_int, lo_load = mid_index, mid_int, mid_load
+ continue
+ if mid_load > float_load:
+ hi_index, hi_int, hi_load = mid_index, mid_int, mid_load
+ continue
+ return mid_int
+ return lo_int
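
A minimal sketch of exercising LoadRounding directly; the import path is abbreviated and the load values and width goal are illustrative:

    from MLRsearch.load_rounding import LoadRounding  # full path omitted here

    rounding = LoadRounding(
        min_load=9001.0, max_load=1.0e7, float_goals=(0.005,)
    )
    int_load = rounding.float2int(2.0e6)  # rounded down to the integer realm
    float_load = rounding.int2float(int_load)
    # The conversion never overshoots the intended float load:
    assert float_load <= 2.0e6
    # Integer operations (e.g. halving during bisection) stay exactly reversible:
    assert rounding.float2int(float_load) == int_load
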
diff --git a/resources/libraries/python/MLRsearch/load_stats.py b/resources/libraries/python/MLRsearch/load_stats.py
new file mode 100644
index 0000000000..5f4757f488
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/load_stats.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadStats class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .target_spec import TargetSpec
+from .target_stat import TargetStat
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+
+
+# The eq=False part is needed to make sure comparison is inherited properly.
+@dataclass(eq=False)
+class LoadStats(DiscreteLoad):
+ """An offered load together with stats for all possible targets.
+
+    As LoadStats is frequently passed instead of a plain DiscreteLoad,
+    equality and ordering are dictated by the float load.
+ """
+
+ target_to_stat: Dict[TargetSpec, TargetStat] = None
+ """Mapping from target specification to its current stat for this load."""
+
+ def __post_init__(self) -> None:
+ """Initialize load value and check there are targets to track."""
+ super().__post_init__()
+ if not self.target_to_stat:
+ raise ValueError(f"No targets: {self.target_to_stat!r}")
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ This works well only for trimmed stats,
+ as only the stat for the first target present is shown.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"fl={self.float_load}"
+ f",s=({next(iter(self.target_to_stat.values()))})"
+ )
+
+ def __hash__(self) -> int:
+ """Raise as stats are mutable by definition.
+
+ :returns: Hash value for this instance if possible.
+ :rtype: int
+ :raises TypeError: Not immutable.
+ """
+ raise TypeError("Loadstats are mutable so constant hash is impossible.")
+
+ def add(self, result: DiscreteResult) -> None:
+ """Take into account one more trial measurement result.
+
+ :param result: The result to take into account.
+ :type result: DiscreteResult
+        :raises RuntimeError: If the result load is not equal to the self load.
+ """
+ if result.intended_load != float(self):
+ raise RuntimeError(
+ f"Attempting to add load {result.intended_load}"
+ f" to result set for {float(self)}"
+ )
+ for stat in self.target_to_stat.values():
+ stat.add(result)
+
+ @staticmethod
+ def new_empty(load: DiscreteLoad, targets: Tuple[TargetSpec]) -> LoadStats:
+ """Factory method to initialize mapping for given targets.
+
+ :param load: The intended load value for the new instance.
+ :param targets: The target specifications to track stats for.
+ :type load: DiscreteLoad
+ :type targets: Tuple[TargetSpec]
+ :returns: New instance with empty stats initialized.
+ :rtype: LoadStats
+        :raises ValueError: If the load is not rounded.
+ """
+ if not load.is_round:
+ raise ValueError(f"Not round: {load!r}")
+ return LoadStats(
+ rounding=load.rounding,
+ int_load=int(load),
+ target_to_stat={target: TargetStat(target) for target in targets},
+ )
+
+ def estimates(self, target: TargetSpec) -> Tuple[bool, bool]:
+ """Classify this load according to given target.
+
+ :param target: According to which target this should be classified.
+ :type target: TargetSpec
+ :returns: Tuple of two estimates whether load can be lower bound.
+ (True, False) means target is not reached yet.
+ :rtype: Tuple[bool, bool]
+ """
+ return self.target_to_stat[target].estimates()
diff --git a/resources/libraries/python/MLRsearch/measurement_database.py b/resources/libraries/python/MLRsearch/measurement_database.py
new file mode 100644
index 0000000000..7a6618c0da
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/measurement_database.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MeasurementDatabase class."""
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+from .load_stats import LoadStats
+from .relevant_bounds import RelevantBounds
+from .target_spec import TargetSpec
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class MeasurementDatabase:
+ """Structure holding measurement results for multiple durations and loads.
+
+ Several utility methods are added, accomplishing tasks useful for MLRsearch.
+
+    While TargetStat can decide when a single load is a lower bound (or upper),
+    it does not deal with loss inversion (a higher load with a smaller loss).
+
+ This class introduces the concept of relevant bounds.
+ Relevant upper bound is simply the lowest load classified as an upper bound.
+ But relevant lower bound is only chosen from lower bound loads
+ strictly smaller than the relevant upper bound.
+    This way any higher loads with good results are ignored,
+    so the relevant bounds give a conservative estimate of SUT true performance.
+ """
+
+ targets: Tuple[TargetSpec] = None
+ """Targets to track stats for."""
+ load_to_stats: Dict[DiscreteLoad, LoadStats] = None
+ """Mapping from loads to stats."""
+
+ def __post_init__(self) -> None:
+ """Check and sort initial values.
+
+ If no stats yet, initialize empty ones.
+
+ :raises ValueError: If there are no targets.
+ """
+ if not self.targets:
+ raise ValueError(f"Database needs targets: {self.targets!r}")
+ if not self.load_to_stats:
+ self.load_to_stats = {}
+ self._sort()
+
+ def _sort(self) -> None:
+ """Sort keys from low to high load."""
+ self.load_to_stats = dict(sorted(self.load_to_stats.items()))
+
+ def __getitem__(self, key: DiscreteLoad) -> LoadStats:
+ """Allow access to stats as if self was load_to_stats.
+
+ This also accepts LoadStats as key, so callers do not need
+ to care about hashability.
+
+ :param key: The load to get stats for.
+ :type key: DiscreteLoad
+ :returns: Stats for the given load.
+        :rtype: LoadStats
+ """
+ return self.load_to_stats[key.hashable()]
+
+ def add(self, result: DiscreteResult) -> None:
+ """Incorporate given trial measurement result.
+
+ :param result: Measurement result to add to the database.
+        :type result: DiscreteResult
+        :raises ValueError: If the result load is not rounded.
+ """
+ discrete_load = result.discrete_load.hashable()
+ if not discrete_load.is_round:
+ raise ValueError(f"Not round load: {discrete_load!r}")
+ if discrete_load not in self.load_to_stats:
+ self.load_to_stats[discrete_load] = LoadStats.new_empty(
+ load=discrete_load,
+ targets=self.targets,
+ )
+ self._sort()
+ self.load_to_stats[discrete_load].add(result)
+
+ def get_relevant_bounds(self, target: TargetSpec) -> RelevantBounds:
+ """Return None or a valid trimmed stat, for the two relevant bounds.
+
+ A load is valid only if both optimistic and pessimistic estimates agree.
+
+ If some value is not available, None is returned instead.
+ The returned stats are trimmed to the argument target.
+
+ The implementation starts from low loads
+ and the search stops at lowest upper bound,
+ thus conforming to the conservative definition of relevant bounds.
+
+ :param target: Target to classify loads when finding bounds.
+ :type target: TargetSpec
+ :returns: Relevant lower bound, relevant upper bound.
+ :rtype: RelevantBounds
+ """
+ lower_bound, upper_bound = None, None
+ for load_stats in self.load_to_stats.values():
+ opt, pes = load_stats.estimates(target)
+ if opt != pes:
+ continue
+ if not opt:
+ upper_bound = load_stats
+ break
+ lower_bound = load_stats
+ if lower_bound:
+ lower_bound = TrimmedStat.for_target(lower_bound, target)
+ if upper_bound:
+ upper_bound = TrimmedStat.for_target(upper_bound, target)
+ return RelevantBounds(clo=lower_bound, chi=upper_bound)
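
The loop in get_relevant_bounds is short but subtle. The following standalone sketch (editorial, with plain tuples instead of LoadStats and TrimmedStat) mirrors its conservative handling of loss inversion: scan loads from low to high, remember the last agreed lower bound, and stop at the first agreed upper bound:

    from typing import List, Optional, Tuple

    def relevant_bounds(
        classified: List[Tuple[float, Optional[bool]]]
    ) -> Tuple[Optional[float], Optional[float]]:
        """Return (clo, chi) from (load, is_lower_bound) pairs sorted by load.

        is_lower_bound is None when optimistic and pessimistic estimates disagree.
        """
        clo, chi = None, None
        for load, is_lower in classified:
            if is_lower is None:
                continue  # Undecided load, does not affect the bounds.
            if not is_lower:
                chi = load  # Lowest upper bound wins, higher loads are ignored.
                break
            clo = load  # Highest lower bound seen below the upper bound.
        return clo, chi

    # Loss inversion: the good result at 9.0 above the upper bound 7.0 is ignored.
    print(relevant_bounds([(5.0, True), (7.0, False), (9.0, True)]))  # (5.0, 7.0)
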
diff --git a/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
new file mode 100644
index 0000000000..b48e2e7547
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
@@ -0,0 +1,314 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MultipleLossRatioSearch class."""
+
+import logging
+import time
+
+from dataclasses import dataclass
+from typing import Callable, Optional, Tuple
+
+from .candidate import Candidate
+from .config import Config
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+from .expander import GlobalWidth
+from .limit_handler import LimitHandler
+from .load_rounding import LoadRounding
+from .measurement_database import MeasurementDatabase
+from .pep3140 import Pep3140Dict
+from .search_goal import SearchGoal
+from .selector import Selector
+from .target_scaling import TargetScaling
+from .trial_measurement import AbstractMeasurer
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class MultipleLossRatioSearch:
+ """Optimized binary search algorithm for finding conditional throughputs.
+
+ Traditional binary search algorithm needs initial interval
+ (lower and upper bound), and returns final narrow bounds
+ (related to its search goal) after bisecting
+ (until some exit condition is met).
+ The exit condition is usually related to the interval width,
+ (upper bound value minus lower bound value).
+
+ The optimized algorithm in this class contains several improvements
+ aimed to reduce overall search time.
+
+ One improvement is searching for bounds for multiple search goals at once.
+ Specifically, the trial measurement results influence bounds for all goals,
+ even though the selection of trial inputs for next measurement
+ focuses only on one goal. The focus can switch between goals frequently.
+
+ Next improvement is that results of trial measurements
+ with small trial duration can be used to find a reasonable starting interval
+ for full trial duration search.
+ This results in more trials performed, but smaller overall duration
+ in general.
+ Internally, such shorter trials come from "preceding targets",
+    handled in the same way as a search goal's "final target".
+ Related improvement is that the "current" interval does not need to be valid
+ (e.g. one of the bounds is missing).
+ In that case, this algorithm will move and expand the interval,
+ in a process called external search. Only when both bounds are found,
+ the interval bisection (called internal search) starts making it narrow.
+
+ Next improvement is bisecting in logarithmic quantities,
+ so that target relative width is independent of measurement units.
+
+ Next improvement is basing the initial interval on forwarding rates
+    of a few initial measurements, starting at max load and using forwarding rates
+ seen so far.
+
+ Next improvement is to allow the use of multiple shorter trials
+    instead of one big trial, allowing a percentage of trials
+ to exceed the loss ratio target.
+ This makes the result more stable in practice.
+ Conservative behavior (single long trial, zero exceed ratio)
+ is still available using corresponding goal definitions.
+
+ Final improvement is exiting early if the minimal load
+ is not a valid lower bound (at final duration)
+ and also exiting if the overall search duration is too long.
+
+ There are also subtle optimizations related to candidate selection
+ and uneven splitting of intervals, too numerous to list here.
+ """
+
+ config: Config
+ """Arguments required at construction time."""
+    # End of fields required at instance creation.
+ measurer: AbstractMeasurer = secondary_field()
+ """Measurer to use, set at calling search()."""
+ debug: Callable[[str], None] = secondary_field()
+ """Object to call for logging, None means logging.debug."""
+ # Fields below are computed from data above
+ rounding: LoadRounding = secondary_field()
+ """Derived from goals. Instance to use for intended load rounding."""
+ from_float: Callable[[float], DiscreteLoad] = secondary_field()
+ """Conversion method from float [tps] intended load values."""
+ limit_handler: LimitHandler = secondary_field()
+ """Load post-processing utility based on config and rounding."""
+ scaling: TargetScaling = secondary_field()
+ """Utility for creating target chains for search goals."""
+ database: MeasurementDatabase = secondary_field()
+ """Storage for (stats of) measurement results so far."""
+ stop_time: float = secondary_field()
+ """Monotonic time value at which the search should end with failure."""
+
+ def search(
+ self,
+ measurer: AbstractMeasurer,
+ debug: Optional[Callable[[str], None]] = None,
+ ) -> Pep3140Dict[SearchGoal, Optional[TrimmedStat]]:
+ """Perform initial trials, create state object, proceed with main loop.
+
+ Stateful arguments (measurer and debug) are stored.
+ Derived objects are constructed from config.
+
+ :param measurer: Measurement provider to use by this search object.
+ :param debug: Callable to optionally use instead of logging.debug().
+        :type measurer: AbstractMeasurer
+        :type debug: Optional[Callable[[str], None]]
+        :returns: Mapping from each search goal to its relevant lower bound
+            stats (conditional throughput), or None if min load is hit.
+ :rtype: Pep3140Dict[SearchGoal, Optional[TrimmedStat]]
+ :raises RuntimeError: If total duration is larger than timeout,
+ or if min load becomes an upper bound for a search goal
+ that has fail fast true.
+ """
+ self.measurer = measurer
+ self.debug = logging.debug if debug is None else debug
+ self.rounding = LoadRounding(
+ min_load=self.config.min_load,
+ max_load=self.config.max_load,
+ float_goals=[goal.relative_width for goal in self.config.goals],
+ )
+ self.from_float = DiscreteLoad.float_conver(rounding=self.rounding)
+ self.limit_handler = LimitHandler(
+ rounding=self.rounding,
+ debug=self.debug,
+ )
+ self.scaling = TargetScaling(
+ goals=self.config.goals,
+ rounding=self.rounding,
+ )
+ self.database = MeasurementDatabase(self.scaling.targets)
+ self.stop_time = time.monotonic() + self.config.search_duration_max
+ result0, result1 = self.run_initial_trials()
+ self.main_loop(result0.discrete_load, result1.discrete_load)
+ ret_dict = Pep3140Dict()
+ for goal in self.config.goals:
+ target = self.scaling.goal_to_final_target[goal]
+ bounds = self.database.get_relevant_bounds(target=target)
+ ret_dict[goal] = bounds.clo
+ return ret_dict
+
+ def measure(self, duration: float, load: DiscreteLoad) -> DiscreteResult:
+ """Call measurer and put the result to appropriate form in database.
+
+ Also check the argument types and load roundness,
+ and return the result to the caller.
+
+ :param duration: Intended duration for the trial measurement.
+        :param load: Intended load for the trial measurement.
+ :type duration: float
+ :type load: DiscreteLoad
+ :returns: The trial results.
+ :rtype: DiscreteResult
+        :raises RuntimeError: If an argument does not have the required type.
+ """
+ if not isinstance(duration, float):
+ raise RuntimeError(f"Duration has to be float: {duration!r}")
+ if not isinstance(load, DiscreteLoad):
+ raise RuntimeError(f"Load has to be discrete: {load!r}")
+ if not load.is_round:
+ raise RuntimeError(f"Told to measure unrounded: {load!r}")
+ self.debug(f"Measuring at d={duration},il={int(load)}")
+ result = self.measurer.measure(
+ intended_duration=duration,
+ intended_load=float(load),
+ )
+ self.debug(f"Measured lr={result.loss_ratio}")
+ result = DiscreteResult.with_load(result=result, load=load)
+ self.database.add(result)
+ return result
+
+ def run_initial_trials(self) -> Tuple[DiscreteResult, DiscreteResult]:
+ """Perform trials to get enough data to start the selectors.
+
+ Measurements are done with all initial targets in mind,
+ based on smallest target loss ratio, largest initial trial duration,
+ and largest initial target width.
+
+ Forwarding rate is used as a hint for next intended load.
+ The relative quantity is used, as load can use different units.
+ When the smallest target loss ratio is non-zero, a correction is needed
+ (forwarding rate is only a good hint for zero loss ratio load).
+ The correction is conservative (all increase in load turns to losses).
+
+ Also, warmup trial (if configured) is performed,
+ all other trials are added to the database.
+
+        This could return the initial width, but from an implementation perspective
+ it is easier to return two measurements (or the same one twice) here
+ and compute width later. The "one value twice" happens when max load
+ has small loss, or when min load has big loss.
+
+ :returns: Two last measured values, in any order. Or one value twice.
+ :rtype: Tuple[DiscreteResult, DiscreteResult]
+ """
+ max_load = self.limit_handler.max_load
+ ratio, duration, width = None, None, None
+ for target in self.scaling.targets:
+ if target.preceding:
+ continue
+ if ratio is None or ratio > target.loss_ratio:
+ ratio = target.loss_ratio
+ if not duration or duration < target.trial_duration:
+ duration = target.trial_duration
+ if not width or width < target.discrete_width:
+ width = target.discrete_width
+ self.debug(f"Init ratio {ratio} duration {duration} width {width}")
+ if self.config.warmup_duration:
+ self.debug("Warmup trial.")
+ self.measure(self.config.warmup_duration, max_load)
+ # Warmup should not affect the real results, reset the database.
+ self.database = MeasurementDatabase(self.scaling.targets)
+ self.debug(f"First trial at max rate: {max_load}")
+ result0 = self.measure(duration, max_load)
+ rfr = result0.relative_forwarding_rate
+ corrected_rfr = (self.from_float(rfr) / (1.0 - ratio)).rounded_down()
+ if corrected_rfr >= max_load:
+ self.debug("Small loss, no other initial trials are needed.")
+ return result0, result0
+ mrr = self.limit_handler.handle(corrected_rfr, width, None, max_load)
+ self.debug(f"Second trial at (corrected) mrr: {mrr}")
+ result1 = self.measure(duration, mrr)
+ # Attempt to get narrower width.
+ result_ratio = result1.loss_ratio
+ if result_ratio > ratio:
+ rfr2 = result1.relative_forwarding_rate
+ crfr2 = (self.from_float(rfr2) / (1.0 - ratio)).rounded_down()
+ mrr2 = self.limit_handler.handle(crfr2, width, None, mrr)
+ else:
+ mrr2 = mrr + width
+ mrr2 = self.limit_handler.handle(mrr2, width, mrr, max_load)
+ if not mrr2:
+ self.debug("Close enough, measuring at mrr2 is not needed.")
+ return result1, result1
+ self.debug(f"Third trial at (corrected) mrr2: {mrr2}")
+ result2 = self.measure(duration, mrr2)
+ return result1, result2
+
+ def main_loop(self, load0: DiscreteLoad, load1: DiscreteLoad) -> None:
+ """Initialize selectors and keep measuring the winning candidate.
+
+ Selectors are created, the two input loads are useful starting points.
+
+ The search ends when no selector nominates any candidate,
+ or if the search takes too long (or if a selector raises).
+
+ Winner is selected according to ordering defined in Candidate class.
+ In case of a tie, selectors for earlier goals are preferred.
+
+ As a selector is only allowed to update current width as the winner,
+ the update is done here explicitly.
+
+ :param load0: Discrete load of one of results from run_initial_trials.
+ :param load1: Discrete load of other of results from run_initial_trials.
+ :type load0: DiscreteLoad
+ :type load1: DiscreteLoad
+ :raises RuntimeError: If the search takes too long,
+            or if min load becomes an upper bound for a fail-fast search goal.
+ """
+ if load1 < load0:
+ load0, load1 = load1, load0
+ global_width = GlobalWidth.from_loads(load0, load1)
+ selectors = []
+ for target in self.scaling.goal_to_final_target.values():
+ selector = Selector(
+ final_target=target,
+ global_width=global_width,
+ initial_lower_load=load0,
+ initial_upper_load=load1,
+ database=self.database,
+ handler=self.limit_handler,
+ debug=self.debug,
+ )
+ selectors.append(selector)
+ while time.monotonic() < self.stop_time:
+ winner = Candidate()
+ for selector in selectors:
+ # Order of arguments is important
+ # when two targets nominate the same candidate.
+ winner = min(Candidate.nomination_from(selector), winner)
+ if not winner:
+ break
+ # We do not check duration versus stop_time here,
+ # as some measurers can be unpredictably faster
+ # than their intended duration suggests.
+ self.measure(duration=winner.duration, load=winner.load)
+ # Delayed updates.
+ if winner.width:
+ global_width.width = winner.width
+ winner.won()
+ else:
+ raise RuntimeError("Optimized search takes too long.")
+ self.debug("Search done.")
diff --git a/resources/libraries/python/MLRsearch/pep3140/__init__.py b/resources/libraries/python/MLRsearch/pep3140/__init__.py
new file mode 100644
index 0000000000..f8e2ffaa8f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/pep3140/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "pep3140".
+"""
+
+# TODO: Move submodules to separate modules.
+# Not obvious how to do that from PyPI point of view
+# without affecting the current CSIT global "resources" package root.
+# Probably it can be done by specifying multiple directories
+# in PYTHONPATH used throughout CSIT.
+
+from .classes import Pep3140Dict
diff --git a/resources/libraries/python/MLRsearch/pep3140/classes.py b/resources/libraries/python/MLRsearch/pep3140/classes.py
new file mode 100644
index 0000000000..9ab6e25c7c
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/pep3140/classes.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining a subclass of dict with an alternative str method."""
+
+
+class Pep3140Dict(dict):
+ """A dict with str support as proposed in PEP 3140.
+
+    Python implements str() acting on dict such that the resulting string
+ shows both keys and values in their repr form.
+ Therefore, str() of a dict gives the same result as repr().
+
+    This class shows both keys and values in their str form instead.
+ """
+
+ def __str__(self) -> str:
+ """Return comma+space separated str of items in curly brackets.
+
+ :returns: PEP 3140 string form of the dict data.
+ :rtype: str
+ """
+ body = ", ".join(f"{key}: {value}" for key, value in self.items())
+ return f"{{{body}}}"
diff --git a/resources/libraries/python/MLRsearch/relevant_bounds.py b/resources/libraries/python/MLRsearch/relevant_bounds.py
new file mode 100644
index 0000000000..4bc6796f71
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/relevant_bounds.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RelevantBounds class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class RelevantBounds:
+ """Container for the pair of relevant bounds for a target.
+
+ If there is no valid bound, None is used.
+
+ Relevant upper bound is smallest load acting as an upper bound.
+ Relevant lower bound acts as a lower bound, has to be strictly smaller
+ than the relevant upper bound, and is largest among such loads.
+
+ The short names "clo" and "chi" are also commonly used
+ in logging and technical comments.
+
+    Trimming could be done here, but it would need to know the target explicitly,
+ so it is done in MeasurementDatabase instead.
+ """
+
+ clo: Optional[TrimmedStat]
+ """The relevant lower bound (trimmed) for the current target."""
+ chi: Optional[TrimmedStat]
+ """The relevant upper bound (trimmed) for the current target."""
+
+ # TODO: Check types in post init?
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ clo = int(self.clo) if self.clo else None
+ chi = int(self.chi) if self.chi else None
+ return f"clo={clo},chi={chi}"
diff --git a/resources/libraries/python/MLRsearch/search_goal.py b/resources/libraries/python/MLRsearch/search_goal.py
new file mode 100644
index 0000000000..7d7fd69841
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/search_goal.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining SearchGoal class."""
+
+from dataclasses import dataclass
+
+
+@dataclass(frozen=True, eq=True)
+class SearchGoal:
+ """This is the part of controller inputs that can be repeated
+ with different values. MLRsearch saves time by searching
+ for conditional throughput for each goal at the same time,
+ compared to repeated calls with separate goals.
+
+ Most fields (called attributes) of this composite
+ are relevant to the definition of conditional throughput.
+    The rest are not, but can affect the overall search time.
+ """
+
+ loss_ratio: float = 0.0
+ """The goal loss ratio.
+ A trial can satisfy the goal only when its trial loss ratio is not higher
+ than this. See MeasurementResult.loss_ratio for details.
+ A trial that does not satisfy this goal is called a bad trial."""
+ exceed_ratio: float = 0.5
+ """What portion of the duration sum can consist of bad trial seconds
+ while still being classified as lower bound (assuming no short trials)."""
+ relative_width: float = 0.005
+ """Target is achieved when the relevant lower bound
+ is no more than this (in units of the tightest upper bound) far
+ from the relevant upper bound."""
+ initial_trial_duration: float = 1.0
+ """Shortest trial duration employed when searching for this goal."""
+ final_trial_duration: float = 1.0
+ """Longest trial duration employed when searching for this goal."""
+ duration_sum: float = 20.0
+ """Minimal sum of durations of relevant trials sufficient to declare a load
+ to be upper or lower bound for this goal."""
+ preceding_targets: int = 2
+ """Number of increasingly coarser search targets to insert,
+ hoping to speed up searching for the final target of this goal."""
+ expansion_coefficient: int = 2
+ """External search multiplies width (in logarithmic space) by this."""
+ fail_fast: bool = True
+ """If true and min load is not an upper bound, raise.
+ If false, search will return None instead of lower bound."""
+
+ def __post_init__(self) -> None:
+ """Convert fields to correct types and call validate."""
+ super().__setattr__("loss_ratio", float(self.loss_ratio))
+ super().__setattr__("exceed_ratio", float(self.exceed_ratio))
+ super().__setattr__("relative_width", float(self.relative_width))
+ super().__setattr__(
+ "final_trial_duration", float(self.final_trial_duration)
+ )
+ super().__setattr__(
+ "initial_trial_duration", float(self.initial_trial_duration)
+ )
+ super().__setattr__("duration_sum", float(self.duration_sum))
+ super().__setattr__("preceding_targets", int(self.preceding_targets))
+ super().__setattr__(
+ "expansion_coefficient", int(self.expansion_coefficient)
+ )
+ super().__setattr__("fail_fast", bool(self.fail_fast))
+ self.validate()
+
+ def validate(self) -> None:
+ """Make sure the initialized values conform to requirements.
+
+ :raises ValueError: If a field value is outside allowed bounds.
+ """
+ if self.loss_ratio < 0.0:
+ raise ValueError(f"Loss ratio cannot be negative: {self}")
+ if self.loss_ratio >= 1.0:
+ raise ValueError(f"Loss ratio must be lower than 1: {self}")
+ if self.exceed_ratio < 0.0:
+ raise ValueError(f"Exceed ratio cannot be negative: {self}")
+ if self.exceed_ratio >= 1.0:
+ raise ValueError(f"Exceed ratio must be lower than 1: {self}")
+ if self.relative_width <= 0.0:
+ raise ValueError(f"Relative width must be positive: {self}")
+ if self.relative_width >= 1.0:
+ raise ValueError(f"Relative width must be less than 1: {self}")
+ if self.initial_trial_duration <= 0.0:
+ raise ValueError(f"Initial trial duration must be positive: {self}")
+ if self.final_trial_duration < self.initial_trial_duration:
+ raise ValueError(
+ f"Single duration max must be at least initial: {self}"
+ )
+ if self.duration_sum < self.final_trial_duration:
+ raise ValueError(
+ "Min duration sum cannot be smaller"
+ f" than final trial duration: {self}"
+ )
+ if self.expansion_coefficient <= 1:
+ raise ValueError(f"Expansion coefficient is too small: {self}")
+ too_small = False
+ if self.preceding_targets < 0:
+ too_small = True
+ elif self.preceding_targets < 1:
+ if self.initial_trial_duration < self.duration_sum:
+ too_small = True
+ if too_small:
+ raise ValueError(
+ f"Number of preceding targets is too small: {self}"
+ )
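
A small sketch of constructing goals, showing that the defaults are usable as-is and that validation rejects inconsistent values (import path abbreviated):

    from MLRsearch.search_goal import SearchGoal  # full path omitted here

    ndr_like = SearchGoal()                  # zero loss ratio, 0.5 exceed ratio
    pdr_like = SearchGoal(loss_ratio=0.005)  # same timing, nonzero loss ratio

    try:
        SearchGoal(duration_sum=0.5)  # smaller than final_trial_duration (1.0)
    except ValueError as error:
        print(error)
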
diff --git a/resources/libraries/python/MLRsearch/search_goal_tuple.py b/resources/libraries/python/MLRsearch/search_goal_tuple.py
new file mode 100644
index 0000000000..d40ba99b4b
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/search_goal_tuple.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining SearchGoalTuple class."""
+
+from collections.abc import Iterator
+from dataclasses import dataclass
+from typing import Tuple
+
+from .search_goal import SearchGoal
+
+
+@dataclass(frozen=True)
+class SearchGoalTuple:
+ """Container class holding multiple search goals.
+
+ Just a convenience for checking their number and types.
+ """
+
+ goals: Tuple[SearchGoal, ...]
+ """Goals extracted from user-provided Iterable of search goals."""
+
+ def __post_init__(self) -> None:
+ """Check type and number of search goals.
+
+ :raises ValueError: If there are no goals.
+ :raises TypeError: If a goal is not a SearchGoal.
+ """
+ super().__setattr__("goals", tuple(self.goals))
+ if not self.goals:
+ raise ValueError(f"Cannot be empty: {self.goals}")
+ for goal in self.goals:
+ if not isinstance(goal, SearchGoal):
+ raise TypeError(f"Must be a SearchGoal instance: {goal}")
+ copied = list(self.goals)
+ deduplicated = set(self.goals)
+ for goal in copied:
+ if goal not in deduplicated:
+ raise ValueError(f"Duplicate goal: {goal}")
+ deduplicated.remove(goal)
+ if deduplicated:
+ raise ValueError(f"Error processing goals: {deduplicated}")
+
+ def __iter__(self) -> Iterator[SearchGoal]:
+ """Enable itertion over goals.
+
+        :returns: Iterator iterating over contained goals.
+ :rtype: Iterator[SearchGoal]
+ """
+ return iter(self.goals)
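
Because SearchGoal is a frozen dataclass compared by value, the deduplication check above rejects two equal goals; a small sketch (import paths abbreviated):

    from MLRsearch.search_goal import SearchGoal
    from MLRsearch.search_goal_tuple import SearchGoalTuple

    goals = SearchGoalTuple(
        (SearchGoal(loss_ratio=0.0), SearchGoal(loss_ratio=0.005))
    )
    for goal in goals:
        print(goal.loss_ratio)

    try:
        SearchGoalTuple((SearchGoal(), SearchGoal()))  # two equal goals
    except ValueError as error:
        print(error)  # Duplicate goal: ...
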
diff --git a/resources/libraries/python/MLRsearch/selector.py b/resources/libraries/python/MLRsearch/selector.py
new file mode 100644
index 0000000000..4a6d2e2574
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/selector.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Selector class."""
+
+
+from dataclasses import dataclass, field
+from typing import Callable, List, Optional, Tuple
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .expander import TargetedExpander
+from .global_width import GlobalWidth
+from .limit_handler import LimitHandler
+from .measurement_database import MeasurementDatabase
+from .relevant_bounds import RelevantBounds
+from .target_spec import TargetSpec
+from .strategy import StrategyBase, STRATEGY_CLASSES
+
+
+@dataclass
+class Selector:
+ """A selector is an abstraction that focuses on only one of search goals.
+
+ While lower-level logic is hidden in strategy classes,
+ the code in this class is responsible for initializing strategies
+ and shifting targets towards the final target.
+
+ While the public methods have the same names and meaning as the ones
+    in strategy classes, their signatures are different.
+ Selector adds the current target trial duration to the output of nominate(),
+ and adds the current bounds to the input of won().
+
+ The nominate method does not return a complete Candidate instance,
+ as we need to avoid circular dependencies
+ (candidate will refer to selector).
+ """
+
+ final_target: TargetSpec
+ """The target this selector is trying to ultimately achieve."""
+ global_width: GlobalWidth
+ """Reference to the global width tracking instance."""
+ initial_lower_load: DiscreteLoad
+ """Smaller of the two loads distinguished at instance creation.
+ During operation, this field is reused to store preceding target bound."""
+ initial_upper_load: DiscreteLoad
+ """Larger of the two loads distinguished at instance creation.
+ During operation, this field is reused to store preceding target bound."""
+ database: MeasurementDatabase = field(repr=False)
+ """Reference to the common database used by all selectors."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the class used to avoid too narrow intervals."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+ # Primary above, derived below.
+ current_target: TargetSpec = secondary_field()
+ """The target the selector is focusing on currently."""
+ target_stack: List[TargetSpec] = secondary_field()
+ """Stack of targets. When current target is achieved, next is popped."""
+ strategies: Tuple[StrategyBase] = secondary_field()
+ """Instances implementing particular selection strategies."""
+ current_strategy: Optional[StrategyBase] = secondary_field()
+ """Reference to strategy used for last nomination, needed for won()."""
+ # Cache.
+ bounds: RelevantBounds = secondary_field()
+ """New relevant bounds for this round of candidate selection."""
+
+ def __post_init__(self) -> None:
+ """Initialize derived values."""
+ self.target_stack = [self.final_target]
+ while preceding_target := self.target_stack[-1].preceding:
+ self.target_stack.append(preceding_target)
+ self.current_target = self.target_stack.pop()
+ self._recreate_strategies()
+
+ def _recreate_strategies(self) -> None:
+ """Recreate strategies after current target has changed.
+
+ Width expander is recreated as target width is now smaller.
+ For convenience, strategies get injectable debug
+ which prints also the current target.
+ """
+ expander = TargetedExpander(
+ target=self.current_target,
+ global_width=self.global_width,
+ initial_lower_load=self.initial_lower_load,
+ initial_upper_load=self.initial_upper_load,
+ handler=self.handler,
+ debug=self.debug,
+ )
+
+ def wrapped_debug(text: str) -> None:
+ """Call self debug with current target info prepended.
+
+ :param text: Message to log at debug level.
+ :type text: str
+ """
+ self.debug(f"Target {self.current_target}: {text}")
+
+ self.strategies = tuple(
+ cls(
+ target=self.current_target,
+ expander=expander,
+ initial_lower_load=self.initial_lower_load,
+ initial_upper_load=self.initial_upper_load,
+ handler=self.handler,
+ debug=wrapped_debug,
+ )
+ for cls in STRATEGY_CLASSES
+ )
+ self.current_strategy = None
+ self.debug(f"Created strategies for: {self.current_target}")
+
+ def _update_bounds(self) -> None:
+ """Before each iteration, call this to update bounds cache."""
+ self.bounds = self.database.get_relevant_bounds(self.current_target)
+
+ def nominate(
+ self,
+ ) -> Tuple[Optional[DiscreteLoad], float, Optional[DiscreteWidth]]:
+ """Find first strategy that wants to nominate, return trial inputs.
+
+ Returned load is None if no strategy wants to nominate.
+
+ Current target is shifted when (now preceding) target is reached.
+ As each strategy never becomes done before at least one
+ bound relevant to the current target becomes available,
+ it is never needed to revert to the preceding target after the shift.
+
+ As the initial trials had inputs relevant to all initial targets,
+ the only way for this not to nominate a load
+ is when the final target is reached (including hitting min or max load).
+ The case of hitting min load raises, so search fails early.
+
+ :returns: Nominated load, duration, and global width to set if winning.
+ :rtype: Tuple[Optional[DiscreteLoad], float, Optional[DiscreteWidth]]
+ :raises RuntimeError: If internal inconsistency is detected,
+ or if min load becomes an upper bound.
+ """
+ self._update_bounds()
+ self.current_strategy = None
+ while 1:
+ for strategy in self.strategies:
+ load, width = strategy.nominate(self.bounds)
+ if load:
+ self.current_strategy = strategy
+ return load, self.current_target.trial_duration, width
+ if not self.bounds.clo and not self.bounds.chi:
+ raise RuntimeError("Internal error: no clo nor chi.")
+ if not self.target_stack:
+ if not self.bounds.clo and self.current_target.fail_fast:
+ raise RuntimeError(f"No lower bound: {self.bounds.chi!r}")
+ self.debug(f"Goal {self.current_target} reached: {self.bounds}")
+ return None, self.current_target.trial_duration, None
+ # Everything is ready for next target in the chain.
+ self.current_target = self.target_stack.pop()
+ # Debug logs look better if we forget bounds are TrimmedStat.
+ # Abuse rounding (if not None) to convert to pure DiscreteLoad.
+ clo, chi = self.bounds.clo, self.bounds.chi
+ self.initial_lower_load = clo.rounded_down() if clo else clo
+ self.initial_upper_load = chi.rounded_down() if chi else chi
+ self._update_bounds()
+ self._recreate_strategies()
+
+ def won(self, load: DiscreteLoad) -> None:
+ """Update any private info when candidate became a winner.
+
+ :param load: The load previously nominated by current strategy.
+ :type load: DiscreteLoad
+ """
+ self._update_bounds()
+ self.current_strategy.won(bounds=self.bounds, load=load)
diff --git a/resources/libraries/python/MLRsearch/strategy/__init__.py b/resources/libraries/python/MLRsearch/strategy/__init__.py
new file mode 100644
index 0000000000..a1e0225a17
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "strategy".
+"""
+
+from .base import StrategyBase
+from .bisect import BisectStrategy
+from .extend_hi import ExtendHiStrategy
+from .extend_lo import ExtendLoStrategy
+from .halve import HalveStrategy
+from .refine_hi import RefineHiStrategy
+from .refine_lo import RefineLoStrategy
+
+
+STRATEGY_CLASSES = (
+ HalveStrategy,
+ RefineLoStrategy,
+ RefineHiStrategy,
+ ExtendLoStrategy,
+ ExtendHiStrategy,
+ BisectStrategy,
+)
+"""Tuple of strategy constructors, in order of priority decreasing."""
diff --git a/resources/libraries/python/MLRsearch/strategy/base.py b/resources/libraries/python/MLRsearch/strategy/base.py
new file mode 100644
index 0000000000..0724f882bf
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/base.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining StrategyBase class."""
+
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import Callable, Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..expander import TargetedExpander
+from ..limit_handler import LimitHandler
+from ..relevant_bounds import RelevantBounds
+from ..target_spec import TargetSpec
+
+
+@dataclass
+class StrategyBase(ABC):
+ """Abstract class encompassing data common to most strategies.
+
+ A strategy is one piece of logic a selector may use
+ when nominating a candidate according to its current target.
+
+ The two initial bound arguments may not be bounds at all.
+ For initial targets, the two values are usually mrr and mrr2.
+ For subsequent targets, the initial values are usually
+ the relevant bounds of the preceding target,
+ but one of them may be None if hitting min or max load.
+
+ The initial values are mainly used as stable alternatives
+ to relevant bounds of preceding target,
+ because those bounds may have been unpredictably altered
+ by nominations from unrelated search goals.
+ This greatly simplifies reasoning about strategies making progress.
+ """
+
+ target: TargetSpec
+ """The target this strategy is focusing on."""
+ expander: TargetedExpander
+ """Instance to track width expansion during search (if applicable)."""
+ initial_lower_load: Optional[DiscreteLoad]
+ """Smaller of the two loads distinguished at instance creation.
+ Can be None if upper bound is the min load."""
+ initial_upper_load: Optional[DiscreteLoad]
+ """Larger of the two loads distinguished at instance creation.
+ Can be None if lower bound is the max load."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the limit handler instance."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+
+ @abstractmethod
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate a load candidate if the conditions activate this strategy.
+
+ A complete candidate refers also to the nominating selector.
+ To prevent circular dependence (selector refers to nominating strategy),
+ this function returns only the load and width.
+
+ Width should only be non-None if global current width should be updated
+ when the candidate based on it becomes the winner.
+ But currently all strategies return non-None width
+ if they return non-None load.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ return None, None
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Notify the strategy its candidate became the winner.
+
+ Most strategies have no use for this information,
+ but some strategies may need to update their private information.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ return
+
+ def not_worth(self, bounds: RelevantBounds, load: DiscreteLoad) -> bool:
+ """A check on bounds common for multiple strategies.
+
+ The load is worth measuring only if it can create or improve
+ either relevant bound.
+
+ Each strategy is designed to create a relevant bound for current target,
+ which is only needed if that (or better) bound does not exist yet.
+ Conversely, if a strategy does not nominate, it is because
+ the load it would nominate (if any) is found not worth by this method.
+
+ :param bounds: Current relevant bounds.
+ :param load: Load of a possible candidate.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ :returns: True if the load should NOT be nominated.
+ :rtype: bool
+ """
+ if bounds.clo and bounds.clo >= load:
+ return True
+ if bounds.chi and bounds.chi <= load:
+ return True
+ if bounds.clo and bounds.chi:
+ # We are not hitting min nor max load.
+ # Measuring at this load will create or improve clo or chi.
+ # The only reason not to nominate is if interval is narrow already.
+ wig = DiscreteInterval(
+ lower_bound=bounds.clo,
+ upper_bound=bounds.chi,
+ ).width_in_goals(self.target.discrete_width)
+ if wig <= 1.0:
+ return True
+ return False
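
The checks above reduce to three comparisons once loads and widths are plain numbers. A float-only sketch of the same logic (width_in_goals is approximated by a simple ratio here; the real code works on discrete widths):

from typing import Optional

def not_worth(
    clo: Optional[float], chi: Optional[float], load: float, goal_width: float
) -> bool:
    """Simplified float version of StrategyBase.not_worth.

    True means the load cannot create or improve a relevant bound:
    it is not above the current lower bound, not below the current
    upper bound, or the interval is already narrow enough.
    """
    if clo is not None and clo >= load:
        return True
    if chi is not None and chi <= load:
        return True
    if clo is not None and chi is not None:
        # Approximation of width_in_goals: plain ratio of widths.
        if (chi - clo) / goal_width <= 1.0:
            return True
    return False

print(not_worth(clo=1.0e6, chi=2.0e6, load=1.5e6, goal_width=0.4e6))  # False
print(not_worth(clo=1.0e6, chi=1.3e6, load=1.2e6, goal_width=0.4e6))  # True
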
diff --git a/resources/libraries/python/MLRsearch/strategy/bisect.py b/resources/libraries/python/MLRsearch/strategy/bisect.py
new file mode 100644
index 0000000000..894544695e
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/bisect.py
@@ -0,0 +1,193 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining BisectStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class BisectStrategy(StrategyBase):
+ """Strategy to use when both bounds relevant to curent target are present.
+
+ Primarily, this strategy is there to perform internal search.
+ As powers of two are friendly to binary search,
+ this strategy relies on the splitting logic described in DiscreteInterval.
+
+ The main reason why this class is so long is that a mere existence
+ of a valid bound for the current target does not imply
+ that bound is a good approximation of the final conditional throughput.
+ The bound might become valid due to efforts of a strategy
+ focusing on an entirely different search goal.
+
+ On the other hand, initial bounds may be better approximations,
+ but they also may be bad approximations (for example
+ when SUT behavior strongly depends on trial duration).
+
+ Based on comparing the existing current bounds to the initial bounds,
+ this strategy also mimics what an external search would do
+ (if one current bound was missing and the other initial bound was current).
+ If the resulting load value is closer to the appropriate initial bound
+ than the simple bisect between the current bounds is,
+ that load is nominated.
+
+ It turns out those "conditional" external search nominations
+ are quite different from unconditional ones,
+ at least when it comes to handling limits
+ and tracking when width expansion should be applied.
+ That is why that logic is here
+ and not in some generic external search class.
+ """
+
+ expand_on_clo: bool = False
+ """If extending up, width should be expanded when load becomes clo."""
+ expand_on_chi: bool = False
+ """If extending down, width should be expanded when load becomes chi."""
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate a load candidate between bounds or extending from them.
+
+ The external search logic is offloaded into private methods.
+ If they return a truthy load, that is returned from here as well.
+
+ Only if the actual bisect is selected,
+ the per-selector expander is limited to the (smaller) new width.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not bounds.clo or bounds.clo >= self.handler.max_load:
+ return None, None
+ if not bounds.chi or bounds.chi <= self.handler.min_load:
+ return None, None
+ interval = DiscreteInterval(bounds.clo, bounds.chi)
+ if interval.width_in_goals(self.target.discrete_width) <= 1.0:
+ return None, None
+ bisect_load = interval.middle(self.target.discrete_width)
+ load, width = self._extend_lo(bounds, bisect_load)
+ if load:
+ self.expand_on_clo, self.expand_on_chi = False, True
+ self.debug(f"Preferring to extend down: {load}")
+ return load, width
+ load, width = self._extend_hi(bounds, bisect_load)
+ if load:
+ self.expand_on_clo, self.expand_on_chi = True, False
+ self.debug(f"Preferring to extend up: {load}")
+ return load, width
+ load = bisect_load
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.expand_on_clo, self.expand_on_chi = False, False
+ self.debug(f"Preferring to bisect: {load}")
+ width_lo = DiscreteInterval(bounds.clo, load).discrete_width
+ width_hi = DiscreteInterval(load, bounds.chi).discrete_width
+ width = min(width_lo, width_hi)
+ self.expander.limit(width)
+ return load, width
+
+ def _extend_lo(
+ self, bounds: RelevantBounds, bisect_load: DiscreteLoad
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Compute load as if extending down, return it if preferred.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param bisect_load: Load when bisection is preferred.
+ :type bounds: RelevantBounds
+ :type bisect_load: DiscreteLoad
+ :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ # TODO: Simplify all the conditions or explain them better.
+ if not self.initial_upper_load:
+ return None, None
+ if bisect_load >= self.initial_upper_load:
+ return None, None
+ width = self.expander.get_width()
+ load = bounds.chi - width
+ load = self.handler.handle(
+ load=load,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if not load:
+ return None, None
+ if load <= bisect_load:
+ return None, None
+ if load >= self.initial_upper_load:
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ raise RuntimeError(f"Load not worth: {load}")
+ return load, width
+
+ def _extend_hi(
+ self, bounds: RelevantBounds, bisect_load: DiscreteLoad
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Compute load as if extending up, return it if preferred.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param bisect_load: Load when bisection is preferred.
+ :type bounds: RelevantBounds
+ :type bisect_load: DiscreteLoad
+ :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ # TODO: Simplify all the conditions or explain them better.
+ if not self.initial_lower_load:
+ return None, None
+ if bisect_load <= self.initial_lower_load:
+ return None, None
+ width = self.expander.get_width()
+ load = bounds.clo + width
+ load = self.handler.handle(
+ load=load,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if not load:
+ return None, None
+ if load >= bisect_load:
+ return None, None
+ if load <= self.initial_lower_load:
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ raise RuntimeError(f"Load not worth: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width when appropriate.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if self.expand_on_clo and load == bounds.clo:
+ self.expander.expand()
+ elif self.expand_on_chi and load == bounds.chi:
+ self.expander.expand()
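
A simplified float sketch of the preference order implemented above (illustrative only; limit handling, load rounding and the not_worth checks of the real code are omitted):

from typing import Optional, Tuple

def choose_load(
    clo: float, chi: float, width: float,
    initial_lower: Optional[float], initial_upper: Optional[float],
) -> Tuple[float, str]:
    """Pick the next load the way BisectStrategy prefers, simplified.

    Prefer a width-sized step down from chi (or up from clo) when that step
    lands strictly between the bisect point and the matching initial bound;
    otherwise fall back to plain bisection.
    """
    bisect = (clo + chi) / 2.0
    down = chi - width
    if initial_upper is not None and bisect < down < initial_upper:
        return down, "extend down"
    up = clo + width
    if initial_lower is not None and initial_lower < up < bisect:
        return up, "extend up"
    return bisect, "bisect"

# The current upper bound sits far above the initial one, so a width-sized
# step down from chi is preferred over bisecting the whole interval.
print(choose_load(clo=1.0e6, chi=4.0e6, width=0.5e6,
                  initial_lower=None, initial_upper=3.6e6))
# -> (3500000.0, 'extend down')
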
diff --git a/resources/libraries/python/MLRsearch/strategy/extend_hi.py b/resources/libraries/python/MLRsearch/strategy/extend_hi.py
new file mode 100644
index 0000000000..79c4ad7cf2
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/extend_hi.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining ExtendHiStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class ExtendHiStrategy(StrategyBase):
+ """This strategy is applied when there is no relevant upper bound.
+
+ Typically this is needed after RefineHiStrategy turned initial upper bound
+ into a current relevant lower bound.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate current relevant lower bound plus expander width.
+
+ This performs external search in upwards direction,
+ until a valid upper bound for the current target is found,
+ or until max load is hit.
+ Limit handling is used to avoid nominating too close
+ (or above) the max rate.
+
+ Width expansion is only applied if the candidate becomes a lower bound,
+ so that is detected in the won method.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if bounds.chi or not bounds.clo or bounds.clo >= self.handler.max_load:
+ return None, None
+ width = self.expander.get_width()
+ load = self.handler.handle(
+ load=bounds.clo + width,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"No chi, extending up: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width if the load became the new lower bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if load == bounds.clo:
+ self.expander.expand()
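
Together with the expander, this amounts to a classic external search: step up from the lower bound, and grow the step each time the nominated load again turns out to be a lower bound. A standalone sketch under those assumptions (the is_lower_bound callback and the 3.2e6 tps threshold are made up for illustration):

from typing import Callable

def extend_up(
    clo: float,
    start_width: float,
    expansion_coefficient: float,
    is_lower_bound: Callable[[float], bool],
    max_load: float,
) -> float:
    """Keep stepping up until a load fails, i.e. an upper bound appears.

    Returns the first load that is not a lower bound (the new upper bound),
    or max_load if even that load passes.
    """
    width = start_width
    load = clo
    while True:
        load = min(load + width, max_load)
        if not is_lower_bound(load):
            return load          # Upper bound found, bisection can take over.
        if load >= max_load:
            return max_load      # Hit the ceiling, no upper bound exists.
        width *= expansion_coefficient   # Mirrors the won() width expansion.

# Simulated SUT that starts losing packets above 3.2e6 tps.
print(extend_up(clo=1.0e6, start_width=0.2e6, expansion_coefficient=2.0,
                is_lower_bound=lambda load: load <= 3.2e6, max_load=10.0e6))
# -> 4000000.0
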
diff --git a/resources/libraries/python/MLRsearch/strategy/extend_lo.py b/resources/libraries/python/MLRsearch/strategy/extend_lo.py
new file mode 100644
index 0000000000..68d20b6a6a
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/extend_lo.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining ExtendLoStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class ExtendLoStrategy(StrategyBase):
+ """This strategy is applied when there is no relevant lower bound.
+
+ Typically this is needed after RefineLoStrategy turned initial lower bound
+ into a current relevant upper bound.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate current relevant upper bound minus expander width.
+
+ This performs external search in downwards direction,
+ until a valid lower bound for the current target is found,
+ or until min load is hit.
+ Limit handling is used to avoid nominating too close
+ (or below) the min rate.
+
+ Width expansion is only applied if the candidate becomes an upper bound,
+ so that is detected in the won method.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if bounds.clo or not bounds.chi or bounds.chi <= self.handler.min_load:
+ return None, None
+ width = self.expander.get_width()
+ load = self.handler.handle(
+ load=bounds.chi - width,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"No clo, extending down: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width if the load became new upper bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if load == bounds.chi:
+ self.expander.expand()
diff --git a/resources/libraries/python/MLRsearch/strategy/halve.py b/resources/libraries/python/MLRsearch/strategy/halve.py
new file mode 100644
index 0000000000..3188a041c6
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/halve.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining HalveStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class HalveStrategy(StrategyBase):
+ """First strategy to apply for a new current target.
+
+ Pick a load between initial lower bound and initial upper bound,
+ nominate it if it is (still) worth it.
+
+ In a sense, this can be viewed as an extension of preceding target's
+ bisect strategy. But as the current target may require a different
+ trial duration, it is better to do it for the new target.
+
+ Alternatively, this is a way to save one application
+ of a subsequent refine strategy, thus reducing the risk of triggering
+ an external search (a slight time saver for highly unstable SUTs).
+ Either way, a minor time saving is achieved by the preceding target
+ only needing to reach double the current target width.
+
+ If the distance between initial bounds is already at or below
+ current target width, the middle point is not nominated.
+ The reasoning is that in this case external search is likely
+ to get triggered by the subsequent refine strategies,
+ so attaining a relevant bound here is not as likely to help.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the middle between initial lower and upper bound.
+
+ The returned width is the target width, even if initial bounds
+ happened to be closer together.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not self.initial_lower_load or not self.initial_upper_load:
+ return None, None
+ interval = DiscreteInterval(
+ lower_bound=self.initial_lower_load,
+ upper_bound=self.initial_upper_load,
+ )
+ wig = interval.width_in_goals(self.target.discrete_width)
+ if wig > 2.0:
+ # Can happen for initial target.
+ return None, None
+ if wig <= 1.0:
+ # Already was narrow enough, refinements shall be sufficient.
+ return None, None
+ load = interval.middle(self.target.discrete_width)
+ if self.not_worth(bounds, load):
+ return None, None
+ self.debug(f"Halving available: {load}")
+ # TODO: Report possibly smaller width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
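
The gating condition is just the ratio of the initial interval width to the current target width. A float approximation (the real width_in_goals works on the discrete width scale):

def halving_applies(lower: float, upper: float, target_width: float) -> bool:
    """Nominate the midpoint only when the initial interval is between
    one and two target widths wide (float approximation)."""
    wig = (upper - lower) / target_width
    return 1.0 < wig <= 2.0

print(halving_applies(2.0e6, 2.6e6, 0.4e6))   # True -> nominate 2.3e6
print(halving_applies(2.0e6, 2.3e6, 0.4e6))   # False, already narrow enough
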
diff --git a/resources/libraries/python/MLRsearch/strategy/refine_hi.py b/resources/libraries/python/MLRsearch/strategy/refine_hi.py
new file mode 100644
index 0000000000..caa8fc4a7d
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/refine_hi.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RefineHiStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class RefineHiStrategy(StrategyBase):
+ """If initial upper bound is still worth it, nominate it.
+
+ This usually happens when halving resulted in relevant lower bound,
+ or if there was no halving (and RefineLoStrategy confirmed initial
+ lower bound became a relevant lower bound for the new current target).
+
+ This either ensures a matching upper bound (target is achieved)
+ or moves the relevant lower bound higher (triggering external search).
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the initial upper bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not (load := self.initial_upper_load):
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"Upperbound refinement available: {load}")
+ # TODO: Limit to possibly smaller than target width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
diff --git a/resources/libraries/python/MLRsearch/strategy/refine_lo.py b/resources/libraries/python/MLRsearch/strategy/refine_lo.py
new file mode 100644
index 0000000000..7927798505
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/refine_lo.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RefineLoStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class RefineLoStrategy(StrategyBase):
+ """If initial lower bound is still worth it, nominate it.
+
+ This usually happens when halving resulted in relevant upper bound,
+ or if there was no halving.
+ This ensures a relevant bound (upper or lower) for the current target
+ exists.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the initial lower bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not (load := self.initial_lower_load):
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"Lowerbound refinement available: {load}")
+ # TODO: Limit to possibly smaller than target width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
diff --git a/resources/libraries/python/MLRsearch/target_scaling.py b/resources/libraries/python/MLRsearch/target_scaling.py
new file mode 100644
index 0000000000..25114c311c
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_scaling.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetScaling class."""
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .dataclass import secondary_field
+from .discrete_width import DiscreteWidth
+from .load_rounding import LoadRounding
+from .search_goal import SearchGoal
+from .search_goal_tuple import SearchGoalTuple
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TargetScaling:
+ """Encapsulate targets derived from goals.
+
+ No default values for primaries, the constructor call has to specify everything.
+ """
+
+ goals: SearchGoalTuple
+ """Set of goals to generate targets for."""
+ rounding: LoadRounding
+ """Rounding instance to use (targets have discrete width)."""
+ # Derived quantities.
+ targets: Tuple[TargetSpec] = secondary_field()
+ """The generated targets, linked into chains."""
+ goal_to_final_target: Dict[SearchGoal, TargetSpec] = secondary_field()
+ """Mapping from a goal to its corresponding final target."""
+
+ def __post_init__(self) -> None:
+ """For each goal create final, and non-final targets and link them."""
+ linked_targets = []
+ self.goal_to_final_target = {}
+ for goal in self.goals:
+ standalone_targets = []
+ # Final target.
+ width = DiscreteWidth(
+ rounding=self.rounding,
+ float_width=goal.relative_width,
+ ).rounded_down()
+ duration_sum = goal.duration_sum
+ target = TargetSpec(
+ loss_ratio=goal.loss_ratio,
+ exceed_ratio=goal.exceed_ratio,
+ discrete_width=width,
+ trial_duration=goal.final_trial_duration,
+ duration_sum=duration_sum,
+ expansion_coefficient=goal.expansion_coefficient,
+ fail_fast=goal.fail_fast,
+ preceding=None,
+ )
+ standalone_targets.append(target)
+ # Non-final targets.
+ preceding_targets = goal.preceding_targets
+ multiplier = (
+ pow(
+ goal.initial_trial_duration / duration_sum,
+ 1.0 / preceding_targets,
+ )
+ if preceding_targets
+ else 1.0
+ )
+ for count in range(preceding_targets):
+ preceding_sum = duration_sum * pow(multiplier, count + 1)
+ if count + 1 >= preceding_targets:
+ preceding_sum = goal.initial_trial_duration
+ trial_duration = min(goal.final_trial_duration, preceding_sum)
+ width *= 2
+ target = TargetSpec(
+ loss_ratio=goal.loss_ratio,
+ exceed_ratio=goal.exceed_ratio,
+ discrete_width=width,
+ trial_duration=trial_duration,
+ duration_sum=preceding_sum,
+ expansion_coefficient=goal.expansion_coefficient,
+ fail_fast=False,
+ preceding=None,
+ )
+ standalone_targets.append(target)
+ # Link preceding targets.
+ preceding_target = None
+ for target in reversed(standalone_targets):
+ linked_target = target.with_preceding(preceding_target)
+ linked_targets.append(linked_target)
+ preceding_target = linked_target
+ # Associate final target to the goal.
+ self.goal_to_final_target[goal] = linked_targets[-1]
+ # Store all targets as a tuple.
+ self.targets = tuple(linked_targets)
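
The geometric scaling is easier to follow with concrete numbers. A standalone sketch assuming a goal with duration_sum=21.0 s, initial_trial_duration=1.0 s, final_trial_duration=1.0 s and preceding_targets=2 (values picked for illustration only):

final_trial_duration = 1.0
initial_trial_duration = 1.0
duration_sum = 21.0
preceding_targets = 2

multiplier = (initial_trial_duration / duration_sum) ** (1.0 / preceding_targets)
width_in_goals = 1          # final target width, doubles per preceding target
print("final:", duration_sum, final_trial_duration, width_in_goals)
for count in range(preceding_targets):
    preceding_sum = duration_sum * multiplier ** (count + 1)
    if count + 1 >= preceding_targets:
        preceding_sum = initial_trial_duration
    trial_duration = min(final_trial_duration, preceding_sum)
    width_in_goals *= 2
    print("preceding", count + 1, ":", round(preceding_sum, 2),
          trial_duration, width_in_goals)
# final:       21.0 s duration sum, 1.0 s trials, 1x goal width
# preceding 1: ~4.58 s duration sum, 1.0 s trials, 2x goal width
# preceding 2:  1.0 s duration sum, 1.0 s trials, 4x goal width
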
diff --git a/resources/libraries/python/MLRsearch/target_spec.py b/resources/libraries/python/MLRsearch/target_spec.py
new file mode 100644
index 0000000000..5279ba00a1
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_spec.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetSpec class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Optional
+
+from .discrete_width import DiscreteWidth
+
+
+@dataclass(frozen=True, eq=True)
+class TargetSpec:
+ """Composite object holding attributes specifying one search target.
+
+ Abstractly, this has several similar meanings.
+ With the discrete_width attribute this specifies when a selector is done.
+ With the expansion_coefficient attribute it tells the selector how quickly
+ it should expand the interval in external search.
+ With the "preceding" attribute it helps the selector, which does not need
+ to track the preceding target separately from its current target.
+ Without those three attributes this object is still sufficient
+ for LoadStats to classify loads as lower bound, upper bound, or unknown.
+ """
+
+ loss_ratio: float
+ """Target loss ratio. Equal and directly analogous to goal loss ratio,
+ but applicable also for non-final targets."""
+ exceed_ratio: float
+ """Target exceed ratio. Equal and directly analogous to goal exceed ratio,
+ but applicable also for non-final targets."""
+ discrete_width: DiscreteWidth
+ """Target relative width. Analogous to goal relative width,
+ but coarser for non-final targets."""
+ trial_duration: float
+ """Duration to use for trials for this target. Shorter trials have lesser
+ (and more complicated) impact when determining upper and lower bounds."""
+ duration_sum: float
+ """Sum of trial durations sufficient to classify a load
+ as an upper or lower bound.
+ For non-final targets, this is shorter than goal duration_sum."""
+ expansion_coefficient: int = field(repr=False)
+ """Equal and directly analogous to goal expansion coefficient,
+ but applicable also for non-final targets."""
+ fail_fast: bool = field(repr=False)
+ """Copied from goal. If true and min load is not an upper bound, raise."""
+ preceding: Optional[TargetSpec] = field(repr=False)
+ """Reference to next coarser target (if any) belonging to the same goal."""
+
+ # No conversions or validations, as this is an internal structure.
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"lr={self.loss_ratio},er={self.exceed_ratio}"
+ f",ds={self.duration_sum}"
+ )
+
+ def with_preceding(self, preceding: Optional[TargetSpec]) -> TargetSpec:
+ """Create an equivalent instance but with different preceding field.
+
+ This is useful in initialization. Create semi-initialized targets
+ starting from the final one, then add references in reversed order.
+
+ :param preceding: New value for the preceding field, may be None.
+ :type preceding: Optional[TargetSpec]
+ :returns: Instance with the new value applied.
+ :rtype: TargetSpec
+ """
+ return TargetSpec(
+ loss_ratio=self.loss_ratio,
+ exceed_ratio=self.exceed_ratio,
+ discrete_width=self.discrete_width,
+ trial_duration=self.trial_duration,
+ duration_sum=self.duration_sum,
+ expansion_coefficient=self.expansion_coefficient,
+ fail_fast=self.fail_fast,
+ preceding=preceding,
+ )
diff --git a/resources/libraries/python/MLRsearch/target_stat.py b/resources/libraries/python/MLRsearch/target_stat.py
new file mode 100644
index 0000000000..9d30d51b9c
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_stat.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadStat class."""
+
+from dataclasses import dataclass, field
+from typing import Tuple
+
+from .target_spec import TargetSpec
+from .discrete_result import DiscreteResult
+
+
+@dataclass
+class TargetStat:
+ """Class for aggregating trial results for a single load and target.
+
+ Reference to the target is included for convenience.
+
+ The main usage is for load classification, done in estimates method.
+ If both estimates agree, the load is classified as either a lower bound
+ or an upper bound. For additional logic for dealing with loss inversion
+ see MeasurementDatabase.
+
+ Besides the duration sums needed for determining upper and lower bound,
+ a field useful for computing the conditional throughput is also included.
+ The conditional throughput is average of the (relative) forwarding rates
+ of good long trials weighted by good long trial durations.
+ As the intended load is stored elsewhere, the one additional field here
+ has a peculiar unit: it is a sum of products of seconds and loss ratios.
+ """
+
+ target: TargetSpec = field(repr=False)
+ """The target for which this instance is aggregating results."""
+ good_long: float = 0.0
+ """Sum of durations of long enough trials satisfying target loss ratio."""
+ bad_long: float = 0.0
+ """Sum of durations of long trials not satisfying target loss ratio."""
+ good_short: float = 0.0
+ """Sum of durations of shorter trials satisfying target loss ratio."""
+ bad_short: float = 0.0
+ """Sum of durations of shorter trials not satisfying target loss ratio."""
+ dur_rat_sum: float = 0.0
+ """Sum over good long trials, of duration multiplied by loss ratio."""
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"gl={self.good_long},bl={self.bad_long}"
+ f",gs={self.good_short},bs={self.bad_short}"
+ )
+
+ def add(self, result: DiscreteResult) -> None:
+ """Take into account one more trial result.
+
+ Use intended duration for deciding between long and short trials,
+ but use the duration with overheads to increase the duration sums.
+
+ :param result: The trial result to add to the stats.
+ :type result: DiscreteResult
+ """
+ dwo = result.duration_with_overheads
+ if result.intended_duration >= self.target.trial_duration:
+ if result.loss_ratio > self.target.loss_ratio:
+ self.bad_long += dwo
+ else:
+ self.good_long += dwo
+ self.dur_rat_sum += dwo * result.loss_ratio
+ else:
+ if result.loss_ratio > self.target.loss_ratio:
+ self.bad_short += dwo
+ else:
+ self.good_short += dwo
+
+ def estimates(self) -> Tuple[bool, bool]:
+ """Return whether this load can become a lower bound.
+
+ This returns two estimates, hence the weird nonverb name of this method.
+ One estimate assumes all following results will satisfy the loss ratio,
+ the other assumes none of the following results will satisfy it.
+ The sum of durations of the assumed results
+ is the minimum to reach target duration sum, or zero if already reached.
+
+ If both estimates are the same, it means the load is a definite bound.
+ This may happen even when the sum of durations of already
+ measured trials is less than the target, when the missing measurements
+ cannot change the classification.
+
+ :returns: Tuple of two estimates whether the load can be a lower bound.
+ (True, False) means more trial results are needed.
+ :rtype: Tuple[bool, bool]
+ """
+ coeff = self.target.exceed_ratio
+ decrease = self.good_short * coeff / (1.0 - coeff)
+ short_excess = self.bad_short - decrease
+ effective_excess = self.bad_long + max(0.0, short_excess)
+ effective_dursum = max(
+ self.good_long + effective_excess,
+ self.target.duration_sum,
+ )
+ limit_dursum = effective_dursum * self.target.exceed_ratio
+ optimistic = effective_excess <= limit_dursum
+ pessimistic = (effective_dursum - self.good_long) <= limit_dursum
+ return optimistic, pessimistic
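
The classification arithmetic is easiest to see with numbers. A float re-statement of the formula above, assuming an illustrative target with exceed_ratio=0.5 and duration_sum=20.0 s:

def estimates(good_long, bad_long, good_short, bad_short,
              exceed_ratio, duration_sum):
    """Float re-statement of TargetStat.estimates for numeric examples."""
    coeff = exceed_ratio
    decrease = good_short * coeff / (1.0 - coeff)
    short_excess = bad_short - decrease
    effective_excess = bad_long + max(0.0, short_excess)
    effective_dursum = max(good_long + effective_excess, duration_sum)
    limit_dursum = effective_dursum * exceed_ratio
    optimistic = effective_excess <= limit_dursum
    pessimistic = (effective_dursum - good_long) <= limit_dursum
    return optimistic, pessimistic

# 8 s good + 4 s bad long trials so far, 20 s target duration sum:
print(estimates(8.0, 4.0, 0.0, 0.0, 0.5, 20.0))   # (True, False): keep measuring
# 12 s good + 4 s bad: even all-bad remaining trials keep it a lower bound:
print(estimates(12.0, 4.0, 0.0, 0.0, 0.5, 20.0))  # (True, True): lower bound
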
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/__init__.py b/resources/libraries/python/MLRsearch/trial_measurement/__init__.py
new file mode 100644
index 0000000000..034ae41819
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "trial_measurement".
+"""
+
+from .abstract_measurer import AbstractMeasurer
+from .measurement_result import MeasurementResult
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py b/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py
new file mode 100644
index 0000000000..6fab79c8dc
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining AbstractMeasurer class."""
+
+from abc import ABCMeta, abstractmethod
+
+from .measurement_result import MeasurementResult as Result
+
+
+class AbstractMeasurer(metaclass=ABCMeta):
+ """Abstract class defining common API for trial measurement providers.
+
+ The original use of this class was in the realm of
+ RFC 2544 Throughput search, which explains the terminology
+ related to networks, frames, packets, offered load, forwarding rate
+ and similar.
+
+ But the same logic can be used in higher level networking scenarios
+ (e.g. https requests) or even outside networking (database transactions).
+
+ The current code uses language from packet forwarding,
+ docstrings sometimes mention transactions as an alternative view.
+ """
+
+ @abstractmethod
+ def measure(self, intended_duration: float, intended_load: float) -> Result:
+ """Perform trial measurement and return the result.
+
+ It is assumed the measurer got already configured with anything else
+ needed to perform the measurement (e.g. traffic profile
+ or transaction limit).
+
+ Duration and load are the only values expected to vary
+ during the search.
+
+ :param intended_duration: Intended trial duration [s].
+ :param intended_load: Intended rate of transactions (packets) [tps].
+ It is a per-port rate, e.g. uni-directional for SUTs
+ with two ports.
+ :type intended_duration: float
+ :type intended_load: float
+ :returns: Structure detailing the result of the measurement.
+ :rtype: measurement_result.MeasurementResult
+ """
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py b/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py
new file mode 100644
index 0000000000..9dc1ccf5f1
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MeasurementResult class."""
+
+from dataclasses import dataclass
+
+
+@dataclass
+class MeasurementResult:
+ """Structure defining the result of a single trial measurement.
+
+ There are a few primary (required) quantities. Various secondary (derived)
+ quantities are calculated and can be queried.
+
+ The constructor allows broader argument types,
+ the post init function converts to the stricter types.
+
+ Integer quantities (counts) are preferred, as float values
+ can suffer from rounding errors, and sometimes they are measured
+ at unknown (possibly very limited) precision and accuracy.
+
+ There are relations between the counts (e.g. offered count
+ should be equal to a sum of forwarding count and loss count).
+ This implementation does not perform consistency checks, but uses them
+ for computing quantities the caller left unspecified.
+
+ In some cases, the units of intended load are different from units
+ of loss count (e.g. load in transactions but loss in packets).
+ Quantities with relative_ prefix can be used to get load candidates
+ from forwarding results.
+
+ Sometimes, the measurement provider is unable to reach the intended load,
+ and it can react by spending longer than intended duration
+ to reach its intended count. To signal irregular situations like this,
+ several optional fields can be given, and various secondary quantities
+ are populated, so the measurement consumer can query the quantity
+ it wants to rely on in these irregular situations.
+
+ The current implementation intentionally limits the secondary quantities
+ to the few that proved useful in practice.
+ """
+
+ # Required primary quantities.
+ intended_duration: float
+ """Intended trial measurement duration [s]."""
+ intended_load: float
+ """Intended load [tps]. If bidirectional (or multi-port) traffic is used,
+ most users will put unidirectional (single-port) value here,
+ as bandwidth and pps limits are usually per-port."""
+ # Two of the next three primary quantities are required.
+ offered_count: int = None
+ """Number of packets actually transmitted (transactions attempted).
+ This should be the aggregate (bidirectional, multi-port) value,
+ so that asymmetric traffic profiles are supported."""
+ loss_count: int = None
+ """Number of packets transmitted but not received (transactions failed)."""
+ forwarding_count: int = None
+ """Number of packets successfully forwarded (transactions succeeded)."""
+ # Optional primary quantities.
+ offered_duration: float = None
+ """Estimate of the time [s] the trial was actually transmitting traffic."""
+ duration_with_overheads: float = None
+ """Estimate of the time [s] it took to get the trial result
+ since the measurement started."""
+ intended_count: int = None
+ """Expected number of packets to transmit. If not known,
+ the value of offered_count is used."""
+
+ def __post_init__(self) -> None:
+ """Convert types, compute missing values.
+
+ Current caveats:
+ A failing assumption looks like a conversion error.
+ Negative counts are allowed, which can lead to errors later.
+ """
+ self.intended_duration = float(self.intended_duration)
+ if self.offered_duration is None:
+ self.offered_duration = self.intended_duration
+ else:
+ self.offered_duration = float(self.offered_duration)
+ if self.duration_with_overheads is None:
+ self.duration_with_overheads = self.offered_duration
+ else:
+ self.duration_with_overheads = float(self.duration_with_overheads)
+ self.intended_load = float(self.intended_load)
+ if self.forwarding_count is None:
+ self.forwarding_count = int(self.offered_count) - int(
+ self.loss_count
+ )
+ else:
+ self.forwarding_count = int(self.forwarding_count)
+ if self.offered_count is None:
+ self.offered_count = self.forwarding_count + int(self.loss_count)
+ else:
+ self.offered_count = int(self.offered_count)
+ if self.loss_count is None:
+ self.loss_count = self.offered_count - self.forwarding_count
+ else:
+ self.loss_count = int(self.loss_count)
+ if self.intended_count is None:
+ self.intended_count = self.offered_count
+ else:
+ self.intended_count = int(self.intended_count)
+ # TODO: Handle (somehow) situations where offered > intended?
+
+ @property
+ def unsent_count(self) -> int:
+ """How many packets were not transmitted (transactions not started).
+
+ :return: Intended count minus offered count.
+ :rtype: int
+ """
+ return self.intended_count - self.offered_count
+
+ @property
+ def loss_ratio(self) -> float:
+ """Bad count divided by overall count, zero if the latter is zero.
+
+ The bad count includes not only loss count, but also unsent count.
+ If unsent count is negative, its absolute value is used.
+ The overall count is intended count or offered count,
+ whichever is bigger.
+
+ Together, the resulting formula tends to increase loss ratio
+ (but not above 100%) in irregular situations,
+ thus guiding search algorithms towards lower loads
+ where there should be less irregularities.
+ The zero default is there to prevent search algorithms from
+ getting stuck on a too low intended load.
+
+ :returns: Bad count divided by overall count.
+ :rtype: float
+ """
+ overall = max(self.offered_count, self.intended_count)
+ bad = abs(self.loss_count) + abs(self.unsent_count)
+ return bad / overall if overall else 0.0
+
+ @property
+ def relative_forwarding_rate(self) -> float:
+ """Forwarding rate in load units as if duration and load was intended.
+
+ The result is based purely on intended load and loss ratio.
+ While the resulting value may be far from what really happened,
+ it has nice behavior with respect to common assumptions
+ of search algorithms.
+
+ :returns: Forwarding rate in load units estimated from loss ratio.
+ :rtype: float
+ """
+ return self.intended_load * (1.0 - self.loss_ratio)
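
The irregular-trial handling is easiest to see on a transaction-limited example (the import path is assumed; the numbers are illustrative):

from MLRsearch import MeasurementResult  # assumed import path

result = MeasurementResult(
    intended_duration=1.0,
    intended_load=2.0e6,          # tps
    offered_count=1_800_000,      # attempted
    loss_count=100_000,           # attempted but failed
    intended_count=2_000_000,     # 200_000 were never attempted
)
print(result.forwarding_count)          # 1700000, derived in __post_init__
print(result.unsent_count)              # 200000
print(result.loss_ratio)                # (100000 + 200000) / 2000000 = 0.15
print(result.relative_forwarding_rate)  # 2.0e6 * (1 - 0.15) = 1700000.0
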
diff --git a/resources/libraries/python/MLRsearch/trimmed_stat.py b/resources/libraries/python/MLRsearch/trimmed_stat.py
new file mode 100644
index 0000000000..0076644aa3
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trimmed_stat.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TrimmedStat class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .discrete_load import DiscreteLoad
+from .load_stats import LoadStats
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TrimmedStat(LoadStats):
+ """Load stats trimmed to a single target.
+
+ Useful mainly for reporting the overall results.
+ """
+
+ def __post_init__(self) -> None:
+ """Initialize load value and check there is one target to track."""
+ super().__post_init__()
+ if len(self.target_to_stat) != 1:
+ raise ValueError(f"No single target: {self.target_to_stat!r}")
+
+ @staticmethod
+ def for_target(stats: LoadStats, target: TargetSpec) -> TrimmedStat:
+ """Return new instance with only one target in the mapping.
+
+ :param stats: The load stats instance to trim.
+ :param target: The one target which should remain in the mapping.
+ :type stats: LoadStats
+ :type target: TargetSpec
+ :return: Newly created instance.
+ :rtype: TrimmedStat
+ """
+ return TrimmedStat(
+ rounding=stats.rounding,
+ int_load=stats.int_load,
+ target_to_stat={target: stats.target_to_stat[target]},
+ )
+
+ @property
+ def conditional_throughput(self) -> Optional[DiscreteLoad]:
+ """Compute conditional throughput from the load.
+
+ Target stat has dur_rat_sum and good_long.
+ The code here adds the intended load and handles the case where min load is hit.
+ If min load is not a lower bound, None is returned.
+
+ :return: Conditional throughput assuming self is a relevant lower bound.
+ :rtype: Optional[DiscreteLoad]
+ :raises RuntimeError: If target is unclear or load is spurious.
+ """
+ target = list(self.target_to_stat.keys())[0]
+ _, pes = self.estimates(target)
+ if not pes:
+ if int(self):
+ raise RuntimeError(f"Not a lower bound: {self}")
+ return None
+ # TODO: Verify self is really the clo?
+ stat = self.target_to_stat[target]
+ loss_ratio = stat.dur_rat_sum / stat.good_long
+ ret = self * (1.0 - loss_ratio)
+ return ret
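
The conditional throughput itself is the intended load scaled by the duration-weighted average loss ratio of the good long trials. A numeric sketch with illustrative values:

# Good long trials at a 2.0e6 tps lower bound: 10 s at 0.1% loss, 20 s at 0.4% loss.
good_long = 10.0 + 20.0
dur_rat_sum = 10.0 * 0.001 + 20.0 * 0.004
avg_loss_ratio = dur_rat_sum / good_long          # 0.003
conditional_throughput = 2.0e6 * (1.0 - avg_loss_ratio)
print(conditional_throughput)                     # 1994000.0
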
diff --git a/resources/libraries/python/PLRsearch/PLRsearch.py b/resources/libraries/python/PLRsearch/PLRsearch.py
index 0314a80efb..7599a9e64d 100644
--- a/resources/libraries/python/PLRsearch/PLRsearch.py
+++ b/resources/libraries/python/PLRsearch/PLRsearch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -195,8 +195,8 @@ class PLRsearch:
# exponential impact. Make it configurable, or is 4:3 good enough?
if measurement.loss_ratio >= self.packet_loss_ratio_target:
for _ in range(4 * zeros):
- lossy_loads.append(measurement.target_tr)
- if measurement.loss_count > 0:
+ lossy_loads.append(measurement.intended_load)
+ if measurement.loss_ratio > 0.0:
zeros = 0
lossy_loads.sort()
if stop_time <= time.time():
@@ -205,7 +205,7 @@ class PLRsearch:
if (trial_number - self.trial_number_offset) <= 1:
next_load = max_rate
elif (trial_number - self.trial_number_offset) <= 3:
- next_load = (measurement.relative_receive_rate / (
+ next_load = (measurement.relative_forwarding_rate / (
1.0 - self.packet_loss_ratio_target))
else:
next_load = (avg1 + avg2) / 2.0
@@ -441,7 +441,7 @@ class PLRsearch:
Instead, the expected average loss is scaled according to the number
of packets actually sent.
- TODO: Copy ReceiveRateMeasurement from MLRsearch.
+ TODO: Copy MeasurementResult from MLRsearch.
:param trace: A multiprocessing-friendly logging function (closure).
:param lfit_func: Fitting function, typically lfit_spread or lfit_erf.
@@ -450,7 +450,7 @@ class PLRsearch:
:param spread: The spread parameter for the fitting function.
:type trace: function (str, object) -> None
:type lfit_func: Function from 3 floats to float.
- :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
+ :type trial_result_list: list of MLRsearch.MeasurementResult
:type mrr: float
:type spread: float
:returns: Logarithm of result weight for given function and parameters.
@@ -460,17 +460,17 @@ class PLRsearch:
trace(u"log_weight for mrr", mrr)
trace(u"spread", spread)
for result in trial_result_list:
- trace(u"for tr", result.target_tr)
+ trace(u"for tr", result.intended_load)
trace(u"lc", result.loss_count)
- trace(u"d", result.duration)
- # _rel_ values use units of target_tr (transactions per second).
+ trace(u"d", result.intended_duration)
+ # _rel_ values use units of intended_load (transactions per second).
log_avg_rel_loss_per_second = lfit_func(
- trace, result.target_tr, mrr, spread
+ trace, result.intended_load, mrr, spread
)
# _abs_ values use units of loss count (maybe packets).
# There can be multiple packets per transaction.
log_avg_abs_loss_per_trial = log_avg_rel_loss_per_second + math.log(
- result.transmit_count / result.target_tr
+ result.offered_count / result.intended_load
)
# Geometric probability computation for logarithms.
log_trial_likelihood = log_plus(0.0, -log_avg_abs_loss_per_trial)
@@ -524,7 +524,7 @@ class PLRsearch:
:param max_samples: Limit for integrator samples, for debugging.
:type trial_duration: float
:type transmit_rate: float
- :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
+ :type trial_result_list: list of MLRsearch.MeasurementResult
:type min_rate: float
:type max_rate: float
:type focus_trackers: 2-tuple of None or stat_trackers.VectorStatTracker
@@ -705,7 +705,7 @@ class PLRsearch:
:param measurement: The trial measurement obtained during computation.
:param stretch_result: Computation output for stretch fitting function.
:param erf_result: Computation output for erf fitting function.
- :type measurement: ReceiveRateMeasurement
+ :type measurement: MeasurementResult
:type stretch_result: _PartialResult
:type erf_result: _PartialResult
:returns: Combined results.
@@ -754,7 +754,7 @@ _ComputeResult = namedtuple(
:param stretch_exp_avg: Stretch fitting function estimate average exponentiated.
:param erf_exp_avg: Erf fitting function estimate average, exponentiated.
:param trackers: Pair of focus trackers to start next iteration with.
-:type measurement: ReceiveRateMeasurement
+:type measurement: MeasurementResult
:type avg: float
:type stdev: float
:type stretch_exp_avg: float
diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py
index e036448f77..7d6fdead64 100644
--- a/resources/libraries/python/TrafficGenerator.py
+++ b/resources/libraries/python/TrafficGenerator.py
@@ -16,14 +16,17 @@
import math
import time
+from typing import Callable, List, Optional, Union
+
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from .Constants import Constants
from .DropRateSearch import DropRateSearch
-from .MLRsearch.AbstractMeasurer import AbstractMeasurer
-from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
-from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
+from .MLRsearch import (
+ AbstractMeasurer, Config, MeasurementResult,
+ MultipleLossRatioSearch, SearchGoal, TrimmedStat,
+)
from .PLRsearch.PLRsearch import PLRsearch
from .OptionString import OptionString
from .ssh import exec_cmd_no_error, exec_cmd
@@ -513,7 +516,7 @@ class TrafficGenerator(AbstractMeasurer):
"""Stop all traffic on TG.
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
:raises ValueError: If TG traffic profile is not supported.
"""
subtype = check_subtype(self._node)
@@ -533,7 +536,7 @@ class TrafficGenerator(AbstractMeasurer):
"""Compute duration for profile driver.
The final result is influenced by transaction scale and duration limit.
- It is assumed a higher level function has already set those to self.
+ It is assumed a higher level function has already set those on self.
The duration argument is the target value from search point of view,
before the overrides are applied here.
@@ -853,7 +856,7 @@ class TrafficGenerator(AbstractMeasurer):
:type state_timeout: float
:type ramp_up_only: bool
:returns: TG results.
- :rtype: ReceiveRateMeasurement or None
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
self.set_rate_provider_defaults(
@@ -900,7 +903,7 @@ class TrafficGenerator(AbstractMeasurer):
:type rate: float
:type async_call: bool
:returns: TG results.
- :rtype: ReceiveRateMeasurement or None
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
subtype = check_subtype(self._node)
@@ -951,7 +954,7 @@ class TrafficGenerator(AbstractMeasurer):
:type async_call: bool
:type ramp_up_only: bool
:returns: TG results.
- :rtype: ReceiveRateMeasurement or None
+ :rtype: MeasurementResult or None
:raises ValueError: If TG traffic profile is not supported.
"""
complete = False
@@ -1006,7 +1009,7 @@ class TrafficGenerator(AbstractMeasurer):
trial_end = time.monotonic()
if self.ramp_up_rate:
# Optimization: No loss acts as a good ramp-up, if it was complete.
- if complete and result is not None and result.loss_count == 0:
+ if complete and result is not None and result.loss_ratio == 0.0:
logger.debug(u"Good trial acts as a ramp-up")
self.ramp_up_start = trial_start
self.ramp_up_stop = trial_end
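The ramp-up shortcut above now keys off the derived loss_ratio instead of the raw loss_count. A hedged sketch of how such a property can follow from the counters this commit stores; this is an assumption about the real MeasurementResult, which may normalize empty trials differently:

    from dataclasses import dataclass

    @dataclass
    class _ResultSketch:
        # Hypothetical stand-in, not the MLRsearch MeasurementResult.
        offered_count: int
        loss_count: int

        @property
        def loss_ratio(self) -> float:
            # Fraction of offered transactions that were lost; an empty trial
            # counts as fully lossy so it never passes the zero-loss check above.
            return self.loss_count / self.offered_count if self.offered_count else 1.0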
@@ -1178,19 +1181,20 @@ class TrafficGenerator(AbstractMeasurer):
int(self._result.get(u"server_tcp_rx_bytes", 0))
def _get_measurement_result(self):
- """Return the result of last measurement as ReceiveRateMeasurement.
+ """Return the result of last measurement as MeasurementResult.
Separate function, as measurements can end either by time
or by explicit call, this is the common block at the end.
- The target_tr field of ReceiveRateMeasurement is in
+ The intended_load field of MeasurementResult is in
transactions per second. Transmit count and loss count units
depend on the transaction type. Usually they are in transactions
per second, or aggregated packets per second.
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
"""
+ duration_with_overheads = time.monotonic() - self._start_time
try:
# Client duration seems to include a setup period
# where TRex does not send any packets yet.
@@ -1230,7 +1234,7 @@ class TrafficGenerator(AbstractMeasurer):
expected_attempt_count = max(expected_attempt_count, self._sent)
unsent = expected_attempt_count - self._sent
pass_count = self._received
- fail_count = expected_attempt_count - pass_count
+ loss_count = self._loss
elif self.transaction_type == u"udp_cps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit udp_cps.")
@@ -1239,7 +1243,7 @@ class TrafficGenerator(AbstractMeasurer):
expected_attempt_count = self.transaction_scale
unsent = expected_attempt_count - partial_attempt_count
pass_count = self._l7_data[u"client"][u"received"]
- fail_count = expected_attempt_count - pass_count
+ loss_count = partial_attempt_count - pass_count
elif self.transaction_type == u"tcp_cps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit tcp_cps.")
@@ -1252,14 +1256,14 @@ class TrafficGenerator(AbstractMeasurer):
# but we are testing NAT session so client/connects counts that
# (half connections from TCP point of view).
pass_count = self._l7_data[u"client"][u"tcp"][u"connects"]
- fail_count = expected_attempt_count - pass_count
+ loss_count = partial_attempt_count - pass_count
elif self.transaction_type == u"udp_pps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit udp_pps.")
partial_attempt_count = self._sent
expected_attempt_count = self.transaction_scale * self.ppta
unsent = expected_attempt_count - self._sent
- fail_count = self._loss + unsent
+ loss_count = self._loss
elif self.transaction_type == u"tcp_pps":
if not self.transaction_scale:
raise RuntimeError(u"Add support for no-limit tcp_pps.")
@@ -1273,29 +1277,30 @@ class TrafficGenerator(AbstractMeasurer):
# Probability of retransmissions exactly cancelling
# packets unsent due to duration stretching is quite low.
unsent = abs(expected_attempt_count - self._sent)
- fail_count = self._loss + unsent
+ loss_count = self._loss
else:
raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
if unsent and isinstance(self._approximated_duration, float):
# Do not report unsent for "manual".
logger.debug(f"Unsent packets/transactions: {unsent}")
- if fail_count < 0 and not self.negative_loss:
- fail_count = 0
- measurement = ReceiveRateMeasurement(
- duration=target_duration,
- target_tr=transmit_rate,
- transmit_count=expected_attempt_count,
- loss_count=fail_count,
- approximated_duration=approximated_duration,
- partial_transmit_count=partial_attempt_count,
+ if loss_count < 0 and not self.negative_loss:
+ loss_count = 0
+ measurement = MeasurementResult(
+ intended_duration=target_duration,
+ intended_load=transmit_rate,
+ offered_count=partial_attempt_count,
+ loss_count=loss_count,
+ offered_duration=approximated_duration,
+ duration_with_overheads=duration_with_overheads,
+ intended_count=expected_attempt_count,
)
measurement.latency = self.get_latency_int()
return measurement
- def measure(self, duration, transmit_rate):
+ def measure(self, intended_duration, intended_load):
"""Run trial measurement, parse and return results.
- The input rate is for transactions. Stateles bidirectional traffic
+ The intended load is for transactions. Stateless bidirectional traffic
is understood as sequence of (asynchronous) transactions,
two packets each.
@@ -1303,33 +1308,32 @@ class TrafficGenerator(AbstractMeasurer):
the count either transactions or packets (aggregated over directions).
Optionally, this method sleeps if measurement finished before
- the time specified as duration.
+ the time specified as intended_duration (PLRsearch needs time for math).
- :param duration: Trial duration [s].
- :param transmit_rate: Target rate in transactions per second.
- :type duration: float
- :type transmit_rate: float
+ :param intended_duration: Trial duration [s].
+ :param intended_load: Target rate in transactions per second.
+ :type intended_duration: float
+ :type intended_load: float
:returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement
+ :rtype: MeasurementResult
:raises RuntimeError: If TG is not set or if node is not TG
or if subtype is not specified.
:raises NotImplementedError: If TG is not supported.
"""
- duration = float(duration)
+ intended_duration = float(intended_duration)
time_start = time.monotonic()
- time_stop = time_start + duration
+ time_stop = time_start + intended_duration
if self.resetter:
self.resetter()
result = self._send_traffic_on_tg_with_ramp_up(
- duration=duration,
- rate=transmit_rate,
+ duration=intended_duration,
+ rate=intended_load,
async_call=False,
)
logger.debug(f"trial measurement result: {result!r}")
# In PLRsearch, computation needs the specified time to complete.
if self.sleep_till_duration:
- sleeptime = time_stop - time.monotonic()
- if sleeptime > 0.0:
+ while (sleeptime := time_stop - time.monotonic()) > 0.0:
time.sleep(sleeptime)
return result
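For orientation, the renamed measure() above is the entry point MLRsearch drives through the AbstractMeasurer interface now imported at the top of this file. A toy measurer under the same contract, assuming the flat import path of the published package (in-tree the module is resources.libraries.python.MLRsearch) and that the optional MeasurementResult fields shown in the earlier hunk have defaults:

    from MLRsearch import AbstractMeasurer, MeasurementResult

    class NullMeasurer(AbstractMeasurer):
        # Toy measurer: pretends every offered transaction was forwarded.

        def measure(
            self, intended_duration: float, intended_load: float
        ) -> MeasurementResult:
            offered_count = int(intended_duration * intended_load)
            return MeasurementResult(
                intended_duration=intended_duration,
                intended_load=intended_load,
                offered_count=offered_count,
                loss_count=0,
            )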
@@ -1399,7 +1403,7 @@ class TrafficGenerator(AbstractMeasurer):
self.frame_size = frame_size
self.traffic_profile = str(traffic_profile)
self.resetter = resetter
- self.ppta = ppta
+ self.ppta = int(ppta)
self.traffic_directions = int(traffic_directions)
self.transaction_duration = float(transaction_duration)
self.transaction_scale = int(transaction_scale)
@@ -1421,29 +1425,30 @@ class OptimizedSearch:
"""
@staticmethod
- def perform_optimized_ndrpdr_search(
- frame_size,
- traffic_profile,
- minimum_transmit_rate,
- maximum_transmit_rate,
- packet_loss_ratio=0.005,
- final_relative_width=0.005,
- final_trial_duration=30.0,
- initial_trial_duration=1.0,
- number_of_intermediate_phases=2,
- timeout=1200.0,
- ppta=1,
- resetter=None,
- traffic_directions=2,
- transaction_duration=0.0,
- transaction_scale=0,
- transaction_type=u"packet",
- use_latency=False,
- ramp_up_rate=None,
- ramp_up_duration=None,
- state_timeout=240.0,
- expansion_coefficient=4.0,
- ):
+ def perform_mlr_search(
+ frame_size: Union[int, str],
+ traffic_profile: str,
+ min_load: float,
+ max_load: float,
+ loss_ratio: float = 0.005,
+ relative_width: float = 0.005,
+ initial_trial_duration: float = 1.0,
+ final_trial_duration: float = 1.0,
+ duration_sum: float = 20.0,
+ expansion_coefficient: int = 2,
+ preceding_targets: int = 2,
+ search_duration_max: float = 1200.0,
+ ppta: int = 1,
+ resetter: Optional[Callable[[], None]] = None,
+ traffic_directions: int = 2,
+ transaction_duration: float = 0.0,
+ transaction_scale: int = 0,
+ transaction_type: str = "packet",
+ use_latency: bool = False,
+ ramp_up_rate: float = 0.0,
+ ramp_up_duration: float = 0.0,
+ state_timeout: float = 240.0,
+ ) -> List[TrimmedStat]:
"""Setup initialized TG, perform optimized search, return intervals.
If transaction_scale is nonzero, all init and non-init trial durations
@@ -1455,18 +1460,20 @@ class OptimizedSearch:
:param frame_size: Frame size identifier or value [B].
:param traffic_profile: Module name as a traffic profile identifier.
See GPL/traffic_profiles/trex for implemented modules.
- :param minimum_transmit_rate: Minimal load in transactions per second.
- :param maximum_transmit_rate: Maximal load in transactions per second.
- :param packet_loss_ratio: Ratio of packets lost, for PDR [1].
- :param final_relative_width: Final lower bound transmit rate
+ :param min_load: Minimal load in transactions per second.
+ :param max_load: Maximal load in transactions per second.
+ :param loss_ratio: Ratio of packets lost, for PDR [1].
+ :param relative_width: Final lower bound intended load
cannot be more distant that this multiple of upper bound [1].
- :param final_trial_duration: Trial duration for the final phase [s].
:param initial_trial_duration: Trial duration for the initial phase
and also for the first intermediate phase [s].
- :param number_of_intermediate_phases: Number of intermediate phases
+ :param final_trial_duration: Trial duration for the final phase [s].
+ :param duration_sum: Max sum of trial durations for deciding [s].
+ :param expansion_coefficient: In external search, multiply width by this.
+ :param preceding_targets: Number of intermediate phases
to perform before the final phase [1].
- :param timeout: The search will fail itself when not finished
- before this overall time [s].
+ :param search_duration_max: The search will fail itself
+ when not finished before this overall time [s].
:param ppta: Packets per transaction, aggregated over directions.
Needed for udp_pps which does not have a good transaction counter,
so we need to compute expected number of packets.
@@ -1485,17 +1492,18 @@ class OptimizedSearch:
:param ramp_up_rate: Rate to use in ramp-up trials [pps].
:param ramp_up_duration: Duration of ramp-up trials [s].
:param state_timeout: Time of life of DUT state [s].
- :param expansion_coefficient: In external search multiply width by this.
:type frame_size: str or int
:type traffic_profile: str
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
- :type packet_loss_ratio: float
- :type final_relative_width: float
- :type final_trial_duration: float
+ :type min_load: float
+ :type max_load: float
+ :type loss_ratio: float
+ :type relative_width: float
:type initial_trial_duration: float
- :type number_of_intermediate_phases: int
- :type timeout: float
+ :type final_trial_duration: float
+ :type duration_sum: float
+ :type expansion_coefficient: int
+ :type preceding_targets: int
+ :type search_duration_max: float
:type ppta: int
:type resetter: Optional[Callable[[], None]]
:type traffic_directions: int
@@ -1506,11 +1514,11 @@ class OptimizedSearch:
:type ramp_up_rate: float
:type ramp_up_duration: float
:type state_timeout: float
- :type expansion_coefficient: float
:returns: Structure containing narrowed down NDR and PDR intervals
and their measurements.
- :rtype: List[Receiverateinterval]
- :raises RuntimeError: If total duration is larger than timeout.
+ :rtype: List[TrimmedStat]
+ :raises RuntimeError: If search duration exceeds search_duration_max
+ or if min load becomes an upper bound for any search goal.
"""
# we need instance of TrafficGenerator instantiated by Robot Framework
# to be able to use trex_stl-*()
@@ -1521,8 +1529,9 @@ class OptimizedSearch:
if transaction_scale:
initial_trial_duration = 1.0
final_trial_duration = 1.0
- number_of_intermediate_phases = 0
- timeout += transaction_scale * 3e-4
+ preceding_targets = 1
+ # TODO: Move the value to Constants.py?
+ search_duration_max += transaction_scale * 3e-4
tg_instance.set_rate_provider_defaults(
frame_size=frame_size,
traffic_profile=traffic_profile,
@@ -1538,34 +1547,43 @@ class OptimizedSearch:
ramp_up_duration=ramp_up_duration,
state_timeout=state_timeout,
)
- algorithm = MultipleLossRatioSearch(
- measurer=tg_instance,
- final_trial_duration=final_trial_duration,
- final_relative_width=final_relative_width,
- number_of_intermediate_phases=number_of_intermediate_phases,
- initial_trial_duration=initial_trial_duration,
- timeout=timeout,
- debug=logger.debug,
- expansion_coefficient=expansion_coefficient,
- )
- if packet_loss_ratio:
- packet_loss_ratios = [0.0, packet_loss_ratio]
+ if loss_ratio:
+ loss_ratios = [0.0, loss_ratio]
+ exceed_ratio = 0.5
else:
# Happens in reconf tests.
- packet_loss_ratios = [packet_loss_ratio]
- results = algorithm.narrow_down_intervals(
- min_rate=minimum_transmit_rate,
- max_rate=maximum_transmit_rate,
- packet_loss_ratios=packet_loss_ratios,
- )
- return results
+ loss_ratios = [0.0]
+ exceed_ratio = 0.0
+ goals = [
+ SearchGoal(
+ loss_ratio=loss_ratio,
+ exceed_ratio=exceed_ratio,
+ relative_width=relative_width,
+ initial_trial_duration=initial_trial_duration,
+ final_trial_duration=final_trial_duration,
+ duration_sum=duration_sum,
+ preceding_targets=preceding_targets,
+ expansion_coefficient=expansion_coefficient,
+ fail_fast=True,
+ )
+ for loss_ratio in loss_ratios
+ ]
+ config = Config()
+ config.goals = goals
+ config.min_load = min_load
+ config.max_load = max_load
+ config.search_duration_max = search_duration_max
+ config.warmup_duration = 1.0
+ algorithm = MultipleLossRatioSearch(config)
+ results = algorithm.search(measurer=tg_instance, debug=logger.debug)
+ return [results[goal] for goal in goals]
@staticmethod
def perform_soak_search(
frame_size,
traffic_profile,
- minimum_transmit_rate,
- maximum_transmit_rate,
+ min_load,
+ max_load,
plr_target=1e-7,
tdpt=0.1,
initial_count=50,
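Stepping outside the diff for a moment: the perform_mlr_search body added above is a thin Robot wrapper around the new Python API. A hedged sketch of the same flow driven directly, reusing the toy NullMeasurer from the earlier sketch; the load values are arbitrary examples, the unset SearchGoal and Config fields are assumed to have defaults (per the commit message), and the float() conversion mirrors the Convert To Number calls in the Robot keywords below:

    from MLRsearch import Config, MultipleLossRatioSearch, SearchGoal

    goals = [
        SearchGoal(loss_ratio=0.0, exceed_ratio=0.5, duration_sum=20.0),
        SearchGoal(loss_ratio=0.005, exceed_ratio=0.5, duration_sum=20.0),
    ]
    config = Config()
    config.goals = goals
    config.min_load = 1e4  # tps, example value
    config.max_load = 1e7  # tps, example value
    algorithm = MultipleLossRatioSearch(config)
    results = algorithm.search(measurer=NullMeasurer(), debug=print)
    ndr = float(results[goals[0]])  # relevant lower bound [tps]
    pdr_ct = results[goals[1]].conditional_throughput  # as displayed below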
@@ -1587,8 +1605,8 @@ class OptimizedSearch:
:param frame_size: Frame size identifier or value [B].
:param traffic_profile: Module name as a traffic profile identifier.
See GPL/traffic_profiles/trex for implemented modules.
- :param minimum_transmit_rate: Minimal load in transactions per second.
- :param maximum_transmit_rate: Maximal load in transactions per second.
+ :param min_load: Minimal load in transactions per second.
+ :param max_load: Maximal load in transactions per second.
:param plr_target: Ratio of packets lost to achieve [1].
:param tdpt: Trial duration per trial.
The algorithm linearly increases trial duration with trial number,
@@ -1622,8 +1640,8 @@ class OptimizedSearch:
:param state_timeout: Time of life of DUT state [s].
:type frame_size: str or int
:type traffic_profile: str
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
+ :type min_load: float
+ :type max_load: float
:type plr_target: float
:type initial_count: int
:type timeout: float
@@ -1672,7 +1690,7 @@ class OptimizedSearch:
trace_enabled=trace_enabled,
)
result = algorithm.search(
- min_rate=minimum_transmit_rate,
- max_rate=maximum_transmit_rate,
+ min_rate=min_load,
+ max_rate=max_load,
)
return result
diff --git a/resources/libraries/robot/performance/performance_display.robot b/resources/libraries/robot/performance/performance_display.robot
index d230bcb35a..791a45db62 100644
--- a/resources/libraries/robot/performance/performance_display.robot
+++ b/resources/libraries/robot/performance/performance_display.robot
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -17,35 +17,6 @@
| ... | This includes checks to fail test.
*** Keywords ***
-| Check NDRPDR interval validity
-| | [Documentation]
-| | ... | Extract loss ratio of lower bound of the interval.
-| | ... | Fail if it does not reach the allowed value.
-| |
-| | ... | *Arguments:*
-| | ... | - interval - Measured interval. Type: ReceiveRateInterval
-| | ... | - packet_loss_ratio - Accepted loss (0.0 for NDR). Type: float
-| |
-| | ... | *Example:*
-| |
-| | ... | \| Check NDRPDR interval validity \| \${result.pdr_interval} \
-| | ... | \| \${0.005} \|
-| |
-| | [Arguments] | ${interval} | ${packet_loss_ratio}=${0.0}
-| |
-| | ${lower_bound} = | Set Variable | ${interval.measured_low}
-| | ${lower_bound_lr} = | Set Variable | ${lower_bound.loss_ratio}
-| | Return From Keyword If | ${lower_bound_lr} <= ${packet_loss_ratio}
-| | Set Test Variable | \${rate_for_teardown} | ${lower_bound.target_tr}
-| | ${message}= | Catenate | SEPARATOR=${SPACE}
-| | ... | Minimal rate loss ratio ${lower_bound_lr}
-| | ... | does not reach target ${packet_loss_ratio}.
-| | ${message_zero} = | Set Variable | Zero packets forwarded!
-| | ${message_other} = | Set Variable | ${lower_bound.loss_count} packets lost.
-| | ${message} = | Set Variable If | ${lower_bound_lr} >= 1.0
-| | ... | ${message}${\n}${message_zero} | ${message}${\n}${message_other}
-| | Fail | ${message}
-
| Compute Bandwidth
| | [Documentation]
| | ... | Compute (bidir) bandwidth from given (unidir) transaction rate.
@@ -77,8 +48,8 @@
| | ... | due to reconfiguration under traffic.
| |
| | ... | *Arguments:*
-| | ... | - result - Result of bidirectional measurement.
-| | ... | Type: ReceiveRateMeasurement
+| | ... | - result - Result of MLRsearch invocation for one search goal.
+| | ... | Type: StatInterval
| |
| | ... | *Example:*
| |
@@ -86,7 +57,7 @@
| |
| | [Arguments] | ${result}
| |
-| | ${bandwidth} | ${packet_rate}= | Compute Bandwidth | ${result.target_tr}
+| | ${bandwidth} | ${packet_rate}= | Compute Bandwidth | ${result.intended_load}
| | ${packet_loss} = | Set Variable | ${result.loss_count}
| | ${time_loss} = | Evaluate | ${packet_loss} / ${packet_rate}
| | Set Test Message | Packets lost due to reconfig: ${packet_loss}
@@ -95,14 +66,11 @@
| Display result of NDRPDR search
| | [Documentation]
-| | ... | Display result of NDR+PDR search, both quantities, both bounds,
-| | ... | aggregated, in units given by trasaction type, e.g. by default
-| | ... | in packet per seconds and Gbps total bandwidth
+| | ... | Display result of NDR+PDR search, both quantities, aggregated,
+| | ... | conditional throughput only, in units given by transaction type,
+| | ... | e.g. by default in packets per second and Gbps total bandwidth
| | ... | (for initial packet size).
-| | ... |
-| | ... | The bound to display is encoded as target rate, it is assumed
-| | ... | it is in transactions per second. Bidirectional traffic
-| | ... | transaction is understood as having 2 packets, for this purpose.
+| | ... | The lower bounds in the result are assumed to be valid.
| | ... |
| | ... | Througput is calculated as:
| | ... | Sum of measured rate over streams
@@ -115,8 +83,8 @@
| | ... | - transaction_type - String identifier to determine how to count
| | ... | transactions. Default is "packet".
| | ... | *Arguments:*
-| | ... | - result - Measured result data. Aggregated rate, tps or pps.
-| | ... | Type: NdrPdrResult
+| | ... | - result - Measured result data [tps]. Type: List[TrimmedStat]
+| | ... | *Returns:* NDR and PDR as unidirectional intended loads [tps], floats.
| |
| | ... | *Example:*
| |
@@ -124,21 +92,18 @@
| |
| | [Arguments] | ${result}
| |
-| | Display single bound | NDR_LOWER
-| | ... | ${result[0].measured_low.target_tr}
-| | ... | ${result[0].measured_low.latency}
-| | Display single bound | NDR_UPPER
-| | ... | ${result[0].measured_high.target_tr}
-| | Display single bound | PDR_LOWER
-| | ... | ${result[1].measured_low.target_tr}
-| | ... | ${result[1].measured_low.latency}
-| | Display single bound | PDR_UPPER
-| | ... | ${result[1].measured_high.target_tr}
+| | ${ndr} = | Convert To Number | ${result[0]}
+| | ${pdr} = | Convert To Number | ${result[1]}
+| | Display single bound | NDR | ${result[0].conditional_throughput}
+| | Display single bound | PDR | ${result[1].conditional_throughput}
+| | Return From Keyword | ${ndr} | ${pdr}
| Display result of soak search
| | [Documentation]
| | ... | Display result of soak search, avg+-stdev, as upper/lower bounds.
| | ... | See Display single bound for units used.
+| | ... | The displayed values are bidirectional, based on conditional
+| | ... | throughput. The returned values are described under *Returns:* below.
| |
| | ... | *Test (or broader scope) variables read:*
| | ... | - frame_size - L2 Frame Size [B] or IMIX string. Type: integer or
@@ -148,7 +113,6 @@
| | ... | *Arguments:*
| | ... | - avg - Estimated average critical load [pps]. Type: float
| | ... | - stdev - Standard deviation of critical load [pps]. Type: float
-| |
| | ... | *Returns:*
| | ... | - Lower and upper bound of critical load [pps]. Type: 2-tuple of float
| |
@@ -169,14 +133,14 @@
| Display single bound
| | [Documentation]
| | ... | Compute and display one bound of NDR+PDR (or soak) search result.
-| | ... | If the latency string is present, it is displayed as well.
| | ... |
| | ... | The bound to display is given as target transfer rate, it is assumed
-| | ... | it is in transactions per second. Bidirectional traffic
+| | ... | valid and in transactions per second. Bidirectional traffic
| | ... | transaction is understood as having 2 packets, for this purpose.
| | ... |
| | ... | Pps values are aggregated, in packet per seconds
| | ... | and Gbps total bandwidth (for initial packet size).
+| | ... | If the latency string is present, it is displayed as well.
| | ... |
| | ... | Througput is calculated as:
| | ... | Sum of measured rate over streams
@@ -189,20 +153,22 @@
| | ... | transactions. Default is "packet".
| | ... | *Arguments:*
| | ... | - text - Flavor text describing which bound is this. Type: string
-| | ... | - tps - Transaction rate [tps]. Type: float
+| | ... | - tps - Conditional throughput [tps]. Type: Union[float, DiscreteLoad]
| | ... | - latency - Latency data to display if non-empty. Type: string
| |
| | ... | *Example:*
| |
-| | ... | \| Display single bound \| NDR lower bound \| \${12345.67} \
+| | ... | \| Display single bound \| NDR \| \${12345.67} \
| | ... | \| latency=\${EMPTY} \|
| |
| | [Arguments] | ${text} | ${tps} | ${latency}=${EMPTY}
| |
+| | ${tps} = | Convert To Number | ${tps}
| | ${transaction_type} = | Get Transaction Type
| | Run Keyword And Return If | """_cps""" in """${transaction_type}"""
| | ... | Display Single CPS Bound | ${text} | ${tps} | ${latency}
-| | Display Single PPS Bound | ${text} | ${tps} | ${latency}
+| | Run Keyword And Return
+| | ... | Display Single PPS Bound | ${text} | ${tps} | ${latency}
| Display Single CPS Bound
| | [Documentation]
@@ -225,9 +191,8 @@
| | Set Test Message | ${\n}${text}: ${tps} CPS | append=yes
| | ${bandwidth} | ${pps} = | Compute Bandwidth | ${tps}
| | Export Search Bound | ${text} | ${tps} | cps | ${bandwidth * 1e9}
-| | Return From Keyword If | not """${latency}"""
-| | Set Test Message | ${\n}LATENCY [min/avg/max/hdrh] per stream: ${latency}
-| | ... | append=yes
+| | Run Keyword If | """${latency}""" | Set Test Message
+| | ... | ${\n}LATENCY [min/avg/max/hdrh] per stream: ${latency} | append=yes
| Display Single PPS Bound
| | [Documentation]
@@ -261,6 +226,5 @@
| | Set Test Message | ${\n}${text}: ${pps} pps, | append=yes
| | Set Test Message | ${bandwidth} Gbps (initial) | append=yes
| | Export Search Bound | ${text} | ${pps} | pps | ${bandwidth * 1e9}
-| | Return From Keyword If | not """${latency}"""
-| | Set Test Message | ${\n}LATENCY [min/avg/max/hdrh] per stream: ${latency}
-| | ... | append=yes
+| | Run Keyword If | """${latency}""" | Set Test Message
+| | ... | ${\n}LATENCY [min/avg/max/hdrh] per stream: ${latency} | append=yes
diff --git a/resources/libraries/robot/performance/performance_utils.robot b/resources/libraries/robot/performance/performance_utils.robot
index 78a1fcc0f9..9392bc0e8c 100644
--- a/resources/libraries/robot/performance/performance_utils.robot
+++ b/resources/libraries/robot/performance/performance_utils.robot
@@ -93,8 +93,8 @@
| | ${average} | ${stdev} = | Perform soak search
| | ... | frame_size=${frame_size}
| | ... | traffic_profile=${traffic_profile}
-| | ... | minimum_transmit_rate=${min_rate_soft}
-| | ... | maximum_transmit_rate=${max_rate}
+| | ... | min_load=${min_rate_soft}
+| | ... | max_load=${max_rate}
| | ... | plr_target=${1e-7}
| | ... | tdpt=${0.1}
| | ... | initial_count=${50}
@@ -142,7 +142,7 @@
| | ... | - traffic_profile - Name of module defining traffc for measurements.
| | ... | Type: string
| | ... | - frame_size - L2 Frame Size [B] or IMIX string. Type: integer or
-| | ... | string
+| | ... | string.
| | ... | - max_rate - Calculated maximal unidirectional transmit rate [tps].
| | ... | Type: float
| | ... | - resetter - Callable to reset DUT state before each trial.
@@ -160,8 +160,7 @@
| | ${disable_latency} = | Get Disable Latency
| | ${max_rate} = | Get Max Rate
| | ${min_rate_soft} = | Get Min Rate Soft
-| | # \${packet_loss_ratio} is used twice so it is worth a variable.
-| | ${packet_loss_ratio} = | Get Packet Loss Ratio
+| | ${loss_ratio} = | Get Packet Loss Ratio
| | ${ppta} = | Get Packets Per Transaction Aggregated
| | ${ramp_up_duration} = | Get Ramp Up Duration
| | ${ramp_up_rate} = | Get Ramp Up Rate
@@ -171,17 +170,18 @@
| | ${transaction_scale} = | Get Transaction Scale
| | ${transaction_type} = | Get Transaction Type
| | ${use_latency} = | Get Use Latency
-| | ${result} = | Perform optimized ndrpdr search
+| | ${result} = | Perform MLR Search
| | ... | frame_size=${frame_size}
| | ... | traffic_profile=${traffic_profile}
-| | ... | minimum_transmit_rate=${min_rate_soft}
-| | ... | maximum_transmit_rate=${max_rate}
-| | ... | packet_loss_ratio=${packet_loss_ratio}
-| | ... | final_relative_width=${0.005}
-| | ... | final_trial_duration=${30.0}
+| | ... | min_load=${min_rate_soft}
+| | ... | max_load=${max_rate}
+| | ... | loss_ratio=${loss_ratio}
+| | ... | relative_width=${0.005}
| | ... | initial_trial_duration=${1.0}
-| | ... | number_of_intermediate_phases=${2}
-| | ... | timeout=${1200.0}
+| | ... | final_trial_duration=${1.0}
+| | ... | duration_sum=${20.0}
+| | ... | preceding_targets=${2}
+| | ... | search_duration_max=${1200.0}
| | ... | ppta=${ppta}
| | ... | resetter=${resetter}
| | ... | traffic_directions=${traffic_directions}
@@ -191,13 +191,8 @@
| | ... | use_latency=${use_latency}
| | ... | ramp_up_duration=${ramp_up_duration}
| | ... | ramp_up_rate=${ramp_up_rate}
-| | Display result of NDRPDR search | ${result}
-| | Check NDRPDR interval validity | ${result[1]}
-| | ... | ${packet_loss_ratio}
-| | Check NDRPDR interval validity | ${result[0]}
-| | ${pdr} = | Set Variable | ${result[1].measured_low.target_tr}
-| | ${ndr} = | Set Variable | ${result[0].measured_low.target_tr}
-| | # We expect NDR and PDR to have different-looking stats.
+| | ${ndr} | ${pdr} = | Display result of NDRPDR search | ${result}
+| | # We expect NDR and PDR to have different-looking telemetry.
| | Set Test Variable | ${telemetry_rate} | pdr
| | Set Test Variable | ${telemetry_export} | ${True}
| | Send traffic at specified rate
@@ -262,17 +257,18 @@
| | ${transaction_scale} = | Get Transaction Scale
| | ${transaction_type} = | Get Transaction Type
| | ${use_latency} = | Get Use Latency
-| | ${result} = | Perform optimized ndrpdr search
+| | ${result} = | Perform MLR Search
| | ... | frame_size=${frame_size}
| | ... | traffic_profile=${traffic_profile}
-| | ... | minimum_transmit_rate=${min_rate_soft}
-| | ... | maximum_transmit_rate=${max_rate}
-| | ... | packet_loss_ratio=${0.0}
-| | ... | final_relative_width=${0.001}
-| | ... | final_trial_duration=${10.0}
+| | ... | min_load=${min_rate_soft}
+| | ... | max_load=${max_rate}
+| | ... | loss_ratio=${0.0}
+| | ... | relative_width=${0.001}
| | ... | initial_trial_duration=${1.0}
-| | ... | number_of_intermediate_phases=${1}
-| | ... | timeout=${1200}
+| | ... | final_trial_duration=${1.0}
+| | ... | duration_sum=${10.0}
+| | ... | preceding_targets=${1}
+| | ... | search_duration_max=${1200}
| | ... | ppta=${ppta}
| | ... | resetter=${resetter}
| | ... | traffic_directions=${traffic_directions}
@@ -282,8 +278,8 @@
| | ... | use_latency=${use_latency}
| | ... | ramp_up_duration=${ramp_up_duration}
| | ... | ramp_up_rate=${ramp_up_rate}
-| | Check NDRPDR interval validity | ${result[0]}
-| | Return From Keyword | ${result[0].measured_low.target_tr}
+| | ${ret} = | Convert To Number | ${result[0]}
+| | Return From Keyword | ${ret}
| Measure and show latency at specified rate
| | [Documentation]
@@ -450,7 +446,8 @@
| | | ... | ramp_up_rate=${ramp_up_rate}
| | | # Out of several quantities for aborted traffic (duration stretching),
| | | # the approximated receive rate is the best estimate we have.
-| | | ${value} = | Set Variable | ${result.approximated_receive_rate}
+| | | ${value} = | Set Variable | ${result.forwarding_count}
+| | | ${value} = | Evaluate | ${value} / ${result.offered_duration}
| | | ${bandwidth} | ${pps} = | Compute Bandwidth | ${value} / ${ppta}
| | | Append Mrr Value | ${value} | ${export_mrr_unit} | ${bandwidth * 1e9}
| | | Append To List | ${results} | ${value}
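Finally, the MRR branch above now rebuilds the approximate receive rate in Robot from two result attributes, since approximated_receive_rate was dropped from the result structure. The same arithmetic in Python; only the two attribute names are taken from the hunk above, the rest is a sketch:

    def approximate_receive_rate(result) -> float:
        # Transactions (or packets) forwarded per second of traffic actually
        # offered; the keyword above then passes value / ppta to Compute Bandwidth.
        return result.forwarding_count / result.offered_duration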