path: root/resources/libraries/python/MLRsearch
Diffstat (limited to 'resources/libraries/python/MLRsearch')
-rw-r--r--  resources/libraries/python/MLRsearch/AbstractMeasurer.py | 32
-rw-r--r--  resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py | 48
-rw-r--r--  resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py | 647
-rw-r--r--  resources/libraries/python/MLRsearch/NdrPdrResult.py | 65
-rw-r--r--  resources/libraries/python/MLRsearch/ReceiveRateInterval.py | 88
-rw-r--r--  resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py | 54
-rw-r--r--  resources/libraries/python/MLRsearch/__init__.py | 16
-rw-r--r--  resources/libraries/python/MLRsearch/candidate.py | 153
-rw-r--r--  resources/libraries/python/MLRsearch/config.py | 179
-rw-r--r--  resources/libraries/python/MLRsearch/dataclass/__init__.py | 19
-rw-r--r--  resources/libraries/python/MLRsearch/dataclass/dc_property.py | 173
-rw-r--r--  resources/libraries/python/MLRsearch/dataclass/field.py | 44
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_interval.py | 140
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_load.py | 316
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_result.py | 76
-rw-r--r--  resources/libraries/python/MLRsearch/discrete_width.py | 197
-rw-r--r--  resources/libraries/python/MLRsearch/expander.py | 102
-rw-r--r--  resources/libraries/python/MLRsearch/global_width.py | 70
-rw-r--r--  resources/libraries/python/MLRsearch/goal_result.py | 72
-rw-r--r--  resources/libraries/python/MLRsearch/limit_handler.py | 198
-rw-r--r--  resources/libraries/python/MLRsearch/load_rounding.py | 205
-rw-r--r--  resources/libraries/python/MLRsearch/load_stats.py | 112
-rw-r--r--  resources/libraries/python/MLRsearch/measurement_database.py | 126
-rw-r--r--  resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py | 325
-rw-r--r--  resources/libraries/python/MLRsearch/pep3140/__init__.py | 24
-rw-r--r--  resources/libraries/python/MLRsearch/pep3140/classes.py | 34
-rw-r--r--  resources/libraries/python/MLRsearch/relevant_bounds.py | 56
-rw-r--r--  resources/libraries/python/MLRsearch/search_goal.py | 119
-rw-r--r--  resources/libraries/python/MLRsearch/search_goal_tuple.py | 60
-rw-r--r--  resources/libraries/python/MLRsearch/selector.py | 183
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/__init__.py | 35
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/base.py | 132
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/bisect.py | 193
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/extend_hi.py | 76
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/extend_lo.py | 76
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/halve.py | 83
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/refine_hi.py | 55
-rw-r--r--  resources/libraries/python/MLRsearch/strategy/refine_lo.py | 53
-rw-r--r--  resources/libraries/python/MLRsearch/target_scaling.py | 103
-rw-r--r--  resources/libraries/python/MLRsearch/target_spec.py | 95
-rw-r--r--  resources/libraries/python/MLRsearch/target_stat.py | 153
-rw-r--r--  resources/libraries/python/MLRsearch/trial_measurement/__init__.py | 19
-rw-r--r--  resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py | 55
-rw-r--r--  resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py | 161
-rw-r--r--  resources/libraries/python/MLRsearch/trimmed_stat.py | 52
45 files changed, 4339 insertions, 935 deletions
diff --git a/resources/libraries/python/MLRsearch/AbstractMeasurer.py b/resources/libraries/python/MLRsearch/AbstractMeasurer.py
deleted file mode 100644
index 622b8fdba6..0000000000
--- a/resources/libraries/python/MLRsearch/AbstractMeasurer.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining AbstractMeasurer class."""
-
-from abc import ABCMeta, abstractmethod
-
-
-class AbstractMeasurer(metaclass=ABCMeta):
- """Abstract class defining common API for measurement providers."""
-
- @abstractmethod
- def measure(self, duration, transmit_rate):
- """Perform trial measurement and return the result.
-
- :param duration: Trial duration [s].
- :param transmit_rate: Target transmit rate [pps].
- :type duration: float
- :type transmit_rate: float
- :returns: Structure containing the result of the measurement.
- :rtype: ReceiveRateMeasurement.ReceiveRateMeasurement
- """
diff --git a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py b/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py
deleted file mode 100644
index f4f2d3f096..0000000000
--- a/resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining AbstractSearchAlgorithm class."""
-
-from abc import ABCMeta, abstractmethod
-
-
-class AbstractSearchAlgorithm(metaclass=ABCMeta):
- """Abstract class defining common API for search algorithms."""
-
- def __init__(self, measurer):
- """Store the rate provider.
-
- :param measurer: Object able to perform trial or composite measurements.
- :type measurer: AbstractMeasurer.AbstractMeasurer
- """
- # TODO: Type check for AbstractMeasurer?
- self.measurer = measurer
-
- @abstractmethod
- def narrow_down_ndr_and_pdr(
- self, fail_rate, line_rate, packet_loss_ratio):
- """Perform measurements to narrow down intervals, return them.
-
- This will be renamed when custom loss ratio lists are supported.
-
- :param fail_rate: Minimal target transmit rate [pps].
- :param line_rate: Maximal target transmit rate [pps].
- :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
- :type fail_rate: float
- :type line_rate: float
- :type packet_loss_ratio: float
- :returns: Structure containing narrowed down intervals
- and their measurements.
- :rtype: NdrPdrResult.NdrPdrResult
- """
- # TODO: Do we agree on arguments related to precision or trial duration?
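
A hypothetical driver for the removed search API follows; the rates and loss ratio are illustrative, and the concrete MultipleLossRatioSearch class (deleted below) is used because this abstract class only stores the measurer.

    # DummyMeasurer is the illustrative measurer sketched above.
    search = MultipleLossRatioSearch(measurer=DummyMeasurer())
    result = search.narrow_down_ndr_and_pdr(
        10_000.0,       # minimal target transmit rate [pps]
        14_880_952.0,   # maximal target transmit rate [pps]
        0.005,          # packet loss ratio defining the PDR
    )
    print(result)       # NdrPdrResult combining the NDR and PDR intervals
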
diff --git a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py b/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py
deleted file mode 100644
index 2db65ef755..0000000000
--- a/resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py
+++ /dev/null
@@ -1,647 +0,0 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining MultipleLossRatioSearch class."""
-
-import logging
-import math
-import time
-
-from .AbstractSearchAlgorithm import AbstractSearchAlgorithm
-from .NdrPdrResult import NdrPdrResult
-from .ReceiveRateInterval import ReceiveRateInterval
-
-
-class MultipleLossRatioSearch(AbstractSearchAlgorithm):
- """Optimized binary search algorithm for finding NDR and PDR bounds.
-
- Traditional binary search algorithm needs initial interval
- (lower and upper bound), and returns final interval after bisecting
- (until some exit condition is met).
- The exit condition is usually related to the interval width,
- (upper bound value minus lower bound value).
-
- The optimized algorithm contains several improvements
- aimed to reduce overall search time.
-
- One improvement is searching for two intervals at once.
- The intervals are for NDR (No Drop Rate) and PDR (Partial Drop Rate).
-
- Next improvement is that the initial interval does not need to be valid.
- Imagine initial interval (10, 11) where 11 is smaller
- than the searched value.
- The algorithm will try (11, 13) interval next, and if 13 is still smaller,
- (13, 17) and so on, doubling width until the upper bound is valid.
- The part when interval expands is called external search,
- the part when interval is bisected is called internal search.
-
- Next improvement is that trial measurements at small trial duration
- can be used to find a reasonable interval for full trial duration search.
- This results in more trials performed, but smaller overall duration
- in general.
-
- Next improvement is bisecting in logarithmic quantities,
- so that exit criteria can be independent of measurement units.
-
- Next improvement is basing the initial interval on receive rates.
-
- Final improvement is exiting early if the minimal value
- is not a valid lower bound.
-
- The complete search consist of several phases,
- each phase performing several trial measurements.
- Initial phase creates initial interval based on receive rates
- at maximum rate and at maximum receive rate (MRR).
- Final phase and preceding intermediate phases are performing
- external and internal search steps,
- each resulting interval is the starting point for the next phase.
- The resulting interval of final phase is the result of the whole algorithm.
-
- Each non-initial phase uses its own trial duration and width goal.
- Any non-initial phase stops searching (for NDR or PDR independently)
- when minimum is not a valid lower bound (at current duration),
- or all of the following is true:
- Both bounds are valid, both bounds are measured at the current phase
- trial duration, interval width is less than the width goal
- for current phase.
-
- TODO: Review and update this docstring according to rst docs.
- TODO: Support configurable number of Packet Loss Ratios.
- """
-
- class ProgressState:
- """Structure containing data to be passed around in recursion."""
-
- def __init__(
- self, result, phases, duration, width_goal, packet_loss_ratio,
- minimum_transmit_rate, maximum_transmit_rate):
- """Convert and store the argument values.
-
- :param result: Current measured NDR and PDR intervals.
- :param phases: How many intermediate phases to perform
- before the current one.
- :param duration: Trial duration to use in the current phase [s].
- :param width_goal: The goal relative width for the current phase.
- :param packet_loss_ratio: PDR fraction for the current search.
- :param minimum_transmit_rate: Minimum target transmit rate
- for the current search [pps].
- :param maximum_transmit_rate: Maximum target transmit rate
- for the current search [pps].
- :type result: NdrPdrResult.NdrPdrResult
- :type phases: int
- :type duration: float
- :type width_goal: float
- :type packet_loss_ratio: float
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
- """
- self.result = result
- self.phases = int(phases)
- self.duration = float(duration)
- self.width_goal = float(width_goal)
- self.packet_loss_ratio = float(packet_loss_ratio)
- self.minimum_transmit_rate = float(minimum_transmit_rate)
- self.maximum_transmit_rate = float(maximum_transmit_rate)
-
- def __init__(
- self, measurer, final_relative_width=0.005,
- final_trial_duration=30.0, initial_trial_duration=1.0,
- number_of_intermediate_phases=2, timeout=600.0, doublings=1):
- """Store the measurer object and additional arguments.
-
- :param measurer: Rate provider to use by this search object.
- :param final_relative_width: Final lower bound transmit rate
- cannot be more distant than this multiple of upper bound [1].
- :param final_trial_duration: Trial duration for the final phase [s].
- :param initial_trial_duration: Trial duration for the initial phase
- and also for the first intermediate phase [s].
- :param number_of_intermediate_phases: Number of intermediate phases
- to perform before the final phase [1].
- :param timeout: The search will fail itself when not finished
- before this overall time [s].
- :param doublings: How many doublings to do in external search step.
- Default 1 is suitable for fairly stable tests,
- less stable tests might get better overall duration with 2 or more.
- :type measurer: AbstractMeasurer.AbstractMeasurer
- :type final_relative_width: float
- :type final_trial_duration: float
- :type initial_trial_duration: float
- :type number_of_intermediate_phases: int
- :type timeout: float
- :type doublings: int
- """
- super(MultipleLossRatioSearch, self).__init__(measurer)
- self.final_trial_duration = float(final_trial_duration)
- self.final_relative_width = float(final_relative_width)
- self.number_of_intermediate_phases = int(number_of_intermediate_phases)
- self.initial_trial_duration = float(initial_trial_duration)
- self.timeout = float(timeout)
- self.doublings = int(doublings)
-
- @staticmethod
- def double_relative_width(relative_width):
- """Return relative width corresponding to double logarithmic width.
-
- :param relative_width: The base relative width to double.
- :type relative_width: float
- :returns: The relative width of double logarithmic size.
- :rtype: float
- """
- return 1.999 * relative_width - relative_width * relative_width
- # The number should be 2.0, but we want to avoid rounding errors,
- # and ensure half of double is not larger than the original value.
-
- @staticmethod
- def double_step_down(relative_width, current_bound):
- """Return rate of double logarithmic width below.
-
- :param relative_width: The base relative width to double.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type current_bound: float
- :returns: Transmit rate smaller by logarithmically double width [pps].
- :rtype: float
- """
- return current_bound * (
- 1.0 - MultipleLossRatioSearch.double_relative_width(relative_width)
- )
-
- @staticmethod
- def expand_down(relative_width, doublings, current_bound):
- """Return rate of expanded logarithmic width below.
-
- :param relative_width: The base relative width to double.
- :param doublings: How many doublings to do for expansion.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type doublings: int
- :type current_bound: float
- :returns: Transmit rate smaller by logarithmically expanded width [pps].
- :rtype: float
- """
- for _ in range(doublings):
- relative_width = MultipleLossRatioSearch.double_relative_width(
- relative_width
- )
- return current_bound * (1.0 - relative_width)
-
- @staticmethod
- def double_step_up(relative_width, current_bound):
- """Return rate of double logarithmic width above.
-
- :param relative_width: The base relative width to double.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type current_bound: float
- :returns: Transmit rate larger by logarithmically double width [pps].
- :rtype: float
- """
- return current_bound / (
- 1.0 - MultipleLossRatioSearch.double_relative_width(relative_width)
- )
-
- @staticmethod
- def expand_up(relative_width, doublings, current_bound):
- """Return rate of expanded logarithmic width above.
-
- :param relative_width: The base relative width to double.
- :param doublings: How many doublings to do for expansion.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type doublings: int
- :type current_bound: float
- :returns: Transmit rate larger by logarithmically expanded width [pps].
- :rtype: float
- """
- for _ in range(doublings):
- relative_width = MultipleLossRatioSearch.double_relative_width(
- relative_width
- )
- return current_bound / (1.0 - relative_width)
-
- @staticmethod
- def half_relative_width(relative_width):
- """Return relative width corresponding to half logarithmic width.
-
- :param relative_width: The base relative width to halve.
- :type relative_width: float
- :returns: The relative width of half logarithmic size.
- :rtype: float
- """
- return 1.0 - math.sqrt(1.0 - relative_width)
-
- @staticmethod
- def half_step_up(relative_width, current_bound):
- """Return rate of half logarithmic width above.
-
- :param relative_width: The base relative width to halve.
- :param current_bound: The current target transmit rate to move [pps].
- :type relative_width: float
- :type current_bound: float
- :returns: Transmit rate larger by logarithmically half width [pps].
- :rtype: float
- """
- return current_bound / (
- 1.0 - MultipleLossRatioSearch.half_relative_width(relative_width)
- )
-
- def narrow_down_ndr_and_pdr(
- self, minimum_transmit_rate, maximum_transmit_rate,
- packet_loss_ratio):
- """Perform initial phase, create state object, proceed with next phases.
-
- :param minimum_transmit_rate: Minimal target transmit rate [pps].
- :param maximum_transmit_rate: Maximal target transmit rate [pps].
- :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
- :type minimum_transmit_rate: float
- :type maximum_transmit_rate: float
- :type packet_loss_ratio: float
- :returns: Structure containing narrowed down intervals
- and their measurements.
- :rtype: NdrPdrResult.NdrPdrResult
- :raises RuntimeError: If total duration is larger than timeout.
- """
- minimum_transmit_rate = float(minimum_transmit_rate)
- maximum_transmit_rate = float(maximum_transmit_rate)
- packet_loss_ratio = float(packet_loss_ratio)
- line_measurement = self.measurer.measure(
- self.initial_trial_duration, maximum_transmit_rate)
- initial_width_goal = self.final_relative_width
- for _ in range(self.number_of_intermediate_phases):
- initial_width_goal = self.double_relative_width(initial_width_goal)
- max_lo = maximum_transmit_rate * (1.0 - initial_width_goal)
- mrr = max(
- minimum_transmit_rate, min(max_lo, line_measurement.receive_rate)
- )
- mrr_measurement = self.measurer.measure(
- self.initial_trial_duration, mrr
- )
- # Attempt to get narrower width.
- if mrr_measurement.loss_fraction > 0.0:
- max2_lo = mrr * (1.0 - initial_width_goal)
- mrr2 = min(max2_lo, mrr_measurement.receive_rate)
- else:
- mrr2 = mrr / (1.0 - initial_width_goal)
- if minimum_transmit_rate < mrr2 < maximum_transmit_rate:
- line_measurement = mrr_measurement
- mrr_measurement = self.measurer.measure(
- self.initial_trial_duration, mrr2)
- if mrr2 > mrr:
- line_measurement, mrr_measurement = \
- (mrr_measurement, line_measurement)
- starting_interval = ReceiveRateInterval(
- mrr_measurement, line_measurement)
- starting_result = NdrPdrResult(starting_interval, starting_interval)
- state = self.ProgressState(
- starting_result, self.number_of_intermediate_phases,
- self.final_trial_duration, self.final_relative_width,
- packet_loss_ratio, minimum_transmit_rate, maximum_transmit_rate
- )
- state = self.ndrpdr(state)
- return state.result
-
- def _measure_and_update_state(self, state, transmit_rate):
- """Perform trial measurement, update bounds, return new state.
-
- :param state: State before this measurement.
- :param transmit_rate: Target transmit rate for this measurement [pps].
- :type state: ProgressState
- :type transmit_rate: float
- :returns: State after the measurement.
- :rtype: ProgressState
- """
- # TODO: Implement https://stackoverflow.com/a/24683360
- # to avoid the string manipulation if log verbosity is too low.
- logging.info(f"result before update: {state.result}")
- logging.debug(
- f"relative widths in goals: "
- f"{state.result.width_in_goals(self.final_relative_width)}"
- )
- measurement = self.measurer.measure(state.duration, transmit_rate)
- ndr_interval = self._new_interval(
- state.result.ndr_interval, measurement, 0.0
- )
- pdr_interval = self._new_interval(
- state.result.pdr_interval, measurement, state.packet_loss_ratio
- )
- state.result = NdrPdrResult(ndr_interval, pdr_interval)
- return state
-
- @staticmethod
- def _new_interval(old_interval, measurement, packet_loss_ratio):
- """Return new interval with bounds updated according to the measurement.
-
- :param old_interval: The current interval before the measurement.
- :param measurement: The new measurement to take into account.
- :param packet_loss_ratio: Fraction for PDR (or zero for NDR).
- :type old_interval: ReceiveRateInterval.ReceiveRateInterval
- :type measurement: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type packet_loss_ratio: float
- :returns: The updated interval.
- :rtype: ReceiveRateInterval.ReceiveRateInterval
- """
- old_lo, old_hi = old_interval.measured_low, old_interval.measured_high
- new_lo = new_hi = None
- # Priority zero: direct replace if the target Tr is the same.
- if measurement.target_tr in (old_lo.target_tr, old_hi.target_tr):
- if measurement.target_tr == old_lo.target_tr:
- new_lo = measurement
- else:
- new_hi = measurement
- # Priority one: invalid lower bound allows only one type of update.
- elif old_lo.loss_fraction > packet_loss_ratio:
- # We can only expand down, old bound becomes valid upper one.
- if measurement.target_tr < old_lo.target_tr:
- new_lo, new_hi = measurement, old_lo
- else:
- return old_interval
-
- # Lower bound is now valid.
- # Next priorities depend on target Tr.
- elif measurement.target_tr < old_lo.target_tr:
- # Lower external measurement, relevant only
- # if the new measurement has high loss rate.
- if measurement.loss_fraction > packet_loss_ratio:
- # Returning the broader interval as old_lo
- # would be invalid upper bound.
- new_lo = measurement
- elif measurement.target_tr > old_hi.target_tr:
- # Upper external measurement, only relevant for invalid upper bound.
- if old_hi.loss_fraction <= packet_loss_ratio:
- # Old upper bound becomes valid new lower bound.
- new_lo, new_hi = old_hi, measurement
- else:
- # Internal measurement, replaced boundary
- # depends on measured loss fraction.
- if measurement.loss_fraction > packet_loss_ratio:
- # We have found a narrow valid interval,
- # regardless of whether old upper bound was valid.
- new_hi = measurement
- else:
- # In ideal world, we would not want to shrink interval
- # if upper bound is not valid.
- # In the real world, we want to shrink it for
- # "invalid upper bound at maximal rate" case.
- new_lo = measurement
-
- return ReceiveRateInterval(
- old_lo if new_lo is None else new_lo,
- old_hi if new_hi is None else new_hi
- )
-
- def ndrpdr(self, state):
- """Perform trials for this phase. Return the new state when done.
-
- :param state: State before this phase.
- :type state: ProgressState
- :returns: The updated state.
- :rtype: ProgressState
- :raises RuntimeError: If total duration is larger than timeout.
- """
- start_time = time.time()
- if state.phases > 0:
- # We need to finish preceding intermediate phases first.
- saved_phases = state.phases
- state.phases -= 1
- # Preceding phases have shorter duration.
- saved_duration = state.duration
- duration_multiplier = state.duration / self.initial_trial_duration
- phase_exponent = float(state.phases) / saved_phases
- state.duration = self.initial_trial_duration * math.pow(
- duration_multiplier, phase_exponent
- )
- # Shorter durations do not need that narrow widths.
- saved_width = state.width_goal
- state.width_goal = self.double_relative_width(state.width_goal)
- # Recurse.
- state = self.ndrpdr(state)
- # Restore the state for current phase.
- state.duration = saved_duration
- state.width_goal = saved_width
- state.phases = saved_phases # Not needed, but just in case.
-
- logging.info(
- f"starting iterations with duration {state.duration} and relative "
- f"width goal {state.width_goal}"
- )
- while 1:
- if time.time() > start_time + self.timeout:
- raise RuntimeError(u"Optimized search takes too long.")
- # Order of priorities: invalid bounds (nl, pl, nh, ph),
- # then narrowing relative Tr widths.
- # Durations are not priorities yet,
- # they will settle on their own hopefully.
- ndr_lo = state.result.ndr_interval.measured_low
- ndr_hi = state.result.ndr_interval.measured_high
- pdr_lo = state.result.pdr_interval.measured_low
- pdr_hi = state.result.pdr_interval.measured_high
- ndr_rel_width = max(
- state.width_goal, state.result.ndr_interval.rel_tr_width
- )
- pdr_rel_width = max(
- state.width_goal, state.result.pdr_interval.rel_tr_width
- )
- # If we are hitting maximal or minimal rate, we cannot shift,
- # but we can re-measure.
- new_tr = self._ndrpdr_loss_fraction(
- state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
- pdr_rel_width
- )
-
- if new_tr is not None:
- state = self._measure_and_update_state(state, new_tr)
- continue
-
- # If we are hitting maximum_transmit_rate,
- # it is still worth narrowing width,
- # hoping large enough loss fraction will happen.
- # But if we are hitting the minimal rate (at current duration),
- # no additional measurement will help with that,
- # so we can stop narrowing in this phase.
- if (ndr_lo.target_tr <= state.minimum_transmit_rate
- and ndr_lo.loss_fraction > 0.0):
- ndr_rel_width = 0.0
- if (pdr_lo.target_tr <= state.minimum_transmit_rate
- and pdr_lo.loss_fraction > state.packet_loss_ratio):
- pdr_rel_width = 0.0
-
- new_tr = self._ndrpdr_width_goal(
- state, ndr_lo, pdr_lo, ndr_rel_width, pdr_rel_width
- )
-
- if new_tr is not None:
- state = self._measure_and_update_state(state, new_tr)
- continue
-
- # We do not need to improve width, but there still might be
- # some measurements with smaller duration.
- new_tr = self._ndrpdr_duration(
- state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
- pdr_rel_width
- )
-
- if new_tr is not None:
- state = self._measure_and_update_state(state, new_tr)
- continue
-
- # Widths are narrow (or lower bound minimal), bound measurements
- # are long enough, we can return.
- logging.info(u"phase done")
- break
- return state
-
- def _ndrpdr_loss_fraction(
- self, state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
- pdr_rel_width):
- """Perform loss_fraction-based trials within a ndrpdr phase
-
- :param state: current state
- :param ndr_lo: ndr interval measured low
- :param ndr_hi: ndr interval measured high
- :param pdr_lo: pdr interval measured low
- :param pdr_hi: pdr interval measured high
- :param ndr_rel_width: ndr interval relative width
- :param pdr_rel_width: pdr interval relative width
- :type state: ProgressState
- :type ndr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_hi: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_hi: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_rel_width: float
- :type pdr_rel_width: float
- :returns: a new transmit rate if one should be applied
- :rtype: float
- """
- result = None
- if ndr_lo.loss_fraction > 0.0:
- if ndr_lo.target_tr > state.minimum_transmit_rate:
- result = max(
- state.minimum_transmit_rate, self.expand_down(
- ndr_rel_width, self.doublings, ndr_lo.target_tr
- )
- )
- logging.info(f"ndr lo external {result}")
- elif ndr_lo.duration < state.duration:
- result = state.minimum_transmit_rate
- logging.info(u"ndr lo minimal re-measure")
-
- if result is None and pdr_lo.loss_fraction > state.packet_loss_ratio:
- if pdr_lo.target_tr > state.minimum_transmit_rate:
- result = max(
- state.minimum_transmit_rate, self.expand_down(
- pdr_rel_width, self.doublings, pdr_lo.target_tr
- )
- )
- logging.info(f"pdr lo external {result}")
- elif pdr_lo.duration < state.duration:
- result = state.minimum_transmit_rate
- logging.info(u"pdr lo minimal re-measure")
-
- if result is None and ndr_hi.loss_fraction <= 0.0:
- if ndr_hi.target_tr < state.maximum_transmit_rate:
- result = min(
- state.maximum_transmit_rate, self.expand_up(
- ndr_rel_width, self.doublings, ndr_hi.target_tr
- )
- )
- logging.info(f"ndr hi external {result}")
- elif ndr_hi.duration < state.duration:
- result = state.maximum_transmit_rate
- logging.info(u"ndr hi maximal re-measure")
-
- if result is None and pdr_hi.loss_fraction <= state.packet_loss_ratio:
- if pdr_hi.target_tr < state.maximum_transmit_rate:
- result = min(
- state.maximum_transmit_rate, self.expand_up(
- pdr_rel_width, self.doublings, pdr_hi.target_tr
- )
- )
- logging.info(f"pdr hi external {result}")
- elif pdr_hi.duration < state.duration:
- result = state.maximum_transmit_rate
- logging.info(u"ndr hi maximal re-measure")
- return result
-
- def _ndrpdr_width_goal(
- self, state, ndr_lo, pdr_lo, ndr_rel_width, pdr_rel_width):
- """Perform width_goal-based trials within a ndrpdr phase
-
- :param state: current state
- :param ndr_lo: ndr interval measured low
- :param pdr_lo: pdr interval measured low
- :param ndr_rel_width: ndr interval relative width
- :param pdr_rel_width: pdr interval relative width
- :type state: ProgressState
- :type ndr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_rel_width: float
- :type pdr_rel_width: float
- :returns: a new transmit rate if one should be applied
- :rtype: float
- """
- if ndr_rel_width > state.width_goal:
- # We have to narrow NDR width first, as NDR internal search
- # can invalidate PDR (but not vice versa).
- result = self.half_step_up(ndr_rel_width, ndr_lo.target_tr)
- logging.info(f"Bisecting for NDR at {result}")
- elif pdr_rel_width > state.width_goal:
- # PDR internal search.
- result = self.half_step_up(pdr_rel_width, pdr_lo.target_tr)
- logging.info(f"Bisecting for PDR at {result}")
- else:
- result = None
- return result
-
- @staticmethod
- def _ndrpdr_duration(
- state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
- pdr_rel_width):
- """Perform duration-based trials within a ndrpdr phase
-
- :param state: current state
- :param ndr_lo: ndr interval measured low
- :param ndr_hi: ndr interval measured high
- :param pdr_lo: pdr interval measured low
- :param pdr_hi: pdr interval measured high
- :param ndr_rel_width: ndr interval relative width
- :param pdr_rel_width: pdr interval relative width
- :type state: ProgressState
- :type ndr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_hi: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_lo: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type pdr_hi: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type ndr_rel_width: float
- :type pdr_rel_width: float
- :returns: a new transmit rate if one should be applied
- :rtype: float
- """
- # We need to re-measure with full duration, possibly
- # creating invalid bounds to resolve (thus broadening width).
- if ndr_lo.duration < state.duration:
- result = ndr_lo.target_tr
- logging.info(u"re-measuring NDR lower bound")
- elif pdr_lo.duration < state.duration:
- result = pdr_lo.target_tr
- logging.info(u"re-measuring PDR lower bound")
- # Except when lower bounds have high loss fraction, in that case
- # we do not need to re-measure _upper_ bounds.
- elif ndr_hi.duration < state.duration and ndr_rel_width > 0.0:
- result = ndr_hi.target_tr
- logging.info(u"re-measuring NDR upper bound")
- elif pdr_hi.duration < state.duration and pdr_rel_width > 0.0:
- result = pdr_hi.target_tr
- logging.info(u"re-measuring PDR upper bound")
- else:
- result = None
- return result
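
The logarithmic width arithmetic removed above can be checked numerically. This small worked example (values illustrative) mirrors the deleted double_relative_width and half_relative_width helpers and the step-down/step-up computations.

    import math

    def double_relative_width(w):
        # 1.999 instead of 2.0 keeps "half of double" at or below the original.
        return 1.999 * w - w * w

    def half_relative_width(w):
        return 1.0 - math.sqrt(1.0 - w)

    goal = 0.005                                # final relative width goal
    wider = double_relative_width(goal)         # goal one phase earlier, ~0.00997
    assert half_relative_width(wider) <= goal   # halving never overshoots the goal

    bound = 1e6                                 # a current lower bound [pps]
    print(bound * (1.0 - wider))                # double_step_down target rate
    print(bound / (1.0 - wider))                # double_step_up target rate
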
diff --git a/resources/libraries/python/MLRsearch/NdrPdrResult.py b/resources/libraries/python/MLRsearch/NdrPdrResult.py
deleted file mode 100644
index 3454ef1957..0000000000
--- a/resources/libraries/python/MLRsearch/NdrPdrResult.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining NdrPdrResult class."""
-
-from .ReceiveRateInterval import ReceiveRateInterval
-
-
-class NdrPdrResult:
- """Two measurement intervals, return value of search algorithms.
-
- Partial fraction is NOT part of the result. Pdr interval should be valid
- for all partial fractions implied by the interval."""
-
- def __init__(self, ndr_interval, pdr_interval):
- """Store the measured intervals after checking argument types.
-
- :param ndr_interval: Object containing data for NDR part of the result.
- :param pdr_interval: Object containing data for PDR part of the result.
- :type ndr_interval: ReceiveRateInterval.ReceiveRateInterval
- :type pdr_interval: ReceiveRateInterval.ReceiveRateInterval
- """
- # TODO: Type checking is not very pythonic,
- # perhaps users can fix wrong usage without it?
- if not isinstance(ndr_interval, ReceiveRateInterval):
- raise TypeError(
- f"ndr_interval, is not a ReceiveRateInterval: {ndr_interval!r}"
- )
- if not isinstance(pdr_interval, ReceiveRateInterval):
- raise TypeError(
- f"pdr_interval, is not a ReceiveRateInterval: {pdr_interval!r}"
- )
- self.ndr_interval = ndr_interval
- self.pdr_interval = pdr_interval
-
- def width_in_goals(self, relative_width_goal):
- """Return a debug string related to current widths in logarithmic scale.
-
- :param relative_width_goal: Upper bound times this is the goal
- difference between upper bound and lower bound.
- :type relative_width_goal: float
- :returns: Message containing NDR and PDR widths in goals.
- :rtype: str
- """
- return f"ndr {self.ndr_interval.width_in_goals(relative_width_goal)};" \
- f" pdr {self.pdr_interval.width_in_goals(relative_width_goal)}"
-
- def __str__(self):
- """Return string as tuple of named values."""
- return f"NDR={self.ndr_interval!s};PDR={self.pdr_interval!s}"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"NdrPdrResult(ndr_interval={self.ndr_interval!r}," \
- f"pdr_interval={self.pdr_interval!r})"
diff --git a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py b/resources/libraries/python/MLRsearch/ReceiveRateInterval.py
deleted file mode 100644
index eff23e8bcc..0000000000
--- a/resources/libraries/python/MLRsearch/ReceiveRateInterval.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ReceiveRateInterval class."""
-
-import math
-
-from .ReceiveRateMeasurement import ReceiveRateMeasurement
-
-
-class ReceiveRateInterval:
- """Structure defining two Rr measurements, and their relation."""
-
- def __init__(self, measured_low, measured_high):
- """Store the bound measurements after checking argument types.
-
- :param measured_low: Measurement for the lower bound.
- :param measured_high: Measurement for the upper bound.
- :type measured_low: ReceiveRateMeasurement.ReceiveRateMeasurement
- :type measured_high: ReceiveRateMeasurement.ReceiveRateMeasurement
- """
- # TODO: Type checking is not very pythonic,
- # perhaps users can fix wrong usage without it?
- if not isinstance(measured_low, ReceiveRateMeasurement):
- raise TypeError(
- f"measured_low is not a ReceiveRateMeasurement: "
- f"{measured_low!r}"
- )
- if not isinstance(measured_high, ReceiveRateMeasurement):
- raise TypeError(
- f"measured_high is not a ReceiveRateMeasurement: "
- f"{measured_high!r}"
- )
- self.measured_low = measured_low
- self.measured_high = measured_high
- # Declare secondary quantities to appease pylint.
- self.abs_tr_width = None
- """Absolute width of target transmit rate. Upper minus lower."""
- self.rel_tr_width = None
- """Relative width of target transmit rate. Absolute divided by upper."""
- self.sort()
-
- def sort(self):
- """Sort bounds by target Tr, compute secondary quantities."""
- if self.measured_low.target_tr > self.measured_high.target_tr:
- self.measured_low, self.measured_high = (
- self.measured_high, self.measured_low
- )
- self.abs_tr_width = (
- self.measured_high.target_tr - self.measured_low.target_tr
- )
- self.rel_tr_width = self.abs_tr_width / self.measured_high.target_tr
-
- def width_in_goals(self, relative_width_goal):
- """Return float value.
-
- Relative width goal is some (negative) value on logarithmic scale.
- Current relative width is another logarithmic value.
- Return the latter divided by the former.
- This is useful when investigating how did surprising widths come to be.
-
- :param relative_width_goal: Upper bound times this is the goal
- difference between upper bound and lower bound.
- :type relative_width_goal: float
- :returns: Current width as logarithmic multiple of goal width [1].
- :rtype: float
- """
- return math.log(1.0 - self.rel_tr_width) / math.log(
- 1.0 - relative_width_goal)
-
- def __str__(self):
- """Return string as half-open interval."""
- return f"[{self.measured_low!s};{self.measured_high!s})"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"ReceiveRateInterval(measured_low={self.measured_low!r}," \
- f"measured_high={self.measured_high!r})"
diff --git a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py b/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py
deleted file mode 100644
index 31a6f8202e..0000000000
--- a/resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module defining ReceiveRateMeasurement class."""
-
-
-class ReceiveRateMeasurement:
- """Structure defining the result of single Rr measurement."""
-
- def __init__(self, duration, target_tr, transmit_count, loss_count):
- """Constructor, normalize primary and compute secondary quantities.
-
- :param duration: Measurement duration [s].
- :param target_tr: Target transmit rate [pps].
- If bidirectional traffic is measured, this is bidirectional rate.
- :param transmit_count: Number of packets transmitted [1].
- :param loss_count: Number of packets transmitted but not received [1].
- :type duration: float
- :type target_tr: float
- :type transmit_count: int
- :type loss_count: int
- """
- self.duration = float(duration)
- self.target_tr = float(target_tr)
- self.transmit_count = int(transmit_count)
- self.loss_count = int(loss_count)
- self.receive_count = transmit_count - loss_count
- self.transmit_rate = transmit_count / self.duration
- self.loss_rate = loss_count / self.duration
- self.receive_rate = self.receive_count / self.duration
- self.loss_fraction = float(self.loss_count) / self.transmit_count
- # TODO: Do we want to store also the real time (duration + overhead)?
-
- def __str__(self):
- """Return string reporting input and loss fraction."""
- return f"d={self.duration!s},Tr={self.target_tr!s}," \
- f"Df={self.loss_fraction!s}"
-
- def __repr__(self):
- """Return string evaluable as a constructor call."""
- return f"ReceiveRateMeasurement(duration={self.duration!r}," \
- f"target_tr={self.target_tr!r}," \
- f"transmit_count={self.transmit_count!r}," \
- f"loss_count={self.loss_count!r})"
diff --git a/resources/libraries/python/MLRsearch/__init__.py b/resources/libraries/python/MLRsearch/__init__.py
index 70c713eaa0..09ce7e6719 100644
--- a/resources/libraries/python/MLRsearch/__init__.py
+++ b/resources/libraries/python/MLRsearch/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -14,3 +14,17 @@
"""
__init__ file for Python package "MLRsearch".
"""
+
+# TODO: Move submodules to separate modules.
+# Not obvious how to do that from PyPI point of view
+# without affecting the current CSIT global "resources" package root.
+# Probably it can be done by specifying multiple directories
+# in PYTHONPATH used throughout CSIT.
+
+# Import user-facing (API) stuff, so users do not need to know submodules.
+from .config import Config
+from .goal_result import GoalResult
+from .multiple_loss_ratio_search import MultipleLossRatioSearch
+from .pep3140 import Pep3140Dict
+from .search_goal import SearchGoal
+from .trial_measurement import AbstractMeasurer, MeasurementResult
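
A hedged sketch of how the re-exported names are meant to fit together. The Config fields are confirmed by config.py later in this diff; the SearchGoal loss_ratio argument, the MultipleLossRatioSearch constructor, and the search() call are assumptions not shown in this section.

    from resources.libraries.python.MLRsearch import (
        Config, MultipleLossRatioSearch, SearchGoal
    )

    config = Config()
    config.goals = [SearchGoal(), SearchGoal(loss_ratio=0.005)]  # loss_ratio assumed
    config.min_load = 10_000.0          # [tps]
    config.max_load = 14_880_952.0      # [tps]
    controller = MultipleLossRatioSearch(config=config)          # signature assumed
    # results = controller.search(measurer=my_measurer, debug=print)  # assumed
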
diff --git a/resources/libraries/python/MLRsearch/candidate.py b/resources/libraries/python/MLRsearch/candidate.py
new file mode 100644
index 0000000000..16bbe60bae
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/candidate.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Candidate class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from functools import total_ordering
+from typing import Optional
+
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .selector import Selector
+
+
+@total_ordering
+@dataclass(frozen=True)
+class Candidate:
+ """Class describing next trial inputs, as nominated by a selector.
+
+ As each selector is notified by the controller when its nominated load
+ becomes the winner, a reference to the selector is also included here.
+
+ The rest of the code focuses on defining the ordering between candidates.
+ When two instances are compared, the lesser has higher priority
+ for choosing which trial is actually performed next.
+
+ As Python implicitly converts values to bool in many places
+ (e.g. in "if" statement), any instance is called "truthy" if it converts
+ to True, and "falsy" if it converts to False.
+ To make such places nice and readable, __bool__ method is implemented
+ in a way that a candidate instance is falsy if its load is None.
+ As a falsy candidate never gets measured,
+ other fields of a falsy instance are irrelevant.
+ """
+
+ load: Optional[DiscreteLoad] = None
+ """Measure at this intended load. None if no load nominated by selector."""
+ duration: float = None
+ """Trial duration as chosen by the selector."""
+ width: Optional[DiscreteWidth] = None
+ """Set the global width to this when this candidate becomes the winner."""
+ selector: Selector = None
+ """Reference to the selector instance which nominated this candidate."""
+
+ def __str__(self) -> str:
+ """Convert trial inputs into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"d={self.duration},l={self.load}"
+
+ def __eq__(self, other: Candidate) -> bool:
+ """Return wheter self is identical to the other candidate.
+
+ This is just a pretense for total ordering wrapper to work.
+ In reality, MLRsearch shall never test equivalence,
+ so we save space by just raising RuntimeError if this is ever called.
+
+ :param other: The other instance to compare to.
+ :type other: Candidate
+ :returns: True if the instances are equivalent.
+ :rtype: bool
+ :raises RuntimeError: Always, to prevent unintended usage.
+ """
+ raise RuntimeError("Candidate equality comparison shall not be needed.")
+
+ def __lt__(self, other: Candidate) -> bool:
+ """Return whether self should be measured before other.
+
+ In the decreasing order of importance:
+ Non-None load is preferred.
+ Self is less than other when both loads are None.
+ Lower offered load is preferred.
+ Longer trial duration is preferred.
+ Non-None width is preferred.
+ Larger width is preferred.
+ Self is preferred.
+
+ The logic comes from the desire to save time and being conservative.
+
+ :param other: The other instance to compare to.
+ :type other: Candidate
+ :returns: True if self should be measured sooner.
+ :rtype: bool
+ """
+ if not self.load:
+ if other.load:
+ return False
+ return True
+ if not other.load:
+ return True
+ if self.load < other.load:
+ return True
+ if self.load > other.load:
+ return False
+ if self.duration > other.duration:
+ return True
+ if self.duration < other.duration:
+ return False
+ if not self.width:
+ if other.width:
+ return False
+ return True
+ if not other.width:
+ return True
+ return self.width >= other.width
+
+ def __bool__(self) -> bool:
+ """Does this candidate choose to perform any trial measurement?
+
+ :returns: True if yes, it does choose to perform.
+ :rtype: bool
+ """
+ return bool(self.load)
+
+ @staticmethod
+ def nomination_from(selector: Selector) -> Candidate:
+ """Call nominate on selector, wrap into Candidate instance to return.
+
+ To avoid a dependency cycle, only candidate depends on selector,
+ therefore the selector cannot know how to wrap its nomination
+ into a full candidate instance.
+ This factory method finishes the wrapping.
+
+ :param selector: Selector to call.
+ :type selector: Selector
+ :returns: Newly created Candidate instance with nominated trial inputs.
+ :rtype: Candidate
+ """
+ load, duration, width = selector.nominate()
+ return Candidate(
+ load=load,
+ duration=duration,
+ width=width,
+ selector=selector,
+ )
+
+ def won(self) -> None:
+ """Inform selector its candidate became a winner."""
+ self.selector.won(self.load)
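
An illustration of how the ordering above might be consumed by controller-side code; the selectors collection and the run_trial call are placeholders, only nomination_from, the truthiness convention and won() come from this file.

    candidate = min(Candidate.nomination_from(sel) for sel in selectors)
    if candidate:        # a falsy candidate means no selector wants another trial
        run_trial(candidate.duration, candidate.load)   # placeholder trial call
        candidate.won()  # tell the winning selector its load is being measured
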
diff --git a/resources/libraries/python/MLRsearch/config.py b/resources/libraries/python/MLRsearch/config.py
new file mode 100644
index 0000000000..7aa8ed75a8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/config.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Config class."""
+
+from collections.abc import Iterable
+from dataclasses import dataclass
+from typing import Optional
+
+from .dataclass import DataclassProperty
+from .search_goal import SearchGoal
+from .search_goal_tuple import SearchGoalTuple
+
+
+@dataclass
+class Config:
+ """Structure containing several static config items.
+
+ The main MLRsearch algorithm uses multiple customizable values.
+ Pylint complains if the values appear as long argument lists
+ or multiple local variables.
+
+ This class offers a storage for values which do not contain
+ internally mutable state and are set at an unknown time
+ before the search starts. This way users can override only some values,
+ and do it over multiple calls.
+ All "official" user inputs are contained here.
+
+ Properties are defined to enforce the requirements on allowed values.
+ All fields have default values, so instances can be created without any.
+ It is still recommended to set all values after instantiation,
+ as the defaults may change in the next version.
+
+ As some relations between values of different fields are required,
+ users must take care to set them in the correct order.
+
+ For example, min_load has to be set to a value smaller
+ than the current value of max_load.
+ """
+
+ # Externally visible "fields" (but in fact redefined as properties).
+ goals: SearchGoalTuple = SearchGoalTuple((SearchGoal(),))
+ """Container holding search goals."""
+ min_load: float = 9001.0
+ """Each trial measurement must have intended load at least this [tps]."""
+ max_load: float = 1e9
+ """Each trial measurement must have intended load at most this [tps]."""
+ search_duration_max: float = 1200.0
+ """The search will end as a failure this long [s] after it is started."""
+ warmup_duration: float = 1.0
+ """If specified, one trial at max load and this duration is performed
+ before the usual search starts. None converts to zero and means no warmup.
+ The results of that one trial are ignored."""
+
+ @DataclassProperty
+ def goals(self) -> SearchGoalTuple:
+ """Return the reference to the current container of goals.
+
+ :returns: The current container instance.
+ :rtype: SearchGoalTuple
+ """
+ return self._goals
+
+ @goals.setter
+ def goals(self, goals: Iterable[SearchGoal]) -> None:
+ """Create and store the goal container.
+
+ :param goals: Search goals to add to the container to store.
+ :type goals: Iterable[SearchGoal]
+ :raises ValueError: If there are no goals.
+ :raises TypeError: If any of the goals is not a SearchGoal.
+ """
+ self._goals = SearchGoalTuple(goals)
+
+ @DataclassProperty
+ def min_load(self) -> float:
+ """Getter for min load, no logic here.
+
+ :returns: Currently set minimal intended load [tps].
+ :rtype: float
+ """
+ return self._min_load
+
+ @min_load.setter
+ def min_load(self, load: float) -> None:
+ """Set min load after converting type and checking value.
+
+ :param load: Minimal intended load [tps] to set.
+ :type load: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ load = float(load)
+ if load <= 0.0:
+ raise ValueError(f"Min load {load} must be positive.")
+ # At the time init is first called, _max_load is not set yet.
+ if hasattr(self, "_max_load") and load >= self.max_load:
+ raise ValueError(f"Min load {load} must be smaller.")
+ self._min_load = load
+
+ @DataclassProperty
+ def max_load(self) -> float:
+ """Getter for max load, no logic here.
+
+ :returns: Currently set maximal intended load [tps].
+ :rtype: float
+ """
+ return self._max_load
+
+ @max_load.setter
+ def max_load(self, load: float) -> None:
+ """Set max load after converting type and checking value.
+
+ :param load: Maximal intended load [tps] to set.
+ :type load: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ load = float(load)
+ if load <= self.min_load:
+ raise ValueError(f"Max load {load} must be bigger.")
+ self._max_load = load
+
+ @DataclassProperty
+ def search_duration_max(self) -> float:
+ """Getter for max search duration, no logic here.
+
+ :returns: Currently set max search duration [s].
+ :rtype: float
+ """
+ return self._search_duration_max
+
+ @search_duration_max.setter
+ def search_duration_max(self, duration: float) -> None:
+ """Set max search duration after converting and checking value.
+
+ :param duration: Search duration maximum [s] to set.
+ :type duration: float
+ :raises ValueError: If the argument is found invalid.
+ """
+ duration = float(duration)
+ if duration <= 0.0:
+ raise ValueError(f"Search duration max too small: {duration}")
+ self._search_duration_max = duration
+
+ @DataclassProperty
+ def warmup_duration(self) -> float:
+ """Getter for warmup duration, no logic here.
+
+ :returns: Currently set warmup duration [s].
+ :rtype: float
+ """
+ return self._warmup_duration
+
+ @warmup_duration.setter
+ def warmup_duration(self, duration: Optional[float]) -> None:
+ """Set warmup duration after converting and checking value.
+
+ Zero duration is treated as None, meaning no warmup trial.
+
+ :param duration: Warmup duration [s] to set.
+ :type duration: Optional(float)
+ :raises ValueError: If the argument is found invalid.
+ """
+ if duration:
+ duration = float(duration)
+ if duration < 0.0:
+ raise ValueError(f"Warmup duration too small: {duration}")
+ else:
+ duration = 0.0
+ self._warmup_duration = duration
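
The ordering requirement mentioned in the class docstring follows from the setters above (values illustrative):

    config = Config()
    config.max_load = 4e9    # raise the ceiling first...
    config.min_load = 2e9    # ...so this stays below the current max_load
    # Swapping the two assignments would raise ValueError in the min_load setter,
    # because 2e9 is not smaller than the default max_load of 1e9.
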
diff --git a/resources/libraries/python/MLRsearch/dataclass/__init__.py b/resources/libraries/python/MLRsearch/dataclass/__init__.py
new file mode 100644
index 0000000000..e546b090c9
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "dataclass_property".
+"""
+
+from .dc_property import DataclassProperty
+from .field import secondary_field
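
The dc_property module added below documents how DataclassProperty reconciles dataclass defaults with validating setters; the Config class earlier in this diff is a real user. Here is a minimal sketch of the same pattern with a hypothetical Example class (name and validation rule are illustrative):

    from dataclasses import dataclass

    from resources.libraries.python.MLRsearch.dataclass import DataclassProperty


    @dataclass
    class Example:
        ratio: float = 0.5          # default picked up by the decorator

        @DataclassProperty
        def ratio(self) -> float:
            return self._ratio

        @ratio.setter
        def ratio(self, value: float) -> None:
            value = float(value)
            if not 0.0 <= value <= 1.0:
                raise ValueError(f"Not a fraction: {value}")
            self._ratio = value


    example = Example()             # setter runs with the 0.5 default
    example.ratio = 0.25            # explicit assignments are validated too
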
diff --git a/resources/libraries/python/MLRsearch/dataclass/dc_property.py b/resources/libraries/python/MLRsearch/dataclass/dc_property.py
new file mode 100644
index 0000000000..7f3b49aeb8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/dc_property.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DataclassProperty class.
+
+The main issue that needs support is dataclasses with properties
+(including setters) and with (immutable) default values.
+
+First, this explains how property ends up passed as default constructor value:
+https://florimond.dev/en/posts/2018/10/
+/reconciling-dataclasses-and-properties-in-python/
+TL;DR: By the time __init__ is generated, original class variable (type hint)
+is replaced by property (method definition).
+
+Second, there are ways to deal with that:
+https://stackoverflow.com/a/61480946
+TL;DR: It relies on the underscored field being replaced by the value.
+
+But that does not work for field which use default_factory (or no default)
+(the underscored class field is deleted instead).
+So another way is needed to cover those cases,
+ideally without the need to define both original and underscored field.
+
+This implementation relies on a fact that decorators are executed
+when the class fields do yet exist, and decorated function
+does know its name, so the decorator can get the value stored in
+the class field, and store it as an additional attribute of the getter function.
+Then for setter, the property contains the getter (as an unbound function),
+so it can access the additional attribute to get the value.
+
+This approach circumvents the precautions dataclasses take to prevent mishaps
+when a single mutable object is shared between multiple instances.
+So it is up to setters to create an appropriate copy of the default object
+if the default value is mutable.
+
+The default value cannot be MISSING nor Field nor DataclassProperty,
+otherwise the intended logic breaks.
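+
+A minimal usage sketch (illustrative only, the names are hypothetical):
+
+    @dataclass
+    class Config:
+        warmup: float = 1.0
+
+        @DataclassProperty
+        def warmup(self) -> float:
+            return self._warmup
+
+        @warmup.setter
+        def warmup(self, value: float) -> None:
+            self._warmup = float(value)
+
+Config() then ends up with _warmup == 1.0, because the wrapped setter
+detects the DataclassProperty instance passed by the generated __init__
+and substitutes the stored default value before calling the user setter.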
+"""
+
+from __future__ import annotations
+
+from dataclasses import Field, MISSING
+from functools import wraps
+from inspect import stack
+from typing import Callable, Optional, TypeVar, Union
+
+
+Self = TypeVar("Self")
+"""Type for the dataclass instances being created using properties."""
+Value = TypeVar("Value")
+"""Type for the value the property (getter, setter) handles."""
+
+
+def _calling_scope_variable(name: str) -> Value:
+ """Get a variable from a higher scope.
+
+ This feels dirty, but without this the syntactic sugar
+ would not be sweet enough.
+
+ The implementation is copied from https://stackoverflow.com/a/14694234
+ with the difference of raising RuntimeError (instead of returning None)
+ if no variable of that name is found in any of the scopes.
+
+ :param name: Name of the variable to access.
+ :type name: str
+ :returns: The value of the found variable.
+ :rtype: Value
+ :raises RuntimeError: If the variable is not found in any calling scope.
+ """
+ frame = stack()[1][0]
+ while name not in frame.f_locals:
+ frame = frame.f_back
+ if frame is None:
+ raise RuntimeError(f"Field {name} value not found.")
+ return frame.f_locals[name]
+
+
+class DataclassProperty(property):
+ """Subclass of property, handles default values for dataclass fields.
+
+ If a dataclass field does not specify a default value (nor default_factory),
+ this is not needed, and in fact it will not work (so use built-in property).
+
+ This implementation seamlessly finds and inserts the default value
+ (can be mutable) into a new attribute of the getter function.
+ Before calling a setter function in init (recognized by type),
+ the default value is retrieved and passed transparently to the setter.
+ It is the responsibility of the setter to appropriately clone the value,
+ in order to prevent multiple instances sharing the same mutable value.
+ """
+
+ def __init__(
+ self,
+ fget: Optional[Callable[[], Value]] = None,
+ fset: Optional[Callable[[Self, Value], None]] = None,
+ fdel: Optional[Callable[[], None]] = None,
+ doc: Optional[str] = None,
+ ):
+ """Find and store the default value, construct the property.
+
+ See this for how the superclass property works:
+ https://docs.python.org/3/howto/descriptor.html#properties
+
+ :param fget: Getter (unbound) function to use, if any.
+ :param fset: Setter (unbound) function to use, if any.
+ :param fdel: Deleter (unbound) function to use, if any.
+ :param doc: Docstring to display when examining the property.
+ :type fget: Optional[Callable[[Self], Value]]
+ :type fset: Optional[Callable[[Self, Value], None]]
+ :type fdel: Optional[Callable[[Self], None]]
+ :type doc: Optional[str]
+ """
+ variable_found = _calling_scope_variable(fget.__name__)
+ if not isinstance(variable_found, DataclassProperty):
+ if isinstance(variable_found, Field):
+ if variable_found.default is not MISSING:
+ fget.default_value = variable_found.default
+ # Else do not store any default value.
+ else:
+ fget.default_value = variable_found
+ # Else this is the second time init is called (when setting setter),
+ # in which case the default is already stored into fget.
+ super().__init__(fget=fget, fset=fset, fdel=fdel, doc=doc)
+
+ def setter(
+ self,
+ fset: Optional[Callable[[Self, Value], None]],
+ ) -> DataclassProperty:
+ """Return new instance with a wrapped setter function set.
+
+ If the argument is None, call superclass method.
+
+ The wrapped function recognizes when it is called in init
+ (by the fact the value argument is of type DataclassProperty)
+ and in that case it extracts the stored default and passes that
+ to the user-defined setter function.
+
+ :param fset: Setter function to wrap and apply.
+ :type fset: Optional[Callable[[Self, Value], None]]
+ :returns: New property instance with correct setter function set.
+ :rtype: DataclassProperty
+ """
+ if fset is None:
+ return super().setter(fset)
+
+ @wraps(fset)
+ def wrapped(sel_: Self, val: Union[Value, DataclassProperty]) -> None:
+ """Extract default from getter if needed, call the user setter.
+
+ The sel_ parameter is listed explicitly, to signify
+ this is an unbound function, not a bound method yet.
+
+ :param sel_: Instance of dataclass (not of DataclassProperty)
+ to set the value on.
+ :param val: Set this value, or the default value stored there.
+ :type sel_: Self
+ :type val: Union[Value, DataclassProperty]
+ """
+ if isinstance(val, DataclassProperty):
+ val = val.fget.default_value
+ fset(sel_, val)
+
+ return super().setter(wrapped)
diff --git a/resources/libraries/python/MLRsearch/dataclass/field.py b/resources/libraries/python/MLRsearch/dataclass/field.py
new file mode 100644
index 0000000000..55d9d0879f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/dataclass/field.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining secondary_field function.
+
+Just a shorthand for a frequently repeated expression.
+
+The main point is that this dataclass field is not used in init.
+Maybe it is a derived value of a frozen dataclass.
+Maybe it is a cache to help avoiding repeated computation.
+Maybe it is a temporary value stored in one method and read in another method.
+In any case, the caller does not need to know it is here,
+so it is excluded from repr, hashing, ordering and similar.
+"""
+
+from dataclasses import Field, field
+
+
+def secondary_field() -> Field:
+ """Return newly created Field with non-default arguments
+
+ In practice, it seems to be fine to reuse the resulting Field instance
+ when defining multiple dataclass fields,
+ but we keep this as a function to improve readability.
+
+ :returns: A new Field instance useful for secondary fields.
+ :rtype: Field
+ """
+ return field(
+ default=None,
+ init=False,
+ repr=False,
+ compare=False,
+ )
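+
+
+# A minimal usage sketch (illustrative only, names are hypothetical):
+# a derived field excluded from __init__ and filled in __post_init__.
+#
+#     @dataclass
+#     class Interval:
+#         lower: float
+#         upper: float
+#         width: float = secondary_field()
+#
+#         def __post_init__(self) -> None:
+#             self.width = self.upper - self.lower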
diff --git a/resources/libraries/python/MLRsearch/discrete_interval.py b/resources/libraries/python/MLRsearch/discrete_interval.py
new file mode 100644
index 0000000000..0a3bf443a8
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_interval.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteInterval class."""
+
+from dataclasses import dataclass
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+
+
+# TODO: Can this be frozen?
+@dataclass
+class DiscreteInterval:
+ """Interval class with more computations available.
+
+ Along with the discrete form of width,
+ an MLR-specific way of halving the interval is also included.
+
+ The two primary field values do not have to be valid relevant bounds,
+ but at the end of the search, they usually are.
+
+ The load values must be round.
+ """
+
+ lower_bound: DiscreteLoad
+ """Value for the lower intended load (or load stats or similar)."""
+ upper_bound: DiscreteLoad
+ """Value for the higher intended load (or load stats or similar)."""
+ # Primary fields above, derived below.
+ discrete_width: DiscreteWidth = secondary_field()
+ """Discrete width between intended loads (upper_bound minus lower_bound)."""
+
+ def __post_init__(self) -> None:
+ """Sort bounds by intended load, compute secondary quantities.
+
+ :raises RuntimeError: If a result used non-rounded load.
+ """
+ if not self.lower_bound.is_round:
+ raise RuntimeError(f"Non-round lower bound: {self.lower_bound!r}")
+ if not self.upper_bound.is_round:
+ raise RuntimeError(f"Non-round upper bound: {self.upper_bound!r}")
+ if self.lower_bound > self.upper_bound:
+ tmp = self.lower_bound
+ self.lower_bound = self.upper_bound
+ self.upper_bound = tmp
+ self.discrete_width = self.upper_bound - self.lower_bound
+
+ def __str__(self) -> str:
+ """Convert to a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"lower_bound=({self.lower_bound}),upper_bound=({self.upper_bound})"
+ )
+
+ # TODO: Use "target" instead of "goal" in argument and variable names.
+
+ def width_in_goals(self, goal: DiscreteWidth) -> float:
+ """Return relative width as a multiple of the given goal (int form).
+
+ Integer forms are used for computation, safe as loads are rounded.
+ The result is a float, as self int may not be divisible by goal int.
+
+ :param goal: A relative width amount to be used as a unit.
+ :type goal: DiscreteWidth
+ :returns: Self width in multiples of (integer form of) goal width.
+ :rtype: float
+ """
+ return int(self.discrete_width) / int(goal)
+
+ def middle(self, goal: DiscreteWidth) -> DiscreteLoad:
+ """Return new intended load (discrete form) in the middle.
+
+ All calculations are based on int forms.
+
+ One of the halves is rounded to a power-of-two multiple of the goal.
+ The power that leads to most even split is used.
+ Lower width is the smaller one (if not exactly even).
+
+ This approach prefers lower loads (to remain conservative) and can save
+ some measurements (when all middle measurements have high loss).
+ Note that when competing with external search from above,
+ that search is already likely to produce widths that are
+ power-of-two multiples of the target width.
+
+ If the interval width is one goal (or less), RuntimeError is raised.
+ If the interval width is between one and two goals (not including),
+ a more even split is attempted (using half the goal value).
+
+ :param goal: Target width goal to use for uneven halving.
+ :type goal: DiscreteWidth
+ :returns: New load to use for bisecting.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ int_self, int_goal = int(self.discrete_width), int(goal)
+ if int_self <= int_goal:
+ raise RuntimeError(f"Do not halve small enough interval: {self!r}")
+ if int_self == 2 * int_goal:
+ # Even split, return here simplifies the while loop below.
+ return self.lower_bound + goal
+ if int_self < 2 * int_goal:
+ # This can only happen when int_goal >= 2.
+ # In this case, we do not have a good enough split at this width goal,
+ # but maybe this is not the final target, so we can attempt
+ # a split at half width goal.
+ if not int_goal % 2:
+ return self.middle(goal=goal.half_rounded_down())
+ # Odd int_goal, so this must be the last phase. Do an even split.
+ lo_width = self.discrete_width.half_rounded_down()
+ return self.lower_bound + lo_width
+ hi_width = goal
+ lo_width = self.discrete_width - hi_width
+ # We know lo_width > hi_width because we did not do the even split.
+ while 1:
+ hi2_width = hi_width * 2
+ lo2_width = self.discrete_width - hi2_width
+ if lo2_width <= hi2_width:
+ break
+ hi_width, lo_width = hi2_width, lo2_width
+ # Which of the two options is more even? Product decides.
+ if int(hi_width) * int(lo_width) > int(hi2_width) * int(lo2_width):
+ # Previous attempt was more even, but hi_width was the smaller one.
+ lo2_width = hi_width
+ # Else lo2_width is more even and no larger than hi2_width.
+ return self.lower_bound + lo2_width
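+
+
+# Worked example for middle() in integer forms (illustrative numbers):
+# with interval int width 20 and goal int width 3, the loop doubles
+# hi_width 3 -> 6 -> 12 and stops once the remainder (8) is no longer
+# the larger half; the product check finds the 8/12 split more even
+# than 14/6, so middle() returns lower_bound plus 8, keeping the upper
+# half a power-of-two multiple (4 x 3) of the goal width.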
diff --git a/resources/libraries/python/MLRsearch/discrete_load.py b/resources/libraries/python/MLRsearch/discrete_load.py
new file mode 100644
index 0000000000..a75b4acf96
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_load.py
@@ -0,0 +1,316 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteLoad class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from functools import total_ordering
+from typing import Callable, Optional, Union
+
+from .load_rounding import LoadRounding
+from .discrete_width import DiscreteWidth
+
+
+@total_ordering
+@dataclass
+class DiscreteLoad:
+ """Structure to store load value together with its rounded integer form.
+
+ LoadRounding instance is needed to enable conversion between two forms.
+ Conversion methods and factories are added for convenience.
+
+ In general, the float form is allowed to differ from conversion from int.
+
+ Comparisons are supported, acting on the float load component.
+ Additive operations are supported, acting on int form.
+ Multiplication by a float constant is supported, acting on float form.
+
+ As with all user-defined classes by default, all instances are truthy.
+ That is useful when dealing with Optional values, as None is falsy.
+
+ This dataclass is effectively frozen, but cannot be marked as such
+ as that would prevent LoadStats from being its subclass.
+ """
+
+ # For most debugs, rounding in repr just takes space.
+ rounding: LoadRounding = field(repr=False, compare=False)
+ """Rounding instance to use for conversion."""
+ float_load: float = None
+ """Float form of intended load [tps], usable for measurer."""
+ int_load: int = field(compare=False, default=None)
+ """Integer form, usable for exact computations."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, compute missing information.
+
+ At this point, it is allowed for float load to differ from
+ conversion from int load. MLRsearch should round explicitly later,
+ based on its additional information.
+
+ :raises RuntimeError: If both init arguments are None.
+ """
+ if self.float_load is None and self.int_load is None:
+ raise RuntimeError("Float or int value is needed.")
+ if self.float_load is None:
+ self.int_load = int(self.int_load)
+ self.float_load = self.rounding.int2float(self.int_load)
+ else:
+ self.float_load = float(self.float_load)
+ self.int_load = self.rounding.float2int(self.float_load)
+
+ def __str__(self) -> str:
+ """Convert to a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"int_load={int(self)}"
+
+ # Explicit comparison operators.
+ # Those generated with dataclass order=True do not allow subclass instances.
+
+ def __eq__(self, other: Optional[DiscreteLoad]) -> bool:
+ """Return whether the other instance has the same float form.
+
+ None is effectively considered to be an unequal instance.
+
+ :param other: Other instance to compare to, or None.
+ :type other: Optional[DiscreteLoad]
+ :returns: True only if float forms are exactly equal.
+ :rtype: bool
+ """
+ if other is None:
+ return False
+ return float(self) == float(other)
+
+ def __lt__(self, other: DiscreteLoad) -> bool:
+ """Return whether self has smaller float form than the other instance.
+
+ None is not supported, as MLRsearch does not need that
+ (so when None appears we want to raise).
+
+ :param other: Other instance to compare to.
+ :type other: DiscreteLoad
+ :returns: True only if the float form of self is strictly smaller.
+ :rtype: bool
+ """
+ return float(self) < float(other)
+
+ def __hash__(self) -> int:
+ """Return a hash based on the float value.
+
+ With this, the instance can be used as if it was immutable and hashable,
+ e.g. it can be a key in a dict.
+
+ :returns: Hash value for this instance.
+ :rtype: int
+ """
+ return hash(float(self))
+
+ @property
+ def is_round(self) -> bool:
+ """Return whether float load matches converted int load.
+
+ :returns: False if float load is not rounded.
+ :rtype: bool
+ """
+ expected = self.rounding.int2float(self.int_load)
+ return expected == self.float_load
+
+ def __int__(self) -> int:
+ """Return the int value.
+
+ :returns: The int field value.
+ :rtype: int
+ """
+ return self.int_load
+
+ def __float__(self) -> float:
+ """Return the float value.
+
+ :returns: The float field value [tps].
+ :rtype: float
+ """
+ return self.float_load
+
+ @staticmethod
+ def int_conver(rounding: LoadRounding) -> Callable[[int], DiscreteLoad]:
+ """Return a factory that turns an int load into a discrete load.
+
+ :param rounding: Rounding instance needed.
+ :type rounding: LoadRounding
+ :returns: Factory to use when converting from int.
+ :rtype: Callable[[int], DiscreteLoad]
+ """
+
+ def factory_int(int_load: int) -> DiscreteLoad:
+ """Use rounding and int load to create discrete load.
+
+ :param int_load: Intended load in integer form.
+ :type int_load: int
+ :returns: New discrete load instance matching the int load.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=rounding, int_load=int_load)
+
+ return factory_int
+
+ @staticmethod
+ def float_conver(rounding: LoadRounding) -> Callable[[float], DiscreteLoad]:
+ """Return a factory that turns a float load into a discrete load.
+
+ :param rounding: Rounding instance needed.
+ :type rounding: LoadRounding
+ :returns: Factory to use when converting from float.
+ :rtype: Callable[[float], DiscreteLoad]
+ """
+
+ def factory_float(float_load: float) -> DiscreteLoad:
+ """Use rounding instance and float load to create discrete load.
+
+ The float form is not rounded yet.
+
+ :param float_load: Intended load in float form [tps].
+ :type float_load: float
+ :returns: New discrete load instance matching the float load.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=rounding, float_load=float_load)
+
+ return factory_float
+
+ def rounded_down(self) -> DiscreteLoad:
+ """Create and return new instance with float form matching int.
+
+ :returns: New instance with same int form and float form rounded down.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=self.rounding, int_load=int(self))
+
+ def hashable(self) -> DiscreteLoad:
+ """Return new equivalent instance.
+
+ This is mainly useful for conversion from unhashable subclasses,
+ such as LoadStats.
+ Rounding instance (reference) is copied from self.
+
+ :returns: New instance with values based on float form of self.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(rounding=self.rounding, float_load=float(self))
+
+ def __add__(self, width: DiscreteWidth) -> DiscreteLoad:
+ """Return newly constructed instance with width added to int load.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding two loads by mistake
+ (or adding int to load and similar).
+
+ :param width: Value to add to int load.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {width!r}")
+ return DiscreteLoad(
+ rounding=self.rounding,
+ int_load=self.int_load + int(width),
+ )
+
+ def __sub__(
+ self, other: Union[DiscreteWidth, DiscreteLoad]
+ ) -> Union[DiscreteLoad, DiscreteWidth]:
+ """Return result based on the argument type.
+
+ Load minus load is width, load minus width is load.
+ This allows the same operator to support both operations.
+
+ Rounding instance (reference) is copied from self.
+
+ :param other: Value to subtract from int load.
+ :type other: Union[DiscreteWidth, DiscreteLoad]
+ :returns: Resulting width or load.
+ :rtype: Union[DiscreteLoad, DiscreteWidth]
+ :raises RuntimeError: If the argument type is not supported.
+ """
+ if isinstance(other, DiscreteWidth):
+ return self._minus_width(other)
+ if isinstance(other, DiscreteLoad):
+ return self._minus_load(other)
+ raise RuntimeError(f"Unsupported type {other!r}")
+
+ def _minus_width(self, width: DiscreteWidth) -> DiscreteLoad:
+ """Return newly constructed instance, width subtracted from int load.
+
+ Rounding instance (reference) is copied from self.
+
+ :param width: Value to subtract from int load.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ """
+ return DiscreteLoad(
+ rounding=self.rounding,
+ int_load=self.int_load - int(width),
+ )
+
+ def _minus_load(self, other: DiscreteLoad) -> DiscreteWidth:
+ """Return newly constructed width instance, difference of int loads.
+
+ Rounding instance (reference) is copied from self.
+
+ :param other: Value to subtract from int load.
+ :type other: DiscreteLoad
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ """
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_load - int(other),
+ )
+
+ def __mul__(self, coefficient: float) -> DiscreteLoad:
+ """Return newly constructed instance, float load multiplied by argument.
+
+ Rounding instance (reference) is copied from self.
+
+ :param coefficient: Value to multiply float load with.
+ :type coefficient: float
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If argument is unsupported.
+ """
+ if not isinstance(coefficient, float):
+ raise RuntimeError(f"Not float: {coefficient!r}")
+ if coefficient <= 0.0:
+ raise RuntimeError(f"Not positive: {coefficient!r}")
+ return DiscreteLoad(
+ rounding=self.rounding,
+ float_load=self.float_load * coefficient,
+ )
+
+ def __truediv__(self, coefficient: float) -> DiscreteLoad:
+ """Call multiplication with inverse argument.
+
+ :param coefficient: Value to divide float load with.
+ :type coefficient: float
+ :returns: New instance.
+ :rtype: DiscreteLoad
+ :raises RuntimeError: If argument is unsupported.
+ """
+ return self * (1.0 / coefficient)
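+
+
+# Arithmetic semantics sketch (illustrative, assumes "rounding" is an
+# existing LoadRounding instance):
+#
+#     from_int = DiscreteLoad.int_conver(rounding)
+#     load = from_int(100)
+#     width = load - from_int(98)       # load minus load is a width
+#     assert int(width) == 2
+#     assert int(load + width) == 102   # load plus width is a load
+#     assert float(load * 2.0) == 2.0 * float(load)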
diff --git a/resources/libraries/python/MLRsearch/discrete_result.py b/resources/libraries/python/MLRsearch/discrete_result.py
new file mode 100644
index 0000000000..882b6081c6
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_result.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteResult class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .discrete_load import DiscreteLoad
+from .trial_measurement import MeasurementResult
+
+
+@dataclass
+class DiscreteResult(MeasurementResult):
+ """A measurement result where intended load is also given as discrete load.
+
+ The discrete load has to be round and has to match the intended load.
+ """
+
+ # Must have default as superclass has fields with default values.
+ discrete_load: DiscreteLoad = None
+ """Intended load [tps]; discrete, round and equal to intended load."""
+
+ def __post_init__(self) -> None:
+ """Call super, verify intended and discrete loads are the same.
+
+ :raises TypeError: If discrete load is not DiscreteLoad.
+ :raises ValueError: If the discrete load is not round.
+ :raises ValueError: If the load does not match intended load.
+ """
+ super().__post_init__()
+ if not isinstance(self.discrete_load, DiscreteLoad):
+ raise TypeError(f"Not a discrete load: {self.discrete_load!r}")
+ if not self.discrete_load.is_round:
+ raise ValueError(f"Discrete load not round: {self.discrete_load!r}")
+ if float(self.discrete_load) != self.intended_load:
+ raise ValueError(f"Load mismatch: {self!r}")
+
+ @staticmethod
+ def with_load(
+ result: MeasurementResult, load: DiscreteLoad
+ ) -> DiscreteResult:
+ """Return result with added load.
+
+ :param result: A result, possibly without discrete load.
+ :param load: Discrete load to add.
+ :type result: MeasurementResult
+ :type load: DiscreteLoad
+ :returns: Equivalent result with matching discrete load.
+ :rtype: DiscreteResult
+ :raises TypeError: If discrete load is not DiscreteLoad.
+ :raises ValueError: If the discrete load is not round.
+ :raises ValueError: If the load does not match intended load.
+ """
+ return DiscreteResult(
+ intended_duration=result.intended_duration,
+ intended_load=result.intended_load,
+ offered_count=result.offered_count,
+ loss_count=result.loss_count,
+ forwarding_count=result.forwarding_count,
+ offered_duration=result.offered_duration,
+ duration_with_overheads=result.duration_with_overheads,
+ intended_count=result.intended_count,
+ discrete_load=load,
+ )
diff --git a/resources/libraries/python/MLRsearch/discrete_width.py b/resources/libraries/python/MLRsearch/discrete_width.py
new file mode 100644
index 0000000000..8a4845a83f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/discrete_width.py
@@ -0,0 +1,197 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining DiscreteWidth class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+
+from .load_rounding import LoadRounding
+
+
+# TODO: Make properly frozen.
+@dataclass(order=True)
+class DiscreteWidth:
+ """Structure to store float width together with its rounded integer form.
+
+ The width does not have to be positive, i.e. the computed integer width
+ does not have to be larger than zero.
+
+ LoadRounding instance is needed to enable conversion between two forms.
+
+ Conversion and arithmetic methods are added for convenience.
+ Division and non-integer multiplication are intentionally not supported,
+ as MLRsearch should not seek unround widths when round ones are available.
+
+ The instance is effectively immutable and (via the explicit __hash__ below)
+ hashable, even though it refers to the rounding instance, which is
+ implemented as mutable (although the mutations are not visible).
+ """
+
+ # For most debugs, rounding in repr just takes space.
+ rounding: LoadRounding = field(repr=False, compare=False)
+ """Rounding instance to use for conversion."""
+ float_width: float = None
+ """Relative width of float intended load.
+ This is treated as a constructor argument, and does not need to match
+ the int width. Int width is computed to be no wider than this."""
+ int_width: int = field(compare=False, default=None)
+ """Integer form, difference of integer loads.
+ This is the primary quantity used by most computations."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, compute missing information.
+
+ At this point, it is allowed for float width to be slightly larger
+ than the implied int width.
+
+ If both forms are specified, the float form is taken as primary
+ (thus the integer form is recomputed to match).
+
+ :raises RuntimeError: If both init arguments are None.
+ """
+ if self.float_width is None and self.int_width is None:
+ raise RuntimeError("Float or int value is needed.")
+ if self.float_width is None:
+ self.int_width = int(self.int_width)
+ min_load = self.rounding.int2float(0)
+ increased_load = self.rounding.int2float(self.int_width)
+ self.float_width = (increased_load - min_load) / increased_load
+ return
+ self.float_width = float(self.float_width)
+ min_load = self.rounding.int2float(0)
+ increased_load = min_load / (1.0 - self.float_width)
+ int_load = self.rounding.float2int(increased_load)
+ verify_load = self.rounding.int2float(int_load)
+ if verify_load > increased_load:
+ int_load -= 1
+ self.int_width = int_load
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return f"int_width={int(self)}"
+
+ def __int__(self) -> int:
+ """Return the integer form.
+
+ :returns: The int field value.
+ :rtype: int
+ """
+ return self.int_width
+
+ def __float__(self) -> float:
+ """Return the float form.
+
+ :returns: The float field value.
+ :rtype: float
+ """
+ return self.float_width
+
+ def __hash__(self) -> int:
+ """Return a hash based on the float value.
+
+ With this, the instance can be used as if it was immutable and hashable,
+ e.g. it can be a key in a dict.
+
+ :returns: Hash value for this instance.
+ :rtype: int
+ """
+ return hash(float(self))
+
+ def rounded_down(self) -> DiscreteWidth:
+ """Create and return new instance with float form matching int.
+
+ :returns: New instance with same int form and float form rounded down.
+ :rtype: DiscreteWidth
+ """
+ return DiscreteWidth(rounding=self.rounding, int_width=int(self))
+
+ def __add__(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return newly constructed instance with int widths added.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding something unsupported.
+
+ :param width: Value to add to int width.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {width!r}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width + int(width),
+ )
+
+ def __sub__(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return newly constructed instance with int widths subtracted.
+
+ Rounding instance (reference) is copied from self.
+
+ Argument type is checked, to avoid caller adding something unsupported.
+ Non-positive results are disallowed by constructor.
+
+ :param width: Value to subtract from int width.
+ :type width: DiscreteWidth
+ :returns: New instance.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: When argument has unexpected type.
+ """
+ if not isinstance(width, DiscreteWidth):
+ raise RuntimeError(f"Not width: {type(width)}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width - int(width),
+ )
+
+ def __mul__(self, coefficient: int) -> DiscreteWidth:
+ """Construct new instance with int value multiplied.
+
+ Rounding instance (reference) is copied from self.
+
+ :param coefficient: Constant to multiply int width with.
+ :type coefficient: int
+ :returns: New instance with multiplied int width.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: If argument value does not meet requirements.
+ """
+ if not isinstance(coefficient, int):
+ raise RuntimeError(f"Coefficient not int: {coefficient!r}")
+ if coefficient < 1:
+ raise RuntimeError(f"Coefficient not positive: {coefficient!r}")
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width * coefficient,
+ )
+
+ def half_rounded_down(self) -> DiscreteWidth:
+ """Contruct new instance of half the integer width.
+
+ If the current integer width is odd, round the half width down.
+
+ :returns: New instance with half int width.
+ :rtype: DiscreteWidth
+ :raises RuntimeError: If the resulting integer width is not positive.
+ """
+ return DiscreteWidth(
+ rounding=self.rounding,
+ int_width=self.int_width // 2,
+ )
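+
+
+# Conversion sketch (illustrative, assumes "rounding" is an existing
+# LoadRounding instance): a relative float width converts to the largest
+# integer width that is no wider than it.
+#
+#     width = DiscreteWidth(rounding=rounding, float_width=0.01)
+#     assert float(width.rounded_down()) <= 0.01
+#     assert int(width * 2) == 2 * int(width)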
diff --git a/resources/libraries/python/MLRsearch/expander.py b/resources/libraries/python/MLRsearch/expander.py
new file mode 100644
index 0000000000..0e6800477e
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/expander.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetExpander class."""
+
+
+from dataclasses import dataclass, field
+from typing import Callable, Optional
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .global_width import GlobalWidth
+from .limit_handler import LimitHandler
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TargetedExpander:
+ """Utility class to track expanding width during external search.
+
+ One instance per selector but takes into consideration global current width.
+
+ Generally, many strategies may limit next_width immediately,
+ but next_width expands only after measurement
+ when external search fails to find its bound (global width is also bumped).
+ See strategy classes for specific details on external and internal search.
+ """
+
+ target: TargetSpec
+ """The target this strategy is focusing on."""
+ global_width: GlobalWidth
+ """Reference to the global width tracking instance."""
+ initial_lower_load: Optional[DiscreteLoad]
+ """Smaller of the two loads distinguished at instance creation.
+ Can be None if initial upper bound is the min load."""
+ initial_upper_load: Optional[DiscreteLoad]
+ """Larger of the two loads distinguished at instance creation.
+ Can be None if initial lower bound is the max load."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the class used to avoid too narrow intervals."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+ # Primary above, derived below.
+ next_width: DiscreteWidth = secondary_field()
+ """This will be used in next search step if no strategy intervenes."""
+
+ def __post_init__(self) -> None:
+ """Prepare next width."""
+ self.next_width = self.target.discrete_width
+ if self.initial_lower_load and self.initial_upper_load:
+ interval_width = self.initial_upper_load - self.initial_lower_load
+ self.next_width = max(self.next_width, interval_width)
+ self.expand(bump_global=False)
+
+ def expand(self, bump_global: bool = True) -> None:
+ """Multiply next width by expansion coefficient.
+
+ The global current width should be bumped when external search
+ is done but load is not the bound we were looking for.
+
+ For global width shrinking, set the field directly.
+
+ :param bump_global: False if called from limit or post init.
+ :type bump_global: bool
+ """
+ self.next_width *= self.target.expansion_coefficient
+ if bump_global:
+ self.global_width.width = self.next_width
+
+ def get_width(self) -> DiscreteWidth:
+ """Return next width corrected by global current width.
+
+ :returns: The width to use, see GlobalWidth.
+ :rtype: DiscreteWidth
+ """
+ return self.global_width.or_larger(self.next_width)
+
+ def limit(self, last_width: DiscreteWidth) -> None:
+ """Decrease the prepared next width.
+
+ This is called by other strategies when bounds are getting narrower.
+
+ Global current width is not updated yet,
+ as the other strategy may not end up becoming the winner
+ and we want to avoid interfering with other selector strategies.
+
+ :param last_width: As applied by other strategy, smaller of two halves.
+ :type last_width: DiscreteWidth
+ """
+ self.next_width = max(last_width, self.target.discrete_width)
+ self.expand(bump_global=False)
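+
+
+# Behaviour sketch: right after construction, next_width is the larger of
+# the target width and the initial interval width, multiplied once by the
+# expansion coefficient; get_width() never returns less than the shared
+# global current width, and expand() both multiplies next_width again
+# and bumps the global width.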
diff --git a/resources/libraries/python/MLRsearch/global_width.py b/resources/libraries/python/MLRsearch/global_width.py
new file mode 100644
index 0000000000..6f7df8b894
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/global_width.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining GlobalWidth class."""
+
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+
+
+@dataclass
+class GlobalWidth:
+ """Primarily used to synchronize external search steps across selectors.
+
+ The full name is global current width, but that is too long for identifiers.
+
+ While each selector tracks its "local" (per goal) width using expander,
+ it is important we do not interleave upper external search for two goals.
+ That is why all selector instances refer to a singleton instance of this.
+
+ In general, this value remains constant while the main loop iterates over
+ selectors and while a selector iterates over strategies.
+ After the winner is measured, this width is set to the winner width value,
+ and for some strategies that width is expanded when external search says so.
+
+ The two methods alone would not be worth creating a new class;
+ the main reason is to have a name for type hints
+ that distinguishes this from various other "width" and "current" values.
+ """
+
+ width: DiscreteWidth
+ """Minimum width to apply at next external search step."""
+ # TODO: Add a setter, so it is easier to add debug logging.
+
+ @staticmethod
+ def from_loads(load0: DiscreteLoad, load1: DiscreteLoad) -> GlobalWidth:
+ """Initialize the value based on two loads from initial trials.
+
+ :param load0: Lower (or equal) load from the two most recent trials.
+ :param load1: Higher (or equal) load from the two most recent trials.
+ :type load0: DiscreteLoad
+ :type load1: DiscreteLoad
+ :returns: Newly created instance with computed width.
+ :rtype: GlobalWidth
+ """
+ return GlobalWidth(load1 - load0)
+
+ def or_larger(self, width: DiscreteWidth) -> DiscreteWidth:
+ """Return width from argument or self, whichever is larger.
+
+ :param width: A selector (strategy) asks if this width is large enough.
+ :type width: DiscreteWidth
+ :returns: Argument or current width.
+ :rtype: DiscreteWidth
+ """
+ return width if width > self.width else self.width
diff --git a/resources/libraries/python/MLRsearch/goal_result.py b/resources/libraries/python/MLRsearch/goal_result.py
new file mode 100644
index 0000000000..91dccec0bb
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/goal_result.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining GoalResult class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .discrete_load import DiscreteLoad
+from .relevant_bounds import RelevantBounds
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class GoalResult:
+ """Composite to be mapped for each search goal at the end of the search.
+
+ The values are stored as trimmed stats,
+ the conditional throughput is returned as a discrete load.
+ Thus, users interested only in float values have to convert explicitly.
+
+ Irregular goal results are supported as instances with a bound missing.
+ """
+
+ relevant_lower_bound: Optional[TrimmedStat]
+ """The relevant lower bound for the search goal."""
+ relevant_upper_bound: Optional[TrimmedStat]
+ """The relevant lower upper for the search goal."""
+
+ @staticmethod
+ def from_bounds(bounds: RelevantBounds) -> GoalResult:
+ """Factory, so that the call site can be shorter.
+
+ :param bounds: The relevant bounds as found in measurement database.
+ :type bounds: RelevantBounds
+ :returns: Newly created instance based on the bounds.
+ :rtype: GoalResult
+ """
+ return GoalResult(
+ relevant_lower_bound=bounds.clo,
+ relevant_upper_bound=bounds.chi,
+ )
+
+ @property
+ def conditional_throughput(self) -> Optional[DiscreteLoad]:
+ """Compute conditional throughput from the relevant lower bound.
+
+ If the relevant lower bound is missing, None is returned.
+
+ The conditional throughput has the same semantics as load,
+ so if load is unidirectional and user wants bidirectional
+ throughput, the manager has to compensate.
+
+ :return: Conditional throughput at the relevant lower bound.
+ :rtype: Optional[DiscreteLoad]
+ """
+ if not (rlb := self.relevant_lower_bound):
+ return None
+ stat = next(iter(rlb.target_to_stat.values()))
+ return rlb * (1.0 - stat.pessimistic_loss_ratio)
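+
+
+# Computation sketch (hypothetical numbers): with a relevant lower bound
+# of 1.0e6 tps whose pessimistic loss ratio is 0.005, the conditional
+# throughput is 1.0e6 * (1.0 - 0.005) = 995000.0 tps, keeping the same
+# (uni- or bidirectional) semantics as the load itself.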
diff --git a/resources/libraries/python/MLRsearch/limit_handler.py b/resources/libraries/python/MLRsearch/limit_handler.py
new file mode 100644
index 0000000000..5919f398f3
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/limit_handler.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LimitHandler class."""
+
+from dataclasses import dataclass
+from typing import Callable, Optional
+
+from .dataclass import secondary_field
+from .discrete_interval import DiscreteInterval
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .load_rounding import LoadRounding
+
+
+@dataclass
+class LimitHandler:
+ """Encapsulated methods for logic around handling limits.
+
+ In multiple places within MLRsearch code, an intended load value
+ is only useful if it is far enough from possible known values.
+ All such places can be served with the handle method
+ with appropriate arguments.
+ """
+
+ rounding: LoadRounding
+ """Rounding instance to use."""
+ debug: Callable[[str], None]
+ """Injectable logging function."""
+ # The two fields below are derived, extracted from rounding as a shortcut.
+ min_load: DiscreteLoad = secondary_field()
+ """Minimal load, as prescribed by Config."""
+ max_load: DiscreteLoad = secondary_field()
+ """Maximal load, as prescribed by Config."""
+
+ def __post_init__(self) -> None:
+ """Initialize derived quantities."""
+ from_float = DiscreteLoad.float_conver(rounding=self.rounding)
+ self.min_load = from_float(self.rounding.min_load)
+ self.max_load = from_float(self.rounding.max_load)
+
+ def handle(
+ self,
+ load: DiscreteLoad,
+ width: DiscreteWidth,
+ clo: Optional[DiscreteLoad],
+ chi: Optional[DiscreteLoad],
+ ) -> Optional[DiscreteLoad]:
+ """Return new intended load after considering limits and bounds.
+
+ Not only do we want to avoid measuring outside the minmax interval,
+ we also want to avoid measuring too close to known limits and bounds.
+ We either round or return None, depending on hints from bound loads.
+
+ When rounding away from hard limits, we may end up being
+ too close to an already measured bound.
+ In this case, pick a midpoint between the bound and the limit.
+
+ The last two arguments are just loads (not full measurement results)
+ to allow callers to exclude some loads without measuring them.
+ As a convenience, full results are also supported,
+ so that callers do not need to care about None when extracting load.
+
+ :param load: Intended load candidate, initial or from a load selector.
+ :param width: Relative width goal, considered narrow enough for now.
+ :param clo: Intended load of current relevant lower bound.
+ :param chi: Intended load of current relevant upper bound.
+ :type load: DiscreteLoad
+ :type width: DiscreteWidth
+ :type clo: Optional[DiscreteLoad]
+ :type chi: Optional[DiscreteLoad]
+ :return: Adjusted load to measure at, or None if narrow enough already.
+ :rtype: Optional[DiscreteLoad]
+ :raises RuntimeError: If unsupported corner case is detected.
+ """
+ if not load:
+ raise RuntimeError("Got None load to handle.")
+ load = load.rounded_down()
+ min_load, max_load = self.min_load, self.max_load
+ if clo and not clo.is_round:
+ raise RuntimeError(f"Clo {clo} should have been round.")
+ if chi and not chi.is_round:
+ raise RuntimeError(f"Chi {chi} should have been round.")
+ if not clo and not chi:
+ load = self._handle_load_with_excludes(
+ load, width, min_load, max_load, min_ex=False, max_ex=False
+ )
+ # The "return load" lines are separate from load computation,
+ # so that logging can be added more easily when debugging.
+ return load
+ if chi and not clo:
+ if chi <= min_load:
+ # Expected when hitting the min load.
+ return None
+ if load >= chi:
+ # This can happen when mrr2 forward rate is rounded to mrr2.
+ return None
+ load = self._handle_load_with_excludes(
+ load, width, min_load, chi, min_ex=False, max_ex=True
+ )
+ return load
+ if clo and not chi:
+ if clo >= max_load:
+ raise RuntimeError("Lower load expected.")
+ if load <= clo:
+ raise RuntimeError("Higher load expected.")
+ load = self._handle_load_with_excludes(
+ load, width, clo, max_load, min_ex=True, max_ex=False
+ )
+ return load
+ # We have both clo and chi defined.
+ if not clo < load < chi:
+ # Happens when bisect compares with bounded extend.
+ return None
+ load = self._handle_load_with_excludes(
+ load, width, clo, chi, min_ex=True, max_ex=True
+ )
+ return load
+
+ def _handle_load_with_excludes(
+ self,
+ load: DiscreteLoad,
+ width: DiscreteWidth,
+ minimum: DiscreteLoad,
+ maximum: DiscreteLoad,
+ min_ex: bool,
+ max_ex: bool,
+ ) -> Optional[DiscreteLoad]:
+ """Adjust load if too close to limits, respecting exclusions.
+
+ This is a reusable block.
+ Limits may come from previous bounds or from hard load limits.
+ When coming from bounds, rounding to that is not allowed.
+ When coming from hard limits, rounding to the limit value
+ is allowed in general (given by the setting the _ex flag).
+
+ :param load: The candidate intended load before accounting for limits.
+ :param width: Relative width of area around the limits to avoid.
+ :param minimum: The lower limit to round around.
+ :param maximum: The upper limit to round around.
+ :param min_ex: If false, rounding to the minimum is allowed.
+ :param max_ex: If false, rounding to the maximum is allowed.
+ :type load: DiscreteLoad
+ :type width: DiscreteWidth
+ :type minimum: DiscreteLoad
+ :type maximum: DiscreteLoad
+ :type min_ex: bool
+ :type max_ex: bool
+ :returns: Adjusted load value, or None if narrow enough.
+ :rtype: Optional[DiscreteLoad]
+ :raises RuntimeError: If internal inconsistency is detected.
+ """
+ if not minimum <= load <= maximum:
+ raise RuntimeError(
+ "Internal error: load outside limits:"
+ f" load {load} min {minimum} max {maximum}"
+ )
+ max_width = maximum - minimum
+ if width >= max_width:
+ self.debug("Warning: Handling called with wide width.")
+ if not min_ex:
+ self.debug("Minimum not excluded, rounding to it.")
+ return minimum
+ if not max_ex:
+ self.debug("Maximum not excluded, rounding to it.")
+ return maximum
+ self.debug("Both limits excluded, narrow enough.")
+ return None
+ soft_min = minimum + width
+ soft_max = maximum - width
+ if soft_min > soft_max:
+ self.debug("Whole interval is less than two goals.")
+ middle = DiscreteInterval(minimum, maximum).middle(width)
+ soft_min = soft_max = middle
+ if load < soft_min:
+ if min_ex:
+ self.debug("Min excluded, rounding to soft min.")
+ return soft_min
+ self.debug("Min not excluded, rounding to minimum.")
+ return minimum
+ if load > soft_max:
+ if max_ex:
+ self.debug("Max excluded, rounding to soft max.")
+ return soft_max
+ self.debug("Max not excluded, rounding to maximum.")
+ return maximum
+ # Far enough from all limits, no additional adjustment is needed.
+ return load
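+
+
+# Behaviour sketch (illustrative; "handler", "width" and the loads are
+# hypothetical objects constructed elsewhere):
+#
+#     # No bounds known yet: a candidate within one width of a hard limit
+#     # is rounded onto that limit, as hard limits are not excluded.
+#     assert handler.handle(near_max, width, clo=None, chi=None) \
+#         == handler.max_load
+#     # With an upper bound chi known, a candidate at or above chi
+#     # yields None, signalling that side is already narrow enough.
+#     assert handler.handle(chi, width, clo=None, chi=chi) is None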
diff --git a/resources/libraries/python/MLRsearch/load_rounding.py b/resources/libraries/python/MLRsearch/load_rounding.py
new file mode 100644
index 0000000000..0ac4487be9
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/load_rounding.py
@@ -0,0 +1,205 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadRounding class."""
+
+import math
+
+from dataclasses import dataclass
+from typing import List, Tuple
+
+from .dataclass import secondary_field
+
+
+@dataclass
+class LoadRounding:
+ """Class encapsulating stateful utilities that round intended load values.
+
+ For MLRsearch algorithm logic to be correct, it is important that
+ interval width expansion and narrowing are exactly reversible,
+ which is not true in general for floating point number arithmetics.
+
+ This class offers conversion to and from an integer quantity.
+ Operations in the integer realm are guaranteed to be reversible,
+ so the only risk is when converting between float and integer realm.
+
+ Which relative width corresponds to the unit integer
+ is computed in initialization from width goals,
+ striking a balance between memory requirements and precision.
+
+ There are two quality knobs. One restricts how far
+ an integer can be from the exact float value.
+ The other restricts how close it can be. That is to make sure
+ even with unpredictable rounding errors during the conversion,
+ the converted integer value is never bigger than the intended float value,
+ to ensure the intervals returned from MLRsearch will always
+ meet the relative width goal.
+
+ An instance of this class is mutable only in the sense it contains
+ a growing cache of previously computed values.
+ """
+
+ # TODO: Hide the cache and present as frozen hashable object.
+
+ min_load: float
+ """Minimal intended load [tps] to support, must be positive."""
+ max_load: float
+ """Maximal intended load [tps] to support, must be bigger than min load."""
+ float_goals: Tuple[float]
+ """Relative width goals to approximate, each must be positive
+ and smaller than one. Deduplicated and sorted in post init."""
+ quality_lower: float = 0.99
+ """Minimal multiple of each goal to be achievable."""
+ quality_upper: float = 0.999999
+ """Maximal multiple of each goal to be achievable."""
+ # Primary fields above, computed fields below.
+ max_int_load: int = secondary_field()
+ """Integer for max load (min load int is zero)."""
+ _int2load: List[Tuple[int, float]] = secondary_field()
+ """Known int values (sorted) and their float equivalents."""
+
+ def __post_init__(self) -> None:
+ """Ensure types, perform checks, initialize conversion structures.
+
+ :raises RuntimeError: If a requirement is not met.
+ """
+ self.min_load = float(self.min_load)
+ self.max_load = float(self.max_load)
+ if not 0.0 < self.min_load < self.max_load:
+ raise RuntimeError("Load limits not supported: {self}")
+ self.quality_lower = float(self.quality_lower)
+ self.quality_upper = float(self.quality_upper)
+ if not 0.0 < self.quality_lower < self.quality_upper < 1.0:
+ raise RuntimeError("Qualities not supported: {self}")
+ goals = []
+ for goal in self.float_goals:
+ goal = float(goal)
+ if not 0.0 < goal < 1.0:
+ raise RuntimeError(f"Goal width {goal} is not supported.")
+ goals.append(goal)
+ self.float_goals = tuple(sorted(set(goals)))
+ self.max_int_load = self._find_ints()
+ self._int2load = []
+ self._int2load.append((0, self.min_load))
+ self._int2load.append((self.max_int_load, self.max_load))
+
+ def _find_ints(self) -> int:
+ """Find and return value for max_int_load.
+
+ Separated out of post init, as this is less conversion and checking,
+ and more math and searching.
+
+ A dumb implementation would start with 1 and keep increasing by 1
+ until all goals are within quality limits.
+ An actual implementation is smarter with the increment,
+ so it is expected to find the resulting values somewhat faster.
+
+ :returns: Value to be stored as max_int_load.
+ :rtype: int
+ """
+ minmax_log_width = math.log(self.max_load) - math.log(self.min_load)
+ log_goals = [-math.log1p(-goal) for goal in self.float_goals]
+ candidate = 1
+ while 1:
+ log_width_unit = minmax_log_width / candidate
+ # Fallback to increment by one if rounding errors make tries bad.
+ next_tries = [candidate + 1]
+ acceptable = True
+ for log_goal in log_goals:
+ units = log_goal / log_width_unit
+ int_units = math.floor(units)
+ quality = int_units / units
+ if not self.quality_lower <= quality <= self.quality_upper:
+ acceptable = False
+ target = (int_units + 1) / self.quality_upper
+ next_try = (target / units) * candidate
+ next_tries.append(next_try)
+ # Else quality acceptable, not bumping the candidate.
+ if acceptable:
+ return candidate
+ candidate = int(math.ceil(max(next_tries)))
+
+ def int2float(self, int_load: int) -> float:
+ """Convert from int to float tps load. Expand internal table as needed.
+
+ Too low or too high ints result in min or max load respectively.
+
+ :param int_load: Integer quantity to turn back into float load.
+ :type int_load: int
+ :returns: Converted load in tps.
+ :rtype: float
+ :raises RuntimeError: If internal inconsistency is detected.
+ """
+ if int_load <= 0:
+ return self.min_load
+ if int_load >= self.max_int_load:
+ return self.max_load
+ lo_index, hi_index = 0, len(self._int2load)
+ lo_int, hi_int = 0, self.max_int_load
+ lo_load, hi_load = self.min_load, self.max_load
+ while hi_int - lo_int >= 2:
+ mid_index = (hi_index + lo_index + 1) // 2
+ if mid_index >= hi_index:
+ mid_int = (hi_int + lo_int) // 2
+ log_coeff = math.log(hi_load) - math.log(lo_load)
+ log_coeff *= (mid_int - lo_int) / (hi_int - lo_int)
+ mid_load = lo_load * math.exp(log_coeff)
+ self._int2load.insert(mid_index, (mid_int, mid_load))
+ hi_index += 1
+ mid_int, mid_load = self._int2load[mid_index]
+ if mid_int < int_load:
+ lo_index, lo_int, lo_load = mid_index, mid_int, mid_load
+ continue
+ if mid_int > int_load:
+ hi_index, hi_int, hi_load = mid_index, mid_int, mid_load
+ continue
+ return mid_load
+ raise RuntimeError("Bisect in int2float failed.")
+
+ def float2int(self, float_load: float) -> int:
+ """Convert and round from tps load to int. Maybe expand internal table.
+
+ Too low or too high loads result in zero or max int respectively.
+
+ Result value is rounded down to an integer.
+
+ :param float_load: Tps quantity to convert into int.
+ :type float_load: float
+ :returns: Converted integer value suitable for halving.
+ :rtype: int
+ """
+ if float_load <= self.min_load:
+ return 0
+ if float_load >= self.max_load:
+ return self.max_int_load
+ lo_index, hi_index = 0, len(self._int2load)
+ lo_int, hi_int = 0, self.max_int_load
+ lo_load, hi_load = self.min_load, self.max_load
+ while hi_int - lo_int >= 2:
+ mid_index = (hi_index + lo_index + 1) // 2
+ if mid_index >= hi_index:
+ mid_int = (hi_int + lo_int) // 2
+ log_coeff = math.log(hi_load) - math.log(lo_load)
+ log_coeff *= (mid_int - lo_int) / (hi_int - lo_int)
+ mid_load = lo_load * math.exp(log_coeff)
+ self._int2load.insert(mid_index, (mid_int, mid_load))
+ hi_index += 1
+ mid_int, mid_load = self._int2load[mid_index]
+ if mid_load < float_load:
+ lo_index, lo_int, lo_load = mid_index, mid_int, mid_load
+ continue
+ if mid_load > float_load:
+ hi_index, hi_int, hi_load = mid_index, mid_int, mid_load
+ continue
+ return mid_int
+ return lo_int
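
A minimal usage sketch of the rounding API above, assuming the repository root
is on PYTHONPATH so the module imports under its full path; the numeric load
values are illustrative only:

from resources.libraries.python.MLRsearch.load_rounding import LoadRounding

# Illustrative values; real min and max loads come from the search config.
rounding = LoadRounding(
    min_load=10000.0,
    max_load=10000000.0,
    float_goals=[0.005],
)
int_load = rounding.float2int(123456.7)    # rounded down to an integer load
float_load = rounding.int2float(int_load)  # back to a tps value on the grid
assert 0 <= int_load <= rounding.max_int_load
assert rounding.min_load <= float_load <= rounding.max_load

The integer loads are spaced evenly on a logarithmic scale between min and max
load, which is what makes halving and bisection independent of measurement units.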
diff --git a/resources/libraries/python/MLRsearch/load_stats.py b/resources/libraries/python/MLRsearch/load_stats.py
new file mode 100644
index 0000000000..5f4757f488
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/load_stats.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadStats class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .target_spec import TargetSpec
+from .target_stat import TargetStat
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+
+
+# The eq=False part is needed to make sure comparison is inherited properly.
+@dataclass(eq=False)
+class LoadStats(DiscreteLoad):
+ """An offered load together with stats for all possible targets.
+
+    As LoadStats is frequently passed instead of a plain DiscreteLoad,
+    equality and ordering are dictated by the float load.
+ """
+
+ target_to_stat: Dict[TargetSpec, TargetStat] = None
+ """Mapping from target specification to its current stat for this load."""
+
+ def __post_init__(self) -> None:
+ """Initialize load value and check there are targets to track."""
+ super().__post_init__()
+ if not self.target_to_stat:
+ raise ValueError(f"No targets: {self.target_to_stat!r}")
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ This works well only for trimmed stats,
+ as only the stat for the first target present is shown.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"fl={self.float_load}"
+ f",s=({next(iter(self.target_to_stat.values()))})"
+ )
+
+ def __hash__(self) -> int:
+ """Raise as stats are mutable by definition.
+
+ :returns: Hash value for this instance if possible.
+ :rtype: int
+ :raises TypeError: Not immutable.
+ """
+        raise TypeError("LoadStats are mutable so constant hash is impossible.")
+
+ def add(self, result: DiscreteResult) -> None:
+ """Take into account one more trial measurement result.
+
+ :param result: The result to take into account.
+ :type result: DiscreteResult
+        :raises RuntimeError: If the result load is not equal to the load of self.
+ """
+ if result.intended_load != float(self):
+ raise RuntimeError(
+ f"Attempting to add load {result.intended_load}"
+ f" to result set for {float(self)}"
+ )
+ for stat in self.target_to_stat.values():
+ stat.add(result)
+
+ @staticmethod
+ def new_empty(load: DiscreteLoad, targets: Tuple[TargetSpec]) -> LoadStats:
+ """Factory method to initialize mapping for given targets.
+
+ :param load: The intended load value for the new instance.
+ :param targets: The target specifications to track stats for.
+ :type load: DiscreteLoad
+ :type targets: Tuple[TargetSpec]
+ :returns: New instance with empty stats initialized.
+ :rtype: LoadStats
+        :raises ValueError: If the load is not rounded.
+ """
+ if not load.is_round:
+ raise ValueError(f"Not round: {load!r}")
+ return LoadStats(
+ rounding=load.rounding,
+ int_load=int(load),
+ target_to_stat={target: TargetStat(target) for target in targets},
+ )
+
+ def estimates(self, target: TargetSpec) -> Tuple[bool, bool]:
+ """Classify this load according to given target.
+
+ :param target: According to which target this should be classified.
+ :type target: TargetSpec
+        :returns: Tuple of two estimates (optimistic, pessimistic) whether the load
+            can be a lower bound. (True, False) means the target is not reached yet.
+ :rtype: Tuple[bool, bool]
+ """
+ return self.target_to_stat[target].estimates()
diff --git a/resources/libraries/python/MLRsearch/measurement_database.py b/resources/libraries/python/MLRsearch/measurement_database.py
new file mode 100644
index 0000000000..7a6618c0da
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/measurement_database.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MeasurementDatabase class."""
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+from .load_stats import LoadStats
+from .relevant_bounds import RelevantBounds
+from .target_spec import TargetSpec
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class MeasurementDatabase:
+ """Structure holding measurement results for multiple durations and loads.
+
+ Several utility methods are added, accomplishing tasks useful for MLRsearch.
+
+    While TargetStat can decide when a single load is a lower bound (or upper),
+    it does not deal with loss inversion (a higher load with a smaller loss ratio).
+
+ This class introduces the concept of relevant bounds.
+ Relevant upper bound is simply the lowest load classified as an upper bound.
+ But relevant lower bound is only chosen from lower bound loads
+ strictly smaller than the relevant upper bound.
+    This way, any higher loads with good results are ignored,
+    so the relevant bounds give a conservative estimate of the SUT's true performance.
+ """
+
+ targets: Tuple[TargetSpec] = None
+ """Targets to track stats for."""
+ load_to_stats: Dict[DiscreteLoad, LoadStats] = None
+ """Mapping from loads to stats."""
+
+ def __post_init__(self) -> None:
+ """Check and sort initial values.
+
+ If no stats yet, initialize empty ones.
+
+ :raises ValueError: If there are no targets.
+ """
+ if not self.targets:
+ raise ValueError(f"Database needs targets: {self.targets!r}")
+ if not self.load_to_stats:
+ self.load_to_stats = {}
+ self._sort()
+
+ def _sort(self) -> None:
+ """Sort keys from low to high load."""
+ self.load_to_stats = dict(sorted(self.load_to_stats.items()))
+
+ def __getitem__(self, key: DiscreteLoad) -> LoadStats:
+ """Allow access to stats as if self was load_to_stats.
+
+ This also accepts LoadStats as key, so callers do not need
+ to care about hashability.
+
+ :param key: The load to get stats for.
+ :type key: DiscreteLoad
+ :returns: Stats for the given load.
+        :rtype: LoadStats
+ """
+ return self.load_to_stats[key.hashable()]
+
+ def add(self, result: DiscreteResult) -> None:
+ """Incorporate given trial measurement result.
+
+ :param result: Measurement result to add to the database.
+        :type result: DiscreteResult
+        :raises ValueError: If the result load is not rounded.
+        """
+ discrete_load = result.discrete_load.hashable()
+ if not discrete_load.is_round:
+ raise ValueError(f"Not round load: {discrete_load!r}")
+ if discrete_load not in self.load_to_stats:
+ self.load_to_stats[discrete_load] = LoadStats.new_empty(
+ load=discrete_load,
+ targets=self.targets,
+ )
+ self._sort()
+ self.load_to_stats[discrete_load].add(result)
+
+ def get_relevant_bounds(self, target: TargetSpec) -> RelevantBounds:
+ """Return None or a valid trimmed stat, for the two relevant bounds.
+
+ A load is valid only if both optimistic and pessimistic estimates agree.
+
+ If some value is not available, None is returned instead.
+ The returned stats are trimmed to the argument target.
+
+ The implementation starts from low loads
+ and the search stops at lowest upper bound,
+ thus conforming to the conservative definition of relevant bounds.
+
+ :param target: Target to classify loads when finding bounds.
+ :type target: TargetSpec
+ :returns: Relevant lower bound, relevant upper bound.
+ :rtype: RelevantBounds
+ """
+ lower_bound, upper_bound = None, None
+ for load_stats in self.load_to_stats.values():
+ opt, pes = load_stats.estimates(target)
+ if opt != pes:
+ continue
+ if not opt:
+ upper_bound = load_stats
+ break
+ lower_bound = load_stats
+ if lower_bound:
+ lower_bound = TrimmedStat.for_target(lower_bound, target)
+ if upper_bound:
+ upper_bound = TrimmedStat.for_target(upper_bound, target)
+ return RelevantBounds(clo=lower_bound, chi=upper_bound)
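
The relevant-bounds rule described above can be shown with a standalone sketch;
the helper below is hypothetical (it only mirrors the loop in get_relevant_bounds
on plain floats) and is not part of the library:

from typing import List, Optional, Tuple

def relevant_bounds(
    classified: List[Tuple[float, bool]]
) -> Tuple[Optional[float], Optional[float]]:
    """Return (relevant lower bound, relevant upper bound).

    Input is a list of (load, acts_as_lower_bound) pairs sorted by load.
    Scanning from the lowest load, the first load that is not a lower bound
    becomes the relevant upper bound, and the relevant lower bound is the
    largest lower bound strictly below it, so loss inversion above is ignored.
    """
    lower, upper = None, None
    for load, is_lower in classified:
        if not is_lower:
            upper = load
            break
        lower = load
    return lower, upper

# Loss inversion: 4.0 acts as a lower bound again, but the conservative
# answer is still (2.0, 3.0).
assert relevant_bounds(
    [(1.0, True), (2.0, True), (3.0, False), (4.0, True)]
) == (2.0, 3.0)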
diff --git a/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
new file mode 100644
index 0000000000..4d3ff7c4cb
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
@@ -0,0 +1,325 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MultipleLossRatioSearch class."""
+
+import logging
+import time
+
+from dataclasses import dataclass
+from typing import Callable, Optional, Tuple
+
+from .candidate import Candidate
+from .config import Config
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_result import DiscreteResult
+from .expander import GlobalWidth
+from .goal_result import GoalResult
+from .limit_handler import LimitHandler
+from .load_rounding import LoadRounding
+from .measurement_database import MeasurementDatabase
+from .pep3140 import Pep3140Dict
+from .search_goal import SearchGoal
+from .selector import Selector
+from .target_scaling import TargetScaling
+from .trial_measurement import AbstractMeasurer
+
+
+@dataclass
+class MultipleLossRatioSearch:
+ """Implementation of the controller part of MLRsearch algorithm.
+
+ The manager part is creating and calling this,
+ the measurer part is injected.
+
+ Traditional binary search algorithm needs initial interval
+ (lower and upper bound), and returns final narrow bounds
+ (related to its search goal) after bisecting
+ (until some exit condition is met).
+    The exit condition is usually related to the interval width
+ (upper bound value minus lower bound value).
+
+ The optimized algorithm in this class contains several improvements
+ aimed to reduce overall search time.
+
+ One improvement is searching for bounds for multiple search goals at once.
+ Specifically, the trial measurement results influence bounds for all goals,
+ even though the selection of trial inputs for next measurement
+ focuses only on one goal. The focus can switch between goals frequently.
+
+ Next improvement is that results of trial measurements
+ with small trial duration can be used to find a reasonable starting interval
+ for full trial duration search.
+ This results in more trials performed, but smaller overall duration
+ in general.
+ Internally, such shorter trials come from "preceding targets",
+    handled in the same way as a search goal's "final target".
+ Related improvement is that the "current" interval does not need to be valid
+ (e.g. one of the bounds is missing).
+ In that case, this algorithm will move and expand the interval,
+ in a process called external search. Only when both bounds are found,
+ the interval bisection (called internal search) starts making it narrow.
+
+ Next improvement is bisecting in logarithmic quantities,
+ so that target relative width is independent of measurement units.
+
+ Next improvement is basing the initial interval on forwarding rates
+    of a few initial measurements, starting at max load and using forwarding rates
+ seen so far.
+
+ Next improvement is to allow the use of multiple shorter trials
+    instead of one big trial, allowing a percentage of trials
+ to exceed the loss ratio target.
+ This makes the result more stable in practice.
+ Conservative behavior (single long trial, zero exceed ratio)
+ is still available using corresponding goal definitions.
+
+ Final improvement is exiting early if the minimal load
+ is not a valid lower bound (at final duration)
+ and also exiting if the overall search duration is too long.
+
+ There are also subtle optimizations related to candidate selection
+ and uneven splitting of intervals, too numerous to list here.
+
+ The return values describe performance at the relevant lower bound
+    as "conditional throughput", which is based on the loss ratio of one of the trials
+ selected as a quantile based on exceed ratio parameter.
+ Usually this value may be quite pessimistic, as MLRsearch stops
+ measuring a load as soon as it becomes a lower bound,
+    so the conditional throughput is usually based on the forwarding rate
+    of the worst of the good long trials.
+ """
+
+ config: Config
+ """Arguments required at construction time."""
+    # End of fields required at instance creation.
+ measurer: AbstractMeasurer = secondary_field()
+ """Measurer to use, set at calling search()."""
+ debug: Callable[[str], None] = secondary_field()
+ """Object to call for logging, None means logging.debug."""
+ # Fields below are computed from data above
+ rounding: LoadRounding = secondary_field()
+ """Derived from goals. Instance to use for intended load rounding."""
+ from_float: Callable[[float], DiscreteLoad] = secondary_field()
+ """Conversion method from float [tps] intended load values."""
+ limit_handler: LimitHandler = secondary_field()
+ """Load post-processing utility based on config and rounding."""
+ scaling: TargetScaling = secondary_field()
+ """Utility for creating target chains for search goals."""
+ database: MeasurementDatabase = secondary_field()
+ """Storage for (stats of) measurement results so far."""
+ stop_time: float = secondary_field()
+ """Monotonic time value at which the search should end with failure."""
+
+ def search(
+ self,
+ measurer: AbstractMeasurer,
+ debug: Optional[Callable[[str], None]] = None,
+ ) -> Pep3140Dict[SearchGoal, GoalResult]:
+ """Perform initial trials, create state object, proceed with main loop.
+
+ Stateful arguments (measurer and debug) are stored.
+ Derived objects are constructed from config.
+
+ :param measurer: Measurement provider to use by this search object.
+ :param debug: Callable to optionally use instead of logging.debug().
+ :type measurer: AbstractMeasurer
+ :type debug: Optional[Callable[[str], None]]
+ :returns: Structure containing conditional throughputs and other stats,
+ one for each search goal. If a value is None it means there is
+ no lower bound (min load turned out to be an upper bound).
+ :rtype: Pep3140Dict[SearchGoal, GoalResult]
+ :raises RuntimeError: If total duration is larger than timeout,
+ or if min load becomes an upper bound for a search goal
+ that has fail fast true.
+ """
+ self.measurer = measurer
+ self.debug = logging.debug if debug is None else debug
+ self.rounding = LoadRounding(
+ min_load=self.config.min_load,
+ max_load=self.config.max_load,
+ float_goals=[goal.relative_width for goal in self.config.goals],
+ )
+ self.from_float = DiscreteLoad.float_conver(rounding=self.rounding)
+ self.limit_handler = LimitHandler(
+ rounding=self.rounding,
+ debug=self.debug,
+ )
+ self.scaling = TargetScaling(
+ goals=self.config.goals,
+ rounding=self.rounding,
+ )
+ self.database = MeasurementDatabase(self.scaling.targets)
+ self.stop_time = time.monotonic() + self.config.search_duration_max
+ result0, result1 = self.run_initial_trials()
+ self.main_loop(result0.discrete_load, result1.discrete_load)
+ ret_dict = Pep3140Dict()
+ for goal in self.config.goals:
+ target = self.scaling.goal_to_final_target[goal]
+ bounds = self.database.get_relevant_bounds(target=target)
+ ret_dict[goal] = GoalResult.from_bounds(bounds=bounds)
+ return ret_dict
+
+ def measure(self, duration: float, load: DiscreteLoad) -> DiscreteResult:
+ """Call measurer and put the result to appropriate form in database.
+
+ Also check the argument types and load roundness,
+ and return the result to the caller.
+
+ :param duration: Intended duration for the trial measurement.
+        :param load: Intended load for the trial measurement.
+ :type duration: float
+ :type load: DiscreteLoad
+ :returns: The trial results.
+ :rtype: DiscreteResult
+        :raises RuntimeError: If an argument does not have the required type.
+ """
+ if not isinstance(duration, float):
+ raise RuntimeError(f"Duration has to be float: {duration!r}")
+ if not isinstance(load, DiscreteLoad):
+ raise RuntimeError(f"Load has to be discrete: {load!r}")
+ if not load.is_round:
+ raise RuntimeError(f"Told to measure unrounded: {load!r}")
+ self.debug(f"Measuring at d={duration},il={int(load)}")
+ result = self.measurer.measure(
+ intended_duration=duration,
+ intended_load=float(load),
+ )
+ self.debug(f"Measured lr={result.loss_ratio}")
+ result = DiscreteResult.with_load(result=result, load=load)
+ self.database.add(result)
+ return result
+
+ def run_initial_trials(self) -> Tuple[DiscreteResult, DiscreteResult]:
+ """Perform trials to get enough data to start the selectors.
+
+ Measurements are done with all initial targets in mind,
+ based on smallest target loss ratio, largest initial trial duration,
+ and largest initial target width.
+
+ Forwarding rate is used as a hint for next intended load.
+ The relative quantity is used, as load can use different units.
+ When the smallest target loss ratio is non-zero, a correction is needed
+ (forwarding rate is only a good hint for zero loss ratio load).
+ The correction is conservative (all increase in load turns to losses).
+
+ Also, warmup trial (if configured) is performed,
+ all other trials are added to the database.
+
+        This could return the initial width, but from an implementation perspective
+ it is easier to return two measurements (or the same one twice) here
+ and compute width later. The "one value twice" happens when max load
+ has small loss, or when min load has big loss.
+
+ :returns: Two last measured values, in any order. Or one value twice.
+ :rtype: Tuple[DiscreteResult, DiscreteResult]
+ """
+ max_load = self.limit_handler.max_load
+ ratio, duration, width = None, None, None
+ for target in self.scaling.targets:
+ if target.preceding:
+ continue
+ if ratio is None or ratio > target.loss_ratio:
+ ratio = target.loss_ratio
+ if not duration or duration < target.trial_duration:
+ duration = target.trial_duration
+ if not width or width < target.discrete_width:
+ width = target.discrete_width
+ self.debug(f"Init ratio {ratio} duration {duration} width {width}")
+ if self.config.warmup_duration:
+ self.debug("Warmup trial.")
+ self.measure(self.config.warmup_duration, max_load)
+ # Warmup should not affect the real results, reset the database.
+ self.database = MeasurementDatabase(self.scaling.targets)
+ self.debug(f"First trial at max rate: {max_load}")
+ result0 = self.measure(duration, max_load)
+ rfr = result0.relative_forwarding_rate
+ corrected_rfr = (self.from_float(rfr) / (1.0 - ratio)).rounded_down()
+ if corrected_rfr >= max_load:
+ self.debug("Small loss, no other initial trials are needed.")
+ return result0, result0
+ mrr = self.limit_handler.handle(corrected_rfr, width, None, max_load)
+ self.debug(f"Second trial at (corrected) mrr: {mrr}")
+ result1 = self.measure(duration, mrr)
+ # Attempt to get narrower width.
+ result_ratio = result1.loss_ratio
+ if result_ratio > ratio:
+ rfr2 = result1.relative_forwarding_rate
+ crfr2 = (self.from_float(rfr2) / (1.0 - ratio)).rounded_down()
+ mrr2 = self.limit_handler.handle(crfr2, width, None, mrr)
+ else:
+ mrr2 = mrr + width
+ mrr2 = self.limit_handler.handle(mrr2, width, mrr, max_load)
+ if not mrr2:
+ self.debug("Close enough, measuring at mrr2 is not needed.")
+ return result1, result1
+ self.debug(f"Third trial at (corrected) mrr2: {mrr2}")
+ result2 = self.measure(duration, mrr2)
+ return result1, result2
+
+ def main_loop(self, load0: DiscreteLoad, load1: DiscreteLoad) -> None:
+ """Initialize selectors and keep measuring the winning candidate.
+
+        Selectors are created; the two input loads are useful starting points.
+
+ The search ends when no selector nominates any candidate,
+ or if the search takes too long (or if a selector raises).
+
+ Winner is selected according to ordering defined in Candidate class.
+ In case of a tie, selectors for earlier goals are preferred.
+
+ As a selector is only allowed to update current width as the winner,
+ the update is done here explicitly.
+
+ :param load0: Discrete load of one of results from run_initial_trials.
+ :param load1: Discrete load of other of results from run_initial_trials.
+ :type load0: DiscreteLoad
+ :type load1: DiscreteLoad
+ :raises RuntimeError: If the search takes too long,
+ or if min load becomes an upper bound for any search goal
+ """
+ if load1 < load0:
+ load0, load1 = load1, load0
+ global_width = GlobalWidth.from_loads(load0, load1)
+ selectors = []
+ for target in self.scaling.goal_to_final_target.values():
+ selector = Selector(
+ final_target=target,
+ global_width=global_width,
+ initial_lower_load=load0,
+ initial_upper_load=load1,
+ database=self.database,
+ handler=self.limit_handler,
+ debug=self.debug,
+ )
+ selectors.append(selector)
+ while time.monotonic() < self.stop_time:
+ winner = Candidate()
+ for selector in selectors:
+ # Order of arguments is important
+ # when two targets nominate the same candidate.
+ winner = min(Candidate.nomination_from(selector), winner)
+ if not winner:
+ break
+ # We do not check duration versus stop_time here,
+ # as some measurers can be unpredictably faster
+ # than their intended duration suggests.
+ self.measure(duration=winner.duration, load=winner.load)
+ # Delayed updates.
+ if winner.width:
+ global_width.width = winner.width
+ winner.won()
+ else:
+ raise RuntimeError("Optimized search takes too long.")
+ self.debug("Search done.")
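
The forwarding-rate correction used in run_initial_trials above can be shown as
standalone arithmetic; the values and variable names below are illustrative and
not taken from the library:

# Forwarding rate observed at max load, in tps (illustrative value).
relative_forwarding_rate = 4.5e6
# Smallest loss ratio among the final targets (here a 0.5% loss goal).
smallest_loss_ratio = 0.005
# Conservative correction: any load added on top of the forwarding rate
# is assumed to be lost, so the hint is divided by (1 - loss ratio).
corrected_hint = relative_forwarding_rate / (1.0 - smallest_loss_ratio)
print(round(corrected_hint))  # 4522613 tps, slightly above the raw rate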
diff --git a/resources/libraries/python/MLRsearch/pep3140/__init__.py b/resources/libraries/python/MLRsearch/pep3140/__init__.py
new file mode 100644
index 0000000000..f8e2ffaa8f
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/pep3140/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "pep3140".
+"""
+
+# TODO: Move submodules to separate modules.
+# Not obvious how to do that from PyPI point of view
+# without affecting the current CSIT global "resources" package root.
+# Probably it can be done by specifying multiple directories
+# in PYTHONPATH used throughout CSIT.
+
+from .classes import Pep3140Dict
diff --git a/resources/libraries/python/MLRsearch/pep3140/classes.py b/resources/libraries/python/MLRsearch/pep3140/classes.py
new file mode 100644
index 0000000000..9ab6e25c7c
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/pep3140/classes.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining a subclass of dict with an alternative str method."""
+
+
+class Pep3140Dict(dict):
+ """A dict with str support as proposed in PEP 3140.
+
+ Python implemented str acting on dict such that the resulting string
+ shows both keys and values in their repr form.
+ Therefore, str() of a dict gives the same result as repr().
+
+ This class shows both keys and values their str form instead.
+ """
+
+ def __str__(self) -> str:
+ """Return comma+space separated str of items in curly brackets.
+
+ :returns: PEP 3140 string form of the dict data.
+ :rtype: str
+ """
+ body = ", ".join(f"{key}: {value}" for key, value in self.items())
+ return f"{{{body}}}"
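
A quick illustration of the difference, with arbitrary keys and values:

from resources.libraries.python.MLRsearch.pep3140 import Pep3140Dict

plain = {"ndr": 1250000.0, "pdr": 1500000.0}
print(str(plain))               # {'ndr': 1250000.0, 'pdr': 1500000.0}
print(str(Pep3140Dict(plain)))  # {ndr: 1250000.0, pdr: 1500000.0}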
diff --git a/resources/libraries/python/MLRsearch/relevant_bounds.py b/resources/libraries/python/MLRsearch/relevant_bounds.py
new file mode 100644
index 0000000000..4bc6796f71
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/relevant_bounds.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RelevantBounds class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .trimmed_stat import TrimmedStat
+
+
+@dataclass
+class RelevantBounds:
+ """Container for the pair of relevant bounds for a target.
+
+ If there is no valid bound, None is used.
+
+ Relevant upper bound is smallest load acting as an upper bound.
+ Relevant lower bound acts as a lower bound, has to be strictly smaller
+ than the relevant upper bound, and is largest among such loads.
+
+ The short names "clo" and "chi" are also commonly used
+ in logging and technical comments.
+
+    Trimming could be done here, but it needs to know the target explicitly,
+ so it is done in MeasurementDatabase instead.
+ """
+
+ clo: Optional[TrimmedStat]
+ """The relevant lower bound (trimmed) for the current target."""
+ chi: Optional[TrimmedStat]
+ """The relevant upper bound (trimmed) for the current target."""
+
+ # TODO: Check types in post init?
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ clo = int(self.clo) if self.clo else None
+ chi = int(self.chi) if self.chi else None
+ return f"clo={clo},chi={chi}"
diff --git a/resources/libraries/python/MLRsearch/search_goal.py b/resources/libraries/python/MLRsearch/search_goal.py
new file mode 100644
index 0000000000..777ad5b991
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/search_goal.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining SearchGoal class."""
+
+from dataclasses import dataclass
+
+
+@dataclass(frozen=True, eq=True)
+class SearchGoal:
+ """Storage class for search goal attributes.
+
+ This is the part of controller inputs that can be repeated
+ with different values. MLRsearch saves time by searching
+ for conditional throughput for each goal at the same time,
+ compared to repeated calls with separate goals.
+
+ Most fields (called attributes) of this composite
+ are relevant to the definition of conditional throughput.
+    The rest are not, but can affect the overall search time.
+ """
+
+ loss_ratio: float = 0.0
+ """The goal loss ratio.
+ A trial can satisfy the goal only when its trial loss ratio is not higher
+ than this. See MeasurementResult.loss_ratio for details.
+ A trial that does not satisfy this goal is called a bad trial."""
+ exceed_ratio: float = 0.5
+ """What portion of the duration sum can consist of bad trial seconds
+ while still being classified as lower bound (assuming no short trials)."""
+ relative_width: float = 0.005
+ """Target is achieved when the relevant lower bound
+    is no further than this (in units of the tightest upper bound)
+ from the relevant upper bound."""
+ initial_trial_duration: float = 1.0
+ """Shortest trial duration employed when searching for this goal."""
+ final_trial_duration: float = 1.0
+ """Longest trial duration employed when searching for this goal."""
+ duration_sum: float = 21.0
+ """Minimal sum of durations of relevant trials sufficient to declare a load
+ to be upper or lower bound for this goal."""
+ preceding_targets: int = 2
+ """Number of increasingly coarser search targets to insert,
+ hoping to speed up searching for the final target of this goal."""
+ expansion_coefficient: int = 2
+ """External search multiplies width (in logarithmic space) by this."""
+ fail_fast: bool = True
+ """If true and min load is not an upper bound, raise.
+ If false, search will return None instead of lower bound."""
+
+ def __post_init__(self) -> None:
+ """Convert fields to correct types and call validate."""
+ super().__setattr__("loss_ratio", float(self.loss_ratio))
+ super().__setattr__("exceed_ratio", float(self.exceed_ratio))
+ super().__setattr__("relative_width", float(self.relative_width))
+ super().__setattr__(
+ "final_trial_duration", float(self.final_trial_duration)
+ )
+ super().__setattr__(
+ "initial_trial_duration", float(self.initial_trial_duration)
+ )
+ super().__setattr__("duration_sum", float(self.duration_sum))
+ super().__setattr__("preceding_targets", int(self.preceding_targets))
+ super().__setattr__(
+ "expansion_coefficient", int(self.expansion_coefficient)
+ )
+ super().__setattr__("fail_fast", bool(self.fail_fast))
+ self.validate()
+
+ def validate(self) -> None:
+ """Make sure the initialized values conform to requirements.
+
+ :raises ValueError: If a field value is outside allowed bounds.
+ """
+ if self.loss_ratio < 0.0:
+ raise ValueError(f"Loss ratio cannot be negative: {self}")
+ if self.loss_ratio >= 1.0:
+ raise ValueError(f"Loss ratio must be lower than 1: {self}")
+ if self.exceed_ratio < 0.0:
+ raise ValueError(f"Exceed ratio cannot be negative: {self}")
+ if self.exceed_ratio >= 1.0:
+ raise ValueError(f"Exceed ratio must be lower than 1: {self}")
+ if self.relative_width <= 0.0:
+ raise ValueError(f"Relative width must be positive: {self}")
+ if self.relative_width >= 1.0:
+ raise ValueError(f"Relative width must be less than 1: {self}")
+ if self.initial_trial_duration <= 0.0:
+ raise ValueError(f"Initial trial duration must be positive: {self}")
+ if self.final_trial_duration < self.initial_trial_duration:
+ raise ValueError(
+ f"Single duration max must be at least initial: {self}"
+ )
+ if self.duration_sum < self.final_trial_duration:
+ raise ValueError(
+ "Min duration sum cannot be smaller"
+ f" than final trial duration: {self}"
+ )
+ if self.expansion_coefficient <= 1:
+ raise ValueError(f"Expansion coefficient is too small: {self}")
+ too_small = False
+ if self.preceding_targets < 0:
+ too_small = True
+ elif self.preceding_targets < 1:
+ if self.initial_trial_duration < self.duration_sum:
+ too_small = True
+ if too_small:
+ raise ValueError(
+ f"Number of preceding targets is too small: {self}"
+ )
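
A small sketch of constructing goals and triggering the validation above;
the argument values are illustrative:

from resources.libraries.python.MLRsearch.search_goal import SearchGoal

# All fields have defaults, so a zero-loss goal needs no arguments.
zero_loss_goal = SearchGoal()
# A goal tolerating 0.5% loss, everything else left at defaults.
loss_tolerant_goal = SearchGoal(loss_ratio=0.005)

try:
    SearchGoal(relative_width=1.5)  # validate() rejects widths of 1.0 or more
except ValueError as error:
    print(error)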
diff --git a/resources/libraries/python/MLRsearch/search_goal_tuple.py b/resources/libraries/python/MLRsearch/search_goal_tuple.py
new file mode 100644
index 0000000000..d40ba99b4b
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/search_goal_tuple.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining SearchGoalTuple class."""
+
+from collections.abc import Iterator
+from dataclasses import dataclass
+from typing import Tuple
+
+from .search_goal import SearchGoal
+
+
+@dataclass(frozen=True)
+class SearchGoalTuple:
+ """Container class holding multiple search goals.
+
+ Just a convenience for checking their number and types.
+ """
+
+ goals: Tuple[SearchGoal, ...]
+ """Goals extracted from user-provided Iterable of search goals."""
+
+ def __post_init__(self) -> None:
+ """Check type and number of search goals.
+
+ :raises ValueError: If there are no goals.
+ :raises TypeError: If a goal is not a SearchGoal.
+ """
+ super().__setattr__("goals", tuple(self.goals))
+ if not self.goals:
+ raise ValueError(f"Cannot be empty: {self.goals}")
+ for goal in self.goals:
+ if not isinstance(goal, SearchGoal):
+ raise TypeError(f"Must be a SearchGoal instance: {goal}")
+ copied = list(self.goals)
+ deduplicated = set(self.goals)
+ for goal in copied:
+ if goal not in deduplicated:
+ raise ValueError(f"Duplicate goal: {goal}")
+ deduplicated.remove(goal)
+ if deduplicated:
+ raise ValueError(f"Error processing goals: {deduplicated}")
+
+ def __iter__(self) -> Iterator[SearchGoal]:
+        """Enable iteration over goals.
+
+        :returns: Iterator iterating over contained goals.
+ :rtype: Iterator[SearchGoal]
+ """
+ return iter(self.goals)
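
The duplicate check above in action; the goals reuse the defaults shown in
search_goal.py:

from resources.libraries.python.MLRsearch.search_goal import SearchGoal
from resources.libraries.python.MLRsearch.search_goal_tuple import SearchGoalTuple

goals = SearchGoalTuple(goals=(SearchGoal(), SearchGoal(loss_ratio=0.005)))
for goal in goals:
    print(goal)

try:
    SearchGoalTuple(goals=(SearchGoal(), SearchGoal()))  # the same goal twice
except ValueError as error:
    print(error)  # Duplicate goal: ...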
diff --git a/resources/libraries/python/MLRsearch/selector.py b/resources/libraries/python/MLRsearch/selector.py
new file mode 100644
index 0000000000..4a6d2e2574
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/selector.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining Selector class."""
+
+
+from dataclasses import dataclass, field
+from typing import Callable, List, Optional, Tuple
+
+from .dataclass import secondary_field
+from .discrete_load import DiscreteLoad
+from .discrete_width import DiscreteWidth
+from .expander import TargetedExpander
+from .global_width import GlobalWidth
+from .limit_handler import LimitHandler
+from .measurement_database import MeasurementDatabase
+from .relevant_bounds import RelevantBounds
+from .target_spec import TargetSpec
+from .strategy import StrategyBase, STRATEGY_CLASSES
+
+
+@dataclass
+class Selector:
+    """A selector is an abstraction that focuses on only one of the search goals.
+
+ While lower-level logic is hidden in strategy classes,
+ the code in this class is responsible for initializing strategies
+ and shifting targets towards the final target.
+
+ While the public methods have the same names and meaning as the ones
+ in strategy classes, their signature is different.
+ Selector adds the current target trial duration to the output of nominate(),
+ and adds the current bounds to the input of won().
+
+ The nominate method does not return a complete Candidate instance,
+ as we need to avoid circular dependencies
+ (candidate will refer to selector).
+ """
+
+ final_target: TargetSpec
+ """The target this selector is trying to ultimately achieve."""
+ global_width: GlobalWidth
+ """Reference to the global width tracking instance."""
+ initial_lower_load: DiscreteLoad
+ """Smaller of the two loads distinguished at instance creation.
+ During operation, this field is reused to store preceding target bound."""
+ initial_upper_load: DiscreteLoad
+ """Larger of the two loads distinguished at instance creation.
+ During operation, this field is reused to store preceding target bound."""
+ database: MeasurementDatabase = field(repr=False)
+ """Reference to the common database used by all selectors."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the class used to avoid too narrow intervals."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+ # Primary above, derived below.
+ current_target: TargetSpec = secondary_field()
+ """The target the selector is focusing on currently."""
+ target_stack: List[TargetSpec] = secondary_field()
+ """Stack of targets. When current target is achieved, next is popped."""
+ strategies: Tuple[StrategyBase] = secondary_field()
+ """Instances implementing particular selection strategies."""
+ current_strategy: Optional[StrategyBase] = secondary_field()
+ """Reference to strategy used for last nomination, needed for won()."""
+ # Cache.
+ bounds: RelevantBounds = secondary_field()
+ """New relevant bounds for this round of candidate selection."""
+
+ def __post_init__(self) -> None:
+ """Initialize derived values."""
+ self.target_stack = [self.final_target]
+ while preceding_target := self.target_stack[-1].preceding:
+ self.target_stack.append(preceding_target)
+ self.current_target = self.target_stack.pop()
+ self._recreate_strategies()
+
+ def _recreate_strategies(self) -> None:
+ """Recreate strategies after current target has changed.
+
+ Width expander is recreated as target width is now smaller.
+ For convenience, strategies get injectable debug
+ which prints also the current target.
+ """
+ expander = TargetedExpander(
+ target=self.current_target,
+ global_width=self.global_width,
+ initial_lower_load=self.initial_lower_load,
+ initial_upper_load=self.initial_upper_load,
+ handler=self.handler,
+ debug=self.debug,
+ )
+
+ def wrapped_debug(text: str) -> None:
+ """Call self debug with current target info prepended.
+
+ :param text: Message to log at debug level.
+ :type text: str
+ """
+ self.debug(f"Target {self.current_target}: {text}")
+
+ self.strategies = tuple(
+ cls(
+ target=self.current_target,
+ expander=expander,
+ initial_lower_load=self.initial_lower_load,
+ initial_upper_load=self.initial_upper_load,
+ handler=self.handler,
+ debug=wrapped_debug,
+ )
+ for cls in STRATEGY_CLASSES
+ )
+ self.current_strategy = None
+ self.debug(f"Created strategies for: {self.current_target}")
+
+ def _update_bounds(self) -> None:
+ """Before each iteration, call this to update bounds cache."""
+ self.bounds = self.database.get_relevant_bounds(self.current_target)
+
+ def nominate(
+ self,
+ ) -> Tuple[Optional[DiscreteLoad], float, Optional[DiscreteWidth]]:
+ """Find first strategy that wants to nominate, return trial inputs.
+
+ Returned load is None if no strategy wants to nominate.
+
+        Current target is shifted when the (now preceding) target is reached.
+ As each strategy never becomes done before at least one
+ bound relevant to the current target becomes available,
+ it is never needed to revert to the preceding target after the shift.
+
+ As the initial trials had inputs relevant to all initial targets,
+ the only way for this not to nominate a load
+ is when the final target is reached (including hitting min or max load).
+ The case of hitting min load raises, so search fails early.
+
+ :returns: Nominated load, duration, and global width to set if winning.
+ :rtype: Tuple[Optional[DiscreteLoad], float, Optional[DiscreteWidth]]
+ :raises RuntimeError: If internal inconsistency is detected,
+ or if min load becomes an upper bound.
+ """
+ self._update_bounds()
+ self.current_strategy = None
+ while 1:
+ for strategy in self.strategies:
+ load, width = strategy.nominate(self.bounds)
+ if load:
+ self.current_strategy = strategy
+ return load, self.current_target.trial_duration, width
+ if not self.bounds.clo and not self.bounds.chi:
+ raise RuntimeError("Internal error: no clo nor chi.")
+ if not self.target_stack:
+ if not self.bounds.clo and self.current_target.fail_fast:
+ raise RuntimeError(f"No lower bound: {self.bounds.chi!r}")
+ self.debug(f"Goal {self.current_target} reached: {self.bounds}")
+ return None, self.current_target.trial_duration, None
+ # Everything is ready for next target in the chain.
+ self.current_target = self.target_stack.pop()
+ # Debug logs look better if we forget bounds are TrimmedStat.
+ # Abuse rounding (if not None) to convert to pure DiscreteLoad.
+ clo, chi = self.bounds.clo, self.bounds.chi
+ self.initial_lower_load = clo.rounded_down() if clo else clo
+ self.initial_upper_load = chi.rounded_down() if chi else chi
+ self._update_bounds()
+ self._recreate_strategies()
+
+ def won(self, load: DiscreteLoad) -> None:
+ """Update any private info when candidate became a winner.
+
+ :param load: The load previously nominated by current strategy.
+ :type load: DiscreteLoad
+ """
+ self._update_bounds()
+ self.current_strategy.won(bounds=self.bounds, load=load)
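
The target-chain handling in __post_init__ above, sketched with a hypothetical
stand-in for TargetSpec (only the preceding link matters for this sketch):

from dataclasses import dataclass
from typing import Optional

@dataclass(frozen=True)
class FakeTarget:
    """Hypothetical stand-in exposing only the preceding link."""
    name: str
    preceding: Optional["FakeTarget"] = None

coarse = FakeTarget(name="coarse")
medium = FakeTarget(name="medium", preceding=coarse)
final = FakeTarget(name="final", preceding=medium)

# Mirror of Selector.__post_init__: push the chain, then pop the coarsest.
stack = [final]
while preceding := stack[-1].preceding:
    stack.append(preceding)
current = stack.pop()
print(current.name)             # coarse, searched first
print([t.name for t in stack])  # ['final', 'medium'], popped later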
diff --git a/resources/libraries/python/MLRsearch/strategy/__init__.py b/resources/libraries/python/MLRsearch/strategy/__init__.py
new file mode 100644
index 0000000000..a1e0225a17
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "strategy".
+"""
+
+from .base import StrategyBase
+from .bisect import BisectStrategy
+from .extend_hi import ExtendHiStrategy
+from .extend_lo import ExtendLoStrategy
+from .halve import HalveStrategy
+from .refine_hi import RefineHiStrategy
+from .refine_lo import RefineLoStrategy
+
+
+STRATEGY_CLASSES = (
+ HalveStrategy,
+ RefineLoStrategy,
+ RefineHiStrategy,
+ ExtendLoStrategy,
+ ExtendHiStrategy,
+ BisectStrategy,
+)
+"""Tuple of strategy constructors, in decreasing order of priority."""
diff --git a/resources/libraries/python/MLRsearch/strategy/base.py b/resources/libraries/python/MLRsearch/strategy/base.py
new file mode 100644
index 0000000000..0724f882bf
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/base.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining StrategyBase class."""
+
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import Callable, Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..expander import TargetedExpander
+from ..limit_handler import LimitHandler
+from ..relevant_bounds import RelevantBounds
+from ..target_spec import TargetSpec
+
+
+@dataclass
+class StrategyBase(ABC):
+ """Abstract class encompassing data common to most strategies.
+
+ A strategy is one piece of logic a selector may use
+ when nominating a candidate according to its current target.
+
+ The two initial bound arguments may not be bounds at all.
+ For initial targets, the two values are usually mrr and mrr2.
+ For subsequent targets, the initial values are usually
+ the relevant bounds of the preceding target,
+ but one of them may be None if hitting min or max load.
+
+ The initial values are mainly used as stable alternatives
+ to relevant bounds of preceding target,
+ because those bounds may have been unpredictably altered
+ by nominations from unrelated search goals.
+ This greatly simplifies reasoning about strategies making progress.
+ """
+
+ target: TargetSpec
+ """The target this strategy is focusing on."""
+ expander: TargetedExpander
+ """Instance to track width expansion during search (if applicable)."""
+ initial_lower_load: Optional[DiscreteLoad]
+ """Smaller of the two loads distinguished at instance creation.
+ Can be None if upper bound is the min load."""
+ initial_upper_load: Optional[DiscreteLoad]
+ """Larger of the two loads distinguished at instance creation.
+ Can be None if lower bound is the max load."""
+ handler: LimitHandler = field(repr=False)
+ """Reference to the limit handler instance."""
+ debug: Callable[[str], None] = field(repr=False)
+ """Injectable function for debug logging."""
+
+ @abstractmethod
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate a load candidate if the conditions activate this strategy.
+
+ A complete candidate refers also to the nominating selector.
+ To prevent circular dependence (selector refers to nominating strategy),
+        this function returns only the load and width.
+
+ Width should only be non-None if global current width should be updated
+ when the candidate based on this becomes winner.
+ But currently all strategies return non-None width
+ if they return non-None load.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ return None, None
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Notify the strategy its candidate became the winner.
+
+ Most strategies have no use for this information,
+ but some strategies may need to update their private information.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ return
+
+ def not_worth(self, bounds: RelevantBounds, load: DiscreteLoad) -> bool:
+ """A check on bounds common for multiple strategies.
+
+ The load is worth measuring only if it can create or improve
+ either relevant bound.
+
+ Each strategy is designed to create a relevant bound for current target,
+ which is only needed if that (or better) bound does not exist yet.
+ Conversely, if a strategy does not nominate, it is because
+ the load it would nominate (if any) is found not worth by this method.
+
+ :param bounds: Current relevant bounds.
+ :param load: Load of a possible candidate.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ :returns: True if the load should NOT be nominated.
+ :rtype: bool
+ """
+ if bounds.clo and bounds.clo >= load:
+ return True
+ if bounds.chi and bounds.chi <= load:
+ return True
+ if bounds.clo and bounds.chi:
+ # We are not hitting min nor max load.
+ # Measuring at this load will create or improve clo or chi.
+ # The only reason not to nominate is if interval is narrow already.
+ wig = DiscreteInterval(
+ lower_bound=bounds.clo,
+ upper_bound=bounds.chi,
+ ).width_in_goals(self.target.discrete_width)
+ if wig <= 1.0:
+ return True
+ return False
diff --git a/resources/libraries/python/MLRsearch/strategy/bisect.py b/resources/libraries/python/MLRsearch/strategy/bisect.py
new file mode 100644
index 0000000000..894544695e
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/bisect.py
@@ -0,0 +1,193 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining BisectStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class BisectStrategy(StrategyBase):
+    """Strategy to use when both bounds relevant to current target are present.
+
+ Primarily, this strategy is there to perform internal search.
+    As powers of two are friendly to binary search,
+ this strategy relies on the splitting logic described in DiscreteInterval.
+
+    The main reason why this class is so long is that the mere existence
+ of a valid bound for the current target does not imply
+ that bound is a good approximation of the final conditional throughput.
+ The bound might become valid due to efforts of a strategy
+ focusing on an entirely different search goal.
+
+ On the other hand, initial bounds may be better approximations,
+ but they also may be bad approximations (for example
+ when SUT behavior strongly depends on trial duration).
+
+    Based on comparison of existing current bounds to initial bounds,
+ this strategy also mimics what would external search do
+ (if the one current bound was missing and other initial bound was current).
+    In case that load value is closer to the appropriate initial bound
+ (compared to how far the simple bisect between current bounds is),
+ that load is nominated.
+
+ It turns out those "conditional" external search nominations
+ are quite different from unconditional ones,
+ at least when it comes to handling limits
+ and tracking when width expansion should be applied.
+ That is why that logic is here
+ and not in some generic external search class.
+ """
+
+ expand_on_clo: bool = False
+ """If extending up, width should be expanded when load becomes clo."""
+ expand_on_chi: bool = False
+ """If extending down, width should be expanded when load becomes chi."""
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate a load candidate between bounds or extending from them.
+
+ The external search logic is offloaded into private methods.
+ If they return a truthy load, that is returned from here as well.
+
+ Only if the actual bisect is selected,
+ the per-selector expander is limited to the (smaller) new width.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not bounds.clo or bounds.clo >= self.handler.max_load:
+ return None, None
+ if not bounds.chi or bounds.chi <= self.handler.min_load:
+ return None, None
+ interval = DiscreteInterval(bounds.clo, bounds.chi)
+ if interval.width_in_goals(self.target.discrete_width) <= 1.0:
+ return None, None
+ bisect_load = interval.middle(self.target.discrete_width)
+ load, width = self._extend_lo(bounds, bisect_load)
+ if load:
+ self.expand_on_clo, self.expand_on_chi = False, True
+ self.debug(f"Preferring to extend down: {load}")
+ return load, width
+ load, width = self._extend_hi(bounds, bisect_load)
+ if load:
+ self.expand_on_clo, self.expand_on_chi = True, False
+ self.debug(f"Preferring to extend up: {load}")
+ return load, width
+ load = bisect_load
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.expand_on_clo, self.expand_on_chi = False, False
+ self.debug(f"Preferring to bisect: {load}")
+ width_lo = DiscreteInterval(bounds.clo, load).discrete_width
+ width_hi = DiscreteInterval(load, bounds.chi).discrete_width
+ width = min(width_lo, width_hi)
+ self.expander.limit(width)
+ return load, width
+
+ def _extend_lo(
+ self, bounds: RelevantBounds, bisect_load: DiscreteLoad
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Compute load as if extending down, return it if preferred.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param bisect_load: Load when bisection is preferred.
+ :type bounds: RelevantBounds
+ :type bisect_load: DiscreteLoad
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ # TODO: Simplify all the conditions or explain them better.
+ if not self.initial_upper_load:
+ return None, None
+ if bisect_load >= self.initial_upper_load:
+ return None, None
+ width = self.expander.get_width()
+ load = bounds.chi - width
+ load = self.handler.handle(
+ load=load,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if not load:
+ return None, None
+ if load <= bisect_load:
+ return None, None
+ if load >= self.initial_upper_load:
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ raise RuntimeError(f"Load not worth: {load}")
+ return load, width
+
+ def _extend_hi(
+ self, bounds: RelevantBounds, bisect_load: DiscreteLoad
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Compute load as if extending up, return it if preferred.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param bisect_load: Load when bisection is preferred.
+ :type bounds: RelevantBounds
+ :type bisect_load: DiscreteLoad
+        :returns: Two Nones, or the candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ :raises RuntimeError: If an internal inconsistency is detected.
+ """
+ # TODO: Simplify all the conditions or explain them better.
+ if not self.initial_lower_load:
+ return None, None
+ if bisect_load <= self.initial_lower_load:
+ return None, None
+ width = self.expander.get_width()
+ load = bounds.clo + width
+ load = self.handler.handle(
+ load=load,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if not load:
+ return None, None
+ if load >= bisect_load:
+ return None, None
+ if load <= self.initial_lower_load:
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ raise RuntimeError(f"Load not worth: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width when appropriate.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if self.expand_on_clo and load == bounds.clo:
+ self.expander.expand()
+ elif self.expand_on_chi and load == bounds.chi:
+ self.expander.expand()
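
The width bookkeeping in the bisect nominate method above is easier to follow with concrete numbers. Below is a minimal standalone sketch (plain floats and made-up loads, not the DiscreteLoad/DiscreteWidth types the real strategy uses, and with rounding ignored) of how the bisect point splits the current interval and why the smaller sub-width is the one reported and used to limit the expander.

# Minimal sketch of the bisect bookkeeping, using plain floats
# and made-up loads instead of DiscreteLoad/DiscreteWidth.
clo, chi = 4_000_000.0, 5_000_000.0  # current relevant bounds [tps]
bisect_load = (clo + chi) / 2.0      # rounding is ignored in this sketch
width_lo = bisect_load - clo         # width of the lower sub-interval
width_hi = chi - bisect_load         # width of the upper sub-interval
# The smaller sub-width is returned and the per-selector expander
# is limited to it, mirroring expander.limit(width) above.
width = min(width_lo, width_hi)
print(bisect_load, width)            # 4500000.0 500000.0
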
diff --git a/resources/libraries/python/MLRsearch/strategy/extend_hi.py b/resources/libraries/python/MLRsearch/strategy/extend_hi.py
new file mode 100644
index 0000000000..79c4ad7cf2
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/extend_hi.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining ExtendHiStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class ExtendHiStrategy(StrategyBase):
+ """This strategy is applied when there is no relevant upper bound.
+
+ Typically this is needed after RefineHiStrategy turned initial upper bound
+ into a current relevant lower bound.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate current relevant lower bound plus expander width.
+
+ This performs external search in upwards direction,
+ until a valid upper bound for the current target is found,
+ or until max load is hit.
+ Limit handling is used to avoid nominating too close
+ (or above) the max rate.
+
+ Width expansion is only applied if the candidate becomes a lower bound,
+ so that is detected in the won method.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if bounds.chi or not bounds.clo or bounds.clo >= self.handler.max_load:
+ return None, None
+ width = self.expander.get_width()
+ load = self.handler.handle(
+ load=bounds.clo + width,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"No chi, extending up: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width if the load became the new lower bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if load == bounds.clo:
+ self.expander.expand()
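
As a rough illustration of the upward external search this strategy drives, the sketch below walks the lower bound up with a geometrically growing step. The starting numbers and the coefficient of 2 are made up, and the load rounding and max-load clipping done by the real limit handler are ignored.

# Rough sketch of upward external search with width expansion.
# Numbers and coefficient are made up; load rounding and max-load
# clipping (the limit handler's job) are ignored here.
clo = 1_000_000.0    # current relevant lower bound [tps]
width = 50_000.0     # current expander width [tps]
coefficient = 2      # plays the role of the expansion coefficient
for _ in range(4):
    candidate = clo + width
    print(f"nominating {candidate}")
    # Pretend each candidate keeps satisfying the loss ratio,
    # i.e. it becomes the new lower bound, so won() expands the width.
    clo = candidate
    width *= coefficient
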
diff --git a/resources/libraries/python/MLRsearch/strategy/extend_lo.py b/resources/libraries/python/MLRsearch/strategy/extend_lo.py
new file mode 100644
index 0000000000..68d20b6a6a
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/extend_lo.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining ExtendLoStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class ExtendLoStrategy(StrategyBase):
+ """This strategy is applied when there is no relevant lower bound.
+
+ Typically this is needed after RefineLoStrategy turned initial lower bound
+ into a current relevant upper bound.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate current relevant upper bound minus expander width.
+
+ This performs external search in downwards direction,
+ until a valid lower bound for the current target is found,
+ or until min load is hit.
+ Limit handling is used to avoid nominating too close
+ (or below) the min rate.
+
+ Width expansion is only applied if the candidate becomes an upper bound,
+ so that is detected in the won method.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if bounds.clo or not bounds.chi or bounds.chi <= self.handler.min_load:
+ return None, None
+ width = self.expander.get_width()
+ load = self.handler.handle(
+ load=bounds.chi - width,
+ width=self.target.discrete_width,
+ clo=bounds.clo,
+ chi=bounds.chi,
+ )
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"No clo, extending down: {load}")
+ return load, width
+
+ def won(self, bounds: RelevantBounds, load: DiscreteLoad) -> None:
+ """Expand width if the load became new upper bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :param load: The current load, so strategy does not need to remember.
+ :type bounds: RelevantBounds
+ :type load: DiscreteLoad
+ """
+ if load == bounds.chi:
+ self.expander.expand()
diff --git a/resources/libraries/python/MLRsearch/strategy/halve.py b/resources/libraries/python/MLRsearch/strategy/halve.py
new file mode 100644
index 0000000000..3188a041c6
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/halve.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining HalveStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_interval import DiscreteInterval
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class HalveStrategy(StrategyBase):
+ """First strategy to apply for a new current target.
+
+ Pick a load between initial lower bound and initial upper bound,
+ nominate it if it is (still) worth it.
+
+ In a sense, this can be viewed as an extension of the preceding target's
+ bisect strategy. But as the current target may require a different
+ trial duration, it is better to do it for the new target.
+
+ Alternatively, this is a way to save one application
+ of a subsequent refine strategy, thus reducing the risk of triggering
+ an external search (a slight time saver for highly unstable SUTs).
+ Either way, a minor time saving is achieved because the preceding target
+ only needs to reach double of the current target width.
+
+ If the distance between initial bounds is already at or below
+ current target width, the middle point is not nominated.
+ The reasoning is that in this case external search is likely
+ to get triggered by the subsequent refine strategies,
+ so attaining a relevant bound here is not as likely to help.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the middle between initial lower and upper bound.
+
+ The returned width is the target width, even if initial bounds
+ happened to be closer together.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not self.initial_lower_load or not self.initial_upper_load:
+ return None, None
+ interval = DiscreteInterval(
+ lower_bound=self.initial_lower_load,
+ upper_bound=self.initial_upper_load,
+ )
+ wig = interval.width_in_goals(self.target.discrete_width)
+ if wig > 2.0:
+ # Can happen for initial target.
+ return None, None
+ if wig <= 1.0:
+ # Already was narrow enough, refinements shall be sufficient.
+ return None, None
+ load = interval.middle(self.target.discrete_width)
+ if self.not_worth(bounds, load):
+ return None, None
+ self.debug(f"Halving available: {load}")
+ # TODO: Report possibly smaller width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
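
The two width_in_goals checks above decide whether halving happens at all. A hypothetical standalone version of that decision, using a simple linear approximation of relative width instead of DiscreteInterval, could look like this:

# Hypothetical sketch of the halving decision, using a linear
# approximation of relative width instead of DiscreteInterval.
def halving_applies(initial_lo, initial_hi, goal_relative_width):
    """Return True if the middle load between the bounds gets nominated."""
    interval_width = (initial_hi - initial_lo) / initial_hi
    wig = interval_width / goal_relative_width  # width in multiples of goal
    # Mirrors the checks above: too wide (initial target) or already narrow.
    return not (wig > 2.0 or wig <= 1.0)

print(halving_applies(9_000_000.0, 10_000_000.0, 0.05))  # wig 2.0 -> True
print(halving_applies(9_800_000.0, 10_000_000.0, 0.05))  # wig 0.4 -> False
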
diff --git a/resources/libraries/python/MLRsearch/strategy/refine_hi.py b/resources/libraries/python/MLRsearch/strategy/refine_hi.py
new file mode 100644
index 0000000000..caa8fc4a7d
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/refine_hi.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RefineHiStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class RefineHiStrategy(StrategyBase):
+ """If initial upper bound is still worth it, nominate it.
+
+ This usually happens when halving resulted in relevant lower bound,
+ or if there was no halving (and RefineLoStrategy confirmed initial
+ lower bound became a relevant lower bound for the new current target).
+
+ This either ensures a matching upper bound (target is achieved)
+ or moves the relevant lower bound higher (triggering external search).
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the initial upper bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not (load := self.initial_upper_load):
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"Upperbound refinement available: {load}")
+ # TODO: Limit to possibly smaller than target width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
diff --git a/resources/libraries/python/MLRsearch/strategy/refine_lo.py b/resources/libraries/python/MLRsearch/strategy/refine_lo.py
new file mode 100644
index 0000000000..7927798505
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/strategy/refine_lo.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining RefineLoStrategy class."""
+
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+from ..discrete_load import DiscreteLoad
+from ..discrete_width import DiscreteWidth
+from ..relevant_bounds import RelevantBounds
+from .base import StrategyBase
+
+
+@dataclass
+class RefineLoStrategy(StrategyBase):
+ """If initial lower bound is still worth it, nominate it.
+
+ This usually happens when halving resulted in relevant upper bound,
+ or if there was no halving.
+ This ensures a relevant bound (upper or lower) for the current target
+ exists.
+ """
+
+ def nominate(
+ self, bounds: RelevantBounds
+ ) -> Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]:
+ """Nominate the initial lower bound.
+
+ :param bounds: Freshly updated bounds relevant for current target.
+ :type bounds: RelevantBounds
+ :returns: Two nones or candidate intended load and width.
+ :rtype: Tuple[Optional[DiscreteLoad], Optional[DiscreteWidth]]
+ """
+ if not (load := self.initial_lower_load):
+ return None, None
+ if self.not_worth(bounds=bounds, load=load):
+ return None, None
+ self.debug(f"Lowerbound refinement available: {load}")
+ # TODO: Limit to possibly smaller than target width?
+ self.expander.limit(self.target.discrete_width)
+ return load, self.target.discrete_width
diff --git a/resources/libraries/python/MLRsearch/target_scaling.py b/resources/libraries/python/MLRsearch/target_scaling.py
new file mode 100644
index 0000000000..25114c311c
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_scaling.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetScaling class."""
+
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from .dataclass import secondary_field
+from .discrete_width import DiscreteWidth
+from .load_rounding import LoadRounding
+from .search_goal import SearchGoal
+from .search_goal_tuple import SearchGoalTuple
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TargetScaling:
+ """Encapsulate targets derived from goals.
+
+ No default values for primaries; the constructor call has to specify everything.
+ """
+
+ goals: SearchGoalTuple
+ """Set of goals to generate targets for."""
+ rounding: LoadRounding
+ """Rounding instance to use (targets have discrete width)."""
+ # Derived quantities.
+ targets: Tuple[TargetSpec, ...] = secondary_field()
+ """The generated targets, linked into chains."""
+ goal_to_final_target: Dict[SearchGoal, TargetSpec] = secondary_field()
+ """Mapping from a goal to its corresponding final target."""
+
+ def __post_init__(self) -> None:
+ """For each goal create final, and non-final targets and link them."""
+ linked_targets = []
+ self.goal_to_final_target = {}
+ for goal in self.goals:
+ standalone_targets = []
+ # Final target.
+ width = DiscreteWidth(
+ rounding=self.rounding,
+ float_width=goal.relative_width,
+ ).rounded_down()
+ duration_sum = goal.duration_sum
+ target = TargetSpec(
+ loss_ratio=goal.loss_ratio,
+ exceed_ratio=goal.exceed_ratio,
+ discrete_width=width,
+ trial_duration=goal.final_trial_duration,
+ duration_sum=duration_sum,
+ expansion_coefficient=goal.expansion_coefficient,
+ fail_fast=goal.fail_fast,
+ preceding=None,
+ )
+ standalone_targets.append(target)
+ # Non-final targets.
+ preceding_targets = goal.preceding_targets
+ multiplier = (
+ pow(
+ goal.initial_trial_duration / duration_sum,
+ 1.0 / preceding_targets,
+ )
+ if preceding_targets
+ else 1.0
+ )
+ for count in range(preceding_targets):
+ preceding_sum = duration_sum * pow(multiplier, count + 1)
+ if count + 1 >= preceding_targets:
+ preceding_sum = goal.initial_trial_duration
+ trial_duration = min(goal.final_trial_duration, preceding_sum)
+ width *= 2
+ target = TargetSpec(
+ loss_ratio=goal.loss_ratio,
+ exceed_ratio=goal.exceed_ratio,
+ discrete_width=width,
+ trial_duration=trial_duration,
+ duration_sum=preceding_sum,
+ expansion_coefficient=goal.expansion_coefficient,
+ fail_fast=False,
+ preceding=None,
+ )
+ standalone_targets.append(target)
+ # Link preceding targets.
+ preceding_target = None
+ for target in reversed(standalone_targets):
+ linked_target = target.with_preceding(preceding_target)
+ linked_targets.append(linked_target)
+ preceding_target = linked_target
+ # Associate final target to the goal.
+ self.goal_to_final_target[goal] = linked_targets[-1]
+ # Store all targets as a tuple.
+ self.targets = tuple(linked_targets)
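
The scaling of duration sums and widths above can be followed with made-up goal numbers. The sketch below repeats the same arithmetic on plain floats (discrete width rounding ignored) for a hypothetical goal with relative width 0.005, duration sum 21 s, initial trial duration 1 s, final trial duration 1 s and two preceding targets.

# Worked example of the target scaling arithmetic, on plain floats.
# Goal values are made up; discrete width rounding is ignored.
relative_width = 0.005
duration_sum = 21.0
initial_trial_duration = 1.0
final_trial_duration = 1.0
preceding_targets = 2

multiplier = (initial_trial_duration / duration_sum) ** (1.0 / preceding_targets)
width = relative_width
chain = [("final", final_trial_duration, duration_sum, width)]
for count in range(preceding_targets):
    preceding_sum = duration_sum * multiplier ** (count + 1)
    if count + 1 >= preceding_targets:
        preceding_sum = initial_trial_duration
    trial_duration = min(final_trial_duration, preceding_sum)
    width *= 2
    chain.append((f"preceding {count + 1}", trial_duration, preceding_sum, width))
for name, duration, dursum, wid in reversed(chain):
    print(f"{name}: trial {duration:.2f}s, dursum {dursum:.2f}s, width {wid}")
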
diff --git a/resources/libraries/python/MLRsearch/target_spec.py b/resources/libraries/python/MLRsearch/target_spec.py
new file mode 100644
index 0000000000..5279ba00a1
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_spec.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TargetSpec class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Optional
+
+from .discrete_width import DiscreteWidth
+
+
+@dataclass(frozen=True, eq=True)
+class TargetSpec:
+ """Composite object holding attributes specifying one search target.
+
+ Abstractly, this has several similar meanings.
+ With discrete_width attribute this specifies when a selector is Done.
+ With expansion_coefficient attribute it tells selector how quickly
+ should it expand interval in external search.
+ With "preceding" attribute it helps selector, so it does not need to point
+ to preceding target separately from its current target.
+ Without those three attributes this object is still sufficient
+ for LoadStats to classify loads as lower bound, upper bound, or unknown.
+ """
+
+ loss_ratio: float
+ """Target loss ratio. Equal and directly analogous to goal loss ratio,
+ but applicable also for non-final targets."""
+ exceed_ratio: float
+ """Target exceed ratio. Equal and directly analogous to goal exceed ratio,
+ but applicable also for non-final targets."""
+ discrete_width: DiscreteWidth
+ """Target relative width. Analogous to goal relative width,
+ but coarser for non-final targets."""
+ trial_duration: float
+ """Duration to use for trials for this target. Shorter trials have lesser
+ (and more complicated) impact when determining upper and lower bounds."""
+ duration_sum: float
+ """Sum of trial durations sufficient to classify a load
+ as an upper or lower bound.
+ For non-final targets, this is shorter than goal duration_sum."""
+ expansion_coefficient: int = field(repr=False)
+ """Equal and directly analogous to goal expansion coefficient,
+ but applicable also for non-final targets."""
+ fail_fast: bool = field(repr=False)
+ """Copied from goal. If true and min load is not an upper bound, raise."""
+ preceding: Optional[TargetSpec] = field(repr=False)
+ """Reference to next coarser target (if any) belonging to the same goal."""
+
+ # No conversions or validations, as this is an internal structure.
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"lr={self.loss_ratio},er={self.exceed_ratio}"
+ f",ds={self.duration_sum}"
+ )
+
+ def with_preceding(self, preceding: Optional[TargetSpec]) -> TargetSpec:
+ """Create an equivalent instance but with different preceding field.
+
+ This is useful in initialization. Create semi-initialized targets
+ starting from the final one, then add references in reversed order.
+
+ :param preceding: New value for the preceding field, may be None.
+ :type preceding: Optional[TargetSpec]
+ :returns: Instance with the new value applied.
+ :rtype: TargetSpec
+ """
+ return TargetSpec(
+ loss_ratio=self.loss_ratio,
+ exceed_ratio=self.exceed_ratio,
+ discrete_width=self.discrete_width,
+ trial_duration=self.trial_duration,
+ duration_sum=self.duration_sum,
+ expansion_coefficient=self.expansion_coefficient,
+ fail_fast=self.fail_fast,
+ preceding=preceding,
+ )
diff --git a/resources/libraries/python/MLRsearch/target_stat.py b/resources/libraries/python/MLRsearch/target_stat.py
new file mode 100644
index 0000000000..18e1ff4161
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/target_stat.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining LoadStat class."""
+
+from dataclasses import dataclass, field
+from typing import Dict, Tuple
+
+from .target_spec import TargetSpec
+from .discrete_result import DiscreteResult
+
+
+@dataclass
+class TargetStat:
+ """Class for aggregating trial results for a single load and target.
+
+ Reference to the target is included for convenience.
+
+ The main usage is for load classification, done in estimates method.
+ If both estimates agree, the load is classified as either a lower bound
+ or an upper bound. For additional logic for dealing with loss inversion
+ see MeasurementDatabase.
+
+ Also, data needed for conditional throughput is gathered here,
+ exposed only as a pessimistic loss ratio
+ (as the load value is not stored here).
+ """
+
+ target: TargetSpec = field(repr=False)
+ """The target for which this instance is aggregating results."""
+ good_long: float = 0.0
+ """Sum of durations of long enough trials satisfying target loss ratio."""
+ bad_long: float = 0.0
+ """Sum of durations of long trials not satisfying target loss ratio."""
+ good_short: float = 0.0
+ """Sum of durations of shorter trials satisfying target loss ratio."""
+ bad_short: float = 0.0
+ """Sum of durations of shorter trials not satisfying target loss ratio."""
+ long_losses: Dict[float, float] = field(repr=False, default_factory=dict)
+ """If a loss ratio value occured in a long trial, map it to duration sum."""
+
+ def __str__(self) -> str:
+ """Convert into a short human-readable string.
+
+ :returns: The short string.
+ :rtype: str
+ """
+ return (
+ f"gl={self.good_long},bl={self.bad_long}"
+ f",gs={self.good_short},bs={self.bad_short}"
+ )
+
+ def add(self, result: DiscreteResult) -> None:
+ """Take into account one more trial result.
+
+ Use intended duration for deciding between long and short trials,
+ but use offered duration (with overheads) to increase the duration sums.
+
+ :param result: The trial result to add to the stats.
+ :type result: DiscreteResult
+ """
+ dwo = result.duration_with_overheads
+ rlr = result.loss_ratio
+ if result.intended_duration >= self.target.trial_duration:
+ if rlr not in self.long_losses:
+ self.long_losses[rlr] = 0.0
+ self.long_losses = dict(sorted(self.long_losses.items()))
+ self.long_losses[rlr] += dwo
+ if rlr > self.target.loss_ratio:
+ self.bad_long += dwo
+ else:
+ self.good_long += dwo
+ else:
+ if rlr > self.target.loss_ratio:
+ self.bad_short += dwo
+ else:
+ self.good_short += dwo
+
+ def estimates(self) -> Tuple[bool, bool]:
+ """Return whether this load can become a lower bound.
+
+ This returns two estimates, hence the weird nonverb name of this method.
+ One estimate assumes all following results will satisfy the loss ratio,
+ the other assumes all results will not satisfy the loss ratio.
+ The sum of durations of the assumed results
+ is the minimum to reach target duration sum, or zero if already reached.
+
+ If both estimates are the same, it means the load is a definite bound.
+ This may happen even when the sum of durations of already
+ measured trials is less than the target, when the missing measurements
+ cannot change the classification.
+
+ :returns: Tuple of two estimates whether the load can be a lower bound.
+ (True, False) means more trial results are needed.
+ :rtype: Tuple[bool, bool]
+ """
+ coeff = self.target.exceed_ratio
+ decrease = self.good_short * coeff / (1.0 - coeff)
+ short_excess = self.bad_short - decrease
+ effective_excess = self.bad_long + max(0.0, short_excess)
+ effective_dursum = max(
+ self.good_long + effective_excess,
+ self.target.duration_sum,
+ )
+ limit_dursum = effective_dursum * self.target.exceed_ratio
+ optimistic = effective_excess <= limit_dursum
+ pessimistic = (effective_dursum - self.good_long) <= limit_dursum
+ return optimistic, pessimistic
+
+ @property
+ def pessimistic_loss_ratio(self) -> float:
+ """Return the loss ratio for conditional throughput computation.
+
+ It adds missing dursum as full-loss trials to long_losses
+ and returns a quantile corresponding to exceed ratio.
+ In case of tie (as in median for even number of samples),
+ this returns the lower value (as being equal to goal exceed ratio
+ is allowed).
+
+ For loads classified as a lower bound, the return value
+ ends up being no larger than the target loss ratio.
+ This is because the excess short bad trials would only come
+ after the quantile in question (as would full-loss missing trials).
+ For other loads, anything can happen, but conditional throughput
+ should not be computed for those anyway.
+ Those two facts allow the logic here to be simpler than in estimates().
+
+ :returns: Effective loss ratio based on long trial results.
+ :rtype: float
+ """
+ all_long = max(self.target.duration_sum, self.good_long + self.bad_long)
+ remaining = all_long * (1.0 - self.target.exceed_ratio)
+ ret = None
+ for ratio, dursum in self.long_losses.items():
+ if ret is None or remaining > 0.0:
+ ret = ratio
+ remaining -= dursum
+ else:
+ break
+ else:
+ if remaining > 0.0:
+ ret = 1.0
+ return ret
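
The classification arithmetic in estimates() is easier to follow on concrete numbers. The sketch below repeats the same steps on plain floats, with made-up trial durations, for a target wanting a 20 s duration sum at exceed ratio 0.5.

# Numeric sketch of the estimates() arithmetic, with made-up durations.
exceed_ratio, duration_sum = 0.5, 20.0
good_long, bad_long = 8.0, 2.0    # long-trial duration sums so far
good_short, bad_short = 1.0, 3.0  # short-trial duration sums so far

decrease = good_short * exceed_ratio / (1.0 - exceed_ratio)
short_excess = bad_short - decrease
effective_excess = bad_long + max(0.0, short_excess)
effective_dursum = max(good_long + effective_excess, duration_sum)
limit_dursum = effective_dursum * exceed_ratio
optimistic = effective_excess <= limit_dursum
pessimistic = (effective_dursum - good_long) <= limit_dursum
print(optimistic, pessimistic)  # (True, False): more trials are needed
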
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/__init__.py b/resources/libraries/python/MLRsearch/trial_measurement/__init__.py
new file mode 100644
index 0000000000..034ae41819
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for Python package "trial_measurement".
+"""
+
+from .abstract_measurer import AbstractMeasurer
+from .measurement_result import MeasurementResult
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py b/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py
new file mode 100644
index 0000000000..6fab79c8dc
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/abstract_measurer.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining AbstractMeasurer class."""
+
+from abc import ABCMeta, abstractmethod
+
+from .measurement_result import MeasurementResult as Result
+
+
+class AbstractMeasurer(metaclass=ABCMeta):
+ """Abstract class defining common API for trial measurement providers.
+
+ The original use of this class was in the realm of
+ RFC 2544 Throughput search, which explains the terminology
+ related to networks, frames, packets, offered load, forwarding rate
+ and similar.
+
+ But the same logic can be used in higher level networking scenarios
+ (e.g. https requests) or even outside networking (database transactions).
+
+ The current code uses language from packet forwarding;
+ docstrings sometimes mention transactions as an alternative view.
+ """
+
+ @abstractmethod
+ def measure(self, intended_duration: float, intended_load: float) -> Result:
+ """Perform trial measurement and return the result.
+
+ It is assumed the measurer got already configured with anything else
+ needed to perform the measurement (e.g. traffic profile
+ or transaction limit).
+
+ Duration and load are the only values expected to vary
+ during the search.
+
+ :param intended_duration: Intended trial duration [s].
+ :param intended_load: Intended rate of transactions (packets) [tps].
+ It is a per-port rate, e.g. uni-directional for SUTs
+ with two ports.
+ :type intended_duration: float
+ :type intended_load: float
+ :returns: Structure detailing the result of the measurement.
+ :rtype: measurement_result.MeasurementResult
+ """
diff --git a/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py b/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py
new file mode 100644
index 0000000000..9dc1ccf5f1
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trial_measurement/measurement_result.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining MeasurementResult class."""
+
+from dataclasses import dataclass
+
+
+@dataclass
+class MeasurementResult:
+ """Structure defining the result of a single trial measurement.
+
+ There are a few primary (required) quantities. Various secondary (derived)
+ quantities are calculated and can be queried.
+
+ The constructor allows broader argument types,
+ the post init function converts to the stricter types.
+
+ Integer quantities (counts) are preferred, as float values
+ can suffer from rounding errors, and sometimes they are measured
+ at unknown (possibly very limited) precision and accuracy.
+
+ There are relations between the counts (e.g. offered count
+ should be equal to a sum of forwarding count and loss count).
+ This implementation does not perform consistency checks, but uses them
+ for computing quantities the caller left unspecified.
+
+ In some cases, the units of intended load are different from units
+ of loss count (e.g. load in transactions but loss in packets).
+ Quantities with relative_ prefix can be used to get load candidates
+ from forwarding results.
+
+ Sometimes, the measurement provider is unable to reach the intended load,
+ and it can react by spending longer than intended duration
+ to reach its intended count. To signal irregular situations like this,
+ several optional fields can be given, and various secondary quantities
+ are populated, so the measurement consumer can query the quantity
+ it wants to rely on in these irregular situations.
+
+ The current implementation intentionally limits the secondary quantities
+ to the few that proved useful in practice.
+ """
+
+ # Required primary quantities.
+ intended_duration: float
+ """Intended trial measurement duration [s]."""
+ intended_load: float
+ """Intended load [tps]. If bidirectional (or multi-port) traffic is used,
+ most users will put unidirectional (single-port) value here,
+ as bandwidth and pps limits are usually per-port."""
+ # Two of the next three primary quantities are required.
+ offered_count: int = None
+ """Number of packets actually transmitted (transactions attempted).
+ This should be the aggregate (bidirectional, multi-port) value,
+ so that asymmetric traffic profiles are supported."""
+ loss_count: int = None
+ """Number of packets transmitted but not received (transactions failed)."""
+ forwarding_count: int = None
+ """Number of packets successfully forwarded (transactions succeeded)."""
+ # Optional primary quantities.
+ offered_duration: float = None
+ """Estimate of the time [s] the trial was actually transmitting traffic."""
+ duration_with_overheads: float = None
+ """Estimate of the time [s] it took to get the trial result
+ since the measurement started."""
+ intended_count: int = None
+ """Expected number of packets to transmit. If not known,
+ the value of offered_count is used."""
+
+ def __post_init__(self) -> None:
+ """Convert types, compute missing values.
+
+ Current caveats:
+ A failing assumption looks like a conversion error.
+ Negative counts are allowed, which can lead to errors later.
+ """
+ self.intended_duration = float(self.intended_duration)
+ if self.offered_duration is None:
+ self.offered_duration = self.intended_duration
+ else:
+ self.offered_duration = float(self.offered_duration)
+ if self.duration_with_overheads is None:
+ self.duration_with_overheads = self.offered_duration
+ else:
+ self.duration_with_overheads = float(self.duration_with_overheads)
+ self.intended_load = float(self.intended_load)
+ if self.forwarding_count is None:
+ self.forwarding_count = int(self.offered_count) - int(
+ self.loss_count
+ )
+ else:
+ self.forwarding_count = int(self.forwarding_count)
+ if self.offered_count is None:
+ self.offered_count = self.forwarding_count + int(self.loss_count)
+ else:
+ self.offered_count = int(self.offered_count)
+ if self.loss_count is None:
+ self.loss_count = self.offered_count - self.forwarding_count
+ else:
+ self.loss_count = int(self.loss_count)
+ if self.intended_count is None:
+ self.intended_count = self.offered_count
+ else:
+ self.intended_count = int(self.intended_count)
+ # TODO: Handle (somehow) situations where offered > intended?
+
+ @property
+ def unsent_count(self) -> int:
+ """How many packets were not transmitted (transactions not started).
+
+ :returns: Intended count minus offered count.
+ :rtype: int
+ """
+ return self.intended_count - self.offered_count
+
+ @property
+ def loss_ratio(self) -> float:
+ """Bad count divided by overall count, zero if the latter is zero.
+
+ The bad count includes not only loss count, but also unsent count.
+ If unsent count is negative, its absolute value is used.
+ The overall count is intended count or offered count,
+ whichever is bigger.
+
+ Together, the resulting formula tends to increase loss ratio
+ (but not above 100%) in irregular situations,
+ thus guiding search algorithms towards lower loads
+ where there should be less irregularities.
+ The zero default is there to prevent search algorithms from
+ getting stuck on a too low intended load.
+
+ :returns: Bad count divided by overall count.
+ :rtype: float
+ """
+ overall = max(self.offered_count, self.intended_count)
+ bad = abs(self.loss_count) + abs(self.unsent_count)
+ return bad / overall if overall else 0.0
+
+ @property
+ def relative_forwarding_rate(self) -> float:
+ """Forwarding rate in load units as if duration and load was intended.
+
+ The result is based purely on intended load and loss ratio.
+ While the resulting value may be far from what really happened,
+ it has nice behavior with respect to common assumptions
+ of search algorithms.
+
+ :returns: Forwarding rate in load units estimated from loss ratio.
+ :rtype: float
+ """
+ return self.intended_load * (1.0 - self.loss_ratio)
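
A short usage sketch of the fill-in and derived quantities described above (numbers are made up, and the import path again assumes the package is importable as MLRsearch):

# Usage sketch of MeasurementResult; numbers are made up.
from MLRsearch.trial_measurement import MeasurementResult

result = MeasurementResult(
    intended_duration=1.0,
    intended_load=1_000_000.0,  # tps, per port
    offered_count=998_000,      # intended_count defaults to this value
    loss_count=1_000,
)
print(result.forwarding_count)          # 997000, filled in by __post_init__
print(result.loss_ratio)                # 1000 / 998000, about 0.001
print(result.relative_forwarding_rate)  # intended_load * (1 - loss_ratio)
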
diff --git a/resources/libraries/python/MLRsearch/trimmed_stat.py b/resources/libraries/python/MLRsearch/trimmed_stat.py
new file mode 100644
index 0000000000..74918d78b0
--- /dev/null
+++ b/resources/libraries/python/MLRsearch/trimmed_stat.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module defining TrimmedStat class."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .load_stats import LoadStats
+from .target_spec import TargetSpec
+
+
+@dataclass
+class TrimmedStat(LoadStats):
+ """Load stats trimmed to a single target.
+
+ Useful mainly for reporting the overall results.
+ """
+
+ def __post_init__(self) -> None:
+ """Initialize load value and check there is one target to track."""
+ super().__post_init__()
+ if len(self.target_to_stat) != 1:
+ raise ValueError(f"No single target: {self.target_to_stat!r}")
+
+ @staticmethod
+ def for_target(stats: LoadStats, target: TargetSpec) -> TrimmedStat:
+ """Return new instance with only one target in the mapping.
+
+ :param stats: The load stats instance to trim.
+ :param target: The one target which should remain in the mapping.
+ :type stats: LoadStats
+ :type target: TargetSpec
+ :returns: Newly created instance.
+ :rtype: TrimmedStat
+ """
+ return TrimmedStat(
+ rounding=stats.rounding,
+ int_load=stats.int_load,
+ target_to_stat={target: stats.target_to_stat[target]},
+ )