author | Vratko Polak <vrpolak@cisco.com> | 2023-10-19 10:47:48 +0200
committer | Vratko Polak <vrpolak@cisco.com> | 2023-10-19 09:38:51 +0000
commit | 377412dfd433b993876025bc8af8df8a7ee01133 (patch)
tree | 85701d7fe0f899093ff13a4650f783328eb9808b
parent | 2e8e3ba24bc007e84bcbff06f40d04c559aad92f (diff)
feat(MLRsearch): Update to v8 conditional throughput
Hopefully, with CSIT config values, PDR lower than NDR will not happen.
Also bump the duration_sum default to an odd number,
so users are not surprised by not seeing the standard median behavior.
For CSIT this should not matter: overheads hide ties,
and the number of trials (at least for STL) should stay the same.
Change-Id: Id7130f978c31e71227499612424007c473bcfac2
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
(cherry picked from commit 577bc7b093f30b4faa65e97b1d13778ff44bcaa1)
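
The median remark above is easiest to see on a small example. Below is a minimal standalone sketch (the helper name is made up, it is not part of the MLRsearch API) of the duration-weighted quantile that the new TargetStat.pessimistic_loss_ratio() method in the diff below uses for conditional throughput, assuming a median-like exceed ratio of 0.5:

```python
# Minimal standalone sketch (not the library API): the duration-weighted
# quantile behind conditional throughput, with a median-like exceed ratio.
from typing import Dict


def weighted_quantile_loss_ratio(
    long_losses: Dict[float, float],  # loss ratio -> sum of trial durations [s]
    duration_sum: float,
    exceed_ratio: float = 0.5,
) -> float:
    """Return the loss ratio such that trials with strictly worse loss
    cover at most exceed_ratio of the duration sum; ties resolve to the
    lower value, mirroring pessimistic_loss_ratio() in the diff below."""
    all_long = max(duration_sum, sum(long_losses.values()))
    remaining = all_long * (1.0 - exceed_ratio)
    ret = None
    for ratio, dursum in sorted(long_losses.items()):
        if ret is None or remaining > 0.0:
            ret = ratio
            remaining -= dursum
        else:
            break
    else:
        if remaining > 0.0:
            ret = 1.0  # missing duration counts as full-loss trials
    return ret


# Even duration_sum: ten 1 s trials at 0.0 loss and ten at 0.01 loss
# tie exactly at the 50 % mark, so the lower value (0.0) is returned.
print(weighted_quantile_loss_ratio({0.0: 10.0, 0.01: 10.0}, duration_sum=20.0))
# Odd duration_sum: 21 trials have an unambiguous middle trial,
# so the result (0.01) matches the standard median expectation.
print(weighted_quantile_loss_ratio({0.0: 10.0, 0.01: 11.0}, duration_sum=21.0))
```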
-rw-r--r-- | PyPI/MLRsearch/README.rst | 24
-rw-r--r-- | PyPI/MLRsearch/setup.py | 2
-rw-r--r-- | resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py | 19
-rw-r--r-- | resources/libraries/python/MLRsearch/search_goal.py | 6
-rw-r--r-- | resources/libraries/python/MLRsearch/target_stat.py | 59
-rw-r--r-- | resources/libraries/python/MLRsearch/trimmed_stat.py | 19
-rw-r--r-- | resources/libraries/python/TrafficGenerator.py | 2
-rw-r--r-- | resources/libraries/robot/performance/performance_utils.robot | 4
8 files changed, 90 insertions, 45 deletions
diff --git a/PyPI/MLRsearch/README.rst b/PyPI/MLRsearch/README.rst
index 92c2afe484..de26fadfa4 100644
--- a/PyPI/MLRsearch/README.rst
+++ b/PyPI/MLRsearch/README.rst
@@ -16,7 +16,7 @@ is only a symlink to the original place of tightly coupled CSIT code.
 
 Change log
 ----------
-1.0.0: Logic improvements, independent selectors, exceed ratio support,
+1.1.0: Logic improvements, independent selectors, exceed ratio support,
 better width rounding, conditional throughput as output.
 Implementation relies more on dataclasses, code split into smaller files.
 API changed considerably, mainly to avoid long argument lists.
@@ -106,7 +106,7 @@ This is the screen capture of interactive python interpreter
 ...     relative_width=0.005,
 ...     initial_trial_duration=1.0,
 ...     final_trial_duration=1.0,
-...     duration_sum=61.0,
+...     duration_sum=21.0,
 ...     preceding_targets=2,
 ...     expansion_coefficient=2,
 ... )
@@ -122,29 +122,27 @@ This is the screen capture of interactive python interpreter
 >>> result = controller.search(measurer=Hard1MppsMeasurer(), debug=print_dot)
 ....................................................................................
 ....................................................................................
-....................................................................................
-....................................................................................
-....................................................................................
-....................................................................................
-...>>> print(result)
+...................>>> print(result)
 {SearchGoal(loss_ratio=0.0, exceed_ratio=0.005, relative_width=0.005, initial_trial_
-duration=1.0, final_trial_duration=1.0, duration_sum=61.0, preceding_targets=2, expa
-nsion_coefficient=2): fl=997497.6029392382,s=(gl=61.0,bl=0.0,gs=0.0,bs=0.0), SearchG
-oal(loss_ratio=0.005, exceed_ratio=0.005, relative_width=0.005, initial_trial_durati
-on=1.0, final_trial_duration=1.0, duration_sum=61.0, preceding_targets=2, expansion_
-coefficient=2): fl=1002508.6747611101,s=(gl=61.0,bl=0.0,gs=0.0,bs=0.0)}
+duration=1.0, final_trial_duration=1.0, duration_sum=21.0, preceding_targets=2, expa
+nsion_coefficient=2, fail_fast=True): fl=997497.6029392382,s=(gl=21.0,bl=0.0,gs=0.0,
+bs=0.0), SearchGoal(loss_ratio=0.005, exceed_ratio=0.005, relative_width=0.005, init
+ial_trial_duration=1.0, final_trial_duration=1.0, duration_sum=21.0, preceding_targe
+ts=2, expansion_coefficient=2, fail_fast=True): fl=1002508.6747611101,s=(gl=21.0,bl=
+0.0,gs=0.0,bs=0.0)}
 >>> print(f"NDR conditional throughput: {float(result[ndr_goal].conditional_throughp
 ut)}")
 NDR conditional throughput: 997497.6029392382
 >>> print(f"PDR conditional throughput: {float(result[pdr_goal].conditional_throughp
 ut)}")
 PDR conditional throughput: 1000000.6730730429
+>>>
 
 Operation logic
 ---------------
 
 The currently published `IETF draft`_ describes the logic of version 0.4,
-the logic of version 1.0 will be descibed better in the next draft version (-05).
+the logic of version 1.1 will be descibed better in the next draft version (-05).
 
 .. _CSIT: https://wiki.fd.io/view/CSIT
 .. _fd.io: https://fd.io/
diff --git a/PyPI/MLRsearch/setup.py b/PyPI/MLRsearch/setup.py
index 1906ff3e87..f824d15fd6 100644
--- a/PyPI/MLRsearch/setup.py
+++ b/PyPI/MLRsearch/setup.py
@@ -16,7 +16,7 @@ with open(path.join(here, u"README.rst"), encoding=u"utf-8") as f:
 
 setup(
     name=u"MLRsearch",
-    version=u"1.0.0", # This is currently the only place listing the version.
+    version=u"1.1.0", # This is currently the only place listing the version.
     description=u"Library for speeding up binary search using shorter measurements.",
     long_description=long_description,
     long_description_content_type=u"text/x-rst",
diff --git a/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
index b48e2e7547..9f7be4fcd1 100644
--- a/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
+++ b/resources/libraries/python/MLRsearch/multiple_loss_ratio_search.py
@@ -38,7 +38,10 @@ from .trimmed_stat import TrimmedStat
 
 @dataclass
 class MultipleLossRatioSearch:
-    """Optimized binary search algorithm for finding conditional throughputs.
+    """Implementation of the controller part of MLRsearch algorithm.
+
+    The manager part is creating and calling this,
+    the measurer part is injected.
 
     Traditional binary search algorithm needs initial interval
     (lower and upper bound), and returns final narrow bounds
@@ -88,6 +91,14 @@ class MultipleLossRatioSearch:
 
     There are also subtle optimizations related to candidate selection
     and uneven splitting of intervals, too numerous to list here.
+
+    The return values describe performance at the relevant lower bound
+    as "conditional throughput", which is based on loss ratio of one of trials
+    selected as a quantile based on exceed ratio parameter.
+    Usually this value may be quite pessimistic, as MLRsearch stops
+    measuring a load as soon as it becomes a lower bound,
+    so conditional throughput is usually based on forwarding rate
+    of the worst on the good long trials.
     """
 
     config: Config
@@ -123,11 +134,11 @@ class MultipleLossRatioSearch:
 
         :param measurer: Measurement provider to use by this search object.
        :param debug: Callable to optionally use instead of logging.debug().
-        :returns: Structure containing conditional throughputs and other stats,
-            one for each search goal.
         :type measurer: AbstractMeasurer
         :type debug: Optional[Callable[[str], None]]
-        :returns: Mapping from goal to lower bound (none if min load is hit).
+        :returns: Structure containing conditional throughputs and other stats,
+            one for each search goal. If a value is None it means there is
+            no lower bound (min load turned out to be an upper bound).
         :rtype: Pep3140Dict[SearchGoal, Optional[TrimmedStat]]
         :raises RuntimeError: If total duration is larger than timeout,
             or if min load becomes an upper bound for a search goal
diff --git a/resources/libraries/python/MLRsearch/search_goal.py b/resources/libraries/python/MLRsearch/search_goal.py
index 7d7fd69841..777ad5b991 100644
--- a/resources/libraries/python/MLRsearch/search_goal.py
+++ b/resources/libraries/python/MLRsearch/search_goal.py
@@ -18,7 +18,9 @@ from dataclasses import dataclass
 
 @dataclass(frozen=True, eq=True)
 class SearchGoal:
-    """This is the part of controller inputs that can be repeated
+    """Storage class for search goal attributes.
+
+    This is the part of controller inputs that can be repeated
     with different values. MLRsearch saves time by searching
     for conditional throughput for each goal at the same time,
     compared to repeated calls with separate goals.
@@ -44,7 +46,7 @@ class SearchGoal:
     """Shortest trial duration employed when searching for this goal."""
     final_trial_duration: float = 1.0
     """Longest trial duration employed when searching for this goal."""
-    duration_sum: float = 20.0
+    duration_sum: float = 21.0
     """Minimal sum of durations of relevant trials sufficient to declare
     a load to be upper or lower bound for this goal."""
     preceding_targets: int = 2
diff --git a/resources/libraries/python/MLRsearch/target_stat.py b/resources/libraries/python/MLRsearch/target_stat.py
index 9d30d51b9c..e558139af7 100644
--- a/resources/libraries/python/MLRsearch/target_stat.py
+++ b/resources/libraries/python/MLRsearch/target_stat.py
@@ -14,7 +14,7 @@
 """Module defining LoadStat class."""
 
 from dataclasses import dataclass, field
-from typing import Tuple
+from typing import Dict, Tuple
 
 from .target_spec import TargetSpec
 from .discrete_result import DiscreteResult
@@ -31,12 +31,9 @@ class TargetStat:
     or an upper bound. For additional logic for dealing with loss inversion
     see MeasurementDatabase.
 
-    Besides the duration sums needed for determining upper and lower bound,
-    a field useful for computing the conditional throughput is also included.
-    The conditional throughput is average of the (relative) forwarding rates
-    of good long trials weighted by gool long trial durations.
-    As the intended load is stored elsewhere, the one additional field here
-    has a peculiar unit, it is a sum of products of seconds and loss ratios.
+    Also, data needed for conditional throughput is gathered here,
+    exposed only as a pessimistic loss ratio
+    (as the load value is not stored here).
     """
 
     target: TargetSpec = field(repr=False)
@@ -49,8 +46,8 @@ class TargetStat:
     """Sum of durations of shorter trials satisfying target loss ratio."""
     bad_short: float = 0.0
     """Sum of durations of shorter trials not satisfying target loss ratio."""
-    dur_rat_sum: float = 0.0
-    """Sum over good long trials, of duration multiplied by loss ratio."""
+    long_losses: Dict[float, float] = field(repr=False, default_factory=dict)
+    """If a loss ratio value occured in a long trial, map it to duration sum."""
 
     def __str__(self) -> str:
         """Convert into a short human-readable string.
@@ -73,14 +70,18 @@ class TargetStat:
         :type result: DiscreteResult
         """
         dwo = result.duration_with_overheads
+        rlr = result.loss_ratio
         if result.intended_duration >= self.target.trial_duration:
-            if result.loss_ratio > self.target.loss_ratio:
+            if rlr not in self.long_losses:
+                self.long_losses[rlr] = 0.0
+                self.long_losses = dict(sorted(self.long_losses.items()))
+            self.long_losses[rlr] += dwo
+            if rlr > self.target.loss_ratio:
                 self.bad_long += dwo
             else:
                 self.good_long += dwo
-                self.dur_rat_sum += dwo * result.loss_ratio
         else:
-            if result.loss_ratio > self.target.loss_ratio:
+            if rlr > self.target.loss_ratio:
                 self.bad_short += dwo
             else:
                 self.good_short += dwo
@@ -115,3 +116,37 @@ class TargetStat:
         optimistic = effective_excess <= limit_dursum
         pessimistic = (effective_dursum - self.good_long) <= limit_dursum
         return optimistic, pessimistic
+
+    def pessimistic_loss_ratio(self) -> float:
+        """Return the loss ratio for conditional throughput computation.
+
+        It adds missing dursum as full-loss trials to long_losses
+        and returns a quantile corresponding to exceed ratio.
+        In case of tie (as in median for even number of samples),
+        this returns the lower value (as being equal to goal exceed ratio
+        is allowed).
+
+        For loads classified as a lower bound, the return value
+        ends up being no larger than the target loss ratio.
+        This is because the excess short bad trials would only come
+        after the quantile in question (as would full-loss missing trials).
+        For other loads, anything can happen, but conditional throughput
+        should not be computed for those anyway.
+        Those two facts allow the logic here be simpler than in estimates().
+
+        :returns: Effective loss ratio based on long trial results.
+        :rtype: float
+        """
+        all_long = max(self.target.duration_sum, self.good_long + self.bad_long)
+        remaining = all_long * (1.0 - self.target.exceed_ratio)
+        ret = None
+        for ratio, dursum in self.long_losses.items():
+            if ret is None or remaining > 0.0:
+                ret = ratio
+                remaining -= dursum
+            else:
+                break
+        else:
+            if remaining > 0.0:
+                ret = 1.0
+        return ret
diff --git a/resources/libraries/python/MLRsearch/trimmed_stat.py b/resources/libraries/python/MLRsearch/trimmed_stat.py
index 0076644aa3..088e8beaf8 100644
--- a/resources/libraries/python/MLRsearch/trimmed_stat.py
+++ b/resources/libraries/python/MLRsearch/trimmed_stat.py
@@ -57,22 +57,21 @@ class TrimmedStat(LoadStats):
     def conditional_throughput(self) -> Optional[DiscreteLoad]:
         """Compute conditional throughput from the load.
 
-        Target stat has dur_rat_sum and good_long.
-        The code here adds intended load and handles the case min load is hit.
-        If min load is not a lower bound, None is returned.
+        The conditional throughput has the same semantics as load,
+        so if load is unicirectional and user wants bidirectional
+        throughput, the manager has to compensate.
+
+        This only works correctly if the self load is a lower bound
+        for the self target, but this method does not check that.
+        Its should not matter, as MLRsearch controller only returns
+        the relevant lower bounds to the manager.
 
         :return: Conditional throughput assuming self is a relevant lower bound.
         :rtype: Optional[DiscreteLoad]
         :raises RuntimeError: If target is unclear or load is spurious.
         """
         target = list(self.target_to_stat.keys())[0]
-        _, pes = self.estimates(target)
-        if not pes:
-            if int(self):
-                raise RuntimeError(f"Not a lower bound: {self}")
-            return None
-        # TODO: Verify self is really the clo?
         stat = self.target_to_stat[target]
-        loss_ratio = stat.dur_rat_sum / stat.good_long
+        loss_ratio = stat.pessimistic_loss_ratio()
         ret = self * (1.0 - loss_ratio)
         return ret
diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py
index 7d6fdead64..9faa3c49f6 100644
--- a/resources/libraries/python/TrafficGenerator.py
+++ b/resources/libraries/python/TrafficGenerator.py
@@ -1434,7 +1434,7 @@ class OptimizedSearch:
         relative_width: float = 0.005,
         initial_trial_duration: float = 1.0,
         final_trial_duration: float = 1.0,
-        duration_sum: float = 20.0,
+        duration_sum: float = 21.0,
         expansion_coefficient: int = 2,
         preceding_targets: int = 2,
         search_duration_max: float = 1200.0,
diff --git a/resources/libraries/robot/performance/performance_utils.robot b/resources/libraries/robot/performance/performance_utils.robot
index 9392bc0e8c..fd03583ac9 100644
--- a/resources/libraries/robot/performance/performance_utils.robot
+++ b/resources/libraries/robot/performance/performance_utils.robot
@@ -179,7 +179,7 @@
 | | ... | relative_width=${0.005}
 | | ... | initial_trial_duration=${1.0}
 | | ... | final_trial_duration=${1.0}
-| | ... | duration_sum=${20.0}
+| | ... | duration_sum=${21.0}
 | | ... | preceding_targets=${2}
 | | ... | search_duration_max=${1200.0}
 | | ... | ppta=${ppta}
@@ -266,7 +266,7 @@
 | | ... | relative_width=${0.001}
 | | ... | initial_trial_duration=${1.0}
 | | ... | final_trial_duration=${1.0}
-| | ... | duration_sum=${10.0}
+| | ... | duration_sum=${11.0}
 | | ... | preceding_targets=${1}
 | | ... | search_duration_max=${1200}
 | | ... | ppta=${ppta}
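
Tying the trimmed_stat.py hunk together: once the pessimistic loss ratio is known, conditional throughput is just the relevant lower-bound load scaled by the fraction of traffic that survives at that ratio. A short worked example with made-up numbers:

```python
# Hypothetical numbers illustrating ret = self * (1.0 - loss_ratio) from the
# conditional_throughput code in trimmed_stat.py above (the real code
# multiplies a DiscreteLoad object; plain floats are used here for brevity).
lower_bound_load = 1_002_508.67        # assumed PDR lower bound, in pps
pessimistic_loss_ratio = 0.002         # assumed quantile over long trials

conditional_throughput = lower_bound_load * (1.0 - pessimistic_loss_ratio)
print(f"{conditional_throughput:.2f}")  # 1000503.65
```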