# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module holding PLRsearch class."""

import logging
import math
import multiprocessing
import time

import dill
# TODO: Inform pylint about scipy (of correct version) being available.
from scipy.special import erfcx, erfc

# TODO: Teach FD.io CSIT to use multiple dirs in PYTHONPATH,
# then switch to absolute imports within PLRsearch package.
# Current usage of relative imports is just a short term workaround.
import Integrator  # pylint: disable=relative-import
from log_plus import log_plus, log_minus  # pylint: disable=relative-import
import stat_trackers  # pylint: disable=relative-import


class PLRsearch(object):
    """A class to encapsulate data relevant for the search method.

    The context is performance testing of packet processing systems.
    The system, when being offered a steady stream of packets,
    can process some of them successfully; others are considered "lost".

    See docstring of the search method for algorithm description.

    Two constants are stored as class fields for speed.

    Methods other than search (and __init__)
    are just internal code structure.
    TODO: Those method names should then start with an underscore.
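
    Example (an illustrative sketch; measurer is assumed to implement
    MLRsearch.AbstractMeasurer, which is not defined in this module)::

        search = PLRsearch(
            measurer, trial_duration_per_trial=5.0,
            packet_loss_ratio_target=1e-7)
        average, stdev = search.search(min_rate=100.0, max_rate=10e6)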
    """

    xerfcx_limit = math.pow(math.acos(0), -0.5)
    log_xerfcx_10 = math.log(xerfcx_limit - math.exp(10) * erfcx(math.exp(10)))

    def __init__(
            self, measurer, trial_duration_per_trial, packet_loss_ratio_target,
            trial_number_offset=0, timeout=1800.0, trace_enabled=False):
        """Store rate measurer and additional parameters.

        TODO: Copy AbstractMeasurer from MLRsearch.

        :param measurer: The measurer to call when searching.
        :param trial_duration_per_trial: Each trial has larger duration
            than the previous trial. This is the increment, in seconds.
        :param packet_loss_ratio_target: The algorithm tries to estimate
            the offered load leading to this ratio on average.
            Trial ratio is number of packets lost divided by packets offered.
        :param trial_number_offset: The "first" trial number will be 1+this.
            Use this to ensure first iterations have enough time to compute
            reasonable estimates for later trials to use.
        :param timeout: The search ends if it lasts more than this many seconds.
        :type measurer: MLRsearch.AbstractMeasurer
        :type trial_duration_per_trial: float
        :type packet_loss_ratio_target: float
        :type trial_number_offset: int
        :type timeout: float
        """
        self.measurer = measurer
        self.trial_duration_per_trial = float(trial_duration_per_trial)
        self.packet_loss_ratio_target = float(packet_loss_ratio_target)
        self.trial_number_offset = int(trial_number_offset)
        self.timeout = float(timeout)
        self.trace_enabled = bool(trace_enabled)

    def search(self, min_rate, max_rate):
        """Perform the search, return average and stdev for throughput estimate.

        Considering the measurer and packet_loss_ratio_target (see __init__),
        find such an offered load (called the critical load) that is expected
        to hit the target loss ratio in the limit of very long trial duration.
        As the system is probabilistic (and test duration is finite),
        the critical load can only be estimated.
        Return the average and standard deviation of the estimate.

        In principle, this algorithm performs trial measurements,
        each with varied offered load (which is constant during the trial).
        During each measurement, Bayesian inference is performed
        on all the measurement results so far.
        When the timeout is up, the latest estimate is returned;
        otherwise another trial is performed.

        It is assumed that the system under test, even though not deterministic,
        still follows the law of large numbers. In other words,
        any growing set of measurements at a particular offered load
        will converge towards a unique (for the given load) packet loss ratio.
        This means there is a deterministic (but unknown) function
        mapping the offered load to the average loss ratio.
        This function is called the loss ratio function.
        This also assumes the average loss ratio
        does not depend on trial duration.

        The actual probability distribution of loss counts, achieving
        the average ratio on trials of various durations,
        can be complicated (and can depend on offered load), but simply assuming
        a Poisson distribution will make the algorithm converge.
        A binomial distribution would be more precise,
        but Poisson is more practical, as it effectively gives
        less information content to high ratio results.

        Even when applying other assumptions on the loss ratio function
        (increasing function, limit zero ratio when load goes to zero,
        global upper limit on rate of packets processed), there are still
        too many different shapes of possible loss functions,
        which makes full Bayesian reasoning intractable.

        This implementation radically simplifies things by examining
        only two shapes, each with finitely many (in this case just two)
        parameters. In other words, two fitting functions
        (each with two parameters and one argument).
        When restricting model space to one of the two fitting functions,
        the Bayesian inference becomes tractable (even though it needs
        numerical integration from Integrator class).

        The first measurement is done at the middle between
        min_rate and max_rate, to help with convergence
        if max_rate measurements give loss below target.
        TODO: Fix overflow error and use min_rate instead of the middle.

        The second measurement is done at max_rate; the next few measurements
        have an offered load of the previous load minus the excess loss rate.
        This simple rule is found to be good when the offered loads so far
        are way above the critical rate. After a few measurements,
        inference from the fitting functions converges faster than this initial
        "optimistic" procedure.

        Offered loads close to (limiting) critical rate are the most useful,
        as linear approximation of the fitting function
        becomes good enough there (thus reducing the impact
        of the overall shape of fitting function).
        After several trials, usually one of the fitting functions
        has better predictions than the other one, but the algorithm
        does not track that. It simply uses the estimated averages,
        alternating between the two functions.
        Multiple workarounds are applied to try and avoid measurements
        both in the zero loss region and in the big loss region,
        as their results tend to make the critical load estimate worse.

        The returned average and stdev are a combination of the two fitting
        estimates.

        :param min_rate: Avoid measuring at offered loads below this,
            in packets per second.
        :param max_rate: Avoid measuring at offered loads above this,
            in packets per second.
        :type min_rate: float
        :type max_rate: float
        :returns: Average and stdev of critical load estimate.
        :rtype: 2-tuple of floats
        """
        stop_time = time.time() + self.timeout
        min_rate = float(min_rate)
        max_rate = float(max_rate)
        logging.info("Started search with min_rate %(min)r, max_rate %(max)r",
                     {"min": min_rate, "max": max_rate})
        trial_result_list = list()
        trial_number = self.trial_number_offset
        focus_trackers = (None, None)
        transmit_rate = (min_rate + max_rate) / 2.0
        lossy_loads = [max_rate]
        zeros = [0, 0]  # Consecutive zero loss, separately for stretch and erf.
        while 1:
            trial_number += 1
            logging.info("Trial %(number)r", {"number": trial_number})
            results = self.measure_and_compute(
                self.trial_duration_per_trial * trial_number, transmit_rate,
                trial_result_list, min_rate, max_rate, focus_trackers)
            measurement, average, stdev, avg1, avg2, focus_trackers = results
            index = trial_number % 2
            zeros[index] += 1
            # TODO: Ratio of fill rate to drain rate seems to have
            # exponential impact. Make it configurable, or is 4:3 good enough?
            if measurement.loss_fraction >= self.packet_loss_ratio_target:
                for _ in range(4 * zeros[index]):
                    lossy_loads.append(measurement.target_tr)
            if measurement.loss_count > 0:
                zeros[index] = 0
            lossy_loads.sort()
            if stop_time <= time.time():
                return average, stdev
            trial_result_list.append(measurement)
            if (trial_number - self.trial_number_offset) <= 1:
                next_load = max_rate
            elif (trial_number - self.trial_number_offset) <= 3:
                next_load = (measurement.receive_rate / (
                    1.0 - self.packet_loss_ratio_target))
            else:
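                # Alternate between the stretch and erf estimates
                # as the next candidate load.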
                index = (trial_number + 1) % 2
                next_load = (avg1, avg2)[index]
                if zeros[index] > 0:
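                    # After a streak of zero-loss trials, pull next_load
                    # up towards the lowest load known to cause loss;
                    # the pull strengthens as the streak grows.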
                    if lossy_loads[0] > next_load:
                        diminisher = math.pow(2.0, 1 - zeros[index])
                        next_load = lossy_loads[0] + diminisher * next_load
                        next_load /= (1.0 + diminisher)
                    # On zero measurement, we need to drain obsoleted low losses
                    # even if we did not use them to increase next_load,
                    # in order to get usable losses at higher loads.
                    if len(lossy_loads) > 3:
                        lossy_loads = lossy_loads[3:]
                logging.debug("Zeros %(z)r orig %(o)r next %(n)r loads %(s)r",
                              {"z": zeros, "o": (avg1, avg2)[index],
                               "n": next_load, "s": lossy_loads})
            transmit_rate = min(max_rate, max(min_rate, next_load))

    @staticmethod
    def lfit_stretch(trace, load, mrr, spread):
        """Stretch-based fitting function.

        Return the logarithm of average packet loss per second
        when the load (argument) is offered to a system with given
        mrr and spread (parameters).
        The stretch function is 1/(1+exp(-x)). The average itself is the
        definite integral, from zero to load, of the shifted and x-scaled
        stretch function.
        As the integrator is sensitive to discontinuities,
        and it calls this function over large areas of parameter space,
        the implementation has to avoid rounding errors and overflows,
        and correctly approximate underflows.
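
        In closed form, the average loss per second is::

            lps(load) = spread * (log(1 + exp(chi)) - log(1 + exp(chi0)))

        where chi = (load - mrr) / spread and chi0 = -mrr / spread;
        the branches below evaluate the logarithm of this expression stably.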

        TODO: Explain how the high-level description
        has been converted into an implementation full of ifs.

        :param trace: A multiprocessing-friendly logging function (closure).
        :param load: Offered load (positive), in packets per second.
        :param mrr: Parameter of this fitting function, equal to limiting
            (positive) average number of packets received (as opposed to lost)
            when offered load is many spreads more than mrr.
        :param spread: The x-scaling parameter (positive). No nice semantics,
            roughly corresponds to size of "tail" for loads below mrr.
        :type trace: function (str, object) -> NoneType
        :type load: float
        :type mrr: float
        :type spread: float
        :returns: Logarithm of average number of packets lost per second.
        :rtype: float
        """
        # TODO: What is the fastest way to use such values?
        log_2 = math.log(2)
        log_3 = math.log(3)
        log_spread = math.log(spread)
        # TODO: chi is from https://en.wikipedia.org/wiki/Nondimensionalization
        chi = (load - mrr) / spread
        chi0 = -mrr / spread
        trace("stretch: load", load)
        trace("mrr", mrr)
        trace("spread", spread)
        trace("chi", chi)
        trace("chi0", chi0)
        if chi > 0:
            log_lps = math.log(
                load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread)
            trace("big loss direct log_lps", log_lps)
        else:
            two_positive = log_plus(chi, 2 * chi0 - log_2)
            two_negative = log_plus(chi0, 2 * chi - log_2)
            if two_positive <= two_negative:
                log_lps = log_minus(chi, chi0) + log_spread
                trace("small loss crude log_lps", log_lps)
                return log_lps
            two = log_minus(two_positive, two_negative)
            three_positive = log_plus(two_positive, 3 * chi - log_3)
            three_negative = log_plus(two_negative, 3 * chi0 - log_3)
            three = log_minus(three_positive, three_negative)
            if two == three:
                log_lps = two + log_spread
                trace("small loss approx log_lps", log_lps)
            else:
                log_lps = math.log(log_plus(0, chi) - log_plus(0, chi0))
                log_lps += log_spread
                trace("small loss direct log_lps", log_lps)
        return log_lps

    @staticmethod
    def lfit_erf(trace, load, mrr, spread):
        """Erf-based fitting function.

        Return the logarithm of average packet loss per second
        when the load (argument) is offered to a system with given
        mrr and spread (parameters).
        The erf function is the primitive function (antiderivative)
        of the normal distribution density.
        The average itself is the definite integral, from zero to load,
        of the shifted and x-scaled erf function.
        As the integrator is sensitive to discontinuities,
        and it calls this function over large areas of parameter space,
        the implementation has to avoid rounding errors and overflows,
        and correctly approximate underflows.
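
        The implementation relies on the scaled complementary error
        function, erfcx(x) = exp(x*x) * erfc(x) (scipy.special.erfcx),
        to avoid overflow for large arguments.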

        TODO: Explain how the high-level description
        has been converted into an implementation full of ifs.

        :param trace: A multiprocessing-friendly logging function (closure).
        :param load: Offered load (positive), in packets per second.
        :param mrr: Parameter of this fitting function, equal to limiting
            (positive) average number of packets received (as opposed to lost)
            when offered load is many spreads more than mrr.
        :param spread: The x-scaling parameter (positive). No nice semantics,
            roughly corresponds to size of "tail" for loads below mrr.
        :type trace: function (str, object) -> NoneType
        :type load: float
        :type mrr: float
        :type spread: float
        :returns: Logarithm of average number of packets lost per second.
        :rtype: float
        """
        # Beware, this chi has the sign opposite to the stretch function chi.
        # TODO: The stretch sign is just to have less minuses. Worth changing?
        chi = (mrr - load) / spread
        chi0 = mrr / spread
        trace("Erf: load", load)
        trace("mrr", mrr)
        trace("spread", spread)
        trace("chi", chi)
        trace("chi0", chi0)
        if chi >= -1.0:
            trace("positive, b roughly bigger than m", None)
            if chi > math.exp(10):
                first = PLRsearch.log_xerfcx_10 + 2 * (math.log(chi) - 10)
                trace("approximated first", first)
            else:
                first = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi))
                trace("exact first", first)
            first -= chi * chi
            second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
            second -= chi0 * chi0
            intermediate = log_minus(first, second)
            trace("first", first)
        else:
            trace("negative, b roughly smaller than m", None)
            exp_first = PLRsearch.xerfcx_limit + chi * erfcx(-chi)
            exp_first *= math.exp(-chi * chi)
            exp_first -= 2 * chi
            # TODO: Why has the following line chi there (as opposed to chi0)?
            # In general the functions would be more readable if they explicitly
            #     return math.log(func(chi) - func(chi0))
            # for some function "func", at least for some branches.
            second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
            second -= chi0 * chi0
            intermediate = math.log(exp_first - math.exp(second))
            trace("exp_first", exp_first)
        trace("second", second)
        trace("intermediate", intermediate)
        result = intermediate + math.log(spread) - math.log(erfc(-chi0))
        trace("result", result)
        return result

    @staticmethod
    def find_critical_rate(
            trace, lfit_func, min_rate, max_rate, loss_ratio_target,
            mrr, spread):
        """Given ratio target and parameters, return the achieving offered load.

        This is basically an inverse function to lfit_func
        when parameters are fixed.
        Instead of an efficient implementation of the inverse function,
        this implementation uses brute-force binary search,
        bisecting the (min_rate, max_rate) interval
        until the critical load is found (or the interval becomes degenerate).
        This implementation assures the min and max rate limits are honored.

        TODO: Use some method with faster convergence?
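
        Example (an illustrative sketch with made-up parameter values)::

            trace = lambda name, value: None  # no-op tracing
            load = PLRsearch.find_critical_rate(
                trace, PLRsearch.lfit_stretch, 1e4, 1e7,
                loss_ratio_target=1e-7, mrr=3e6, spread=1e5)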

        :param trace: A multiprocessing-friendly logging function (closure).
        :param lfit_func: Fitting function, typically lfit_stretch or lfit_erf.
        :param min_rate: Lower bound for binary search [pps].
        :param max_rate: Upper bound for binary search [pps].
        :param loss_ratio_target: Fitting function should return loss rate
            giving this ratio at the returned load and parameters [1].
        :param mrr: The mrr parameter for the fitting function [pps].
        :param spread: The spread parameter for the fitting function [pps].
        :type trace: function (str, object) -> None
        :type lfit_func: Function from 3 floats to float.
        :type min_rate: float
        :type max_rate: float
        :type loss_ratio_target: float
        :type mrr: float
        :type spread: float
        :returns: Load [pps] which achieves the target with given parameters.
        :rtype: float
        """
        trace("Finding critical rate for loss_ratio_target", loss_ratio_target)
        rate_lo = min_rate
        rate_hi = max_rate
        loss_ratio = -1
        while loss_ratio != loss_ratio_target:
            rate = (rate_hi + rate_lo) / 2.0
            if rate == rate_hi or rate == rate_lo:
                break
            loss_rate = math.exp(lfit_func(trace, rate, mrr, spread))
            loss_ratio = loss_rate / rate
            if loss_ratio > loss_ratio_target:
                trace("halving down", rate)
                rate_hi = rate
            elif loss_ratio < loss_ratio_target:
                trace("halving up", rate)
                rate_lo = rate
        trace("found", rate)
        return rate

    @staticmethod
    def log_weight(trace, lfit_func, trial_result_list, mrr, spread):
        """Return log of weight of trial results by the function and parameters.

        Integrator assumes uniform distribution, but over different parameters.
        Weight and likelihood are used interchangeably here anyway.

        Each trial has an offered load, a duration and a loss count.
        The fitting function is used to compute the average loss per second.
        Poisson distribution (with average loss per trial) is used
        to get the likelihood of one trial result; the overall likelihood
        is a product of all trial likelihoods.
        As likelihoods can be extremely small, logarithms are tracked instead.
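
        With m the logarithm of the average loss per trial and k the
        loss count, one trial contributes the Poisson log-likelihood::

            k * m - exp(m) - lgamma(k + 1)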

        TODO: Copy ReceiveRateMeasurement from MLRsearch.

        :param trace: A multiprocessing-friendly logging function (closure).
        :param lfit_func: Fitting function, typically lfit_stretch or lfit_erf.
        :param trial_result_list: List of trial measurement results.
        :param mrr: The mrr parameter for the fitting function.
        :param spread: The spread parameter for the fitting function.
        :type trace: function (str, object) -> None
        :type lfit_func: Function from 3 floats to float.
        :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
        :type mrr: float
        :type spread: float
        :returns: Logarithm of result weight for given function and parameters.
        :rtype: float
        """
        log_likelihood = 0.0
        trace("log_weight for mrr", mrr)
        trace("spread", spread)
        for result in trial_result_list:
            trace("for tr", result.target_tr)
            trace("lc", result.loss_count)
            trace("d", result.duration)
            log_avg_loss_per_second = lfit_func(
                trace, result.target_tr, mrr, spread)
            log_avg_loss_per_trial = (
                log_avg_loss_per_second + math.log(result.duration))
            # Poisson probability computation works nice for logarithms.
            log_trial_likelihood = (
                result.loss_count * log_avg_loss_per_trial
                - math.exp(log_avg_loss_per_trial))
            log_trial_likelihood -= math.lgamma(1 + result.loss_count)
            log_likelihood += log_trial_likelihood
            trace("avg_loss_per_trial", math.exp(log_avg_loss_per_trial))
            trace("log_trial_likelihood", log_trial_likelihood)
        return log_likelihood

    # TODO: Refactor (somehow) so pylint stops complaining about
    # too many local variables.
    def measure_and_compute(
            self, trial_duration, transmit_rate, trial_result_list,
            min_rate, max_rate, focus_trackers=(None, None), max_samples=None):
        """Perform both measurement and computation at once.

        High level steps: Prepare and launch computation worker processes,
        perform the measurement, stop computation and combine results.

        Integrator needs a specific function to process (-1, 1) parameters.
        As our fitting functions use dimensional parameters,
        a transformation is performed, resulting in a specific prior
        distribution over the dimensional parameters.
        Maximal rate (line rate) is needed for that transformation.

        Two fitting functions are used; computation is started
        on a temporary worker process per fitting function.
        After the measurement, average and stdev of the critical rate
        (not log) of each worker are combined and returned.
        Raw averages are also returned; the offered load
        for the next iteration is chosen based on them.
        The idea is that one fitting function might fit much better,
        so measurements near its average are the most relevant (for both),
        but we do not know which fitting function it is.

        Focus trackers are updated in-place. If a focus tracker is None,
        a new instance is created.

        TODO: Define a class for the result object, so that fields are
        documented.
        TODO: Re-use processes, instead of creating them on each computation?
        TODO: As only one result is needed fresh, figure out a way
        to keep the other worker running. This will allow a shorter
        duration per trial. Special handling at the first and last measurement
        will be needed (to properly initialize and to properly combine results).

        :param trial_duration: Length of the measurement in seconds.
        :param transmit_rate: Offered load in packets per second.
        :param trial_result_list: Results of previous measurements.
        :param min_rate: Practical minimum of possible offered load.
        :param max_rate: Practical maximum of possible offered load.
        :param focus_trackers: Pair of trackers initialized
            to speed up the numeric computation.
        :param max_samples: Limit for integrator samples, for debugging.
        :type trial_duration: float
        :type transmit_rate: float
        :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
        :type min_rate: float
        :type max_rate: float
        :type focus_trackers: 2-tuple of None or stat_trackers.VectorStatTracker
        :type max_samples: None or int
        :returns: Measurement and computation results.
        :rtype: 6-tuple: ReceiveRateMeasurement, 4 floats, 2-tuple of trackers.
        """
        logging.debug(
            "measure_and_compute started with self %(self)r, trial_duration "
            + "%(dur)r, transmit_rate %(tr)r, trial_result_list %(trl)r, "
            + "max_rate %(mr)r, focus_trackers %(track)r, max_samples %(ms)r",
            {"self": self, "dur": trial_duration, "tr": transmit_rate,
             "trl": trial_result_list, "mr": max_rate, "track": focus_trackers,
             "ms": max_samples})
        # Preparation phase.
        dimension = 2
        stretch_focus_tracker, erf_focus_tracker = focus_trackers
        if stretch_focus_tracker is None:
            stretch_focus_tracker = stat_trackers.VectorStatTracker(dimension)
            stretch_focus_tracker.unit_reset()
        if erf_focus_tracker is None:
            erf_focus_tracker = stat_trackers.VectorStatTracker(dimension)
            erf_focus_tracker.unit_reset()
        old_trackers = stretch_focus_tracker.copy(), erf_focus_tracker.copy()
        def start_computing(fitting_function, focus_tracker):
            """Just a block of code to be used for each fitting function.

            Define function for integrator, create process and pipe ends,
            start computation, return the boss pipe end.

            :param fitting_function: lfit_erf or lfit_stretch.
            :param focus_tracker: Tracker initialized to speed up
                the numeric computation.
            :type fitting_function: Function from 3 floats to float.
            :type focus_tracker: None or stat_trackers.VectorStatTracker
            :returns: Boss end of communication pipe.
            :rtype: multiprocessing.Connection
            """
            def value_logweight_func(trace, x_mrr, x_spread):
                """Return log of critical rate and log of likelihood.

                This is a closure. The enclosing function received
                trial_result_list as a parameter, and we access it here.
                As the integrator has strict conditions on the function
                signature, trial_result_list cannot be an explicit argument
                of the current function.
                This is also why this closure has to be defined anew
                at each invocation of the enclosing function.

                The dimensional spread parameter is the (dimensional) mrr
                raised to the power of x_spread scaled to interval (0, 1).
                The dimensional mrr parameter distribution has the shape
                of 1/(1+x^2), but x==1 corresponds to max_rate,
                and 1.0 pps is added to avoid numerical problems
                in the fitting functions.

                TODO: x^-2 (for x>1.0) might be simpler/nicer prior.

                :param trace: Multiprocessing-safe logging function (closure).
                :param x_mrr: The first dimensionless param
                    from (-1, 1) interval.
                :param x_spread: The second dimensionless param
                    from (-1, 1) interval.
                :type trace: function (str, object) -> None
                :type x_mrr: float
                :type x_spread: float
                :returns: Log of critical rate [pps] and log of likelihood.
                :rtype: 2-tuple of float
                """
                mrr = max_rate * (1.0 / (x_mrr + 1.0) - 0.5) + 1.0
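                # Dimensional spread is mrr ** ((x_spread + 1) / 2),
                # i.e. between 1.0 and mrr.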
                spread = math.exp((x_spread + 1.0) / 2.0 * math.log(mrr))
                logweight = self.log_weight(
                    trace, fitting_function, trial_result_list, mrr, spread)
                value = math.log(self.find_critical_rate(
                    trace, fitting_function, min_rate, max_rate,
                    self.packet_loss_ratio_target, mrr, spread))
                return value, logweight
            dilled_function = dill.dumps(value_logweight_func)
            boss_pipe_end, worker_pipe_end = multiprocessing.Pipe()
            boss_pipe_end.send(
                (dimension, dilled_function, focus_tracker, max_samples))
            worker = multiprocessing.Process(
                target=Integrator.try_estimate_nd, args=(
                    worker_pipe_end, 10.0, self.trace_enabled))
            worker.daemon = True
            worker.start()
            return boss_pipe_end
        erf_pipe = start_computing(
            self.lfit_erf, erf_focus_tracker)
        stretch_pipe = start_computing(
            self.lfit_stretch, stretch_focus_tracker)
        # Measurement phase.
        measurement = self.measurer.measure(trial_duration, transmit_rate)
        # Processing phase.
        def stop_computing(name, pipe):
            """Just a block of code to be used for each worker.

            Send the stop object, poll for the result, then either
            unpack the response, log messages and return,
            or raise an error containing the received traceback.

            TODO: Define class/structure for the return value?

            :param name: Human friendly worker identifier for logging purposes.
            :param pipe: Boss end of connection towards worker to stop.
            :type name: str
            :type pipe: multiprocessing.Connection
            :returns: Computed value tracker, actual focus tracker,
                and number of samples used for this iteration.
            :rtype: 3-tuple of tracker, tracker and int
            """
            pipe.send(None)
            if not pipe.poll(10.0):
                raise RuntimeError(
                    "Worker {name} did not finish!".format(name=name))
            result_or_traceback = pipe.recv()
            try:
                value_tracker, focus_tracker, debug_list, trace_list, sampls = (
                    result_or_traceback)
            except ValueError:
                raise RuntimeError(
                    "Worker {name} failed with the following traceback:\n{tr}"
                    .format(name=name, tr=result_or_traceback))
            logging.info("Logs from worker %(name)r:", {"name": name})
            for message in debug_list:
                logging.info(message)
            for message in trace_list:
                logging.debug(message)
            logging.debug("trackers: value %(val)r focus %(foc)r", {
                "val": value_tracker, "foc": focus_tracker})
            return value_tracker, focus_tracker, sampls
        stretch_value_tracker, stretch_focus_tracker, stretch_samples = (
            stop_computing("stretch", stretch_pipe))
        erf_value_tracker, erf_focus_tracker, erf_samples = (
            stop_computing("erf", erf_pipe))
        stretch_avg = stretch_value_tracker.average
        erf_avg = erf_value_tracker.average
        # TODO: Take into account secondary stats.
        stretch_stdev = math.exp(stretch_value_tracker.log_variance / 2)
        erf_stdev = math.exp(erf_value_tracker.log_variance / 2)
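        # Combine the two log-space estimates as an equal-weight mixture:
        # geometric mean of the averages; mixture variance is the average
        # of the variances plus the variance of the two means.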
        avg = math.exp((stretch_avg + erf_avg) / 2.0)
        var = (stretch_stdev * stretch_stdev + erf_stdev * erf_stdev) / 2.0
        var += (stretch_avg - erf_avg) * (stretch_avg - erf_avg) / 4.0
        stdev = avg * math.sqrt(var)
        focus_trackers = (stretch_focus_tracker, erf_focus_tracker)
        logging.info(
            "measure_and_compute finished with trial result %(res)r "
            "avg %(avg)r stdev %(stdev)r stretch %(a1)r erf %(a2)r "
            "new trackers %(nt)r old trackers %(ot)r stretch samples %(ss)r "
            "erf samples %(es)r",
            {"res": measurement, "avg": avg, "stdev": stdev,
             "a1": math.exp(stretch_avg), "a2": math.exp(erf_avg),
             "nt": focus_trackers, "ot": old_trackers, "ss": stretch_samples,
             "es": erf_samples})
        return (
            measurement, avg, stdev, math.exp(stretch_avg),
            math.exp(erf_avg), focus_trackers)