author | Vratko Polak <vrpolak@cisco.com> | 2021-03-25 19:26:35 +0100 |
---|---|---|
committer | Vratko Polak <vrpolak@cisco.com> | 2021-03-26 14:32:10 +0100 |
commit | 9f0ea4e6be697aa8ca08a6da596d7ca5892d2183 (patch) | |
tree | 79ede5e7c43a5d0106a84b81093650c7bce8117b /resources/libraries/python/TrafficGenerator.py | |
parent | 6bba7b0bef31a8993848f0dcf5e661e8ccf02ddd (diff) | |
STL traffic: Use the sleep+stop logic
+ PPS limit for AWS set to 1.2 Mpps.
+ The logic is very similar to the one in the ASTF driver.
+ This helps on testbeds with high duration stretching (e.g. AWS).
+ Difference: No transaction scale, and we deal with floats.
+ Update loss counting to count unsent packets as lost.
+ Also count "unsent" transactions for other transaction types.
+ If nonzero, log the number of unsent packets/transactions.
+ Make STL and ASTF time overhead constant (called delay) configurable.
+ Subtract delay from approximated_duration, also for ASTF.
Change-Id: I6ee6aa6fba4f110ba1636e1b0ff76cac64383e33
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
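For context, the "sleep+stop" approach referenced in the commit message can be sketched roughly as follows. This is a minimal illustration only, not the actual CSIT TRex driver code; the `client` object, `ports` list and `delay` value are placeholders, and the assumed `start`/`stop` calls stand in for whatever the real traffic-generator client exposes.

```python
import time

def run_stl_trial(client, ports, duration, delay):
    """Start traffic, sleep for duration + delay, then stop explicitly.

    Instead of blocking on a wait-for-traffic call (which can return late
    when the TG stretches the trial, e.g. on AWS), the driver sleeps for
    the intended duration plus a constant overhead (delay) and stops the
    ports itself.  'client', 'ports' and 'delay' are placeholders here.
    """
    start_time = time.monotonic()
    client.start(ports=ports, duration=duration)
    time.sleep(duration + delay)
    client.stop(ports=ports)
    # The constant overhead is subtracted back out of the measured time,
    # mirroring the "subtract delay from approximated_duration" change.
    approximated_duration = time.monotonic() - start_time - delay
    return approximated_duration
```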
Diffstat (limited to 'resources/libraries/python/TrafficGenerator.py')
-rw-r--r-- | resources/libraries/python/TrafficGenerator.py | 30 |
1 file changed, 25 insertions, 5 deletions
diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py
index c0d1b4ec63..e42ad48930 100644
--- a/resources/libraries/python/TrafficGenerator.py
+++ b/resources/libraries/python/TrafficGenerator.py
@@ -13,6 +13,7 @@
 """Performance testing traffic generator library."""
 
+import math
 import time
 
 from robot.api import logger
@@ -577,7 +578,6 @@ class TrafficGenerator(AbstractMeasurer):
         # so we can compare with what telemetry suggests
         # the real duration was.
         logger.debug(f"Expected duration {computed_duration}")
-        computed_duration += 0.1115
         if not self.duration_limit:
             return computed_duration, True
         limited_duration = min(computed_duration, self.duration_limit)
@@ -652,6 +652,9 @@ class TrafficGenerator(AbstractMeasurer):
         command_line.add_if(u"async_start", async_call)
         command_line.add_if(u"latency", self.use_latency)
         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
+        command_line.add_with_value(
+            u"delay", Constants.PERF_TRIAL_ASTF_DELAY
+        )
 
         self._start_time = time.monotonic()
         self._rate = multiplier
@@ -759,6 +762,7 @@ class TrafficGenerator(AbstractMeasurer):
         command_line.add_if(u"async_start", async_call)
         command_line.add_if(u"latency", self.use_latency)
         command_line.add_if(u"force", Constants.TREX_SEND_FORCE)
+        command_line.add_with_value(u"delay", Constants.PERF_TRIAL_STL_DELAY)
 
         # TODO: This is ugly. Handle parsing better.
         self._start_time = time.monotonic()
@@ -1244,16 +1248,27 @@ class TrafficGenerator(AbstractMeasurer):
         if not target_duration:
             target_duration = approximated_duration
         transmit_rate = self._rate
+        unsent = 0
         if self.transaction_type == u"packet":
             partial_attempt_count = self._sent
-            expected_attempt_count = self._sent
-            fail_count = self._loss
+            packet_rate = transmit_rate * self.ppta
+            # We have a float. TRex way of rounding it is not obvious.
+            # The biggest source of mismatch is Inter Stream Gap.
+            # So the code tolerates 10 usec of missing packets.
+            expected_attempt_count = (target_duration - 1e-5) * packet_rate
+            expected_attempt_count = math.ceil(expected_attempt_count)
+            # TRex can send more.
+            expected_attempt_count = max(expected_attempt_count, self._sent)
+            unsent = expected_attempt_count - self._sent
+            pass_count = self._received
+            fail_count = expected_attempt_count - pass_count
         elif self.transaction_type == u"udp_cps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit udp_cps.")
             partial_attempt_count = self._l7_data[u"client"][u"sent"]
             # We do not care whether TG is slow, it should have attempted all.
             expected_attempt_count = self.transaction_scale
+            unsent = expected_attempt_count - partial_attempt_count
             pass_count = self._l7_data[u"client"][u"received"]
             fail_count = expected_attempt_count - pass_count
         elif self.transaction_type == u"tcp_cps":
@@ -1263,6 +1278,7 @@ class TrafficGenerator(AbstractMeasurer):
             partial_attempt_count = ctca
             # We do not care whether TG is slow, it should have attempted all.
             expected_attempt_count = self.transaction_scale
+            unsent = expected_attempt_count - partial_attempt_count
             # From TCP point of view, server/connects counts full connections,
             # but we are testing NAT session so client/connects counts that
             # (half connections from TCP point of view).
@@ -1273,7 +1289,8 @@ class TrafficGenerator(AbstractMeasurer):
                 raise RuntimeError(u"Add support for no-limit udp_pps.")
             partial_attempt_count = self._sent
             expected_attempt_count = self.transaction_scale * self.ppta
-            fail_count = self._loss + (expected_attempt_count - self._sent)
+            unsent = expected_attempt_count - self._sent
+            fail_count = self._loss + unsent
         elif self.transaction_type == u"tcp_pps":
             if not self.transaction_scale:
                 raise RuntimeError(u"Add support for no-limit tcp_pps.")
@@ -1286,9 +1303,12 @@ class TrafficGenerator(AbstractMeasurer):
             # A simple workaround is to add absolute difference.
             # Probability of retransmissions exactly cancelling
             # packets unsent due to duration stretching is quite low.
-            fail_count = self._loss + abs(expected_attempt_count - self._sent)
+            unsent = abs(expected_attempt_count - self._sent)
+            fail_count = self._loss + unsent
         else:
             raise RuntimeError(f"Unknown parsing {self.transaction_type!r}")
+        if unsent:
+            logger.debug(f"Unsent packets/transactions: {unsent}")
         if fail_count < 0 and not self.negative_loss:
             fail_count = 0
         measurement = ReceiveRateMeasurement(
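To make the new loss accounting concrete, here is a small standalone sketch of the computation the "packet" branch above performs. The variable names mirror the diff, but the input numbers are made up purely for illustration; they are not taken from any real test run.

```python
import math

# Example inputs (made up for illustration).
target_duration = 1.0        # intended trial duration in seconds
transmit_rate = 1_200_000.0  # offered transactions per second
ppta = 1                     # packets per transaction, aggregated
sent = 1_150_000             # packets the TG actually transmitted
received = 1_150_000         # packets seen back on the receive side

packet_rate = transmit_rate * ppta
# Tolerate 10 usec worth of packets to absorb rounding / Inter Stream Gap.
expected_attempt_count = math.ceil((target_duration - 1e-5) * packet_rate)
# TRex can send slightly more than expected; never report negative unsent.
expected_attempt_count = max(expected_attempt_count, sent)
unsent = expected_attempt_count - sent
fail_count = expected_attempt_count - received
# Even though nothing was dropped on the wire (sent == received),
# the ~50k packets the TG never sent are now reported as lost.
print(f"unsent={unsent} fail_count={fail_count}")
```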