-rw-r--r--  resources/libraries/python/TrafficGenerator.py   | 185
-rwxr-xr-x  resources/tools/trex/trex_stateless_profile.py   | 267
2 files changed, 106 insertions, 346 deletions
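The patch below folds the former unidirectional driver path into the common one: the library builds a single trex_stateless_profile.py command line, swaps the two TG ports when the DPDK driver enumerated the interfaces in reverse order, and passes --unidirection through to the profile script. A minimal standalone sketch of that port-selection and command-building logic follows; the helper name build_profile_command, its flat argument list, and the example profile name are illustrative only (in the patch this logic lives in TrafficGenerator.trex_stl_start_remote_exec and reads Constants.REMOTE_FW_DIR and self._ifaces_reordered).

def build_profile_command(tool_dir, traffic_type, duration, framesize, rate,
                          warmup_time, tx_port, rx_port, reordered=False,
                          async_call=False, latency=False, unidirection=False):
    """Return the trex_stateless_profile.py invocation as one shell command."""
    # If the DPDK driver enumerated the TG ports in the opposite order,
    # swap them here so the caller-visible tx_port/rx_port stay invariant.
    p_0, p_1 = (rx_port, tx_port) if reordered else (tx_port, rx_port)
    command = (
        "sh -c '{tool}/resources/tools/trex/trex_stateless_profile.py"
        " --profile {tool}/resources/traffic_profiles/trex/{traffic}.py"
        " --duration {duration} --frame_size {framesize} --rate {rate}"
        " --warmup_time {warmup} --port_0 {p_0} --port_1 {p_1}").format(
            tool=tool_dir, traffic=traffic_type, duration=duration,
            framesize=framesize, rate=rate, warmup=warmup_time,
            p_0=p_0, p_1=p_1)
    # Optional switches are appended only when requested; --unidirection
    # tells the profile script to drive traffic from port_0 only.
    if async_call:
        command += " --async"
    if latency:
        command += " --latency"
    if unidirection:
        command += " --unidirection"
    return command + "'"


# Example: a unidirectional 10 s run on a host where DPDK reversed the port
# order, so the script actually transmits on physical port 1.
print(build_profile_command(
    "/opt/csit", "hypothetical-profile", duration=10.0, framesize=64,
    rate="1mpps", warmup_time=5.0, tx_port=0, rx_port=1, reordered=True,
    unidirection=True))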
diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py
index 6123b8753a..10fee9afbf 100644
--- a/resources/libraries/python/TrafficGenerator.py
+++ b/resources/libraries/python/TrafficGenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -168,13 +168,14 @@ class TrafficGenerator(AbstractMeasurer):
         """
         return self._latency
 
-    def initialize_traffic_generator(self, tg_node, tg_if1, tg_if2,
-                                     tg_if1_adj_node, tg_if1_adj_if,
-                                     tg_if2_adj_node, tg_if2_adj_if,
-                                     test_type,
-                                     tg_if1_dst_mac=None, tg_if2_dst_mac=None):
+    def initialize_traffic_generator(
+            self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
+            tg_if2_adj_node, tg_if2_adj_if, test_type, tg_if1_dst_mac=None,
+            tg_if2_dst_mac=None):
         """TG initialization.
 
+        TODO: Document why do we need (and how do we use) _ifaces_reordered.
+
         :param tg_node: Traffic generator node.
         :param tg_if1: TG - name of first interface.
         :param tg_if2: TG - name of second interface.
@@ -386,9 +387,10 @@ class TrafficGenerator(AbstractMeasurer):
         if int(ret) != 0:
             raise RuntimeError('TRex stateless runtime error')
 
-    def trex_stl_start_remote_exec(self, duration, rate, framesize,
-                                   traffic_type, async_call=False,
-                                   latency=True, warmup_time=5.0):
+    def trex_stl_start_remote_exec(
+            self, duration, rate, framesize, traffic_type, async_call=False,
+            latency=True, warmup_time=5.0, unidirection=False, tx_port=0,
+            rx_port=1):
         """Execute script on remote node over ssh to start traffic.
 
         :param duration: Time expresed in seconds for how long to send traffic.
@@ -399,6 +401,11 @@ class TrafficGenerator(AbstractMeasurer):
         :param async_call: If enabled then don't wait for all incomming trafic.
         :param latency: With latency measurement.
         :param warmup_time: Warmup time period.
+        :param unidirection: Traffic is unidirectional. Default: False
+        :param tx_port: Traffic generator transmit port for first flow.
+            Default: 0
+        :param rx_port: Traffic generator receive port for first flow.
+            Default: 1
         :type duration: float
         :type rate: str
         :type framesize: str
@@ -406,34 +413,33 @@ class TrafficGenerator(AbstractMeasurer):
         :type async_call: bool
         :type latency: bool
         :type warmup_time: float
-        :returns: Nothing
+        :type unidirection: bool
+        :type tx_port: int
+        :type rx_port: int
         :raises RuntimeError: In case of TG driver issue.
         """
         ssh = SSH()
         ssh.connect(self._node)
 
+        reorder = self._ifaces_reordered  # Just to make the next line fit.
+        p_0, p_1 = (rx_port, tx_port) if reorder else (tx_port, rx_port)
+        command = (
+            "sh -c '{tool}/resources/tools/trex/trex_stateless_profile.py"
+            " --profile {prof}/resources/traffic_profiles/trex/{traffic}.py"
+            " --duration {duration} --frame_size {framesize} --rate {rate}"
+            " --warmup_time {warmup} --port_0 {p_0} --port_1 {p_1}").format(
+                tool=Constants.REMOTE_FW_DIR, prof=Constants.REMOTE_FW_DIR,
+                traffic=traffic_type, duration=duration, framesize=framesize,
+                rate=rate, warmup=warmup_time, p_0=p_0, p_1=p_1)
+        if async_call:
+            command += " --async"
+        if latency:
+            command += " --latency"
+        if unidirection:
+            command += " --unidirection"
+        command += "'"
-        _async = "--async" if async_call else ""
-        _latency = "--latency" if latency else ""
-        _p0, _p1 = (2, 1) if self._ifaces_reordered else (1, 2)
-
-        profile_path = ("{0}/resources/traffic_profiles/trex/"
-                        "{1}.py".format(Constants.REMOTE_FW_DIR,
-                                        traffic_type))
         (ret, stdout, _) = ssh.exec_command(
-            "sh -c "
-            "'{0}/resources/tools/trex/trex_stateless_profile.py "
-            "--profile {1} "
-            "--duration {2} "
-            "--frame_size {3} "
-            "--rate {4} "
-            "--warmup_time {5} "
-            "--port_0 {6} "
-            "--port_1 {7} "
-            "{8} "  # --async
-            "{9}'".  # --latency
-            format(Constants.REMOTE_FW_DIR, profile_path, duration, framesize,
-                   rate, warmup_time, _p0 - 1, _p1 - 1, _async, _latency),
-            timeout=float(duration) + 60)
+            command, timeout=float(duration) + 60)
 
         if int(ret) != 0:
             raise RuntimeError('TRex stateless runtime error')
@@ -446,95 +452,15 @@ class TrafficGenerator(AbstractMeasurer):
         else:
             # last line from console output
             line = stdout.splitlines()[-1]
-
         self._result = line
         logger.info('TrafficGen result: {0}'.format(self._result))
-
         self._received = self._result.split(', ')[1].split('=')[1]
         self._sent = self._result.split(', ')[2].split('=')[1]
         self._loss = self._result.split(', ')[3].split('=')[1]
-
         self._latency = []
         self._latency.append(self._result.split(', ')[4].split('=')[1])
         self._latency.append(self._result.split(', ')[5].split('=')[1])
 
-    def trex_stl_start_unidirection(
-            self, duration, rate, framesize, traffic_type, async_call=False,
-            latency=False, warmup_time=5.0, tx_port=0, rx_port=1):
-        """Execute script on remote node over ssh to start unidirection traffic.
-        The purpose of this function is to support performance test that need to
-        measure unidirectional traffic, e.g. Load balancer maglev mode and l3dsr
-        mode test.
-
-        :param duration: Time expresed in seconds for how long to send traffic.
-        :param rate: Traffic rate expressed with units (pps, %)
-        :param framesize: L2 frame size to send (without padding and IPG).
-        :param traffic_type: Module name as a traffic type identifier.
-            See resources/traffic_profiles/trex for implemented modules.
-        :param latency: With latency measurement.
-        :param async_call: If enabled then don't wait for all incomming trafic.
-        :param warmup_time: Warmup time period.
-        :param tx_port: Traffic generator transmit port.
-        :param rx_port: Traffic generator receive port.
-        :type duration: float
-        :type rate: str
-        :type framesize: str
-        :type traffic_type: str
-        :type latency: bool
-        :type async_call: bool
-        :type warmup_time: float
-        :type tx_port: integer
-        :type rx_port: integer
-        :raises RuntimeError: In case of TG driver issue.
- """ - ssh = SSH() - ssh.connect(self._node) - - _latency = "--latency" if latency else "" - _async = "--async" if async_call else "" - - profile_path = ("{0}/resources/traffic_profiles/trex/" - "{1}.py".format(Constants.REMOTE_FW_DIR, - traffic_type)) - (ret, stdout, _) = ssh.exec_command( - "sh -c " - "'{0}/resources/tools/trex/trex_stateless_profile.py " - "--profile {1} " - "--duration {2} " - "--frame_size {3} " - "--rate {4} " - "--warmup_time {5} " - "--port_0 {6} " - "--port_1 {7} " - "{8} " # --async - "{9} " # --latency - "{10}'". # --unidirection - format(Constants.REMOTE_FW_DIR, profile_path, duration, framesize, - rate, warmup_time, tx_port, rx_port, _async, _latency, - "--unidirection"), - timeout=float(duration) + 60) - - if int(ret) != 0: - raise RuntimeError('TRex unidirection runtime error') - elif async_call: - #no result - self._received = None - self._sent = None - self._loss = None - self._latency = None - else: - # last line from console output - line = stdout.splitlines()[-1] - - self._result = line - logger.info('TrafficGen result: {0}'.format(self._result)) - - self._received = self._result.split(', ')[1].split('=')[1] - self._sent = self._result.split(', ')[2].split('=')[1] - self._loss = self._result.split(', ')[3].split('=')[1] - self._latency = [] - self._latency.append(self._result.split(', ')[4].split('=')[1]) - def stop_traffic_on_tg(self): """Stop all traffic on TG. @@ -552,6 +478,19 @@ class TrafficGenerator(AbstractMeasurer): rx_port=1): """Send traffic from all configured interfaces on TG. + Note that bidirectional traffic also contains flows + transmitted from rx_port and received in tx_port. + But some tests use asymmetric traffic, so those arguments are relevant. + + Also note that traffic generator uses DPDK driver which might + reorder port numbers based on wiring and PCI numbering. + This method handles that, so argument values are invariant, + but you can see swapped valued in debug logs. + + TODO: Is it better to have less descriptive argument names + just to make them less probable to be viewed as misleading or confusing? + See https://gerrit.fd.io/r/#/c/17625/11/resources/libraries/python/TrafficGenerator.py@406 + :param duration: Duration of test traffic generation in seconds. :param rate: Offered load per interface (e.g. 1%, 3gbps, 4mpps, ...). :param framesize: Frame size (L2) in Bytes. @@ -560,9 +499,11 @@ class TrafficGenerator(AbstractMeasurer): :param warmup_time: Warmup phase in seconds. :param async_call: Async mode. :param latency: With latency measurement. - :param unidirection: Traffic is unidirectional. - :param tx_port: Traffic generator transmit port. - :param rx_port: Traffic generator receive port. + :param unidirection: Traffic is unidirectional. Default: False + :param tx_port: Traffic generator transmit port for first flow. + Default: 0 + :param rx_port: Traffic generator receive port for first flow. + Default: 1 :type duration: str :type rate: str :type framesize: str @@ -571,8 +512,8 @@ class TrafficGenerator(AbstractMeasurer): :type async_call: bool :type latency: bool :type unidirection: bool - :type tx_port: integer - :type rx_port: integer + :type tx_port: int + :type rx_port: int :returns: TG output. 
         :rtype: str
         :raises RuntimeError: If TG is not set, or if node is not TG,
@@ -590,15 +531,9 @@ class TrafficGenerator(AbstractMeasurer):
         if node['subtype'] is None:
             raise RuntimeError('TG subtype not defined')
         elif node['subtype'] == NodeSubTypeTG.TREX:
-            if unidirection:
-                self.trex_stl_start_unidirection(duration, rate, framesize,
-                                                 traffic_type, tx_port,
-                                                 rx_port, async_call, latency,
-                                                 warmup_time)
-            else:
-                self.trex_stl_start_remote_exec(duration, rate, framesize,
-                                                traffic_type, async_call,
-                                                latency, warmup_time)
+            self.trex_stl_start_remote_exec(
+                duration, rate, framesize, traffic_type, async_call, latency,
+                warmup_time, unidirection, tx_port, rx_port)
         else:
             raise NotImplementedError("TG subtype not supported")
diff --git a/resources/tools/trex/trex_stateless_profile.py b/resources/tools/trex/trex_stateless_profile.py
index de29ff505a..61b244e21a 100755
--- a/resources/tools/trex/trex_stateless_profile.py
+++ b/resources/tools/trex/trex_stateless_profile.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -98,43 +98,6 @@ def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
    :type unidirection: bool
    """
-    #unidirection traffic
-    if unidirection:
-        send_traffic_unidirection(profile_file, duration, framesize, rate,
-                                  warmup_time, port_0, port_1, latency,
-                                  async_start)
-    #bidirection traffic
-    else:
-        send_traffic_bidirection(profile_file, duration, framesize, rate,
-                                 warmup_time, port_0, port_1, latency,
-                                 async_start)
-
-
-def send_traffic_bidirection(profile_file, duration, framesize, rate,
-                             warmup_time, port_0, port_1, latency,
-                             async_start=False):
-    """Send traffic bidirection and measure packet loss and latency.
-
-    :param profile_file: A python module with T-rex traffic profile.
-    :param framesize: Frame size.
-    :param duration: Duration of traffic run in seconds (-1=infinite).
-    :param rate: Traffic rate [percentage, pps, bps].
-    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
-    :param port_0: Port 0 on the traffic generator.
-    :param port_1: Port 1 on the traffic generator.
-    :param latency: With latency stats.
-    :param async_start: Start the traffic and exit.
-    :type profile_file: str
-    :type framesize: int or str
-    :type duration: float
-    :type rate: str
-    :type warmup_time: float
-    :type port_0: int
-    :type port_1: int
-    :type latency: bool
-    :type async_start: bool
-    """
-
     client = None
     total_rcvd = 0
     total_sent = 0
@@ -167,33 +130,37 @@ def send_traffic_bidirection(profile_file, duration, framesize, rate,
                                  resolve=False)
         if isinstance(framesize, int):
             client.add_streams(streams[0], ports=[port_0])
-            client.add_streams(streams[1], ports=[port_1])
+            if not unidirection:
+                client.add_streams(streams[1], ports=[port_1])
         elif isinstance(framesize, str):
             client.add_streams(streams[0:3], ports=[port_0])
-            client.add_streams(streams[3:6], ports=[port_1])
+            if not unidirection:
+                client.add_streams(streams[3:6], ports=[port_1])
 
         if latency:
             try:
                 if isinstance(framesize, int):
                     client.add_streams(streams[2], ports=[port_0])
-                    client.add_streams(streams[3], ports=[port_1])
+                    if not unidirection:
+                        client.add_streams(streams[3], ports=[port_1])
                 elif isinstance(framesize, str):
                     latency = False
            except STLError:
                # Disable latency if NIC does not support requested stream type
                print("##### FAILED to add latency streams #####")
                latency = False
+        ports = [port_0]
+        if not unidirection:
+            ports.append(port_1)
 
         # Warm-up phase:
         if warmup_time > 0:
             # Clear the stats before injecting:
             client.clear_stats()
 
             # Choose rate and start traffic:
-            client.start(ports=[port_0, port_1], mult=rate,
-                         duration=warmup_time)
+            client.start(ports=ports, mult=rate, duration=warmup_time)
 
             # Block until done:
-            client.wait_on_traffic(ports=[port_0, port_1],
-                                   timeout=warmup_time+30)
+            client.wait_on_traffic(ports=ports, timeout=warmup_time+30)
 
             if client.get_warnings():
                 for warning in client.get_warnings():
@@ -206,11 +173,15 @@ def send_traffic_bidirection(profile_file, duration, framesize, rate,
             print(json.dumps(stats, indent=4, separators=(',', ': '),
                              sort_keys=True))
 
-            lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
-            lost_b = stats[1]["opackets"] - stats[0]["ipackets"]
+            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
+            if not unidirection:
+                lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]
 
-            print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
-            print("packets lost from 1 --> 0: {0} pkts".format(lost_b))
+            print("\npackets lost from {p_0} --> {p_1}: {v} pkts".format(
+                p_0=port_0, p_1=port_1, v=lost_a))
+            if not unidirection:
+                print("packets lost from {p_1} --> {p_0}: {v} pkts".format(
+                    p_0=port_0, p_1=port_1, v=lost_b))
 
         # Clear the stats before injecting:
         client.clear_stats()
@@ -218,11 +189,11 @@ def send_traffic_bidirection(profile_file, duration, framesize, rate,
         lost_b = 0
 
         # Choose rate and start traffic:
-        client.start(ports=[port_0, port_1], mult=rate, duration=duration)
+        client.start(ports=ports, mult=rate, duration=duration)
 
         if not async_start:
             # Block until done:
-            client.wait_on_traffic(ports=[port_0, port_1], timeout=duration+30)
+            client.wait_on_traffic(ports=ports, timeout=duration+30)
 
             if client.get_warnings():
                 for warning in client.get_warnings():
@@ -235,24 +206,33 @@ def send_traffic_bidirection(profile_file, duration, framesize, rate,
             print(json.dumps(stats, indent=4, separators=(',', ': '),
                              sort_keys=True))
 
-            lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
-            lost_b = stats[1]["opackets"] - stats[0]["ipackets"]
+            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
+            if not unidirection:
+                lost_b = stats[port_1]["opackets"] - stats[port_0]["ipackets"]
 
             if latency:
                 lat_a = fmt_latency(
str(stats["latency"][0]["latency"]["total_min"]), - str(stats["latency"][0]["latency"]["average"]), - str(stats["latency"][0]["latency"]["total_max"])) - lat_b = fmt_latency( - str(stats["latency"][1]["latency"]["total_min"]), - str(stats["latency"][1]["latency"]["average"]), - str(stats["latency"][1]["latency"]["total_max"])) - - total_sent = stats[0]["opackets"] + stats[1]["opackets"] - total_rcvd = stats[0]["ipackets"] + stats[1]["ipackets"] - - print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a)) - print("packets lost from 1 --> 0: {0} pkts".format(lost_b)) + str(stats["latency"][port_0]["latency"]["total_min"]), + str(stats["latency"][port_0]["latency"]["average"]), + str(stats["latency"][port_0]["latency"]["total_max"])) + if not unidirection: + lat_b = fmt_latency( + str(stats["latency"][port_1]["latency"]["total_min"]), + str(stats["latency"][port_1]["latency"]["average"]), + str(stats["latency"][port_1]["latency"]["total_max"])) + + if not unidirection: + total_sent = stats[0]["opackets"] + stats[1]["opackets"] + total_rcvd = stats[0]["ipackets"] + stats[1]["ipackets"] + else: + total_sent = stats[port_0]["opackets"] + total_rcvd = stats[port_1]["ipackets"] + + print("\npackets lost from {p_0} --> {p_1}: {v} pkts".format( + p_0=port_0, p_1=port_1, v=lost_a)) + if not unidirection: + print("packets lost from {p_1} --> {p_0}: {v} pkts".format( + p_0=port_0, p_1=port_1, v=lost_b)) except STLError as err: sys.stderr.write("{0}\n".format(err)) @@ -272,161 +252,6 @@ def send_traffic_bidirection(profile_file, duration, framesize, rate, lat_a, lat_b)) -def send_traffic_unidirection(profile_file, duration, framesize, rate, - warmup_time, port_0, port_1, latency, - async_start=False): - """Send traffic unidirection and measure packet loss and latency. - - :param profile_file: A python module with T-rex traffic profile. - :param framesize: Frame size. - :param duration: Duration of traffic run in seconds (-1=infinite). - :param rate: Traffic rate [percentage, pps, bps]. - :param warmup_time: Traffic warm-up time in seconds, 0 = disable. - :param port_0: Port 0 on the traffic generator. - :param port_1: Port 1 on the traffic generator. - :param latency: With latency stats. - :param async_start: Start the traffic and exit. 
-    :type profile_file: str
-    :type framesize: int or str
-    :type duration: float
-    :type rate: str
-    :type warmup_time: float
-    :type port_0: int
-    :type port_1: int
-    :type latency: bool
-    :type async_start: bool
-    """
-
-    client = None
-    total_rcvd = 0
-    total_sent = 0
-    lost_a = 0
-    lat_a = "-1/-1/-1"
-
-    # Read the profile:
-    try:
-        print("### Profile file:\n{}".format(profile_file))
-        profile = STLProfile.load(profile_file, direction=0, port_id=0,
-                                  framesize=framesize)
-        streams = profile.get_streams()
-    except STLError as err:
-        print("Error while loading profile '{0}' {1}".format(profile_file, err))
-        sys.exit(1)
-
-    try:
-        # Create the client:
-        client = STLClient(verbose_level=LoggerApi.VERBOSE_QUIET)
-        # Connect to server:
-        client.connect()
-        # Prepare our ports:
-        if port_0 == port_1:
-            client.reset(ports=[port_0])
-            client.remove_all_streams(ports=[port_0])
-
-            if "macsrc" in profile_file:
-                client.set_port_attr(ports=[port_0], promiscuous=True,
-                                     resolve=False)
-        else:
-            client.reset(ports=[port_0, port_1])
-            client.remove_all_streams(ports=[port_0, port_1])
-
-            if "macsrc" in profile_file:
-                client.set_port_attr(ports=[port_0, port_1], promiscuous=True,
-                                     resolve=False)
-
-        if isinstance(framesize, int):
-            client.add_streams(streams[0], ports=[port_0])
-        elif isinstance(framesize, str):
-            client.add_streams(streams[0:3], ports=[port_0])
-        if latency:
-            try:
-                if isinstance(framesize, int):
-                    client.add_streams(streams[2], ports=[port_0])
-                elif isinstance(framesize, str):
-                    latency = False
-            except STLError:
-                # Disable latency if NIC does not support requested stream type
-                print("##### FAILED to add latency streams #####")
-                latency = False
-
-        # Warm-up phase:
-        if warmup_time > 0:
-            # Clear the stats before injecting:
-            client.clear_stats()
-
-            # Choose rate and start traffic:
-            client.start(ports=[port_0], mult=rate,
-                         duration=warmup_time)
-
-            # Block until done:
-            client.wait_on_traffic(ports=[port_0],
-                                   timeout=warmup_time+30)
-
-            if client.get_warnings():
-                for warning in client.get_warnings():
-                    print(warning)
-
-            # Read the stats after the test:
-            stats = client.get_stats()
-
-            print("##### Warmup statistics #####")
-            print(json.dumps(stats, indent=4, separators=(',', ': '),
-                             sort_keys=True))
-
-            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
-            print("\npackets lost : {0} pkts".format(lost_a))
-
-        # Clear the stats before injecting:
-        client.clear_stats()
-        lost_a = 0
-
-        # Choose rate and start traffic:
-        client.start(ports=[port_0], mult=rate, duration=duration)
-
-        if not async_start:
-            # Block until done:
-            client.wait_on_traffic(ports=[port_0], timeout=duration+30)
-
-            if client.get_warnings():
-                for warning in client.get_warnings():
-                    print(warning)
-
-            # Read the stats after the test
-            stats = client.get_stats()
-
-            print("##### Statistics #####")
-            print(json.dumps(stats, indent=4, separators=(',', ': '),
-                             sort_keys=True))
-
-            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
-
-            if latency:
-                lat_a = fmt_latency(
-                    str(stats["latency"][0]["latency"]["total_min"]),
-                    str(stats["latency"][0]["latency"]["average"]),
-                    str(stats["latency"][0]["latency"]["total_max"]))
-
-            total_sent = stats[port_0]["opackets"]
-            total_rcvd = stats[port_1]["ipackets"]
-
-            print("\npackets lost : {0} pkts".format(lost_a))
-
-    except STLError as err:
-        sys.stderr.write("{0}\n".format(err))
-        sys.exit(1)
-
-    finally:
-        if async_start:
-            if client:
-                client.disconnect(stop_traffic=False, release_ports=True)
-        else:
-            if client:
-                client.disconnect()
- print("rate={0}, totalReceived={1}, totalSent={2}, " - "frameLoss={3}, latencyStream0(usec)={4}". - format(rate, total_rcvd, total_sent, lost_a, lat_a)) - - def main(): """Main function for the traffic generator using T-rex. |