author    | Peter Mikus <pmikus@cisco.com>      | 2016-05-20 11:45:13 +0200
committer | Miroslav Miklus <mmiklus@cisco.com> | 2016-07-19 12:32:58 +0000
commit    | bb69fe7a166277018230dfe79900f0cf1603a5d3 (patch)
tree      | 1fb5192b4446553aef62cb330f1cfdc6b1290246 /resources
parent    | f7feaf7804f267c9d7880917f6baf9d1bdb21584 (diff)
CSIT-102: Add latency measurement to performance testing
- Add a latency stream as a background stream in both directions
  (a minimal sketch of the stream setup follows after the sign-off lines).
- The latency background stream does not use the VM transformation engine;
  a raw stream with a single packet is created instead.
- The latency background stream runs at a 1 kpps rate with packets of the
  same size as the traffic stream.
- Display latency values (min/avg/max) in the test case results and report
  the remaining data, including histogram and jitter, inside the
  search/pass keyword.
Change-Id: I78ce4659b57caab08d5729f51a1e14d518fd3273
Signed-off-by: Peter Mikus <pmikus@cisco.com>
Signed-off-by: pmikus <pmikus@cisco.com>
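
For orientation, the sketch below illustrates how the latency background
streams introduced by this change are defined with the T-Rex stateless API
(the full diff of t-rex-stateless.py follows further down). It is not part of
the commit: the helper name and addresses are made up, and it assumes
trex_stl_lib is importable, which the real script arranges by extending
sys.path with the T-Rex installation directory.

    # Illustrative sketch only -- mirrors the stream setup added to
    # t-rex-stateless.py; assumes a T-Rex v2.x stateless installation.
    from trex_stl_lib.api import *  # STLStream, STLPktBuilder, Ether, IP, UDP, ...

    def build_port_streams(frame_size=64, pg_id=0):
        """Return (traffic_stream, latency_stream) for one direction."""
        base_pkt = Ether() / IP(src="10.10.10.1", dst="20.20.20.1") / UDP()
        # Pad to the L2 frame size minus FCS, as the script does.
        payload = 'x' * max(0, frame_size - 4 - len(base_pkt))
        pkt = STLPktBuilder(pkt=base_pkt / payload)
        # Main traffic stream; the real script scales its rate via --rate.
        traffic_stream = STLStream(packet=pkt, mode=STLTXCont(pps=1000))
        # Latency background stream: raw packet of the same size, no VM
        # transformation engine, fixed 1 kpps, per-flow latency statistics.
        latency_stream = STLStream(packet=pkt,
                                   flow_stats=STLFlowLatencyStats(pg_id=pg_id),
                                   mode=STLTXCont(pps=1000))
        return traffic_stream, latency_stream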
Diffstat (limited to 'resources')
-rw-r--r-- | resources/libraries/python/DropRateSearch.py   |   6
-rw-r--r-- | resources/libraries/python/TrafficGenerator.py |  56
-rw-r--r-- | resources/libraries/robot/performance.robot    |  37
-rwxr-xr-x | resources/tools/t-rex/t-rex-stateless.py       | 150
4 files changed, 177 insertions, 72 deletions
diff --git a/resources/libraries/python/DropRateSearch.py b/resources/libraries/python/DropRateSearch.py
index 1f8e5618fe..b0f15b25db 100644
--- a/resources/libraries/python/DropRateSearch.py
+++ b/resources/libraries/python/DropRateSearch.py
@@ -453,14 +453,14 @@ class DropRateSearch(object):
    def verify_search_result(self):
        """Fail if search was not successful.

-       :return: Result rate.
-       :rtype: float
+       :return: Result rate and latency stats.
+       :rtype: tuple
        """
        if self._search_result == SearchResults.FAILURE:
            raise Exception('Search FAILED')
        elif self._search_result in [SearchResults.SUCCESS,
                                     SearchResults.SUSPICIOUS]:
-           return self._search_result_rate
+           return self._search_result_rate, self._latency_stats

    def binary_search(self, b_min, b_max, traffic_type, skip_max_rate=False):
        """Binary search of rate with loss below acceptance criteria.
diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py
index 91a43fb0ca..33ff597c8e 100644
--- a/resources/libraries/python/TrafficGenerator.py
+++ b/resources/libraries/python/TrafficGenerator.py
@@ -46,7 +46,10 @@ class TGDropRateSearchImpl(DropRateSearch):
        unit_rate = str(rate) + self.get_rate_type_str()
        tg_instance.trex_stl_start_remote_exec(self.get_duration(),
                                               unit_rate, frame_size,
-                                              traffic_type, False)
+                                              traffic_type)
+       # Get latency stats from stream
+       self._latency_stats = tg_instance.get_latency()
+
        loss = tg_instance.get_loss()
        sent = tg_instance.get_sent()
        if self.loss_acceptance_type_is_percentage():
@@ -74,6 +77,7 @@ class TrafficGenerator(object):
        self._result = None
        self._loss = None
        self._sent = None
+       self._latency = None
        self._received = None
        self._node = None
        # T-REX interface order mapping
@@ -103,6 +107,14 @@ class TrafficGenerator(object):
        """
        return self._received

+   def get_latency(self):
+       """Return min/avg/max latency.
+
+       :return: Latency stats.
+       :rtype: list
+       """
+       return self._latency
+
    #pylint: disable=too-many-arguments, too-many-locals
    def initialize_traffic_generator(self, tg_node, tg_if1, tg_if2,
                                     tg_if1_adj_node, tg_if1_adj_if,
@@ -273,7 +285,8 @@ class TrafficGenerator(object):
            raise RuntimeError('T-rex stateless runtime error')

    def trex_stl_start_remote_exec(self, duration, rate, framesize,
-                                  traffic_type, async_call, warmup_time=5):
+                                  traffic_type, async_call=False,
+                                  latency=True, warmup_time=5):
        """Execute script on remote node over ssh to start traffic.

        :param duration: Time expresed in seconds for how long to send traffic.
@@ -281,12 +294,14 @@
        :param framesize: L2 frame size to send (without padding and IPG).
        :param traffic_type: Traffic profile.
        :param async_call: If enabled then don't wait for all incomming trafic.
+       :param latency: With latency measurement.
        :param warmup_time: Warmup time period.
        :type duration: int
        :type rate: str
        :type framesize: int
        :type traffic_type: str
        :type async_call: bool
+       :type latency: bool
        :type warmup_time: int
        :return: Nothing
        """
@@ -295,10 +310,9 @@
        _p0 = 1
        _p1 = 2

-       _async = ""
+       _async = "--async" if async_call else ""
+       _latency = "--latency" if latency else ""

-       if async_call:
-           _async = "--async"

        if self._ifaces_reordered != 0:
            _p0, _p1 = _p1, _p0
@@ -312,9 +326,10 @@
                "--p{5}_src_start_ip 20.20.20.1 "
                "--p{5}_src_end_ip 20.20.20.254 "
                "--p{5}_dst_start_ip 10.10.10.1 "
-               "{6} --warmup_time={7}'".format(Constants.REMOTE_FW_DIR,
-                                               duration, rate, framesize, _p0,
-                                               _p1, _async, warmup_time),
+               "{6} {7} --warmup_time={8}'".format(Constants.REMOTE_FW_DIR,
+                                                   duration, rate, framesize,
+                                                   _p0, _p1, _async, _latency,
+                                                   warmup_time),
                timeout=int(duration)+60)
        elif traffic_type in ["3-node-IPv4"]:
            (ret, stdout, stderr) = ssh.exec_command(
@@ -326,9 +341,10 @@
                "--p{5}_src_start_ip 20.20.20.2 "
                "--p{5}_src_end_ip 20.20.20.254 "
                "--p{5}_dst_start_ip 10.10.10.2 "
-               "{6} --warmup_time={7}'".format(Constants.REMOTE_FW_DIR,
-                                               duration, rate, framesize, _p0,
-                                               _p1, _async, warmup_time),
+               "{6} {7} --warmup_time={8}'".format(Constants.REMOTE_FW_DIR,
+                                                   duration, rate, framesize,
+                                                   _p0, _p1, _async, _latency,
+                                                   warmup_time),
                timeout=int(duration)+60)
        elif traffic_type in ["3-node-IPv6"]:
            (ret, stdout, stderr) = ssh.exec_command(
@@ -340,9 +356,10 @@
                "--p{5}_src_start_ip 2001:2::2 "
                "--p{5}_src_end_ip 2001:2::FE "
                "--p{5}_dst_start_ip 2001:1::2 "
-               "{6} --warmup_time={7}'".format(Constants.REMOTE_FW_DIR,
-                                               duration, rate, framesize, _p0,
-                                               _p1, _async, warmup_time),
+               "{6} {7} --warmup_time={8}'".format(Constants.REMOTE_FW_DIR,
+                                                   duration, rate, framesize,
+                                                   _p0, _p1, _async, _latency,
+                                                   warmup_time),
                timeout=int(duration)+60)
        else:
            raise NotImplementedError('Unsupported traffic type')
@@ -358,6 +375,7 @@
            self._received = None
            self._sent = None
            self._loss = None
+           self._latency = None
        else:
            # last line from console output
            line = stdout.splitlines()[-1]
@@ -368,6 +386,9 @@
            self._received = self._result.split(', ')[1].split('=')[1]
            self._sent = self._result.split(', ')[2].split('=')[1]
            self._loss = self._result.split(', ')[3].split('=')[1]
+           self._latency = []
+           self._latency.append(self._result.split(', ')[4].split('=')[1])
+           self._latency.append(self._result.split(', ')[5].split('=')[1])

    def stop_traffic_on_tg(self):
        """Stop all traffic on TG
@@ -380,17 +401,20 @@
        self.trex_stl_stop_remote_exec(self._node)

    def send_traffic_on_tg(self, duration, rate, framesize,
-                          traffic_type, warmup_time=5, async_call=False):
+                          traffic_type, warmup_time=5, async_call=False,
+                          latency=True):
        """Send traffic from all configured interfaces on TG.

        :param duration: Duration of test traffic generation in seconds.
        :param rate: Offered load per interface (e.g. 1%, 3gbps, 4mpps, ...).
        :param framesize: Frame size (L2) in Bytes.
        :param traffic_type: Traffic profile.
+       :param latency: With latency measurement.
        :type duration: str
        :type rate: str
        :type framesize: str
        :type traffic_type: str
+       :type latency: bool
        :return: TG output.
        :rtype: str
        """
@@ -406,7 +430,7 @@
            raise Exception('TG subtype not defined')
        elif node['subtype'] == NodeSubTypeTG.TREX:
            self.trex_stl_start_remote_exec(duration, rate, framesize,
-                                           traffic_type, async_call,
+                                           traffic_type, async_call, latency,
                                            warmup_time=warmup_time)
        else:
            raise NotImplementedError("TG subtype not supported")
diff --git a/resources/libraries/robot/performance.robot b/resources/libraries/robot/performance.robot
index 4dcd84e223..63295b7159 100644
--- a/resources/libraries/robot/performance.robot
+++ b/resources/libraries/robot/performance.robot
@@ -343,8 +343,9 @@
| | Set Search Frame Size | ${framesize}
| | Set Search Rate Type pps
| | Linear Search | ${start_rate} | ${topology_type}
-| | ${rate_per_stream}= | Verify Search Result
+| | ${rate_per_stream} | ${latency}= | Verify Search Result
| | Display result of NDR search | ${rate_per_stream} | ${framesize} | 2
+| | ... | ${latency}
| | Traffic should pass with no loss | ${duration} | ${rate_per_stream}pps
| | ... | ${framesize} | ${topology_type}
| | ... | fail_on_loss=${False}
@@ -383,9 +384,10 @@
| | Run Keyword If | '${loss_acceptance_type}' == 'percentage'
| | ... | Set Loss Acceptance Type Percentage
| | Linear Search | ${start_rate} | ${topology_type}
-| | ${rate_per_stream}= | Verify Search Result
+| | ${rate_per_stream} | ${latency}= | Verify Search Result
| | Display result of PDR search | ${rate_per_stream} | ${framesize} | 2
| | ... | ${loss_acceptance} | ${loss_acceptance_type}
+| | ... | ${latency}
| | Traffic should pass with partial loss | ${duration} | ${rate_per_stream}pps
| | ... | ${framesize} | ${topology_type}
| | ... | ${loss_acceptance}
@@ -421,8 +423,9 @@
| | Set Search Rate Type pps
| | Set Binary Convergence Threshold | ${threshold}
| | Binary Search | ${binary_min} | ${binary_max} | ${topology_type}
-| | ${rate_per_stream}= | Verify Search Result
+| | ${rate_per_stream} | ${latency}= | Verify Search Result
| | Display result of NDR search | ${rate_per_stream} | ${framesize} | 2
+| | ... | ${latency}
| | Traffic should pass with no loss | ${duration} | ${rate_per_stream}pps
| | ... | ${framesize} | ${topology_type}
| | ... | fail_on_loss=${False}
@@ -463,9 +466,10 @@
| | ... | Set Loss Acceptance Type Percentage
| | Set Binary Convergence Threshold | ${threshold}
| | Binary Search | ${binary_min} | ${binary_max} | ${topology_type}
-| | ${rate_per_stream}= | Verify Search Result
+| | ${rate_per_stream} | ${latency}= | Verify Search Result
| | Display result of PDR search | ${rate_per_stream} | ${framesize} | 2
| | ... | ${loss_acceptance} | ${loss_acceptance_type}
+| | ... | ${latency}
| | Traffic should pass with partial loss | ${duration} | ${rate_per_stream}pps
| | ... | ${framesize} | ${topology_type}
| | ... | ${loss_acceptance}
@@ -502,8 +506,9 @@
| | Set Search Rate Type pps
| | Set Binary Convergence Threshold | ${threshold}
| | Combined Search | ${start_rate} | ${topology_type}
-| | ${rate_per_stream}= | Verify Search Result
+| | ${rate_per_stream} | ${latency}= | Verify Search Result
| | Display result of NDR search | ${rate_per_stream} | ${framesize} | 2
+| | ... | ${latency}
| | Traffic should pass with no loss | ${duration} | ${rate_per_stream}pps
| | ... | ${framesize} | ${topology_type}
| | ... | fail_on_loss=${False}
@@ -546,9 +551,10 @@
| | ... | Set Loss Acceptance Type Percentage
| | Set Binary Convergence Threshold | ${threshold}
| | Combined Search | ${start_rate} | ${topology_type}
-| | ${rate_per_stream}= | Verify Search Result
+| | ${rate_per_stream} | ${latency}= | Verify Search Result
| | Display result of PDR search | ${rate_per_stream} | ${framesize} | 2
| | ... | ${loss_acceptance} | ${loss_acceptance_type}
+| | ... | ${latency}
| | Traffic should pass with partial loss | ${duration} | ${rate_per_stream}pps
| | ... | ${framesize} | ${topology_type}
| | ... | ${loss_acceptance}
@@ -563,14 +569,17 @@
| | ... | - ${rate_per_stream} - Measured rate per stream [pps]. Type: string
| | ... | - ${framesize} - L2 Frame Size [B]. Type: integer
| | ... | - ${nr_streams} - Total number of streams. Type: integer
+| | ... | - ${latency} - Latency stats. Type: dictionary
| | ...
| | ... | *Return:*
| | ... | - No value returned
| | ...
| | ... | *Example:*
| | ...
-| | ... | \| Display result of NDR search \| 4400000 \| 64 \| 2
+| | ... | \| Display result of NDR search \| 4400000 \| 64 \| 2 \
+| | ... | \| (0, 10/10/10) \|
| | [Arguments] | ${rate_per_stream} | ${framesize} | ${nr_streams}
+| | ... | ${latency}
| | ${rate_total}= | Evaluate | ${rate_per_stream}*${nr_streams}
| | ${bandwidth_total}= | Evaluate | ${rate_total}*(${framesize}+20)*8/(10**9)
| | Set Test Message | FINAL_RATE: ${rate_total} pps
@@ -578,6 +587,9 @@
| | ... | append=yes
| | Set Test Message | ${\n}FINAL_BANDWIDTH: ${bandwidth_total} Gbps (untagged)
| | ... | append=yes
+| | :FOR | ${idx} | ${lat} | IN ENUMERATE | @{latency}
+| | | Set Test Message | ${\n}LATENCY_STREAM_${idx}: ${lat} usec (min/avg/max)
+| | ... | append=yes

| Display result of PDR search
| | [Documentation] | Display result of PDR search in packet per seconds (total
@@ -589,6 +601,7 @@
| | ... | - ${nr_streams} - Total number of streams. Type: integer
| | ... | - ${loss_acceptance} - Accepted loss during search. Type: float
| | ... | - ${loss_acceptance_type} - Percentage or frames. Type: string
+| | ... | - ${latency} - Latency stats. Type: dictionary
| | ...
| | ... | *Return:*
| | ... | - No value returned
@@ -596,9 +609,9 @@
| | ... | *Example:*
| | ...
| | ... | \| Display result of PDR search \| 4400000 \| 64 \| 2 \| 0.5 \
-| | ... | \| percentage
+| | ... | \| percentage \| (0, 10/10/10) \|
| | [Arguments] | ${rate_per_stream} | ${framesize} | ${nr_streams}
-| | ... | ${loss_acceptance} | ${loss_acceptance_type}
+| | ... | ${loss_acceptance} | ${loss_acceptance_type} | ${latency}
| | ${rate_total}= | Evaluate | ${rate_per_stream}*${nr_streams}
| | ${bandwidth_total}= | Evaluate | ${rate_total}*(${framesize}+20)*8/(10**9)
| | Set Test Message | FINAL_RATE: ${rate_total} pps
@@ -606,6 +619,9 @@
| | ... | append=yes
| | Set Test Message | ${\n}FINAL_BANDWIDTH: ${bandwidth_total} Gbps (untagged)
| | ... | append=yes
+| | :FOR | ${idx} | ${lat} | IN ENUMERATE | @{latency}
+| | | Set Test Message | ${\n}LATENCY_STREAM_${idx}: ${lat} usec (min/avg/max)
+| | ... | append=yes
| | Set Test Message | ${\n}LOSS_ACCEPTANCE: ${loss_acceptance} ${loss_acceptance_type}
| | ... | append=yes
@@ -670,7 +686,8 @@
| Clear and show runtime counters with running traffic
| | [Arguments] | ${duration} | ${rate} | ${framesize} | ${topology_type}
| | Send traffic on tg | -1 | ${rate} | ${framesize}
-| | ... | ${topology_type} | warmup_time=0 | async_call=True
+| | ... | ${topology_type} | warmup_time=0 | async_call=${True}
+| | ... | latency=${False}
| | Clear runtime counters on all DUTs
| | Sleep | ${duration}
| | Show runtime counters on all DUTs
diff --git a/resources/tools/t-rex/t-rex-stateless.py b/resources/tools/t-rex/t-rex-stateless.py
index 77895d7b68..7ff4c3713c 100755
--- a/resources/tools/t-rex/t-rex-stateless.py
+++ b/resources/tools/t-rex/t-rex-stateless.py
@@ -109,7 +109,7 @@ def create_packets(traffic_options, frame_size=64):
    :type traffic_options: list
    :type frame_size: int
    :return: Packet instances.
-   :rtype STLPktBuilder
+   :rtype: Tuple of STLPktBuilder
    """

    if frame_size < 64:
@@ -150,11 +150,15 @@ def create_packets(traffic_options, frame_size=64):
        ], split_by_field="src")

    pkt_a = STLPktBuilder(pkt=base_pkt_a/generate_payload(
-       fsize_no_fcs-len(base_pkt_a)), vm=vm1)
+       max(0, fsize_no_fcs-len(base_pkt_a))), vm=vm1)
    pkt_b = STLPktBuilder(pkt=base_pkt_b/generate_payload(
-       fsize_no_fcs-len(base_pkt_b)), vm=vm2)
+       max(0, fsize_no_fcs-len(base_pkt_b))), vm=vm2)
+   lat_a = STLPktBuilder(pkt=base_pkt_a/generate_payload(
+       max(0, fsize_no_fcs-len(base_pkt_a))))
+   lat_b = STLPktBuilder(pkt=base_pkt_b/generate_payload(
+       max(0, fsize_no_fcs-len(base_pkt_b))))

-   return(pkt_a, pkt_b)
+   return(pkt_a, pkt_b, lat_a, lat_b)


def create_packets_v6(traffic_options, frame_size=78):
@@ -165,7 +169,7 @@ def create_packets_v6(traffic_options, frame_size=78):
    :type traffic_options: List
    :type frame_size: int
    :return: Packet instances.
-   :rtype STLPktBuilder
+   :rtype: Tuple of STLPktBuilder
    """

    if frame_size < 78:
@@ -213,25 +217,36 @@ def create_packets_v6(traffic_options, frame_size=78):
        max(0, fsize_no_fcs-len(base_pkt_a))), vm=vm1)
    pkt_b = STLPktBuilder(pkt=base_pkt_b/generate_payload(
        max(0, fsize_no_fcs-len(base_pkt_b))), vm=vm2)
+   lat_a = STLPktBuilder(pkt=base_pkt_a/generate_payload(
+       max(0, fsize_no_fcs-len(base_pkt_a))))
+   lat_b = STLPktBuilder(pkt=base_pkt_b/generate_payload(
+       max(0, fsize_no_fcs-len(base_pkt_b))))

-   return(pkt_a, pkt_b)
+   return(pkt_a, pkt_b, lat_a, lat_b)


-def simple_burst(pkt_a, pkt_b, duration, rate, warmup_time, async_start):
+def simple_burst(pkt_a, pkt_b, pkt_lat_a, pkt_lat_b, duration, rate,
+                warmup_time, async_start, latency):
    """Run the traffic with specific parameters.

    :param pkt_a: Base packet for stream 1.
    :param pkt_b: Base packet for stream 2.
+   :param pkt_lat_a: Base packet for latency stream 1.
+   :param pkt_lat_b: Base packet for latency stream 2.
    :param duration: Duration of traffic run in seconds (-1=infinite).
    :param rate: Rate of traffic run [percentage, pps, bps].
    :param warmup_time: Warm up duration.
-   :async_start: Start the traffic and exit
+   :param async_start: Start the traffic and exit.
+   :param latency: With latency stats.
    :type pkt_a: STLPktBuilder
    :type pkt_b: STLPktBuilder
+   :type pkt_lat_a: STLPktBuilder
+   :type pkt_lat_b: STLPktBuilder
    :type duration: int
    :type rate: string
    :type warmup_time: int
    :type async_start: bool
+   :type latency: bool
    :return: nothing
    """
@@ -242,37 +257,60 @@
    total_sent = 0
    lost_a = 0
    lost_b = 0
+   lat_a = 'NA'
+   lat_b = 'NA'

    try:
        # turn this off if too many logs
        #client.set_verbose("high")

-       # create two streams
-       stream1 = STLStream(packet=pkt_a,
-                           mode=STLTXCont(pps=100))
-
-       # second stream with a phase of 10ns (inter stream gap)
-       stream2 = STLStream(packet=pkt_b,
-                           isg=10.0,
-                           mode=STLTXCont(pps=100))
-
        # connect to server
        client.connect()

        # prepare our ports (my machine has 0 <--> 1 with static route)
        client.reset(ports=[0, 1])

-       # add both streams to ports
+       # create two traffic streams without latency stats
+       stream1 = STLStream(packet=pkt_a,
+                           mode=STLTXCont(pps=1000))
+       # second traffic stream with a phase of 10ns (inter stream gap)
+       stream2 = STLStream(packet=pkt_b,
+                           isg=10.0,
+                           mode=STLTXCont(pps=1000))
        client.add_streams(stream1, ports=[0])
        client.add_streams(stream2, ports=[1])

+       if latency:
+           # create two traffic streams with latency stats
+           lat_stream1 = STLStream(packet=pkt_lat_a,
+                                   flow_stats=STLFlowLatencyStats(pg_id=0),
+                                   mode=STLTXCont(pps=1000))
+           # second traffic stream with a phase of 10ns (inter stream gap)
+           lat_stream2 = STLStream(packet=pkt_lat_b,
+                                   isg=10.0,
+                                   flow_stats=STLFlowLatencyStats(pg_id=1),
+                                   mode=STLTXCont(pps=1000))
+           client.add_streams(lat_stream1, ports=[0])
+           client.add_streams(lat_stream2, ports=[1])
+
        #warmup phase
        if warmup_time > 0:
+           # clear the stats before injecting
            client.clear_stats()
+
+           # choose rate and start traffic
            client.start(ports=[0, 1], mult=rate, duration=warmup_time)
+
+           # block until done
            client.wait_on_traffic(ports=[0, 1], timeout=(warmup_time+30))
+
+           if client.get_warnings():
+               for warning in client.get_warnings():
+                   print_error(warning)
+
+           # read the stats after the test
            stats = client.get_stats()
-           print stats
+
+           print "#####warmup statistics#####"
            print json.dumps(stats, indent=4,
                             separators=(',', ': '), sort_keys=True)
@@ -282,11 +320,8 @@
            print "\npackets lost from 0 --> 1: {0} pkts".format(lost_a)
            print "packets lost from 1 --> 0: {0} pkts".format(lost_b)

-           # clear the stats before injecting
        client.clear_stats()

-       total_rcvd = 0
-       total_sent = 0
        lost_a = 0
        lost_b = 0
@@ -297,16 +332,29 @@
        # block until done
        client.wait_on_traffic(ports=[0, 1], timeout=(duration+30))

+       if client.get_warnings():
+           for warning in client.get_warnings():
+               print_error(warning)
+
        # read the stats after the test
        stats = client.get_stats()

        print "#####statistics#####"
        print json.dumps(stats, indent=4,
                         separators=(',', ': '), sort_keys=True)
-
        lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
        lost_b = stats[1]["opackets"] - stats[0]["ipackets"]

+       if latency:
+           lat_a = "/".join((\
+               str(stats["latency"][0]["latency"]["total_min"]),\
+               str(stats["latency"][0]["latency"]["average"]),\
+               str(stats["latency"][0]["latency"]["total_max"])))
+           lat_b = "/".join((\
+               str(stats["latency"][1]["latency"]["total_min"]),\
+               str(stats["latency"][1]["latency"]["average"]),\
+               str(stats["latency"][1]["latency"]["total_max"])))
+
        total_sent = stats[0]["opackets"] + stats[1]["opackets"]
stats[1]["ipackets"] @@ -322,8 +370,11 @@ def simple_burst(pkt_a, pkt_b, duration, rate, warmup_time, async_start): client.disconnect(stop_traffic=False, release_ports=True) else: client.disconnect() - print "rate={0}, totalReceived={1}, totalSent={2}, frameLoss={3}"\ - .format(rate, total_rcvd, total_sent, lost_a+lost_b) + print "rate={0}, totalReceived={1}, totalSent={2}, "\ + "frameLoss={3}, latencyStream0(usec)={4}, "\ + "latencyStream1(usec)={5}".format(rate, total_rcvd,\ + total_sent, lost_a+lost_b, lat_a, lat_b) + def print_error(msg): """Print error message on stderr. @@ -336,16 +387,12 @@ def print_error(msg): sys.stderr.write(msg+'\n') -def main(): - """Main function.""" +def parse_args(): + """Parse arguments from cmd line. - _traffic_options = {} - #default L3 profile is IPv4 - _use_ipv6 = False - #default warmup time is 5 seconds - _warmup_time = 5 - #default behaviour of this script is sychronous - _async_call = False + :return: Parsed arguments. + :rtype ArgumentParser + """ parser = argparse.ArgumentParser() parser.add_argument("-d", "--duration", required=True, type=int, @@ -355,10 +402,16 @@ def main(): parser.add_argument("-r", "--rate", required=True, help="Traffic rate with included units (%, pps)") parser.add_argument("-6", "--use_IPv6", action="store_true", + default=False, help="Use IPv6 traffic profile instead of IPv4") parser.add_argument("--async", action="store_true", + default=False, help="Non-blocking call of the script") + parser.add_argument("--latency", action="store_true", + default=False, + help="Add latency stream") parser.add_argument("-w", "--warmup_time", type=int, + default=5, help="Traffic warmup time in seconds, 0 = disable") # parser.add_argument("--p1_src_mac", # help="Port 1 source MAC address") @@ -385,29 +438,40 @@ def main(): # parser.add_argument("--p2_dst_end_ip", # help="Port 2 destination end IP address") - args = parser.parse_args() + return parser.parse_args() + + +def main(): + """Main function.""" + + args = parse_args() _duration = args.duration _frame_size = args.frame_size _rate = args.rate _use_ipv6 = args.use_IPv6 _async_call = args.async + _latency = args.latency + _warmup_time = args.warmup_time - if args.warmup_time is not None: - _warmup_time = args.warmup_time - + _traffic_options = {} for attr in [a for a in dir(args) if a.startswith('p')]: if getattr(args, attr) is not None: _traffic_options[attr] = getattr(args, attr) if _use_ipv6: - pkt_a, pkt_b = create_packets_v6(_traffic_options, - frame_size=_frame_size) + # WARNING: Trex limitation to IPv4 only. IPv6 is not yet supported. + print_error('IPv6 latency is not supported yet. Running without lat.') + _latency = False + + pkt_a, pkt_b, lat_a, lat_b = create_packets_v6(_traffic_options, + frame_size=_frame_size) else: - pkt_a, pkt_b = create_packets(_traffic_options, - frame_size=_frame_size) + pkt_a, pkt_b, lat_a, lat_b = create_packets(_traffic_options, + frame_size=_frame_size) - simple_burst(pkt_a, pkt_b, _duration, _rate, _warmup_time, _async_call) + simple_burst(pkt_a, pkt_b, lat_a, lat_b, _duration, _rate, _warmup_time, + _async_call, _latency) if __name__ == "__main__": sys.exit(main()) |