Diffstat (limited to 'GPL/tools/trex')
-rw-r--r--  GPL/tools/trex/trex_astf_assert.py    22
-rw-r--r--  GPL/tools/trex/trex_astf_profile.py  455
-rw-r--r--  GPL/tools/trex/trex_astf_stop.py      96
-rw-r--r--  GPL/tools/trex/trex_stl_assert.py     27
-rw-r--r--  GPL/tools/trex/trex_stl_profile.py   287
-rw-r--r--  GPL/tools/trex/trex_stl_stop.py       98
6 files changed, 494 insertions, 491 deletions
diff --git a/GPL/tools/trex/trex_astf_assert.py b/GPL/tools/trex/trex_astf_assert.py
index d6d74bcff8..107253f5f0 100644
--- a/GPL/tools/trex/trex_astf_assert.py
+++ b/GPL/tools/trex/trex_astf_assert.py
@@ -1,11 +1,21 @@
#!/usr/bin/python3
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
+# Copyright (c) 2022 Cisco and/or its affiliates.
+#
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Licensed under the Apache License 2.0 or
+# GNU General Public License v2.0 or later; you may not use this file
+# except in compliance with one of these Licenses. You
+# may obtain a copy of the Licenses at:
#
# http://www.apache.org/licenses/LICENSE-2.0
+# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
+#
+# Note: If this file is linked with Scapy, which is GPLv2+, your use of it
+# must be under GPLv2+. If at any point in the future it is no longer linked
+# with Scapy (or other GPLv2+ licensed software), you are free to choose
+# Apache 2.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,9 +38,9 @@ Functionality:
import sys
sys.path.insert(
- 0, u"/opt/trex-core-2.82/scripts/automation/trex_control_plane/interactive/"
+ 0, u"/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.astf.api import *
+from trex.astf.api import ASTFClient, TRexError
def main():
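
For reference, a minimal sketch of how the explicit imports above are typically exercised against a running T-Rex instance; the connect/acquire/disconnect calls mirror ones used elsewhere in this patch, while the overall flow is an assumption, not a copy of the script body:

    import sys
    sys.path.insert(
        0, "/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
    )
    from trex.astf.api import ASTFClient, TRexError

    client = ASTFClient()  # assumes a local t-rex-64 instance is running
    try:
        client.connect()            # raises TRexError if the server is unreachable
        client.acquire(force=True)  # take ownership of the ports
    except TRexError as err:
        print(f"T-Rex ASTF error: {err}", file=sys.stderr)
        raise
    finally:
        client.disconnect()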
diff --git a/GPL/tools/trex/trex_astf_profile.py b/GPL/tools/trex/trex_astf_profile.py
index 0542486105..44d81e92f9 100644
--- a/GPL/tools/trex/trex_astf_profile.py
+++ b/GPL/tools/trex/trex_astf_profile.py
@@ -1,11 +1,21 @@
#!/usr/bin/python3
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
+# Copyright (c) 2023 Cisco and/or its affiliates.
+#
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Licensed under the Apache License 2.0 or
+# GNU General Public License v2.0 or later; you may not use this file
+# except in compliance with one of these Licenses. You
+# may obtain a copy of the Licenses at:
#
# http://www.apache.org/licenses/LICENSE-2.0
+# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
+#
+# Note: If this file is linked with Scapy, which is GPLv2+, your use of it
+# must be under GPLv2+. If at any point in the future it is no longer linked
+# with Scapy (or other GPLv2+ licensed software), you are free to choose
+# Apache 2.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +25,7 @@
"""This module gets T-Rex advanced stateful (astf) traffic profile together
with other parameters, reads the profile and sends the traffic. At the end, it
-measures the packet loss and latency.
+parses the statistics for various counters.
"""
import argparse
@@ -24,9 +34,9 @@ import sys
import time
sys.path.insert(
- 0, u"/opt/trex-core-2.82/scripts/automation/trex_control_plane/interactive/"
+ 0, u"/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.astf.api import *
+from trex.astf.api import ASTFClient, ASTFProfile, TRexError
def fmt_latency(lat_min, lat_avg, lat_max, hdrh):
@@ -60,8 +70,18 @@ def fmt_latency(lat_min, lat_avg, lat_max, hdrh):
def simple_burst(
- profile_file, duration, framesize, mult, warmup_time, port_0, port_1,
- latency, async_start=False, traffic_directions=2):
+ profile_file,
+ duration,
+ framesize,
+ n_data_frames,
+ multiplier,
+ port_0,
+ port_1,
+ latency,
+ async_start=False,
+ traffic_directions=2,
+ delay=0.0,
+ ):
"""Send traffic and measure packet loss and latency.
Procedure:
@@ -70,38 +90,54 @@ def simple_burst(
- resets the ports,
- removes all existing streams,
- adds streams from the traffic profile to the ports,
- - if the warm-up time is more than 0, sends the warm-up traffic, reads the
- statistics,
- clears the statistics from the client,
- starts the traffic,
- waits for the defined time (or runs forever if async mode is defined),
- - stops the traffic,
+ - explicitly stops the traffic,
- reads and displays the statistics and
- disconnects from the client.
+    Duration details:
+    Unlike stateless mode, ASTF profiles typically limit the number
+    of flows/transactions that can happen.
+    The caller is expected to set the duration parameter to an idealized
+    value, and to set the delay argument when TRex is expected
+    to finish processing replies later (including a window for latency).
+    See *_traffic_duration output fields for TRex's measurement
+    of the real traffic duration (it should be free of inactivity overheads).
+    If the traffic has not ended by the final time, it is stopped
+    explicitly and the counters reflect the state just after the stop.
+
+    TODO: Support tests which focus only on some transaction phases,
+    e.g. TCP tests ignoring init and teardown separated by delays.
+    Currently, the approximated time measures the whole traffic duration.
+
:param profile_file: A python module with T-rex traffic profile.
- :param duration: Duration of traffic run in seconds (-1=infinite).
+    :param duration: Expected duration for all transactions to finish,
+        excluding any TRex related delays and the latency window.
:param framesize: Frame size.
- :param mult: Multiplier of profile CPS.
- :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
+ :param n_data_frames: Controls "size" of transaction for TPUT tests.
+ :param multiplier: Multiplier of profile CPS.
:param port_0: Port 0 on the traffic generator.
:param port_1: Port 1 on the traffic generator.
:param latency: With latency stats.
:param async_start: Start the traffic and exit.
:param traffic_directions: Bidirectional (2) or unidirectional (1) traffic.
+ :param delay: Time increase [s] for sleep duration.
:type profile_file: str
:type duration: float
:type framesize: int or str
- :type mult: int
- :type warmup_time: float
+ :type n_data_frames: int
+ :type multiplier: int
:type port_0: int
:type port_1: int
:type latency: bool
:type async_start: bool
:type traffic_directions: int
+ :type delay: float
"""
client = None
- total_rcvd = 0
+ total_received = 0
total_sent = 0
lost_a = 0
lost_b = 0
@@ -111,7 +147,6 @@ def simple_burst(
lat_b_hist = u""
l7_data = u""
stats = dict()
- stats_sampling = 1.0
approximated_duration = 0
# Read the profile.
@@ -119,7 +154,11 @@ def simple_burst(
# TODO: key-values pairs to the profile file
# - ips ?
print(f"### Profile file:\n{profile_file}")
- profile = ASTFProfile.load(profile_file, framesize=framesize)
+ profile = ASTFProfile.load(
+ profile_file,
+ framesize=framesize,
+ n_data_frames=n_data_frames,
+ )
except TRexError:
print(f"Error while loading profile '{profile_file}'!")
raise
@@ -135,40 +174,7 @@ def simple_burst(
# Load the profile.
client.load_profile(profile)
- ports = [port_0]
- if traffic_directions > 1:
- ports.append(port_1)
-
- # Warm-up phase.
- if warmup_time > 0:
- # Clear the stats before injecting.
- client.clear_stats()
- # Choose CPS and start traffic.
- client.start(mult=mult, duration=warmup_time)
- time_start = time.monotonic()
-
- # Read the stats after the warmup duration (no sampling needed).
- time.sleep(warmup_time)
- stats[time.monotonic()-time_start] = client.get_stats()
-
- if client.get_warnings():
- for warning in client.get_warnings():
- print(warning)
-
- client.reset()
-
- print(u"##### Warmup Statistics #####")
- print(json.dumps(stats, indent=4, separators=(u",", u": ")))
-
- # TODO: check stats format
- stats = stats[sorted(stats.keys())[-1]]
- lost_a = stats[port_0][u"opackets"] - stats[port_1][u"ipackets"]
- if traffic_directions > 1:
- lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
-
- print(f"packets lost from {port_0} --> {port_1}: {lost_a} pkts")
- if traffic_directions > 1:
- print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
+ ports = [port_0, port_1]
# Clear the stats before injecting.
lost_a = 0
@@ -177,60 +183,49 @@ def simple_burst(
# Choose CPS and start traffic.
client.start(
- mult=mult, duration=duration, nc=True,
- latency_pps=mult if latency else 0, client_mask=2**len(ports)-1
+ mult=multiplier,
+ duration=duration,
+ nc=True,
+ latency_pps=int(multiplier) if latency else 0,
+ client_mask=2**len(ports)-1,
)
- time_start = time.monotonic()
- # t-rex starts the packet flow with the delay
- stats[time.monotonic()-time_start] = client.get_stats(ports=[port_0])
- while stats[sorted(stats.keys())[-1]][port_0][u"opackets"] == 0:
- stats.clear()
- time.sleep(0.001)
- stats[time.monotonic() - time_start] = \
- client.get_stats(ports=[port_0])
- else:
- trex_start_time = list(sorted(stats.keys()))[-1]
- time_start += trex_start_time
- stats.clear()
+ time_stop = time.monotonic() + duration + delay
if async_start:
# For async stop, we need to export the current snapshot.
xsnap0 = client.ports[port_0].get_xstats().reference_stats
print(f"Xstats snapshot 0: {xsnap0!r}")
- if traffic_directions > 1:
- xsnap1 = client.ports[port_1].get_xstats().reference_stats
- print(f"Xstats snapshot 1: {xsnap1!r}")
+ xsnap1 = client.ports[port_1].get_xstats().reference_stats
+ print(f"Xstats snapshot 1: {xsnap1!r}")
else:
- time.sleep(
- stats_sampling if stats_sampling < duration else duration
+ time.sleep(duration + delay)
+            # Do not block yet, the existing transactions may take a long
+            # time to finish. We need an action that is almost reset(),
+            # but without clearing stats.
+ client.stop(block=False)
+ client.stop_latency()
+ client.remove_rx_queue(client.get_all_ports())
+ # Now we can wait for the real traffic stop.
+ client.stop(block=True)
+
+ # Read the stats after the traffic stopped (or time up).
+ stats[time.monotonic() - time_stop] = client.get_stats(
+ ports=ports
)
- # Do not block until done.
- while client.is_traffic_active(ports=ports):
- # Sample the stats.
- stats[time.monotonic()-time_start] = \
- client.get_stats(ports=ports)
- time.sleep(
- stats_sampling if stats_sampling < duration else duration
- )
- else:
- # Read the stats after the test
- stats[time.monotonic()-time_start] = \
- client.get_stats(ports=ports)
if client.get_warnings():
for warning in client.get_warnings():
print(warning)
- client.reset()
+ # No profile cleanup here, reset will be done in the finally block.
print(u"##### Statistics #####")
print(json.dumps(stats, indent=4, separators=(u",", u": ")))
- approximated_duration = list(sorted(stats.keys()))[-1]
+ approximated_duration = duration + list(sorted(stats.keys()))[-1]
stats = stats[sorted(stats.keys())[-1]]
lost_a = stats[port_0][u"opackets"] - stats[port_1][u"ipackets"]
- if traffic_directions > 1:
- lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
+ lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
# TODO: Latency measurement not used at this phase. This part will
# be aligned in another commit.
@@ -250,126 +245,126 @@ def simple_burst(
str(lat_obj[u"max_usec"]), u"-")
lat_b_hist = str(lat_obj[u"histogram"])
- if traffic_directions > 1:
- total_sent = \
- stats[port_0][u"opackets"] + stats[port_1][u"opackets"]
- total_rcvd = \
- stats[port_0][u"ipackets"] + stats[port_1][u"ipackets"]
- client_stats = stats[u"traffic"][u"client"]
- server_stats = stats[u"traffic"][u"server"]
- # Some zero counters are not sent
- # Active and established flows UDP/TCP
+ total_sent = \
+ stats[port_0][u"opackets"] + stats[port_1][u"opackets"]
+ total_received = \
+ stats[port_0][u"ipackets"] + stats[port_1][u"ipackets"]
+ client_sent = stats[port_0][u"opackets"]
+ client_received = stats[port_0][u"ipackets"]
+ client_stats = stats[u"traffic"][u"client"]
+ server_stats = stats[u"traffic"][u"server"]
+ # Some zero counters are not sent
+ # Active and established flows UDP/TCP
+ # Client
+ c_act_flows = client_stats[u"m_active_flows"]
+ c_est_flows = client_stats[u"m_est_flows"]
+ c_traffic_duration = client_stats.get(u"m_traffic_duration", 0)
+ l7_data = f"client_active_flows={c_act_flows}; "
+ l7_data += f"client_established_flows={c_est_flows}; "
+ l7_data += f"client_traffic_duration={c_traffic_duration}; "
+ # Possible errors
+ # Too many packets in NIC rx queue
+ c_err_rx_throttled = client_stats.get(u"err_rx_throttled", 0)
+ l7_data += f"client_err_rx_throttled={c_err_rx_throttled}; "
+ # Number of client side flows that were not opened
+ # due to flow-table overflow
+ c_err_nf_throttled = client_stats.get(u"err_c_nf_throttled", 0)
+ l7_data += f"client_err_nf_throttled={c_err_nf_throttled}; "
+ # Too many flows
+ c_err_flow_overflow = client_stats.get(u"err_flow_overflow", 0)
+ l7_data += f"client_err_flow_overflow={c_err_flow_overflow}; "
+ # Server
+ s_act_flows = server_stats[u"m_active_flows"]
+ s_est_flows = server_stats[u"m_est_flows"]
+ s_traffic_duration = server_stats.get(u"m_traffic_duration", 0)
+ l7_data += f"server_active_flows={s_act_flows}; "
+ l7_data += f"server_established_flows={s_est_flows}; "
+ l7_data += f"server_traffic_duration={s_traffic_duration}; "
+ # Possible errors
+ # Too many packets in NIC rx queue
+ s_err_rx_throttled = server_stats.get(u"err_rx_throttled", 0)
+ l7_data += f"client_err_rx_throttled={s_err_rx_throttled}; "
+ if u"udp" in profile_file:
# Client
- c_act_flows = client_stats[u"m_active_flows"]
- c_est_flows = client_stats[u"m_est_flows"]
- c_traffic_duration = client_stats.get(u"m_traffic_duration", 0)
- l7_data = f"client_active_flows={c_act_flows}, "
- l7_data += f"client_established_flows={c_est_flows}, "
- l7_data += f"client_traffic_duration={c_traffic_duration}, "
- # Possible errors
- # Too many packets in NIC rx queue
- c_err_rx_throttled = client_stats.get(u"err_rx_throttled", 0)
- l7_data += f"client_err_rx_throttled={c_err_rx_throttled}, "
- # Number of client side flows that were not opened
- # due to flow-table overflow
- c_err_nf_throttled = client_stats.get(u"err_c_nf_throttled", 0)
- l7_data += f"client_err_nf_throttled={c_err_nf_throttled}, "
- # Too many flows
- c_err_flow_overflow = client_stats.get(u"err_flow_overflow", 0)
- l7_data += f"client_err_flow_overflow={c_err_flow_overflow}, "
+ # Established connections
+ c_udp_connects = client_stats.get(u"udps_connects", 0)
+ l7_data += f"client_udp_connects={c_udp_connects}; "
+ # Closed connections
+ c_udp_closed = client_stats.get(u"udps_closed", 0)
+ l7_data += f"client_udp_closed={c_udp_closed}; "
+ # Sent bytes
+ c_udp_sndbyte = client_stats.get(u"udps_sndbyte", 0)
+ l7_data += f"client_udp_tx_bytes={c_udp_sndbyte}; "
+ # Sent packets
+ c_udp_sndpkt = client_stats.get(u"udps_sndpkt", 0)
+ l7_data += f"client_udp_tx_packets={c_udp_sndpkt}; "
+ # Received bytes
+ c_udp_rcvbyte = client_stats.get(u"udps_rcvbyte", 0)
+ l7_data += f"client_udp_rx_bytes={c_udp_rcvbyte}; "
+ # Received packets
+ c_udp_rcvpkt = client_stats.get(u"udps_rcvpkt", 0)
+ l7_data += f"client_udp_rx_packets={c_udp_rcvpkt}; "
+ # Keep alive drops
+ c_udp_keepdrops = client_stats.get(u"udps_keepdrops", 0)
+ l7_data += f"client_udp_keep_drops={c_udp_keepdrops}; "
+ # Client without flow
+ c_err_cwf = client_stats.get(u"err_cwf", 0)
+ l7_data += f"client_err_cwf={c_err_cwf}; "
# Server
- s_act_flows = server_stats[u"m_active_flows"]
- s_est_flows = server_stats[u"m_est_flows"]
- s_traffic_duration = server_stats.get(u"m_traffic_duration", 0)
- l7_data += f"server_active_flows={s_act_flows}, "
- l7_data += f"server_established_flows={s_est_flows}, "
- l7_data += f"server_traffic_duration={s_traffic_duration}, "
- # Possible errors
- # Too many packets in NIC rx queue
- s_err_rx_throttled = server_stats.get(u"err_rx_throttled", 0)
- l7_data += f"client_err_rx_throttled={s_err_rx_throttled}, "
- if u"udp" in profile_file:
- # Client
- # Established connections
- c_udp_connects = client_stats.get(u"udps_connects", 0)
- l7_data += f"client_udp_connects={c_udp_connects}, "
- # Closed connections
- c_udp_closed = client_stats.get(u"udps_closed", 0)
- l7_data += f"client_udp_closed={c_udp_closed}, "
- # Sent bytes
- c_udp_sndbyte = client_stats.get(u"udps_sndbyte", 0)
- l7_data += f"client_udp_tx_bytes={c_udp_sndbyte}, "
- # Sent packets
- c_udp_sndpkt = client_stats.get(u"udps_sndpkt", 0)
- l7_data += f"client_udp_tx_packets={c_udp_sndpkt}, "
- # Received bytes
- c_udp_rcvbyte = client_stats.get(u"udps_rcvbyte", 0)
- l7_data += f"client_udp_rx_bytes={c_udp_rcvbyte}, "
- # Received packets
- c_udp_rcvpkt = client_stats.get(u"udps_rcvpkt", 0)
- l7_data += f"client_udp_rx_packets={c_udp_rcvpkt}, "
- # Keep alive drops
- c_udp_keepdrops = client_stats.get(u"udps_keepdrops", 0)
- l7_data += f"client_udp_keep_drops={c_udp_keepdrops}, "
- # Server
- # Accepted connections
- s_udp_accepts = server_stats.get(u"udps_accepts", 0)
- l7_data += f"server_udp_accepts={s_udp_accepts}, "
- # Closed connections
- s_udp_closed = server_stats.get(u"udps_closed", 0)
- l7_data += f"server_udp_closed={s_udp_closed}, "
- # Sent bytes
- s_udp_sndbyte = server_stats.get(u"udps_sndbyte", 0)
- l7_data += f"server_udp_tx_bytes={s_udp_sndbyte}, "
- # Sent packets
- s_udp_sndpkt = server_stats.get(u"udps_sndpkt", 0)
- l7_data += f"server_udp_tx_packets={s_udp_sndpkt}, "
- # Received bytes
- s_udp_rcvbyte = server_stats.get(u"udps_rcvbyte", 0)
- l7_data += f"server_udp_rx_bytes={s_udp_rcvbyte}, "
- # Received packets
- s_udp_rcvpkt = server_stats.get(u"udps_rcvpkt", 0)
- l7_data += f"server_udp_rx_packets={s_udp_rcvpkt}, "
- elif u"tcp" in profile_file:
- # Client
- # Initiated connections
- c_tcp_connatt = client_stats.get(u"tcps_connattempt", 0)
- l7_data += f"client_tcp_connect_inits={c_tcp_connatt}, "
- # Established connections
- c_tcp_connects = client_stats.get(u"tcps_connects", 0)
- l7_data += f"client_tcp_connects={c_tcp_connects}, "
- # Closed connections
- c_tcp_closed = client_stats.get(u"tcps_closed", 0)
- l7_data += f"client_tcp_closed={c_tcp_closed}, "
- # Send bytes
- c_tcp_sndbyte = client_stats.get(u"tcps_sndbyte", 0)
- l7_data += f"client_tcp_tx_bytes={c_tcp_sndbyte}, "
- # Received bytes
- c_tcp_rcvbyte = client_stats.get(u"tcps_rcvbyte", 0)
- l7_data += f"client_tcp_rx_bytes={c_tcp_rcvbyte}, "
- # Server
- # Accepted connections
- s_tcp_accepts = server_stats.get(u"tcps_accepts", 0)
- l7_data += f"server_tcp_accepts={s_tcp_accepts}, "
- # Established connections
- s_tcp_connects = server_stats.get(u"tcps_connects", 0)
- l7_data += f"server_tcp_connects={s_tcp_connects}, "
- # Closed connections
- s_tcp_closed = server_stats.get(u"tcps_closed", 0)
- l7_data += f"server_tcp_closed={s_tcp_closed}, "
- # Sent bytes
- s_tcp_sndbyte = server_stats.get(u"tcps_sndbyte", 0)
- l7_data += f"server_tcp_tx_bytes={s_tcp_sndbyte}, "
- # Received bytes
- s_tcp_rcvbyte = server_stats.get(u"tcps_rcvbyte", 0)
- l7_data += f"server_tcp_rx_bytes={s_tcp_rcvbyte}, "
- else:
- total_sent = stats[port_0][u"opackets"]
- total_rcvd = stats[port_1][u"ipackets"]
+ # Accepted connections
+ s_udp_accepts = server_stats.get(u"udps_accepts", 0)
+ l7_data += f"server_udp_accepts={s_udp_accepts}; "
+ # Closed connections
+ s_udp_closed = server_stats.get(u"udps_closed", 0)
+ l7_data += f"server_udp_closed={s_udp_closed}; "
+ # Sent bytes
+ s_udp_sndbyte = server_stats.get(u"udps_sndbyte", 0)
+ l7_data += f"server_udp_tx_bytes={s_udp_sndbyte}; "
+ # Sent packets
+ s_udp_sndpkt = server_stats.get(u"udps_sndpkt", 0)
+ l7_data += f"server_udp_tx_packets={s_udp_sndpkt}; "
+ # Received bytes
+ s_udp_rcvbyte = server_stats.get(u"udps_rcvbyte", 0)
+ l7_data += f"server_udp_rx_bytes={s_udp_rcvbyte}; "
+ # Received packets
+ s_udp_rcvpkt = server_stats.get(u"udps_rcvpkt", 0)
+ l7_data += f"server_udp_rx_packets={s_udp_rcvpkt}; "
+ elif u"tcp" in profile_file:
+ # Client
+ # Connection attempts
+ c_tcp_connattempt = client_stats.get(u"tcps_connattempt", 0)
+ l7_data += f"client_tcp_connattempt={c_tcp_connattempt}; "
+ # Established connections
+ c_tcp_connects = client_stats.get(u"tcps_connects", 0)
+ l7_data += f"client_tcp_connects={c_tcp_connects}; "
+ # Closed connections
+ c_tcp_closed = client_stats.get(u"tcps_closed", 0)
+ l7_data += f"client_tcp_closed={c_tcp_closed}; "
+ # Send bytes
+ c_tcp_sndbyte = client_stats.get(u"tcps_sndbyte", 0)
+ l7_data += f"client_tcp_tx_bytes={c_tcp_sndbyte}; "
+ # Received bytes
+ c_tcp_rcvbyte = client_stats.get(u"tcps_rcvbyte", 0)
+ l7_data += f"client_tcp_rx_bytes={c_tcp_rcvbyte}; "
+ # Server
+ # Accepted connections
+ s_tcp_accepts = server_stats.get(u"tcps_accepts", 0)
+ l7_data += f"server_tcp_accepts={s_tcp_accepts}; "
+ # Established connections
+ s_tcp_connects = server_stats.get(u"tcps_connects", 0)
+ l7_data += f"server_tcp_connects={s_tcp_connects}; "
+ # Closed connections
+ s_tcp_closed = server_stats.get(u"tcps_closed", 0)
+ l7_data += f"server_tcp_closed={s_tcp_closed}; "
+ # Sent bytes
+ s_tcp_sndbyte = server_stats.get(u"tcps_sndbyte", 0)
+ l7_data += f"server_tcp_tx_bytes={s_tcp_sndbyte}; "
+ # Received bytes
+ s_tcp_rcvbyte = server_stats.get(u"tcps_rcvbyte", 0)
+ l7_data += f"server_tcp_rx_bytes={s_tcp_rcvbyte}; "
print(f"packets lost from {port_0} --> {port_1}: {lost_a} pkts")
- if traffic_directions > 1:
- print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
+ print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
except TRexError:
print(u"T-Rex ASTF runtime error!", file=sys.stderr)
@@ -380,17 +375,20 @@ def simple_burst(
if async_start:
client.disconnect(stop_traffic=False, release_ports=True)
else:
- client.clear_profile()
+ client.reset()
client.disconnect()
print(
- f"trex_start_time={trex_start_time}, "
- f"cps={mult!r}, total_received={total_rcvd}, "
- f"total_sent={total_sent}, frame_loss={lost_a + lost_b}, "
- f"approximated_duration={approximated_duration}, "
- f"latency_stream_0(usec)={lat_a}, "
- f"latency_stream_1(usec)={lat_b}, "
- f"latency_hist_stream_0={lat_a_hist}, "
- f"latency_hist_stream_1={lat_b_hist}, "
+ f"multiplier={multiplier!r}; "
+ f"total_received={total_received}; "
+ f"total_sent={total_sent}; "
+ f"frame_loss={lost_a + lost_b}; "
+ f"approximated_duration={approximated_duration}; "
+ f"latency_stream_0(usec)={lat_a}; "
+ f"latency_stream_1(usec)={lat_b}; "
+ f"latency_hist_stream_0={lat_a_hist}; "
+ f"latency_hist_stream_1={lat_b_hist}; "
+ f"client_sent={client_sent}; "
+ f"client_received={client_received}; "
f"{l7_data}"
)
@@ -408,19 +406,19 @@ def main():
)
parser.add_argument(
u"-d", u"--duration", required=True, type=float,
- help=u"Duration of traffic run."
+ help=u"Duration of the whole traffic run, including overheads."
)
parser.add_argument(
u"-s", u"--frame_size", required=True,
help=u"Size of a Frame without padding and IPG."
)
parser.add_argument(
- u"-m", u"--mult", required=True, type=int,
- help=u"Multiplier of profile CPS."
+ u"--n_data_frames", type=int, default=5,
+ help=u"Use this many data frames per transaction and direction (TPUT)."
)
parser.add_argument(
- u"-w", u"--warmup_time", type=float, default=5.0,
- help=u"Traffic warm-up time in seconds, 0 = disable."
+ u"-m", u"--multiplier", required=True, type=float,
+ help=u"Multiplier of profile CPS."
)
parser.add_argument(
u"--port_0", required=True, type=int,
@@ -442,6 +440,10 @@ def main():
u"--traffic_directions", type=int, default=2,
help=u"Send bi- (2) or uni- (1) directional traffic."
)
+ parser.add_argument(
+ u"--delay", required=True, type=float, default=0.0,
+ help=u"Allowed time overhead, sleep time is increased by this [s]."
+ )
args = parser.parse_args()
@@ -451,10 +453,17 @@ def main():
framesize = args.frame_size
simple_burst(
- profile_file=args.profile, duration=args.duration, framesize=framesize,
- mult=args.mult, warmup_time=args.warmup_time, port_0=args.port_0,
- port_1=args.port_1, latency=args.latency, async_start=args.async_start,
- traffic_directions=args.traffic_directions
+ profile_file=args.profile,
+ duration=args.duration,
+ framesize=framesize,
+ n_data_frames=args.n_data_frames,
+ multiplier=args.multiplier,
+ port_0=args.port_0,
+ port_1=args.port_1,
+ latency=args.latency,
+ async_start=args.async_start,
+ traffic_directions=args.traffic_directions,
+ delay=args.delay,
)
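
To tie the reworked signature together with the Duration details note above, here is a hedged example call; the profile path and all numeric values are illustrative placeholders, not values taken from CSIT:

    # All values below are illustrative placeholders.
    simple_burst(
        profile_file="/opt/trex-profiles/astf_udp_example.py",  # hypothetical path
        duration=30.0,          # idealized time for all transactions to finish
        framesize=64,
        n_data_frames=5,        # transaction "size" for TPUT tests
        multiplier=100.0,       # profile CPS multiplier
        port_0=0,
        port_1=1,
        latency=False,
        async_start=False,
        traffic_directions=2,
        delay=5.0,              # extra sleep so TRex can finish processing replies
    )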
diff --git a/GPL/tools/trex/trex_astf_stop.py b/GPL/tools/trex/trex_astf_stop.py
index 655bdf1a97..73c058390c 100644
--- a/GPL/tools/trex/trex_astf_stop.py
+++ b/GPL/tools/trex/trex_astf_stop.py
@@ -1,11 +1,21 @@
#!/usr/bin/python3
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
+# Copyright (c) 2023 Cisco and/or its affiliates.
+#
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Licensed under the Apache License 2.0 or
+# GNU General Public License v2.0 or later; you may not use this file
+# except in compliance with one of these Licenses. You
+# may obtain a copy of the Licenses at:
#
# http://www.apache.org/licenses/LICENSE-2.0
+# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
+#
+# Note: If this file is linked with Scapy, which is GPLv2+, your use of it
+# must be under GPLv2+. If at any point in the future it is no longer linked
+# with Scapy (or other GPLv2+ licensed software), you are free to choose
+# Apache 2.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,11 +30,6 @@ Requirements:
- compiled and running T-REX process (eg. ./t-rex-64 -i)
- trex.astf.api library
- Script must be executed on a node with T-REX instance
-
-Functionality:
-1. Stop any running traffic
-2. Optionally restore reference counter values.
-3. Return conter differences.
"""
import argparse
@@ -34,72 +39,63 @@ import sys
from collections import OrderedDict # Needed to parse xstats representation.
sys.path.insert(
- 0, u"/opt/trex-core-2.82/scripts/automation/trex_control_plane/interactive/"
+ 0, "/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.astf.api import *
+from trex.astf.api import ASTFClient
def main():
"""Stop traffic if any is running. Report xstats."""
parser = argparse.ArgumentParser()
parser.add_argument(
- u"--xstat0", type=str, default=u"",
- help=u"Reference xstat object if any."
- )
- parser.add_argument(
- u"--xstat1", type=str, default=u"",
- help=u"Reference xstat object if any."
+ "--xstat", type=str, nargs="*", help="Reference xstat object if any."
)
args = parser.parse_args()
client = ASTFClient()
try:
- # connect to server
client.connect()
-
client.acquire(force=True)
client.stop()
+ xstats = list()
# Read the stats after the test,
# we need to update values before the last trial started.
- if args.xstat0:
- snapshot = eval(args.xstat0)
- client.ports[0].get_xstats().reference_stats = snapshot
- if args.xstat1:
- snapshot = eval(args.xstat1)
- client.ports[1].get_xstats().reference_stats = snapshot
- # Now we can call the official method to get differences.
- xstats0 = client.get_xstats(0)
- xstats1 = client.get_xstats(1)
-
- # If TRexError happens, let the script fail with stack trace.
+ for i in range(len(client.ports)):
+ if args.xstat[i]:
+ snapshot = eval(args.xstat[i])
+ client.ports[i].get_xstats().reference_stats = snapshot
+ # Now we can call the official method to get differences.
+ xstats.append(client.get_xstats(i))
+ print(f"##### statistics port {i} #####")
+ print(json.dumps(xstats[i], indent=4, separators=(",", ": ")))
finally:
- client.clear_profile()
+ client.reset()
client.disconnect()
- # TODO: check xstats format
- print(u"##### statistics port 0 #####")
- print(json.dumps(xstats0, indent=4, separators=(u",", u": ")))
- print(u"##### statistics port 1 #####")
- print(json.dumps(xstats1, indent=4, separators=(u",", u": ")))
-
- tx_0, rx_0 = xstats0[u"tx_good_packets"], xstats0[u"rx_good_packets"]
- tx_1, rx_1 = xstats1[u"tx_good_packets"], xstats1[u"rx_good_packets"]
- lost_a, lost_b = tx_0 - rx_1, tx_1 - rx_0
+ for idx,stat in enumerate(zip(xstats[0::2], xstats[1::2])):
+ lost_r = stat[0]["tx_good_packets"] - stat[1]["rx_good_packets"]
+ lost_l = stat[1]["tx_good_packets"] - stat[0]["rx_good_packets"]
+ print(f"packets lost from {idx*2} --> {idx*2+1}: {lost_r} pkts")
+ print(f"packets lost from {idx*2+1} --> {idx*2}: {lost_l} pkts")
- print(f"packets lost from 0 --> 1: {lost_a} pkts")
- print(f"packets lost from 1 --> 0: {lost_b} pkts")
+ total_rcvd = 0
+ total_sent = 0
+ for stat in xstats:
+ total_rcvd += stat["rx_good_packets"]
+ total_sent += stat["tx_good_packets"]
- total_rcvd, total_sent = rx_0 + rx_1, tx_0 + tx_1
- total_lost = total_sent - total_rcvd
- # TODO: Add latency.
print(
- f"cps='unknown', total_received={total_rcvd}, total_sent={total_sent}, "
- f"frame_loss={total_lost}, "
- f"latency_stream_0(usec)=-1/-1/-1, latency_stream_1(usec)=-1/-1/-1, "
- u"latency_hist_stream_0={}, latency_hist_stream_1={}, "
+ f"cps='unknown'; "
+ f"total_received={total_rcvd}; "
+ f"total_sent={total_sent}; "
+ f"frame_loss={total_sent - total_rcvd}; "
+ f"latency_stream_0(usec)=-1/-1/-1; "
+ f"latency_stream_1(usec)=-1/-1/-1; "
+ f"latency_hist_stream_0=; "
+ f"latency_hist_stream_1=; "
)
-if __name__ == u"__main__":
+if __name__ == "__main__":
main()
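
The per-port loss report above pairs even and odd ports; a self-contained sketch with made-up counter values shows the arithmetic:

    # Made-up xstats for one port pair, only to illustrate the pairing logic.
    xstats = [
        {"tx_good_packets": 1000, "rx_good_packets": 990},   # port 0
        {"tx_good_packets": 1000, "rx_good_packets": 995},   # port 1
    ]
    for idx, stat in enumerate(zip(xstats[0::2], xstats[1::2])):
        lost_r = stat[0]["tx_good_packets"] - stat[1]["rx_good_packets"]  # 1000 - 995 = 5
        lost_l = stat[1]["tx_good_packets"] - stat[0]["rx_good_packets"]  # 1000 - 990 = 10
        print(f"packets lost from {idx*2} --> {idx*2+1}: {lost_r} pkts")
        print(f"packets lost from {idx*2+1} --> {idx*2}: {lost_l} pkts")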
diff --git a/GPL/tools/trex/trex_stl_assert.py b/GPL/tools/trex/trex_stl_assert.py
index 7c7e9215b8..d4a092b4b9 100644
--- a/GPL/tools/trex/trex_stl_assert.py
+++ b/GPL/tools/trex/trex_stl_assert.py
@@ -1,11 +1,21 @@
#!/usr/bin/python3
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
+# Copyright (c) 2023 Cisco and/or its affiliates.
+#
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Licensed under the Apache License 2.0 or
+# GNU General Public License v2.0 or later; you may not use this file
+# except in compliance with one of these Licenses. You
+# may obtain a copy of the Licenses at:
#
# http://www.apache.org/licenses/LICENSE-2.0
+# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
+#
+# Note: If this file is linked with Scapy, which is GPLv2+, your use of it
+# must be under GPLv2+. If at any point in the future it is no longer linked
+# with Scapy (or other GPLv2+ licensed software), you are free to choose
+# Apache 2.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,17 +30,14 @@ Requirements:
- compiled and running T-Rex process (eg. ./t-rex-64 -i)
- trex.stl.api library
- Script must be executed on a node with T-Rex instance.
-
-Functionality:
-1. Verify the API functionality and get server information.
"""
import sys
sys.path.insert(
- 0, u"/opt/trex-core-2.82/scripts/automation/trex_control_plane/interactive/"
+ 0, "/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.stl.api import *
+from trex.stl.api import STLClient, STLError
def main():
@@ -49,5 +56,5 @@ def main():
client.disconnect()
-if __name__ == u"__main__":
+if __name__ == "__main__":
main()
diff --git a/GPL/tools/trex/trex_stl_profile.py b/GPL/tools/trex/trex_stl_profile.py
index ac09b8dda1..ac53e90571 100644
--- a/GPL/tools/trex/trex_stl_profile.py
+++ b/GPL/tools/trex/trex_stl_profile.py
@@ -1,11 +1,21 @@
#!/usr/bin/python3
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
+# Copyright (c) 2023 Cisco and/or its affiliates.
+#
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Licensed under the Apache License 2.0 or
+# GNU General Public License v2.0 or later; you may not use this file
+# except in compliance with one of these Licenses. You
+# may obtain a copy of the Licenses at:
#
# http://www.apache.org/licenses/LICENSE-2.0
+# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
+#
+# Note: If this file is linked with Scapy, which is GPLv2+, your use of it
+# must be under GPLv2+. If at any point in the future it is no longer linked
+# with Scapy (or other GPLv2+ licensed software), you are free to choose
+# Apache 2.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,9 +34,9 @@ import sys
import time
sys.path.insert(
- 0, u"/opt/trex-core-2.82/scripts/automation/trex_control_plane/interactive/"
+ 0, "/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.stl.api import *
+from trex.stl.api import STLClient, STLProfile, STLError
def fmt_latency(lat_min, lat_avg, lat_max, hdrh):
@@ -56,12 +66,21 @@ def fmt_latency(lat_min, lat_avg, lat_max, hdrh):
except ValueError:
t_max = int(-1)
- return u"/".join(str(tmp) for tmp in (t_min, t_avg, t_max, hdrh))
+ return "/".join(str(tmp) for tmp in (t_min, t_avg, t_max, hdrh))
def simple_burst(
- profile_file, duration, framesize, rate, warmup_time, port_0, port_1,
- latency, async_start=False, traffic_directions=2, force=False):
+ profile_file,
+ duration,
+ framesize,
+ rate,
+ ports,
+ latency,
+ async_start=False,
+ traffic_directions=2,
+ force=False,
+ delay=0.0,
+ ):
"""Send traffic and measure packet loss and latency.
Procedure:
@@ -83,34 +102,29 @@ def simple_burst(
:param framesize: Frame size.
:param duration: Duration of traffic run in seconds (-1=infinite).
:param rate: Traffic rate [percentage, pps, bps].
- :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
- :param port_0: Port 0 on the traffic generator.
- :param port_1: Port 1 on the traffic generator.
+ :param ports: Port list on the traffic generator.
:param latency: With latency stats.
:param async_start: Start the traffic and exit.
:param traffic_directions: Bidirectional (2) or unidirectional (1) traffic.
:param force: Force start regardless of ports state.
+ :param delay: Sleep overhead [s].
:type profile_file: str
:type framesize: int or str
:type duration: float
:type rate: str
- :type warmup_time: float
- :type port_0: int
- :type port_1: int
+ :type ports: list
:type latency: bool
:type async_start: bool
:type traffic_directions: int
:type force: bool
+ :type delay: float
"""
client = None
total_rcvd = 0
total_sent = 0
- approximated_duration = 0
- approximated_rate = 0
- lost_a = 0
- lost_b = 0
- lat_a = u"-1/-1/-1/"
- lat_b = u"-1/-1/-1/"
+ approximated_duration = 0.0
+ lat_a = "-1/-1/-1/"
+ lat_b = "-1/-1/-1/"
# Read the profile:
try:
@@ -130,141 +144,109 @@ def simple_burst(
# Connect to server:
client.connect()
# Prepare our ports (the machine has 0 <--> 1 with static route):
- client.reset(ports=[port_0, port_1])
- client.remove_all_streams(ports=[port_0, port_1])
+ client.reset()
+ client.remove_all_streams()
- if u"macsrc" in profile_file:
- client.set_port_attr(ports=[port_0, port_1], promiscuous=True)
+ if "macsrc" in profile_file:
+ client.set_port_attr(promiscuous=True)
if isinstance(framesize, int):
- last_stream_a = int((len(streams) - 2 ) / 2)
- last_stream_b = (last_stream_a * 2)
- client.add_streams(streams[0:last_stream_a], ports=[port_0])
+ mark_a = len(streams) // 4
+ mark_b = len(streams) // 2
+ for i,j in zip(streams[:mark_a], ports[::2]):
+ client.add_streams(streams=[i], ports=[j])
if traffic_directions > 1:
- client.add_streams(
- streams[last_stream_a:last_stream_b], ports=[port_1])
+ for i,j in zip(streams[mark_a:mark_b], ports[1::2]):
+ print(i, j)
+ client.add_streams(streams=[i], ports=[j])
elif isinstance(framesize, str):
- client.add_streams(streams[0:3], ports=[port_0])
+ mark = 0
+ for i in ports[::2]:
+ client.add_streams(streams=streams[mark:mark+3], ports=[i])
+ mark = mark + 3
if traffic_directions > 1:
- client.add_streams(streams[3:6], ports=[port_1])
+ mark = len(streams) // 2
+ for i in ports[1::2]:
+ client.add_streams(streams=streams[mark:mark+3], ports=[i])
+ mark = mark + 3
if latency:
try:
if isinstance(framesize, int):
- client.add_streams(streams[last_stream_b], ports=[port_0])
+ mark_c = len(streams) // 2
+ mark_d = len(streams) // 2 + len(streams) // 4
+ for i,j in zip(streams[mark_c:mark_d], ports[::2]):
+ client.add_streams(streams=[i], ports=[j])
if traffic_directions > 1:
- client.add_streams(
- streams[last_stream_b + 1], ports=[port_1])
+ for i,j in zip(streams[mark_d:], ports[1::2]):
+ client.add_streams(streams=[i], ports=[j])
elif isinstance(framesize, str):
latency = False
except STLError:
# Disable latency if NIC does not support requested stream type
- print(u"##### FAILED to add latency streams #####")
+ print("##### FAILED to add latency streams #####")
latency = False
- ports = [port_0]
- if traffic_directions > 1:
- ports.append(port_1)
- # Warm-up phase:
- if warmup_time > 0:
- # Clear the stats before injecting:
- client.clear_stats()
-
- # Choose rate and start traffic:
- client.start(
- ports=ports, mult=rate, duration=warmup_time, force=force,
- core_mask=STLClient.CORE_MASK_PIN
- )
-
- # Block until done:
- time_start = time.monotonic()
- client.wait_on_traffic(ports=ports, timeout=warmup_time+30)
- time_stop = time.monotonic()
- approximated_duration = time_stop - time_start
-
- if client.get_warnings():
- for warning in client.get_warnings():
- print(warning)
-
- # Read the stats after the test:
- stats = client.get_stats()
-
- print(u"##### Warmup statistics #####")
- print(json.dumps(stats, indent=4, separators=(u",", u": ")))
-
- lost_a = stats[port_0][u"opackets"] - stats[port_1][u"ipackets"]
- if traffic_directions > 1:
- lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
-
- print(f"\npackets lost from {port_0} --> {port_1}: {lost_a} pkts")
- if traffic_directions > 1:
- print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
# Clear the stats before injecting:
client.clear_stats()
- lost_a = 0
- lost_b = 0
# Choose rate and start traffic:
client.start(
- ports=ports, mult=rate, duration=duration, force=force,
- core_mask=STLClient.CORE_MASK_PIN
+ ports=ports[::] if traffic_directions == 2 else ports[::2],
+ mult=rate,
+ duration=duration,
+ force=force,
+ core_mask=STLClient.CORE_MASK_PIN,
)
if async_start:
# For async stop, we need to export the current snapshot.
- xsnap0 = client.ports[0].get_xstats().reference_stats
- print(f"Xstats snapshot 0: {xsnap0!r}")
- if traffic_directions > 1:
- xsnap1 = client.ports[1].get_xstats().reference_stats
- print(f"Xstats snapshot 1: {xsnap1!r}")
+ for i in range(len(client.ports)):
+ xsnap = client.ports[i].get_xstats().reference_stats
+ print(f"Xstats snapshot {i}: {xsnap!r}")
else:
- # Block until done:
time_start = time.monotonic()
- client.wait_on_traffic(ports=ports, timeout=duration+30)
+        # wait_on_traffic fails if the duration stretches by 30 seconds or more.
+        # TRex has some overhead, so wait some more and then stop explicitly.
+ time.sleep(duration + delay)
+ client.stop()
time_stop = time.monotonic()
- approximated_duration = time_stop - time_start
-
+ approximated_duration = time_stop - time_start - delay
+ # Read the stats after the traffic stopped (or time up).
+ stats = client.get_stats()
if client.get_warnings():
for warning in client.get_warnings():
print(warning)
+ # Now finish the complete reset.
+ client.reset()
- # Read the stats after the test
- stats = client.get_stats()
+ print("##### Statistics #####")
+ print(json.dumps(stats, indent=4, separators=(",", ": ")))
- print(u"##### Statistics #####")
- print(json.dumps(stats, indent=4, separators=(u",", u": ")))
-
- lost_a = stats[port_0][u"opackets"] - stats[port_1][u"ipackets"]
- if traffic_directions > 1:
- lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
+ nr_ports = len(client.ports)
+ for i,j in zip(range(nr_ports)[0::2], range(nr_ports)[1::2]):
+ lost_r = stats[i]["opackets"] - stats[j]["ipackets"]
+ lost_l = stats[j]["opackets"] - stats[i]["ipackets"]
+ print(f"packets lost from {i} --> {j}: {lost_r} pkts")
+ print(f"packets lost from {j} --> {i}: {lost_l} pkts")
# Stats index is not a port number, but "pgid".
+ # We will take latency read from only first link.
if latency:
- lat_obj = stats[u"latency"][0][u"latency"]
+ lat_obj = stats["latency"][0]["latency"]
lat_a = fmt_latency(
- str(lat_obj[u"total_min"]), str(lat_obj[u"average"]),
- str(lat_obj[u"total_max"]), str(lat_obj[u"hdrh"]))
+ str(lat_obj["total_min"]), str(lat_obj["average"]),
+ str(lat_obj["total_max"]), str(lat_obj["hdrh"]))
+ # Do not bother with the other dir latency if unidir.
if traffic_directions > 1:
- lat_obj = stats[u"latency"][1][u"latency"]
+ lat_obj = stats["latency"][1]["latency"]
lat_b = fmt_latency(
- str(lat_obj[u"total_min"]), str(lat_obj[u"average"]),
- str(lat_obj[u"total_max"]), str(lat_obj[u"hdrh"]))
+ str(lat_obj["total_min"]), str(lat_obj["average"]),
+ str(lat_obj["total_max"]), str(lat_obj["hdrh"]))
- if traffic_directions > 1:
- total_sent = stats[0][u"opackets"] + stats[1][u"opackets"]
- total_rcvd = stats[0][u"ipackets"] + stats[1][u"ipackets"]
- else:
- total_sent = stats[port_0][u"opackets"]
- total_rcvd = stats[port_1][u"ipackets"]
- try:
- approximated_rate = total_sent / approximated_duration
- except ZeroDivisionError:
- pass
-
- print(f"\npackets lost from {port_0} --> {port_1}: {lost_a} pkts")
- if traffic_directions > 1:
- print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
+ total_rcvd = stats["total"]["ipackets"]
+ total_sent = stats["total"]["opackets"]
except STLError:
- print(u"T-Rex STL runtime error!", file=sys.stderr)
+ print("T-Rex STL runtime error!", file=sys.stderr)
raise
finally:
@@ -275,13 +257,14 @@ def simple_burst(
if client:
client.disconnect()
print(
- f"rate={rate!r}, total_received={total_rcvd}, "
- f"total_sent={total_sent}, frame_loss={lost_a + lost_b}, "
- f"target_duration={duration!r}, "
- f"approximated_duration={approximated_duration!r}, "
- f"approximated_rate={approximated_rate}, "
- f"latency_stream_0(usec)={lat_a}, "
- f"latency_stream_1(usec)={lat_b}, "
+ f"rate={rate!r}; "
+ f"total_received={total_rcvd}; "
+ f"total_sent={total_sent}; "
+ f"frame_loss={total_sent - total_rcvd}; "
+ f"target_duration={duration!r}; "
+ f"approximated_duration={approximated_duration!r}; "
+ f"latency_stream_0(usec)={lat_a}; "
+ f"latency_stream_1(usec)={lat_b}; "
)
@@ -293,48 +276,44 @@ def main():
"""
parser = argparse.ArgumentParser()
parser.add_argument(
- u"-p", u"--profile", required=True, type=str,
- help=u"Python traffic profile."
- )
- parser.add_argument(
- u"-d", u"--duration", required=True, type=float,
- help=u"Duration of traffic run."
+ "-p", "--profile", required=True, type=str,
+ help="Python traffic profile."
)
parser.add_argument(
- u"-s", u"--frame_size", required=True,
- help=u"Size of a Frame without padding and IPG."
+ "-d", "--duration", required=True, type=float,
+ help="Duration of traffic run."
)
parser.add_argument(
- u"-r", u"--rate", required=True,
- help=u"Traffic rate with included units (pps)."
+ "-s", "--frame_size", required=True,
+ help="Size of a Frame without padding and IPG."
)
parser.add_argument(
- u"-w", u"--warmup_time", type=float, default=5.0,
- help=u"Traffic warm-up time in seconds, 0 = disable."
+ "-r", "--rate", required=True,
+ help="Traffic rate with included units (pps)."
)
parser.add_argument(
- u"--port_0", required=True, type=int,
- help=u"Port 0 on the traffic generator."
+ "--ports", required=True, type=int, nargs="+",
+ help="Port list on the traffic generator."
)
parser.add_argument(
- u"--port_1", required=True, type=int,
- help=u"Port 1 on the traffic generator."
+ "--async_start", action="store_true", default=False,
+ help="Non-blocking call of the script."
)
parser.add_argument(
- u"--async_start", action=u"store_true", default=False,
- help=u"Non-blocking call of the script."
+ "--latency", action="store_true", default=False,
+ help="Add latency stream."
)
parser.add_argument(
- u"--latency", action=u"store_true", default=False,
- help=u"Add latency stream."
+ "--traffic_directions", type=int, default=2,
+ help="Send bi- (2) or uni- (1) directional traffic."
)
parser.add_argument(
- u"--traffic_directions", type=int, default=2,
- help=u"Send bi- (2) or uni- (1) directional traffic."
+ "--force", action="store_true", default=False,
+ help="Force start regardless of ports state."
)
parser.add_argument(
- u"--force", action=u"store_true", default=False,
- help=u"Force start regardless of ports state."
+ "--delay", required=True, type=float, default=0.0,
+ help="Delay assumed for traffic, sleep time is increased by this [s]."
)
args = parser.parse_args()
@@ -345,12 +324,18 @@ def main():
framesize = args.frame_size
simple_burst(
- profile_file=args.profile, duration=args.duration, framesize=framesize,
- rate=args.rate, warmup_time=args.warmup_time, port_0=args.port_0,
- port_1=args.port_1, latency=args.latency, async_start=args.async_start,
- traffic_directions=args.traffic_directions, force=args.force
+ profile_file=args.profile,
+ duration=args.duration,
+ framesize=framesize,
+ rate=args.rate,
+ ports=args.ports,
+ latency=args.latency,
+ async_start=args.async_start,
+ traffic_directions=args.traffic_directions,
+ force=args.force,
+ delay=args.delay,
)
-if __name__ == u"__main__":
+if __name__ == "__main__":
main()
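
As with the ASTF variant, a hedged call of the reworked STL entry point with placeholder values; even-indexed ports transmit in one direction and odd-indexed ports in the other, matching the ports[::2]/ports[1::2] slicing above:

    # All values below are illustrative placeholders.
    simple_burst(
        profile_file="/opt/trex-profiles/stl_ip4_example.py",  # hypothetical path
        duration=10.0,
        framesize=64,
        rate="10000pps",         # rate string with units, as the CLI help describes
        ports=[0, 1],            # port pairs: even ports one direction, odd the other
        latency=True,
        async_start=False,
        traffic_directions=2,
        force=False,
        delay=0.5,               # extra sleep before the explicit stop
    )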
diff --git a/GPL/tools/trex/trex_stl_stop.py b/GPL/tools/trex/trex_stl_stop.py
index dcdae7c10a..c03624ba24 100644
--- a/GPL/tools/trex/trex_stl_stop.py
+++ b/GPL/tools/trex/trex_stl_stop.py
@@ -1,11 +1,21 @@
#!/usr/bin/python3
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
+# Copyright (c) 2023 Cisco and/or its affiliates.
+#
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Licensed under the Apache License 2.0 or
+# GNU General Public License v2.0 or later; you may not use this file
+# except in compliance with one of these Licenses. You
+# may obtain a copy of the Licenses at:
#
# http://www.apache.org/licenses/LICENSE-2.0
+# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
+#
+# Note: If this file is linked with Scapy, which is GPLv2+, your use of it
+# must be under GPLv2+. If at any point in the future it is no longer linked
+# with Scapy (or other GPLv2+ licensed software), you are free to choose
+# Apache 2.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,11 +30,6 @@ Requirements:
- compiled and running T-REX process (eg. ./t-rex-64 -i)
- trex.stl.api library
- Script must be executed on a node with T-REX instance
-
-Functionality:
-1. Stop any running traffic
-2. Optionally restore reference counter values.
-3. Return conter differences.
"""
import argparse
@@ -34,72 +39,63 @@ import sys
from collections import OrderedDict # Needed to parse xstats representation.
sys.path.insert(
- 0, u"/opt/trex-core-2.82/scripts/automation/trex_control_plane/interactive/"
+ 0, "/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.stl.api import *
+from trex.stl.api import STLClient
def main():
"""Stop traffic if any is running. Report xstats."""
parser = argparse.ArgumentParser()
parser.add_argument(
- u"--xstat0", type=str, default=u"",
- help=u"Reference xstat object if any."
- )
- parser.add_argument(
- u"--xstat1", type=str, default=u"",
- help=u"Reference xstat object if any."
+ "--xstat", type=str, nargs="*", help="Reference xstat object if any."
)
args = parser.parse_args()
client = STLClient()
try:
- # connect to server
client.connect()
-
client.acquire(force=True)
- # TODO: Support unidirection.
- client.stop(ports=[0, 1])
+ client.stop()
+ xstats = list()
# Read the stats after the test,
# we need to update values before the last trial started.
- if args.xstat0:
- snapshot = eval(args.xstat0)
- client.ports[0].get_xstats().reference_stats = snapshot
- if args.xstat1:
- snapshot = eval(args.xstat1)
- client.ports[1].get_xstats().reference_stats = snapshot
- # Now we can call the official method to get differences.
- xstats0 = client.get_xstats(0)
- xstats1 = client.get_xstats(1)
-
- # If STLError happens, let the script fail with stack trace.
+ for i in range(len(client.ports)):
+ if args.xstat[i]:
+ snapshot = eval(args.xstat[i])
+ client.ports[i].get_xstats().reference_stats = snapshot
+ # Now we can call the official method to get differences.
+ xstats.append(client.get_xstats(i))
+ print(f"##### statistics port {i} #####")
+ print(json.dumps(xstats[i], indent=4, separators=(",", ": ")))
finally:
client.disconnect()
- print(u"##### statistics port 0 #####")
- print(json.dumps(xstats0, indent=4, separators=(u",", u": ")))
- print(u"##### statistics port 1 #####")
- print(json.dumps(xstats1, indent=4, separators=(u",", u": ")))
-
- tx_0, rx_0 = xstats0[u"tx_good_packets"], xstats0[u"rx_good_packets"]
- tx_1, rx_1 = xstats1[u"tx_good_packets"], xstats1[u"rx_good_packets"]
- lost_a, lost_b = tx_0 - rx_1, tx_1 - rx_0
+ for idx,stat in enumerate(zip(xstats[0::2], xstats[1::2])):
+ lost_r = stat[0]["tx_good_packets"] - stat[1]["rx_good_packets"]
+ lost_l = stat[1]["tx_good_packets"] - stat[0]["rx_good_packets"]
+ print(f"packets lost from {idx*2} --> {idx*2+1}: {lost_r} pkts")
+ print(f"packets lost from {idx*2+1} --> {idx*2}: {lost_l} pkts")
- print(f"\npackets lost from 0 --> 1: {lost_a} pkts")
- print(f"packets lost from 1 --> 0: {lost_b} pkts")
+ total_rcvd = 0
+ total_sent = 0
+ for stat in xstats:
+ total_rcvd += stat["rx_good_packets"]
+ total_sent += stat["tx_good_packets"]
- total_rcvd, total_sent = rx_0 + rx_1, tx_0 + tx_1
- total_lost = total_sent - total_rcvd
- # TODO: Add latency.
print(
- f"rate='unknown', total_received={total_rcvd}, "
- f"total_sent={total_sent}, frame_loss={total_lost}, "
- f"target_duration='manual', approximated_duration='manual', "
- f"approximated_rate='unknown', "
- f"latency_stream_0(usec)=-1/-1/-1, latency_stream_1(usec)=-1/-1/-1"
+ f"rate='unknown'; "
+ f"total_received={total_rcvd}; "
+ f"total_sent={total_sent}; "
+ f"frame_loss={total_sent - total_rcvd}; "
+ f"target_duration='manual'; "
+ f"approximated_duration='manual'; "
+ f"approximated_rate='unknown'; "
+ f"latency_stream_0(usec)=-1/-1/-1; "
+ f"latency_stream_1(usec)=-1/-1/-1; "
)
-if __name__ == u"__main__":
+if __name__ == "__main__":
main()
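
Finally, the aggregate totals in the summary line are plain sums over the per-port xstats; with made-up numbers:

    # Made-up per-port counters, only to illustrate the totals.
    xstats = [
        {"tx_good_packets": 500, "rx_good_packets": 490},  # port 0
        {"tx_good_packets": 500, "rx_good_packets": 498},  # port 1
    ]
    total_rcvd = sum(stat["rx_good_packets"] for stat in xstats)  # 988
    total_sent = sum(stat["tx_good_packets"] for stat in xstats)  # 1000
    print(f"frame_loss={total_sent - total_rcvd}")                # frame_loss=12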