Diffstat (limited to 'GPL/tools')
-rw-r--r--  GPL/tools/trex/trex_astf_assert.py   |   6
-rw-r--r--  GPL/tools/trex/trex_astf_profile.py  | 306
-rw-r--r--  GPL/tools/trex/trex_astf_stop.py     |  73
-rw-r--r--  GPL/tools/trex/trex_stl_assert.py    |  11
-rw-r--r--  GPL/tools/trex/trex_stl_profile.py   | 187
-rw-r--r--  GPL/tools/trex/trex_stl_stop.py      |  69
6 files changed, 310 insertions(+), 342 deletions(-)
diff --git a/GPL/tools/trex/trex_astf_assert.py b/GPL/tools/trex/trex_astf_assert.py
index 3824fa12df..107253f5f0 100644
--- a/GPL/tools/trex/trex_astf_assert.py
+++ b/GPL/tools/trex/trex_astf_assert.py
@@ -1,6 +1,6 @@
#!/usr/bin/python3
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
@@ -38,9 +38,9 @@ Functionality:
import sys
sys.path.insert(
- 0, u"/opt/trex-core-2.86/scripts/automation/trex_control_plane/interactive/"
+ 0, u"/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.astf.api import *
+from trex.astf.api import ASTFClient, TRexError
def main():
diff --git a/GPL/tools/trex/trex_astf_profile.py b/GPL/tools/trex/trex_astf_profile.py
index 6faa2c008f..44d81e92f9 100644
--- a/GPL/tools/trex/trex_astf_profile.py
+++ b/GPL/tools/trex/trex_astf_profile.py
@@ -1,6 +1,6 @@
#!/usr/bin/python3
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
@@ -34,9 +34,9 @@ import sys
import time
sys.path.insert(
- 0, u"/opt/trex-core-2.86/scripts/automation/trex_control_plane/interactive/"
+ 0, u"/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.astf.api import *
+from trex.astf.api import ASTFClient, ASTFProfile, TRexError
def fmt_latency(lat_min, lat_avg, lat_max, hdrh):
@@ -73,12 +73,14 @@ def simple_burst(
profile_file,
duration,
framesize,
+ n_data_frames,
multiplier,
port_0,
port_1,
latency,
async_start=False,
traffic_directions=2,
+ delay=0.0,
):
"""Send traffic and measure packet loss and latency.
@@ -98,8 +100,9 @@ def simple_burst(
Duration details:
Contrary to stateless mode, ASTF profiles typically limit the number
of flows/transactions that can happen.
- The caller is expected to set the duration parameter accordingly to
- this limit and multiplier, including any overheads.
+ The caller is expected to set the duration parameter to an idealized value,
+ and to set the delay argument when TRex is expected
+ to finish processing replies later (including a window for latency).
See *_traffic_duration output fields for TRex's measurement
of the real traffic duration (should be without any inactivity overheads).
If traffic has not ended by the final time, the traffic
@@ -111,27 +114,27 @@ def simple_burst(
:param profile_file: A python module with T-rex traffic profile.
:param duration: Expected duration for all transactions to finish,
- assuming only tolerable duration stretching happens.
- This includes later start of later transactions
- (according to TPS multiplier) and expected duration of each transaction.
- Critically, this also includes any delay TRex shows when starting
- traffic (but not the similar delay during stopping).
+ without any TRex-related delays, not even latency.
:param framesize: Frame size.
+ :param n_data_frames: Controls "size" of transaction for TPUT tests.
:param multiplier: Multiplier of profile CPS.
:param port_0: Port 0 on the traffic generator.
:param port_1: Port 1 on the traffic generator.
:param latency: With latency stats.
:param async_start: Start the traffic and exit.
:param traffic_directions: Bidirectional (2) or unidirectional (1) traffic.
+ :param delay: Time increase [s] for sleep duration.
:type profile_file: str
:type duration: float
:type framesize: int or str
+ :type n_data_frames: int
:type multiplier: int
:type port_0: int
:type port_1: int
:type latency: bool
:type async_start: bool
:type traffic_directions: int
+ :type delay: float
"""
client = None
total_received = 0
@@ -151,7 +154,11 @@ def simple_burst(
# TODO: key-values pairs to the profile file
# - ips ?
print(f"### Profile file:\n{profile_file}")
- profile = ASTFProfile.load(profile_file, framesize=framesize)
+ profile = ASTFProfile.load(
+ profile_file,
+ framesize=framesize,
+ n_data_frames=n_data_frames,
+ )
except TRexError:
print(f"Error while loading profile '{profile_file}'!")
raise
@@ -167,9 +174,7 @@ def simple_burst(
# Load the profile.
client.load_profile(profile)
- ports = [port_0]
- if traffic_directions > 1:
- ports.append(port_1)
+ ports = [port_0, port_1]
# Clear the stats before injecting.
lost_a = 0
@@ -179,25 +184,21 @@ def simple_burst(
# Choose CPS and start traffic.
client.start(
mult=multiplier,
- # Increase the input duration slightly,
- # to ensure it does not end before sleep&stop below happens.
- duration=duration + 0.1 if duration > 0 else duration,
+ duration=duration,
nc=True,
latency_pps=int(multiplier) if latency else 0,
client_mask=2**len(ports)-1,
)
- time_start = time.monotonic()
+ time_stop = time.monotonic() + duration + delay
if async_start:
# For async stop, we need to export the current snapshot.
xsnap0 = client.ports[port_0].get_xstats().reference_stats
print(f"Xstats snapshot 0: {xsnap0!r}")
- if traffic_directions > 1:
- xsnap1 = client.ports[port_1].get_xstats().reference_stats
- print(f"Xstats snapshot 1: {xsnap1!r}")
+ xsnap1 = client.ports[port_1].get_xstats().reference_stats
+ print(f"Xstats snapshot 1: {xsnap1!r}")
else:
- time.sleep(duration)
-
+ time.sleep(duration + delay)
# Do not block yet, the existing transactions may take long time
# to finish. We need an action that is almost reset(),
# but without clearing stats.
@@ -208,7 +209,7 @@ def simple_burst(
client.stop(block=True)
# Read the stats after the traffic stopped (or time up).
- stats[time.monotonic() - time_start] = client.get_stats(
+ stats[time.monotonic() - time_stop] = client.get_stats(
ports=ports
)
@@ -216,17 +217,15 @@ def simple_burst(
for warning in client.get_warnings():
print(warning)
- # Now finish the complete reset.
- client.reset()
+ # No profile cleanup here, reset will be done in the finally block.
print(u"##### Statistics #####")
print(json.dumps(stats, indent=4, separators=(u",", u": ")))
- approximated_duration = list(sorted(stats.keys()))[-1]
+ approximated_duration = duration + list(sorted(stats.keys()))[-1]
stats = stats[sorted(stats.keys())[-1]]
lost_a = stats[port_0][u"opackets"] - stats[port_1][u"ipackets"]
- if traffic_directions > 1:
- lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
+ lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
# TODO: Latency measurement not used at this phase. This part will
# be aligned in another commit.
@@ -246,131 +245,126 @@ def simple_burst(
str(lat_obj[u"max_usec"]), u"-")
lat_b_hist = str(lat_obj[u"histogram"])
- if traffic_directions > 1:
- total_sent = \
- stats[port_0][u"opackets"] + stats[port_1][u"opackets"]
- total_received = \
- stats[port_0][u"ipackets"] + stats[port_1][u"ipackets"]
- client_sent = stats[port_0][u"opackets"]
- client_received = stats[port_0][u"ipackets"]
- client_stats = stats[u"traffic"][u"client"]
- server_stats = stats[u"traffic"][u"server"]
- # Some zero counters are not sent
- # Active and established flows UDP/TCP
+ total_sent = \
+ stats[port_0][u"opackets"] + stats[port_1][u"opackets"]
+ total_received = \
+ stats[port_0][u"ipackets"] + stats[port_1][u"ipackets"]
+ client_sent = stats[port_0][u"opackets"]
+ client_received = stats[port_0][u"ipackets"]
+ client_stats = stats[u"traffic"][u"client"]
+ server_stats = stats[u"traffic"][u"server"]
+ # Some zero counters are not sent
+ # Active and established flows UDP/TCP
+ # Client
+ c_act_flows = client_stats[u"m_active_flows"]
+ c_est_flows = client_stats[u"m_est_flows"]
+ c_traffic_duration = client_stats.get(u"m_traffic_duration", 0)
+ l7_data = f"client_active_flows={c_act_flows}; "
+ l7_data += f"client_established_flows={c_est_flows}; "
+ l7_data += f"client_traffic_duration={c_traffic_duration}; "
+ # Possible errors
+ # Too many packets in NIC rx queue
+ c_err_rx_throttled = client_stats.get(u"err_rx_throttled", 0)
+ l7_data += f"client_err_rx_throttled={c_err_rx_throttled}; "
+ # Number of client side flows that were not opened
+ # due to flow-table overflow
+ c_err_nf_throttled = client_stats.get(u"err_c_nf_throttled", 0)
+ l7_data += f"client_err_nf_throttled={c_err_nf_throttled}; "
+ # Too many flows
+ c_err_flow_overflow = client_stats.get(u"err_flow_overflow", 0)
+ l7_data += f"client_err_flow_overflow={c_err_flow_overflow}; "
+ # Server
+ s_act_flows = server_stats[u"m_active_flows"]
+ s_est_flows = server_stats[u"m_est_flows"]
+ s_traffic_duration = server_stats.get(u"m_traffic_duration", 0)
+ l7_data += f"server_active_flows={s_act_flows}; "
+ l7_data += f"server_established_flows={s_est_flows}; "
+ l7_data += f"server_traffic_duration={s_traffic_duration}; "
+ # Possible errors
+ # Too many packets in NIC rx queue
+ s_err_rx_throttled = server_stats.get(u"err_rx_throttled", 0)
+ l7_data += f"client_err_rx_throttled={s_err_rx_throttled}; "
+ if u"udp" in profile_file:
# Client
- c_act_flows = client_stats[u"m_active_flows"]
- c_est_flows = client_stats[u"m_est_flows"]
- c_traffic_duration = client_stats.get(u"m_traffic_duration", 0)
- l7_data = f"client_active_flows={c_act_flows}; "
- l7_data += f"client_established_flows={c_est_flows}; "
- l7_data += f"client_traffic_duration={c_traffic_duration}; "
- # Possible errors
- # Too many packets in NIC rx queue
- c_err_rx_throttled = client_stats.get(u"err_rx_throttled", 0)
- l7_data += f"client_err_rx_throttled={c_err_rx_throttled}; "
- # Number of client side flows that were not opened
- # due to flow-table overflow
- c_err_nf_throttled = client_stats.get(u"err_c_nf_throttled", 0)
- l7_data += f"client_err_nf_throttled={c_err_nf_throttled}; "
- # Too many flows
- c_err_flow_overflow = client_stats.get(u"err_flow_overflow", 0)
- l7_data += f"client_err_flow_overflow={c_err_flow_overflow}; "
+ # Established connections
+ c_udp_connects = client_stats.get(u"udps_connects", 0)
+ l7_data += f"client_udp_connects={c_udp_connects}; "
+ # Closed connections
+ c_udp_closed = client_stats.get(u"udps_closed", 0)
+ l7_data += f"client_udp_closed={c_udp_closed}; "
+ # Sent bytes
+ c_udp_sndbyte = client_stats.get(u"udps_sndbyte", 0)
+ l7_data += f"client_udp_tx_bytes={c_udp_sndbyte}; "
+ # Sent packets
+ c_udp_sndpkt = client_stats.get(u"udps_sndpkt", 0)
+ l7_data += f"client_udp_tx_packets={c_udp_sndpkt}; "
+ # Received bytes
+ c_udp_rcvbyte = client_stats.get(u"udps_rcvbyte", 0)
+ l7_data += f"client_udp_rx_bytes={c_udp_rcvbyte}; "
+ # Received packets
+ c_udp_rcvpkt = client_stats.get(u"udps_rcvpkt", 0)
+ l7_data += f"client_udp_rx_packets={c_udp_rcvpkt}; "
+ # Keep alive drops
+ c_udp_keepdrops = client_stats.get(u"udps_keepdrops", 0)
+ l7_data += f"client_udp_keep_drops={c_udp_keepdrops}; "
+ # Client without flow
+ c_err_cwf = client_stats.get(u"err_cwf", 0)
+ l7_data += f"client_err_cwf={c_err_cwf}; "
# Server
- s_act_flows = server_stats[u"m_active_flows"]
- s_est_flows = server_stats[u"m_est_flows"]
- s_traffic_duration = server_stats.get(u"m_traffic_duration", 0)
- l7_data += f"server_active_flows={s_act_flows}; "
- l7_data += f"server_established_flows={s_est_flows}; "
- l7_data += f"server_traffic_duration={s_traffic_duration}; "
- # Possible errors
- # Too many packets in NIC rx queue
- s_err_rx_throttled = server_stats.get(u"err_rx_throttled", 0)
- l7_data += f"client_err_rx_throttled={s_err_rx_throttled}; "
- if u"udp" in profile_file:
- # Client
- # Established connections
- c_udp_connects = client_stats.get(u"udps_connects", 0)
- l7_data += f"client_udp_connects={c_udp_connects}; "
- # Closed connections
- c_udp_closed = client_stats.get(u"udps_closed", 0)
- l7_data += f"client_udp_closed={c_udp_closed}; "
- # Sent bytes
- c_udp_sndbyte = client_stats.get(u"udps_sndbyte", 0)
- l7_data += f"client_udp_tx_bytes={c_udp_sndbyte}; "
- # Sent packets
- c_udp_sndpkt = client_stats.get(u"udps_sndpkt", 0)
- l7_data += f"client_udp_tx_packets={c_udp_sndpkt}; "
- # Received bytes
- c_udp_rcvbyte = client_stats.get(u"udps_rcvbyte", 0)
- l7_data += f"client_udp_rx_bytes={c_udp_rcvbyte}; "
- # Received packets
- c_udp_rcvpkt = client_stats.get(u"udps_rcvpkt", 0)
- l7_data += f"client_udp_rx_packets={c_udp_rcvpkt}; "
- # Keep alive drops
- c_udp_keepdrops = client_stats.get(u"udps_keepdrops", 0)
- l7_data += f"client_udp_keep_drops={c_udp_keepdrops}; "
- # Client without flow
- c_err_cwf = client_stats.get(u"err_cwf", 0)
- l7_data += f"client_err_cwf={c_err_cwf}; "
- # Server
- # Accepted connections
- s_udp_accepts = server_stats.get(u"udps_accepts", 0)
- l7_data += f"server_udp_accepts={s_udp_accepts}; "
- # Closed connections
- s_udp_closed = server_stats.get(u"udps_closed", 0)
- l7_data += f"server_udp_closed={s_udp_closed}; "
- # Sent bytes
- s_udp_sndbyte = server_stats.get(u"udps_sndbyte", 0)
- l7_data += f"server_udp_tx_bytes={s_udp_sndbyte}; "
- # Sent packets
- s_udp_sndpkt = server_stats.get(u"udps_sndpkt", 0)
- l7_data += f"server_udp_tx_packets={s_udp_sndpkt}; "
- # Received bytes
- s_udp_rcvbyte = server_stats.get(u"udps_rcvbyte", 0)
- l7_data += f"server_udp_rx_bytes={s_udp_rcvbyte}; "
- # Received packets
- s_udp_rcvpkt = server_stats.get(u"udps_rcvpkt", 0)
- l7_data += f"server_udp_rx_packets={s_udp_rcvpkt}; "
- elif u"tcp" in profile_file:
- # Client
- # Connection attempts
- c_tcp_connattempt = client_stats.get(u"tcps_connattempt", 0)
- l7_data += f"client_tcp_connattempt={c_tcp_connattempt}; "
- # Established connections
- c_tcp_connects = client_stats.get(u"tcps_connects", 0)
- l7_data += f"client_tcp_connects={c_tcp_connects}; "
- # Closed connections
- c_tcp_closed = client_stats.get(u"tcps_closed", 0)
- l7_data += f"client_tcp_closed={c_tcp_closed}; "
- # Send bytes
- c_tcp_sndbyte = client_stats.get(u"tcps_sndbyte", 0)
- l7_data += f"client_tcp_tx_bytes={c_tcp_sndbyte}; "
- # Received bytes
- c_tcp_rcvbyte = client_stats.get(u"tcps_rcvbyte", 0)
- l7_data += f"client_tcp_rx_bytes={c_tcp_rcvbyte}; "
- # Server
- # Accepted connections
- s_tcp_accepts = server_stats.get(u"tcps_accepts", 0)
- l7_data += f"server_tcp_accepts={s_tcp_accepts}; "
- # Established connections
- s_tcp_connects = server_stats.get(u"tcps_connects", 0)
- l7_data += f"server_tcp_connects={s_tcp_connects}; "
- # Closed connections
- s_tcp_closed = server_stats.get(u"tcps_closed", 0)
- l7_data += f"server_tcp_closed={s_tcp_closed}; "
- # Sent bytes
- s_tcp_sndbyte = server_stats.get(u"tcps_sndbyte", 0)
- l7_data += f"server_tcp_tx_bytes={s_tcp_sndbyte}; "
- # Received bytes
- s_tcp_rcvbyte = server_stats.get(u"tcps_rcvbyte", 0)
- l7_data += f"server_tcp_rx_bytes={s_tcp_rcvbyte}; "
- else:
- total_sent = stats[port_0][u"opackets"]
- total_received = stats[port_1][u"ipackets"]
+ # Accepted connections
+ s_udp_accepts = server_stats.get(u"udps_accepts", 0)
+ l7_data += f"server_udp_accepts={s_udp_accepts}; "
+ # Closed connections
+ s_udp_closed = server_stats.get(u"udps_closed", 0)
+ l7_data += f"server_udp_closed={s_udp_closed}; "
+ # Sent bytes
+ s_udp_sndbyte = server_stats.get(u"udps_sndbyte", 0)
+ l7_data += f"server_udp_tx_bytes={s_udp_sndbyte}; "
+ # Sent packets
+ s_udp_sndpkt = server_stats.get(u"udps_sndpkt", 0)
+ l7_data += f"server_udp_tx_packets={s_udp_sndpkt}; "
+ # Received bytes
+ s_udp_rcvbyte = server_stats.get(u"udps_rcvbyte", 0)
+ l7_data += f"server_udp_rx_bytes={s_udp_rcvbyte}; "
+ # Received packets
+ s_udp_rcvpkt = server_stats.get(u"udps_rcvpkt", 0)
+ l7_data += f"server_udp_rx_packets={s_udp_rcvpkt}; "
+ elif u"tcp" in profile_file:
+ # Client
+ # Connection attempts
+ c_tcp_connattempt = client_stats.get(u"tcps_connattempt", 0)
+ l7_data += f"client_tcp_connattempt={c_tcp_connattempt}; "
+ # Established connections
+ c_tcp_connects = client_stats.get(u"tcps_connects", 0)
+ l7_data += f"client_tcp_connects={c_tcp_connects}; "
+ # Closed connections
+ c_tcp_closed = client_stats.get(u"tcps_closed", 0)
+ l7_data += f"client_tcp_closed={c_tcp_closed}; "
+ # Send bytes
+ c_tcp_sndbyte = client_stats.get(u"tcps_sndbyte", 0)
+ l7_data += f"client_tcp_tx_bytes={c_tcp_sndbyte}; "
+ # Received bytes
+ c_tcp_rcvbyte = client_stats.get(u"tcps_rcvbyte", 0)
+ l7_data += f"client_tcp_rx_bytes={c_tcp_rcvbyte}; "
+ # Server
+ # Accepted connections
+ s_tcp_accepts = server_stats.get(u"tcps_accepts", 0)
+ l7_data += f"server_tcp_accepts={s_tcp_accepts}; "
+ # Established connections
+ s_tcp_connects = server_stats.get(u"tcps_connects", 0)
+ l7_data += f"server_tcp_connects={s_tcp_connects}; "
+ # Closed connections
+ s_tcp_closed = server_stats.get(u"tcps_closed", 0)
+ l7_data += f"server_tcp_closed={s_tcp_closed}; "
+ # Sent bytes
+ s_tcp_sndbyte = server_stats.get(u"tcps_sndbyte", 0)
+ l7_data += f"server_tcp_tx_bytes={s_tcp_sndbyte}; "
+ # Received bytes
+ s_tcp_rcvbyte = server_stats.get(u"tcps_rcvbyte", 0)
+ l7_data += f"server_tcp_rx_bytes={s_tcp_rcvbyte}; "
print(f"packets lost from {port_0} --> {port_1}: {lost_a} pkts")
- if traffic_directions > 1:
- print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
+ print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
except TRexError:
print(u"T-Rex ASTF runtime error!", file=sys.stderr)
@@ -381,7 +375,7 @@ def simple_burst(
if async_start:
client.disconnect(stop_traffic=False, release_ports=True)
else:
- client.clear_profile()
+ client.reset()
client.disconnect()
print(
f"multiplier={multiplier!r}; "
@@ -419,6 +413,10 @@ def main():
help=u"Size of a Frame without padding and IPG."
)
parser.add_argument(
+ u"--n_data_frames", type=int, default=5,
+ help=u"Use this many data frames per transaction and direction (TPUT)."
+ )
+ parser.add_argument(
u"-m", u"--multiplier", required=True, type=float,
help=u"Multiplier of profile CPS."
)
@@ -442,6 +440,10 @@ def main():
u"--traffic_directions", type=int, default=2,
help=u"Send bi- (2) or uni- (1) directional traffic."
)
+ parser.add_argument(
+ u"--delay", required=True, type=float, default=0.0,
+ help=u"Allowed time overhead, sleep time is increased by this [s]."
+ )
args = parser.parse_args()
@@ -454,12 +456,14 @@ def main():
profile_file=args.profile,
duration=args.duration,
framesize=framesize,
+ n_data_frames=args.n_data_frames,
multiplier=args.multiplier,
port_0=args.port_0,
port_1=args.port_1,
latency=args.latency,
async_start=args.async_start,
traffic_directions=args.traffic_directions,
+ delay=args.delay,
)
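To make the revised duration/delay contract concrete, here is a minimal standalone sketch. It is not taken from the patch; the helper name and its parameters (transactions, transaction_time, trex_overhead, latency_window) are illustrative assumptions about how a caller might split the idealized duration from the extra sleep budget before invoking simple_burst().

def compute_timing(transactions, multiplier, transaction_time,
                   trex_overhead, latency_window):
    """Sketch only: return (duration, delay) for simple_burst().

    duration covers the idealized traffic time: later transactions start
    later (per the CPS multiplier) and each transaction takes some time,
    with no TRex overheads included.
    delay is the extra sleep budget for TRex start/stop overhead plus a
    window for late replies (latency).
    """
    duration = transactions / float(multiplier) + transaction_time
    delay = trex_overhead + latency_window
    return duration, delay

# Example: 10000 transactions at 1000 CPS, 0.1 s per transaction,
# 1.0 s TRex overhead and 0.5 s latency window -> sleep(10.1 + 1.5).
duration, delay = compute_timing(10000, 1000, 0.1, 1.0, 0.5)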
diff --git a/GPL/tools/trex/trex_astf_stop.py b/GPL/tools/trex/trex_astf_stop.py
index 820905ec6c..73c058390c 100644
--- a/GPL/tools/trex/trex_astf_stop.py
+++ b/GPL/tools/trex/trex_astf_stop.py
@@ -1,6 +1,6 @@
#!/usr/bin/python3
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
@@ -30,11 +30,6 @@ Requirements:
- compiled and running T-REX process (eg. ./t-rex-64 -i)
- trex.astf.api library
- Script must be executed on a node with T-REX instance
-
-Functionality:
-1. Stop any running traffic
-2. Optionally restore reference counter values.
-3. Return conter differences.
"""
import argparse
@@ -44,75 +39,63 @@ import sys
from collections import OrderedDict # Needed to parse xstats representation.
sys.path.insert(
- 0, u"/opt/trex-core-2.86/scripts/automation/trex_control_plane/interactive/"
+ 0, "/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.astf.api import *
+from trex.astf.api import ASTFClient
def main():
"""Stop traffic if any is running. Report xstats."""
parser = argparse.ArgumentParser()
parser.add_argument(
- u"--xstat0", type=str, default=u"",
- help=u"Reference xstat object if any."
- )
- parser.add_argument(
- u"--xstat1", type=str, default=u"",
- help=u"Reference xstat object if any."
+ "--xstat", type=str, nargs="*", help="Reference xstat object if any."
)
args = parser.parse_args()
client = ASTFClient()
try:
- # connect to server
client.connect()
-
client.acquire(force=True)
client.stop()
+ xstats = list()
# Read the stats after the test,
# we need to update values before the last trial started.
- if args.xstat0:
- snapshot = eval(args.xstat0)
- client.ports[0].get_xstats().reference_stats = snapshot
- if args.xstat1:
- snapshot = eval(args.xstat1)
- client.ports[1].get_xstats().reference_stats = snapshot
- # Now we can call the official method to get differences.
- xstats0 = client.get_xstats(0)
- xstats1 = client.get_xstats(1)
-
- # If TRexError happens, let the script fail with stack trace.
+ for i in range(len(client.ports)):
+ if args.xstat[i]:
+ snapshot = eval(args.xstat[i])
+ client.ports[i].get_xstats().reference_stats = snapshot
+ # Now we can call the official method to get differences.
+ xstats.append(client.get_xstats(i))
+ print(f"##### statistics port {i} #####")
+ print(json.dumps(xstats[i], indent=4, separators=(",", ": ")))
finally:
- client.clear_profile()
+ client.reset()
client.disconnect()
- # TODO: check xstats format
- print(u"##### statistics port 0 #####")
- print(json.dumps(xstats0, indent=4, separators=(u",", u": ")))
- print(u"##### statistics port 1 #####")
- print(json.dumps(xstats1, indent=4, separators=(u",", u": ")))
-
- tx_0, rx_0 = xstats0[u"tx_good_packets"], xstats0[u"rx_good_packets"]
- tx_1, rx_1 = xstats1[u"tx_good_packets"], xstats1[u"rx_good_packets"]
- lost_a, lost_b = tx_0 - rx_1, tx_1 - rx_0
+ for idx,stat in enumerate(zip(xstats[0::2], xstats[1::2])):
+ lost_r = stat[0]["tx_good_packets"] - stat[1]["rx_good_packets"]
+ lost_l = stat[1]["tx_good_packets"] - stat[0]["rx_good_packets"]
+ print(f"packets lost from {idx*2} --> {idx*2+1}: {lost_r} pkts")
+ print(f"packets lost from {idx*2+1} --> {idx*2}: {lost_l} pkts")
- print(f"packets lost from 0 --> 1: {lost_a} pkts")
- print(f"packets lost from 1 --> 0: {lost_b} pkts")
+ total_rcvd = 0
+ total_sent = 0
+ for stat in xstats:
+ total_rcvd += stat["rx_good_packets"]
+ total_sent += stat["tx_good_packets"]
- total_rcvd, total_sent = rx_0 + rx_1, tx_0 + tx_1
- total_lost = total_sent - total_rcvd
print(
f"cps='unknown'; "
f"total_received={total_rcvd}; "
f"total_sent={total_sent}; "
- f"frame_loss={total_lost}; "
+ f"frame_loss={total_sent - total_rcvd}; "
f"latency_stream_0(usec)=-1/-1/-1; "
f"latency_stream_1(usec)=-1/-1/-1; "
- f"latency_hist_stream_0={}; "
- f"latency_hist_stream_1={}; "
+ f"latency_hist_stream_0=; "
+ f"latency_hist_stream_1=; "
)
-if __name__ == u"__main__":
+if __name__ == "__main__":
main()
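For reference, the per-port-pair accounting used above can be run standalone. A minimal sketch, not part of the patch, with made-up sample numbers; the xstats argument stands in for the list of per-port dictionaries collected from client.get_xstats().

def report_losses(xstats):
    """Sketch only: print losses per port pair and overall totals."""
    for idx, (even, odd) in enumerate(zip(xstats[0::2], xstats[1::2])):
        lost_r = even["tx_good_packets"] - odd["rx_good_packets"]
        lost_l = odd["tx_good_packets"] - even["rx_good_packets"]
        print(f"packets lost from {idx * 2} --> {idx * 2 + 1}: {lost_r} pkts")
        print(f"packets lost from {idx * 2 + 1} --> {idx * 2}: {lost_l} pkts")
    total_rcvd = sum(stat["rx_good_packets"] for stat in xstats)
    total_sent = sum(stat["tx_good_packets"] for stat in xstats)
    print(f"total_received={total_rcvd}; total_sent={total_sent}; "
          f"frame_loss={total_sent - total_rcvd}")

# Sample: two port pairs, one packet lost in the 2 --> 3 direction.
report_losses([
    {"tx_good_packets": 100, "rx_good_packets": 100},
    {"tx_good_packets": 100, "rx_good_packets": 100},
    {"tx_good_packets": 100, "rx_good_packets": 100},
    {"tx_good_packets": 100, "rx_good_packets": 99},
])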
diff --git a/GPL/tools/trex/trex_stl_assert.py b/GPL/tools/trex/trex_stl_assert.py
index 1bd428dc12..d4a092b4b9 100644
--- a/GPL/tools/trex/trex_stl_assert.py
+++ b/GPL/tools/trex/trex_stl_assert.py
@@ -1,6 +1,6 @@
#!/usr/bin/python3
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
@@ -30,17 +30,14 @@ Requirements:
- compiled and running T-Rex process (eg. ./t-rex-64 -i)
- trex.stl.api library
- Script must be executed on a node with T-Rex instance.
-
-Functionality:
-1. Verify the API functionality and get server information.
"""
import sys
sys.path.insert(
- 0, u"/opt/trex-core-2.86/scripts/automation/trex_control_plane/interactive/"
+ 0, "/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.stl.api import *
+from trex.stl.api import STLClient, STLError
def main():
@@ -59,5 +56,5 @@ def main():
client.disconnect()
-if __name__ == u"__main__":
+if __name__ == "__main__":
main()
diff --git a/GPL/tools/trex/trex_stl_profile.py b/GPL/tools/trex/trex_stl_profile.py
index ba2fc7c6db..ac53e90571 100644
--- a/GPL/tools/trex/trex_stl_profile.py
+++ b/GPL/tools/trex/trex_stl_profile.py
@@ -1,6 +1,6 @@
#!/usr/bin/python3
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
@@ -34,9 +34,9 @@ import sys
import time
sys.path.insert(
- 0, u"/opt/trex-core-2.86/scripts/automation/trex_control_plane/interactive/"
+ 0, "/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.stl.api import *
+from trex.stl.api import STLClient, STLProfile, STLError
def fmt_latency(lat_min, lat_avg, lat_max, hdrh):
@@ -66,7 +66,7 @@ def fmt_latency(lat_min, lat_avg, lat_max, hdrh):
except ValueError:
t_max = int(-1)
- return u"/".join(str(tmp) for tmp in (t_min, t_avg, t_max, hdrh))
+ return "/".join(str(tmp) for tmp in (t_min, t_avg, t_max, hdrh))
def simple_burst(
@@ -74,12 +74,12 @@ def simple_burst(
duration,
framesize,
rate,
- port_0,
- port_1,
+ ports,
latency,
async_start=False,
traffic_directions=2,
force=False,
+ delay=0.0,
):
"""Send traffic and measure packet loss and latency.
@@ -102,31 +102,29 @@ def simple_burst(
:param framesize: Frame size.
:param duration: Duration of traffic run in seconds (-1=infinite).
:param rate: Traffic rate [percentage, pps, bps].
- :param port_0: Port 0 on the traffic generator.
- :param port_1: Port 1 on the traffic generator.
+ :param ports: Port list on the traffic generator.
:param latency: With latency stats.
:param async_start: Start the traffic and exit.
:param traffic_directions: Bidirectional (2) or unidirectional (1) traffic.
:param force: Force start regardless of ports state.
+ :param delay: Sleep overhead [s].
:type profile_file: str
:type framesize: int or str
:type duration: float
:type rate: str
- :type port_0: int
- :type port_1: int
+ :type ports: list
:type latency: bool
:type async_start: bool
:type traffic_directions: int
:type force: bool
+ :type delay: float
"""
client = None
total_rcvd = 0
total_sent = 0
approximated_duration = 0.0
- lost_a = 0
- lost_b = 0
- lat_a = u"-1/-1/-1/"
- lat_b = u"-1/-1/-1/"
+ lat_a = "-1/-1/-1/"
+ lat_b = "-1/-1/-1/"
# Read the profile:
try:
@@ -146,47 +144,53 @@ def simple_burst(
# Connect to server:
client.connect()
# Prepare our ports (the machine has 0 <--> 1 with static route):
- client.reset(ports=[port_0, port_1])
- client.remove_all_streams(ports=[port_0, port_1])
+ client.reset()
+ client.remove_all_streams()
- if u"macsrc" in profile_file:
- client.set_port_attr(ports=[port_0, port_1], promiscuous=True)
+ if "macsrc" in profile_file:
+ client.set_port_attr(promiscuous=True)
if isinstance(framesize, int):
- last_stream_a = int((len(streams) - 2 ) / 2)
- last_stream_b = (last_stream_a * 2)
- client.add_streams(streams[0:last_stream_a], ports=[port_0])
+ mark_a = len(streams) // 4
+ mark_b = len(streams) // 2
+ for i,j in zip(streams[:mark_a], ports[::2]):
+ client.add_streams(streams=[i], ports=[j])
if traffic_directions > 1:
- client.add_streams(
- streams[last_stream_a:last_stream_b], ports=[port_1])
+ for i,j in zip(streams[mark_a:mark_b], ports[1::2]):
+ print(i, j)
+ client.add_streams(streams=[i], ports=[j])
elif isinstance(framesize, str):
- client.add_streams(streams[0:3], ports=[port_0])
+ mark = 0
+ for i in ports[::2]:
+ client.add_streams(streams=streams[mark:mark+3], ports=[i])
+ mark = mark + 3
if traffic_directions > 1:
- client.add_streams(streams[3:6], ports=[port_1])
+ mark = len(streams) // 2
+ for i in ports[1::2]:
+ client.add_streams(streams=streams[mark:mark+3], ports=[i])
+ mark = mark + 3
if latency:
try:
if isinstance(framesize, int):
- client.add_streams(streams[last_stream_b], ports=[port_0])
+ mark_c = len(streams) // 2
+ mark_d = len(streams) // 2 + len(streams) // 4
+ for i,j in zip(streams[mark_c:mark_d], ports[::2]):
+ client.add_streams(streams=[i], ports=[j])
if traffic_directions > 1:
- client.add_streams(
- streams[last_stream_b + 1], ports=[port_1])
+ for i,j in zip(streams[mark_d:], ports[1::2]):
+ client.add_streams(streams=[i], ports=[j])
elif isinstance(framesize, str):
latency = False
except STLError:
# Disable latency if NIC does not support requested stream type
- print(u"##### FAILED to add latency streams #####")
+ print("##### FAILED to add latency streams #####")
latency = False
- ports = [port_0]
- if traffic_directions > 1:
- ports.append(port_1)
# Clear the stats before injecting:
client.clear_stats()
- lost_a = 0
- lost_b = 0
# Choose rate and start traffic:
client.start(
- ports=ports,
+ ports=ports[::] if traffic_directions == 2 else ports[::2],
mult=rate,
duration=duration,
force=force,
@@ -195,57 +199,54 @@ def simple_burst(
if async_start:
# For async stop, we need to export the current snapshot.
- xsnap0 = client.ports[0].get_xstats().reference_stats
- print(f"Xstats snapshot 0: {xsnap0!r}")
- if traffic_directions > 1:
- xsnap1 = client.ports[1].get_xstats().reference_stats
- print(f"Xstats snapshot 1: {xsnap1!r}")
+ for i in range(len(client.ports)):
+ xsnap = client.ports[i].get_xstats().reference_stats
+ print(f"Xstats snapshot {i}: {xsnap!r}")
else:
- # Block until done:
time_start = time.monotonic()
- client.wait_on_traffic(ports=ports, timeout=duration+30)
+ # wait_on_traffic fails if duration stretches by 30 seconds or more.
+ # TRex has some overhead, wait some more.
+ time.sleep(duration + delay)
+ client.stop()
time_stop = time.monotonic()
- approximated_duration = time_stop - time_start
-
+ approximated_duration = time_stop - time_start - delay
+ # Read the stats after the traffic stopped (or time up).
+ stats = client.get_stats()
if client.get_warnings():
for warning in client.get_warnings():
print(warning)
+ # Now finish the complete reset.
+ client.reset()
- # Read the stats after the test
- stats = client.get_stats()
+ print("##### Statistics #####")
+ print(json.dumps(stats, indent=4, separators=(",", ": ")))
- print(u"##### Statistics #####")
- print(json.dumps(stats, indent=4, separators=(u",", u": ")))
-
- lost_a = stats[port_0][u"opackets"] - stats[port_1][u"ipackets"]
- if traffic_directions > 1:
- lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
+ nr_ports = len(client.ports)
+ for i,j in zip(range(nr_ports)[0::2], range(nr_ports)[1::2]):
+ lost_r = stats[i]["opackets"] - stats[j]["ipackets"]
+ lost_l = stats[j]["opackets"] - stats[i]["ipackets"]
+ print(f"packets lost from {i} --> {j}: {lost_r} pkts")
+ print(f"packets lost from {j} --> {i}: {lost_l} pkts")
# Stats index is not a port number, but "pgid".
+ # We take the latency reading from the first link only.
if latency:
- lat_obj = stats[u"latency"][0][u"latency"]
+ lat_obj = stats["latency"][0]["latency"]
lat_a = fmt_latency(
- str(lat_obj[u"total_min"]), str(lat_obj[u"average"]),
- str(lat_obj[u"total_max"]), str(lat_obj[u"hdrh"]))
+ str(lat_obj["total_min"]), str(lat_obj["average"]),
+ str(lat_obj["total_max"]), str(lat_obj["hdrh"]))
+ # Do not bother with the other dir latency if unidir.
if traffic_directions > 1:
- lat_obj = stats[u"latency"][1][u"latency"]
+ lat_obj = stats["latency"][1]["latency"]
lat_b = fmt_latency(
- str(lat_obj[u"total_min"]), str(lat_obj[u"average"]),
- str(lat_obj[u"total_max"]), str(lat_obj[u"hdrh"]))
-
- if traffic_directions > 1:
- total_sent = stats[0][u"opackets"] + stats[1][u"opackets"]
- total_rcvd = stats[0][u"ipackets"] + stats[1][u"ipackets"]
- else:
- total_sent = stats[port_0][u"opackets"]
- total_rcvd = stats[port_1][u"ipackets"]
+ str(lat_obj["total_min"]), str(lat_obj["average"]),
+ str(lat_obj["total_max"]), str(lat_obj["hdrh"]))
- print(f"\npackets lost from {port_0} --> {port_1}: {lost_a} pkts")
- if traffic_directions > 1:
- print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
+ total_rcvd = stats["total"]["ipackets"]
+ total_sent = stats["total"]["opackets"]
except STLError:
- print(u"T-Rex STL runtime error!", file=sys.stderr)
+ print("T-Rex STL runtime error!", file=sys.stderr)
raise
finally:
@@ -259,7 +260,7 @@ def simple_burst(
f"rate={rate!r}; "
f"total_received={total_rcvd}; "
f"total_sent={total_sent}; "
- f"frame_loss={lost_a + lost_b}; "
+ f"frame_loss={total_sent - total_rcvd}; "
f"target_duration={duration!r}; "
f"approximated_duration={approximated_duration!r}; "
f"latency_stream_0(usec)={lat_a}; "
@@ -275,44 +276,44 @@ def main():
"""
parser = argparse.ArgumentParser()
parser.add_argument(
- u"-p", u"--profile", required=True, type=str,
- help=u"Python traffic profile."
+ "-p", "--profile", required=True, type=str,
+ help="Python traffic profile."
)
parser.add_argument(
- u"-d", u"--duration", required=True, type=float,
- help=u"Duration of traffic run."
+ "-d", "--duration", required=True, type=float,
+ help="Duration of traffic run."
)
parser.add_argument(
- u"-s", u"--frame_size", required=True,
- help=u"Size of a Frame without padding and IPG."
+ "-s", "--frame_size", required=True,
+ help="Size of a Frame without padding and IPG."
)
parser.add_argument(
- u"-r", u"--rate", required=True,
- help=u"Traffic rate with included units (pps)."
+ "-r", "--rate", required=True,
+ help="Traffic rate with included units (pps)."
)
parser.add_argument(
- u"--port_0", required=True, type=int,
- help=u"Port 0 on the traffic generator."
+ "--ports", required=True, type=int, nargs="+",
+ help="Port list on the traffic generator."
)
parser.add_argument(
- u"--port_1", required=True, type=int,
- help=u"Port 1 on the traffic generator."
+ "--async_start", action="store_true", default=False,
+ help="Non-blocking call of the script."
)
parser.add_argument(
- u"--async_start", action=u"store_true", default=False,
- help=u"Non-blocking call of the script."
+ "--latency", action="store_true", default=False,
+ help="Add latency stream."
)
parser.add_argument(
- u"--latency", action=u"store_true", default=False,
- help=u"Add latency stream."
+ "--traffic_directions", type=int, default=2,
+ help="Send bi- (2) or uni- (1) directional traffic."
)
parser.add_argument(
- u"--traffic_directions", type=int, default=2,
- help=u"Send bi- (2) or uni- (1) directional traffic."
+ "--force", action="store_true", default=False,
+ help="Force start regardless of ports state."
)
parser.add_argument(
- u"--force", action=u"store_true", default=False,
- help=u"Force start regardless of ports state."
+ "--delay", required=True, type=float, default=0.0,
+ help="Delay assumed for traffic, sleep time is increased by this [s]."
)
args = parser.parse_args()
@@ -327,14 +328,14 @@ def main():
duration=args.duration,
framesize=framesize,
rate=args.rate,
- port_0=args.port_0,
- port_1=args.port_1,
+ ports=args.ports,
latency=args.latency,
async_start=args.async_start,
traffic_directions=args.traffic_directions,
force=args.force,
+ delay=args.delay,
)
-if __name__ == u"__main__":
+if __name__ == "__main__":
main()
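The stream bookkeeping above (mark_a/mark_b and the [::2]/[1::2] port slices) is easiest to see in isolation. Below is a minimal sketch, not part of the patch, assuming the profile emits one data stream and one latency stream per port, ordered as four equal groups; split_streams and its arguments are illustrative names only.

def split_streams(streams, ports):
    """Sketch only: return (stream, port) assignments for a multi-pair run."""
    quarter = len(streams) // 4
    half = len(streams) // 2
    plan = []
    plan += list(zip(streams[:quarter], ports[::2]))              # data, first direction
    plan += list(zip(streams[quarter:half], ports[1::2]))         # data, opposite direction
    plan += list(zip(streams[half:half + quarter], ports[::2]))   # latency, first direction
    plan += list(zip(streams[half + quarter:], ports[1::2]))      # latency, opposite direction
    return plan

# Example: 8 streams and ports [0, 1, 2, 3] yield one data stream and one
# latency stream per port.
print(split_streams(list("abcdefgh"), [0, 1, 2, 3]))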
diff --git a/GPL/tools/trex/trex_stl_stop.py b/GPL/tools/trex/trex_stl_stop.py
index 9d09f28601..c03624ba24 100644
--- a/GPL/tools/trex/trex_stl_stop.py
+++ b/GPL/tools/trex/trex_stl_stop.py
@@ -1,6 +1,6 @@
#!/usr/bin/python3
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
@@ -30,11 +30,6 @@ Requirements:
- compiled and running T-REX process (eg. ./t-rex-64 -i)
- trex.stl.api library
- Script must be executed on a node with T-REX instance
-
-Functionality:
-1. Stop any running traffic
-2. Optionally restore reference counter values.
-3. Return conter differences.
"""
import argparse
@@ -44,68 +39,56 @@ import sys
from collections import OrderedDict # Needed to parse xstats representation.
sys.path.insert(
- 0, u"/opt/trex-core-2.86/scripts/automation/trex_control_plane/interactive/"
+ 0, "/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.stl.api import *
+from trex.stl.api import STLClient
def main():
"""Stop traffic if any is running. Report xstats."""
parser = argparse.ArgumentParser()
parser.add_argument(
- u"--xstat0", type=str, default=u"",
- help=u"Reference xstat object if any."
- )
- parser.add_argument(
- u"--xstat1", type=str, default=u"",
- help=u"Reference xstat object if any."
+ "--xstat", type=str, nargs="*", help="Reference xstat object if any."
)
args = parser.parse_args()
client = STLClient()
try:
- # connect to server
client.connect()
-
client.acquire(force=True)
- # TODO: Support unidirection.
- client.stop(ports=[0, 1])
+ client.stop()
+ xstats = list()
# Read the stats after the test,
# we need to update values before the last trial started.
- if args.xstat0:
- snapshot = eval(args.xstat0)
- client.ports[0].get_xstats().reference_stats = snapshot
- if args.xstat1:
- snapshot = eval(args.xstat1)
- client.ports[1].get_xstats().reference_stats = snapshot
- # Now we can call the official method to get differences.
- xstats0 = client.get_xstats(0)
- xstats1 = client.get_xstats(1)
-
- # If STLError happens, let the script fail with stack trace.
+ for i in range(len(client.ports)):
+ if args.xstat[i]:
+ snapshot = eval(args.xstat[i])
+ client.ports[i].get_xstats().reference_stats = snapshot
+ # Now we can call the official method to get differences.
+ xstats.append(client.get_xstats(i))
+ print(f"##### statistics port {i} #####")
+ print(json.dumps(xstats[i], indent=4, separators=(",", ": ")))
finally:
client.disconnect()
- print(u"##### statistics port 0 #####")
- print(json.dumps(xstats0, indent=4, separators=(u",", u": ")))
- print(u"##### statistics port 1 #####")
- print(json.dumps(xstats1, indent=4, separators=(u",", u": ")))
-
- tx_0, rx_0 = xstats0[u"tx_good_packets"], xstats0[u"rx_good_packets"]
- tx_1, rx_1 = xstats1[u"tx_good_packets"], xstats1[u"rx_good_packets"]
- lost_a, lost_b = tx_0 - rx_1, tx_1 - rx_0
+ for idx,stat in enumerate(zip(xstats[0::2], xstats[1::2])):
+ lost_r = stat[0]["tx_good_packets"] - stat[1]["rx_good_packets"]
+ lost_l = stat[1]["tx_good_packets"] - stat[0]["rx_good_packets"]
+ print(f"packets lost from {idx*2} --> {idx*2+1}: {lost_r} pkts")
+ print(f"packets lost from {idx*2+1} --> {idx*2}: {lost_l} pkts")
- print(f"\npackets lost from 0 --> 1: {lost_a} pkts")
- print(f"packets lost from 1 --> 0: {lost_b} pkts")
+ total_rcvd = 0
+ total_sent = 0
+ for stat in xstats:
+ total_rcvd += stat["rx_good_packets"]
+ total_sent += stat["tx_good_packets"]
- total_rcvd, total_sent = rx_0 + rx_1, tx_0 + tx_1
- total_lost = total_sent - total_rcvd
print(
f"rate='unknown'; "
f"total_received={total_rcvd}; "
f"total_sent={total_sent}; "
- f"frame_loss={total_lost}; "
+ f"frame_loss={total_sent - total_rcvd}; "
f"target_duration='manual'; "
f"approximated_duration='manual'; "
f"approximated_rate='unknown'; "
@@ -114,5 +97,5 @@ def main():
)
-if __name__ == u"__main__":
+if __name__ == "__main__":
main()