Diffstat (limited to 'GPL/tools/trex/trex_astf_profile.py')
-rw-r--r--  GPL/tools/trex/trex_astf_profile.py  306
1 file changed, 155 insertions, 151 deletions
diff --git a/GPL/tools/trex/trex_astf_profile.py b/GPL/tools/trex/trex_astf_profile.py
index 6faa2c008f..44d81e92f9 100644
--- a/GPL/tools/trex/trex_astf_profile.py
+++ b/GPL/tools/trex/trex_astf_profile.py
@@ -1,6 +1,6 @@
#!/usr/bin/python3
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
@@ -34,9 +34,9 @@ import sys
import time
sys.path.insert(
- 0, u"/opt/trex-core-2.86/scripts/automation/trex_control_plane/interactive/"
+ 0, u"/opt/trex-core-3.03/scripts/automation/trex_control_plane/interactive/"
)
-from trex.astf.api import *
+from trex.astf.api import ASTFClient, ASTFProfile, TRexError
def fmt_latency(lat_min, lat_avg, lat_max, hdrh):
@@ -73,12 +73,14 @@ def simple_burst(
profile_file,
duration,
framesize,
+ n_data_frames,
multiplier,
port_0,
port_1,
latency,
async_start=False,
traffic_directions=2,
+ delay=0.0,
):
"""Send traffic and measure packet loss and latency.
@@ -98,8 +100,9 @@ def simple_burst(
Duration details:
Contrary to stateless mode, ASTF profiles typically limit the number
of flows/transactions that can happen.
- The caller is expected to set the duration parameter accordingly to
- this limit and multiplier, including any overheads.
+ The caller is expected to set the duration parameter to the idealized value,
+ and to set the delay argument when TRex is expected
+ to finish processing replies later (including a window for latency).
See *_traffic_duration output fields for TRex's measurement
of the real traffic duration (should be without any inactivity overheads).
If traffic has not ended by the final time, the traffic
@@ -111,27 +114,27 @@ def simple_burst(
:param profile_file: A python module with T-rex traffic profile.
:param duration: Expected duration for all transactions to finish,
- assuming only tolerable duration stretching happens.
- This includes later start of later transactions
- (according to TPS multiplier) and expected duration of each transaction.
- Critically, this also includes any delay TRex shows when starting
- traffic (but not the similar delay during stopping).
+ excluding any TRex-related delays and even the latency window.
:param framesize: Frame size.
+ :param n_data_frames: Controls the "size" of a transaction for TPUT tests.
:param multiplier: Multiplier of profile CPS.
:param port_0: Port 0 on the traffic generator.
:param port_1: Port 1 on the traffic generator.
:param latency: With latency stats.
:param async_start: Start the traffic and exit.
:param traffic_directions: Bidirectional (2) or unidirectional (1) traffic.
+ :param delay: Extra time [s] added to the sleep duration.
:type profile_file: str
:type duration: float
:type framesize: int or str
+ :type n_data_frames: int
:type multiplier: int
:type port_0: int
:type port_1: int
:type latency: bool
:type async_start: bool
:type traffic_directions: int
+ :type delay: float
"""
client = None
total_received = 0
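
As a rough illustration of the duration/delay contract described in the docstring above, a caller might split its time budget as in the sketch below; the numbers are hypothetical and not taken from any CSIT test.

# Hypothetical split of the time budget (illustration only, not CSIT code).
transactions = 30000          # flow/transaction limit coming from the ASTF profile
cps_multiplier = 1000.0       # transactions started per second
transaction_time = 0.2        # expected time for one transaction to finish
duration = transactions / cps_multiplier + transaction_time   # idealized 30.2 s
delay = 5.0                   # tolerated TRex overhead and latency window
# simple_burst() generates traffic for `duration` seconds, but sleeps for
# `duration + delay` seconds before stopping, so late replies are still counted.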
@@ -151,7 +154,11 @@ def simple_burst(
# TODO: key-values pairs to the profile file
# - ips ?
print(f"### Profile file:\n{profile_file}")
- profile = ASTFProfile.load(profile_file, framesize=framesize)
+ profile = ASTFProfile.load(
+ profile_file,
+ framesize=framesize,
+ n_data_frames=n_data_frames,
+ )
except TRexError:
print(f"Error while loading profile '{profile_file}'!")
raise
@@ -167,9 +174,7 @@ def simple_burst(
# Load the profile.
client.load_profile(profile)
- ports = [port_0]
- if traffic_directions > 1:
- ports.append(port_1)
+ ports = [port_0, port_1]
# Clear the stats before injecting.
lost_a = 0
@@ -179,25 +184,21 @@ def simple_burst(
# Choose CPS and start traffic.
client.start(
mult=multiplier,
- # Increase the input duration slightly,
- # to ensure it does not end before sleep&stop below happens.
- duration=duration + 0.1 if duration > 0 else duration,
+ duration=duration,
nc=True,
latency_pps=int(multiplier) if latency else 0,
client_mask=2**len(ports)-1,
)
- time_start = time.monotonic()
+ time_stop = time.monotonic() + duration + delay
if async_start:
# For async stop, we need to export the current snapshot.
xsnap0 = client.ports[port_0].get_xstats().reference_stats
print(f"Xstats snapshot 0: {xsnap0!r}")
- if traffic_directions > 1:
- xsnap1 = client.ports[port_1].get_xstats().reference_stats
- print(f"Xstats snapshot 1: {xsnap1!r}")
+ xsnap1 = client.ports[port_1].get_xstats().reference_stats
+ print(f"Xstats snapshot 1: {xsnap1!r}")
else:
- time.sleep(duration)
-
+ time.sleep(duration + delay)
# Do not block yet, the existing transactions may take long time
# to finish. We need an action that is almost reset(),
# but without clearing stats.
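
A quick note on the client_mask arithmetic passed to client.start() above, now that both ports are always used (illustrative only):

# With ports = [port_0, port_1], len(ports) == 2, so the mask enables
# client-side traffic generation on both ports.
ports = [0, 1]
client_mask = 2 ** len(ports) - 1    # 0b11 == 3
assert client_mask == 3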
@@ -208,7 +209,7 @@ def simple_burst(
client.stop(block=True)
# Read the stats after the traffic stopped (or time up).
- stats[time.monotonic() - time_start] = client.get_stats(
+ stats[time.monotonic() - time_stop] = client.get_stats(
ports=ports
)
@@ -216,17 +217,15 @@ def simple_burst(
for warning in client.get_warnings():
print(warning)
- # Now finish the complete reset.
- client.reset()
+ # No profile cleanup here; the reset is done in the finally block.
print(u"##### Statistics #####")
print(json.dumps(stats, indent=4, separators=(u",", u": ")))
- approximated_duration = list(sorted(stats.keys()))[-1]
+ approximated_duration = duration + list(sorted(stats.keys()))[-1]
stats = stats[sorted(stats.keys())[-1]]
lost_a = stats[port_0][u"opackets"] - stats[port_1][u"ipackets"]
- if traffic_directions > 1:
- lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
+ lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]
# TODO: Latency measurement not used at this phase. This part will
# be aligned in another commit.
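
To make the new timing bookkeeping concrete: time_stop is the monotonic deadline duration + delay after traffic start, so the stats key records only the extra time spent in the blocking stop, and approximated_duration adds duration back on top. A worked example with made-up values:

# Illustrative arithmetic only; the values are made up, not measured.
duration, delay = 30.0, 5.0
stop_overrun = 1.3                   # extra seconds spent in client.stop(block=True)
stats_key = stop_overrun             # == time.monotonic() - time_stop in the code above
approximated_duration = duration + stats_key   # 31.3 s is reported
# The tolerated delay window itself is excluded from the reported duration.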
@@ -246,131 +245,126 @@ def simple_burst(
str(lat_obj[u"max_usec"]), u"-")
lat_b_hist = str(lat_obj[u"histogram"])
- if traffic_directions > 1:
- total_sent = \
- stats[port_0][u"opackets"] + stats[port_1][u"opackets"]
- total_received = \
- stats[port_0][u"ipackets"] + stats[port_1][u"ipackets"]
- client_sent = stats[port_0][u"opackets"]
- client_received = stats[port_0][u"ipackets"]
- client_stats = stats[u"traffic"][u"client"]
- server_stats = stats[u"traffic"][u"server"]
- # Some zero counters are not sent
- # Active and established flows UDP/TCP
+ total_sent = \
+ stats[port_0][u"opackets"] + stats[port_1][u"opackets"]
+ total_received = \
+ stats[port_0][u"ipackets"] + stats[port_1][u"ipackets"]
+ client_sent = stats[port_0][u"opackets"]
+ client_received = stats[port_0][u"ipackets"]
+ client_stats = stats[u"traffic"][u"client"]
+ server_stats = stats[u"traffic"][u"server"]
+ # Some zero counters are not sent
+ # Active and established flows UDP/TCP
+ # Client
+ c_act_flows = client_stats[u"m_active_flows"]
+ c_est_flows = client_stats[u"m_est_flows"]
+ c_traffic_duration = client_stats.get(u"m_traffic_duration", 0)
+ l7_data = f"client_active_flows={c_act_flows}; "
+ l7_data += f"client_established_flows={c_est_flows}; "
+ l7_data += f"client_traffic_duration={c_traffic_duration}; "
+ # Possible errors
+ # Too many packets in NIC rx queue
+ c_err_rx_throttled = client_stats.get(u"err_rx_throttled", 0)
+ l7_data += f"client_err_rx_throttled={c_err_rx_throttled}; "
+ # Number of client side flows that were not opened
+ # due to flow-table overflow
+ c_err_nf_throttled = client_stats.get(u"err_c_nf_throttled", 0)
+ l7_data += f"client_err_nf_throttled={c_err_nf_throttled}; "
+ # Too many flows
+ c_err_flow_overflow = client_stats.get(u"err_flow_overflow", 0)
+ l7_data += f"client_err_flow_overflow={c_err_flow_overflow}; "
+ # Server
+ s_act_flows = server_stats[u"m_active_flows"]
+ s_est_flows = server_stats[u"m_est_flows"]
+ s_traffic_duration = server_stats.get(u"m_traffic_duration", 0)
+ l7_data += f"server_active_flows={s_act_flows}; "
+ l7_data += f"server_established_flows={s_est_flows}; "
+ l7_data += f"server_traffic_duration={s_traffic_duration}; "
+ # Possible errors
+ # Too many packets in NIC rx queue
+ s_err_rx_throttled = server_stats.get(u"err_rx_throttled", 0)
+ l7_data += f"client_err_rx_throttled={s_err_rx_throttled}; "
+ if u"udp" in profile_file:
# Client
- c_act_flows = client_stats[u"m_active_flows"]
- c_est_flows = client_stats[u"m_est_flows"]
- c_traffic_duration = client_stats.get(u"m_traffic_duration", 0)
- l7_data = f"client_active_flows={c_act_flows}; "
- l7_data += f"client_established_flows={c_est_flows}; "
- l7_data += f"client_traffic_duration={c_traffic_duration}; "
- # Possible errors
- # Too many packets in NIC rx queue
- c_err_rx_throttled = client_stats.get(u"err_rx_throttled", 0)
- l7_data += f"client_err_rx_throttled={c_err_rx_throttled}; "
- # Number of client side flows that were not opened
- # due to flow-table overflow
- c_err_nf_throttled = client_stats.get(u"err_c_nf_throttled", 0)
- l7_data += f"client_err_nf_throttled={c_err_nf_throttled}; "
- # Too many flows
- c_err_flow_overflow = client_stats.get(u"err_flow_overflow", 0)
- l7_data += f"client_err_flow_overflow={c_err_flow_overflow}; "
+ # Established connections
+ c_udp_connects = client_stats.get(u"udps_connects", 0)
+ l7_data += f"client_udp_connects={c_udp_connects}; "
+ # Closed connections
+ c_udp_closed = client_stats.get(u"udps_closed", 0)
+ l7_data += f"client_udp_closed={c_udp_closed}; "
+ # Sent bytes
+ c_udp_sndbyte = client_stats.get(u"udps_sndbyte", 0)
+ l7_data += f"client_udp_tx_bytes={c_udp_sndbyte}; "
+ # Sent packets
+ c_udp_sndpkt = client_stats.get(u"udps_sndpkt", 0)
+ l7_data += f"client_udp_tx_packets={c_udp_sndpkt}; "
+ # Received bytes
+ c_udp_rcvbyte = client_stats.get(u"udps_rcvbyte", 0)
+ l7_data += f"client_udp_rx_bytes={c_udp_rcvbyte}; "
+ # Received packets
+ c_udp_rcvpkt = client_stats.get(u"udps_rcvpkt", 0)
+ l7_data += f"client_udp_rx_packets={c_udp_rcvpkt}; "
+ # Keep alive drops
+ c_udp_keepdrops = client_stats.get(u"udps_keepdrops", 0)
+ l7_data += f"client_udp_keep_drops={c_udp_keepdrops}; "
+ # Client without flow
+ c_err_cwf = client_stats.get(u"err_cwf", 0)
+ l7_data += f"client_err_cwf={c_err_cwf}; "
# Server
- s_act_flows = server_stats[u"m_active_flows"]
- s_est_flows = server_stats[u"m_est_flows"]
- s_traffic_duration = server_stats.get(u"m_traffic_duration", 0)
- l7_data += f"server_active_flows={s_act_flows}; "
- l7_data += f"server_established_flows={s_est_flows}; "
- l7_data += f"server_traffic_duration={s_traffic_duration}; "
- # Possible errors
- # Too many packets in NIC rx queue
- s_err_rx_throttled = server_stats.get(u"err_rx_throttled", 0)
- l7_data += f"client_err_rx_throttled={s_err_rx_throttled}; "
- if u"udp" in profile_file:
- # Client
- # Established connections
- c_udp_connects = client_stats.get(u"udps_connects", 0)
- l7_data += f"client_udp_connects={c_udp_connects}; "
- # Closed connections
- c_udp_closed = client_stats.get(u"udps_closed", 0)
- l7_data += f"client_udp_closed={c_udp_closed}; "
- # Sent bytes
- c_udp_sndbyte = client_stats.get(u"udps_sndbyte", 0)
- l7_data += f"client_udp_tx_bytes={c_udp_sndbyte}; "
- # Sent packets
- c_udp_sndpkt = client_stats.get(u"udps_sndpkt", 0)
- l7_data += f"client_udp_tx_packets={c_udp_sndpkt}; "
- # Received bytes
- c_udp_rcvbyte = client_stats.get(u"udps_rcvbyte", 0)
- l7_data += f"client_udp_rx_bytes={c_udp_rcvbyte}; "
- # Received packets
- c_udp_rcvpkt = client_stats.get(u"udps_rcvpkt", 0)
- l7_data += f"client_udp_rx_packets={c_udp_rcvpkt}; "
- # Keep alive drops
- c_udp_keepdrops = client_stats.get(u"udps_keepdrops", 0)
- l7_data += f"client_udp_keep_drops={c_udp_keepdrops}; "
- # Client without flow
- c_err_cwf = client_stats.get(u"err_cwf", 0)
- l7_data += f"client_err_cwf={c_err_cwf}; "
- # Server
- # Accepted connections
- s_udp_accepts = server_stats.get(u"udps_accepts", 0)
- l7_data += f"server_udp_accepts={s_udp_accepts}; "
- # Closed connections
- s_udp_closed = server_stats.get(u"udps_closed", 0)
- l7_data += f"server_udp_closed={s_udp_closed}; "
- # Sent bytes
- s_udp_sndbyte = server_stats.get(u"udps_sndbyte", 0)
- l7_data += f"server_udp_tx_bytes={s_udp_sndbyte}; "
- # Sent packets
- s_udp_sndpkt = server_stats.get(u"udps_sndpkt", 0)
- l7_data += f"server_udp_tx_packets={s_udp_sndpkt}; "
- # Received bytes
- s_udp_rcvbyte = server_stats.get(u"udps_rcvbyte", 0)
- l7_data += f"server_udp_rx_bytes={s_udp_rcvbyte}; "
- # Received packets
- s_udp_rcvpkt = server_stats.get(u"udps_rcvpkt", 0)
- l7_data += f"server_udp_rx_packets={s_udp_rcvpkt}; "
- elif u"tcp" in profile_file:
- # Client
- # Connection attempts
- c_tcp_connattempt = client_stats.get(u"tcps_connattempt", 0)
- l7_data += f"client_tcp_connattempt={c_tcp_connattempt}; "
- # Established connections
- c_tcp_connects = client_stats.get(u"tcps_connects", 0)
- l7_data += f"client_tcp_connects={c_tcp_connects}; "
- # Closed connections
- c_tcp_closed = client_stats.get(u"tcps_closed", 0)
- l7_data += f"client_tcp_closed={c_tcp_closed}; "
- # Send bytes
- c_tcp_sndbyte = client_stats.get(u"tcps_sndbyte", 0)
- l7_data += f"client_tcp_tx_bytes={c_tcp_sndbyte}; "
- # Received bytes
- c_tcp_rcvbyte = client_stats.get(u"tcps_rcvbyte", 0)
- l7_data += f"client_tcp_rx_bytes={c_tcp_rcvbyte}; "
- # Server
- # Accepted connections
- s_tcp_accepts = server_stats.get(u"tcps_accepts", 0)
- l7_data += f"server_tcp_accepts={s_tcp_accepts}; "
- # Established connections
- s_tcp_connects = server_stats.get(u"tcps_connects", 0)
- l7_data += f"server_tcp_connects={s_tcp_connects}; "
- # Closed connections
- s_tcp_closed = server_stats.get(u"tcps_closed", 0)
- l7_data += f"server_tcp_closed={s_tcp_closed}; "
- # Sent bytes
- s_tcp_sndbyte = server_stats.get(u"tcps_sndbyte", 0)
- l7_data += f"server_tcp_tx_bytes={s_tcp_sndbyte}; "
- # Received bytes
- s_tcp_rcvbyte = server_stats.get(u"tcps_rcvbyte", 0)
- l7_data += f"server_tcp_rx_bytes={s_tcp_rcvbyte}; "
- else:
- total_sent = stats[port_0][u"opackets"]
- total_received = stats[port_1][u"ipackets"]
+ # Accepted connections
+ s_udp_accepts = server_stats.get(u"udps_accepts", 0)
+ l7_data += f"server_udp_accepts={s_udp_accepts}; "
+ # Closed connections
+ s_udp_closed = server_stats.get(u"udps_closed", 0)
+ l7_data += f"server_udp_closed={s_udp_closed}; "
+ # Sent bytes
+ s_udp_sndbyte = server_stats.get(u"udps_sndbyte", 0)
+ l7_data += f"server_udp_tx_bytes={s_udp_sndbyte}; "
+ # Sent packets
+ s_udp_sndpkt = server_stats.get(u"udps_sndpkt", 0)
+ l7_data += f"server_udp_tx_packets={s_udp_sndpkt}; "
+ # Received bytes
+ s_udp_rcvbyte = server_stats.get(u"udps_rcvbyte", 0)
+ l7_data += f"server_udp_rx_bytes={s_udp_rcvbyte}; "
+ # Received packets
+ s_udp_rcvpkt = server_stats.get(u"udps_rcvpkt", 0)
+ l7_data += f"server_udp_rx_packets={s_udp_rcvpkt}; "
+ elif u"tcp" in profile_file:
+ # Client
+ # Connection attempts
+ c_tcp_connattempt = client_stats.get(u"tcps_connattempt", 0)
+ l7_data += f"client_tcp_connattempt={c_tcp_connattempt}; "
+ # Established connections
+ c_tcp_connects = client_stats.get(u"tcps_connects", 0)
+ l7_data += f"client_tcp_connects={c_tcp_connects}; "
+ # Closed connections
+ c_tcp_closed = client_stats.get(u"tcps_closed", 0)
+ l7_data += f"client_tcp_closed={c_tcp_closed}; "
+ # Send bytes
+ c_tcp_sndbyte = client_stats.get(u"tcps_sndbyte", 0)
+ l7_data += f"client_tcp_tx_bytes={c_tcp_sndbyte}; "
+ # Received bytes
+ c_tcp_rcvbyte = client_stats.get(u"tcps_rcvbyte", 0)
+ l7_data += f"client_tcp_rx_bytes={c_tcp_rcvbyte}; "
+ # Server
+ # Accepted connections
+ s_tcp_accepts = server_stats.get(u"tcps_accepts", 0)
+ l7_data += f"server_tcp_accepts={s_tcp_accepts}; "
+ # Established connections
+ s_tcp_connects = server_stats.get(u"tcps_connects", 0)
+ l7_data += f"server_tcp_connects={s_tcp_connects}; "
+ # Closed connections
+ s_tcp_closed = server_stats.get(u"tcps_closed", 0)
+ l7_data += f"server_tcp_closed={s_tcp_closed}; "
+ # Sent bytes
+ s_tcp_sndbyte = server_stats.get(u"tcps_sndbyte", 0)
+ l7_data += f"server_tcp_tx_bytes={s_tcp_sndbyte}; "
+ # Received bytes
+ s_tcp_rcvbyte = server_stats.get(u"tcps_rcvbyte", 0)
+ l7_data += f"server_tcp_rx_bytes={s_tcp_rcvbyte}; "
print(f"packets lost from {port_0} --> {port_1}: {lost_a} pkts")
- if traffic_directions > 1:
- print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
+ print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")
except TRexError:
print(u"T-Rex ASTF runtime error!", file=sys.stderr)
@@ -381,7 +375,7 @@ def simple_burst(
if async_start:
client.disconnect(stop_traffic=False, release_ports=True)
else:
- client.clear_profile()
+ client.reset()
client.disconnect()
print(
f"multiplier={multiplier!r}; "
@@ -419,6 +413,10 @@ def main():
help=u"Size of a Frame without padding and IPG."
)
parser.add_argument(
+ u"--n_data_frames", type=int, default=5,
+ help=u"Use this many data frames per transaction and direction (TPUT)."
+ )
+ parser.add_argument(
u"-m", u"--multiplier", required=True, type=float,
help=u"Multiplier of profile CPS."
)
@@ -442,6 +440,10 @@ def main():
u"--traffic_directions", type=int, default=2,
help=u"Send bi- (2) or uni- (1) directional traffic."
)
+ parser.add_argument(
+ u"--delay", required=True, type=float, default=0.0,
+ help=u"Allowed time overhead, sleep time is increased by this [s]."
+ )
args = parser.parse_args()
@@ -454,12 +456,14 @@ def main():
profile_file=args.profile,
duration=args.duration,
framesize=framesize,
+ n_data_frames=args.n_data_frames,
multiplier=args.multiplier,
port_0=args.port_0,
port_1=args.port_1,
latency=args.latency,
async_start=args.async_start,
traffic_directions=args.traffic_directions,
+ delay=args.delay,
)
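
For reference, a direct call to simple_burst() with the two new arguments could look like the sketch below; every value, and especially the profile module name, is a placeholder rather than a recommended setting.

# Placeholder values only; the profile module name is hypothetical.
simple_burst(
    profile_file=u"some_astf_udp_profile.py",
    duration=30.0,
    framesize=64,
    n_data_frames=5,
    multiplier=1000,
    port_0=0,
    port_1=1,
    latency=False,
    async_start=False,
    traffic_directions=2,
    delay=5.0,
)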