author     Hanoh Haim <hhaim@cisco.com>    2016-03-10 19:32:29 +0200
committer  Hanoh Haim <hhaim@cisco.com>    2016-03-10 19:32:29 +0200
commit     71433c48afeddb37e3c5a8e134e701d71b09f869 (patch)
tree       860cab39c447a426287d0c49a4c0da736297ba3b /scripts/automation/trex_control_plane/stl
parent     2be2f7e96be26fbe6dd6763f2ec97fb248abb330 (diff)
parent     f24d22eb359753255527430cb8a8b759a424a0df (diff)

    merge doc
Diffstat (limited to 'scripts/automation/trex_control_plane/stl')
-rw-r--r--  scripts/automation/trex_control_plane/stl/console/trex_tui.py                               |  38
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py                        |  85
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_imix.py                              |  22
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py             |  18
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py                   |  29
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py     |  96
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py                    | 550
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py                      |  24
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py                  |   4
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py                |  10
10 files changed, 659 insertions, 217 deletions
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_tui.py b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
index f972b905..02b00b78 100644
--- a/scripts/automation/trex_control_plane/stl/console/trex_tui.py
+++ b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
@@ -8,6 +8,7 @@ from cStringIO import StringIO
from trex_stl_lib.utils.text_opts import *
from trex_stl_lib.utils import text_tables
+from trex_stl_lib import trex_stl_stats
# for STL exceptions
from trex_stl_lib.api import *
@@ -217,6 +218,35 @@ class TrexTUIPort(TrexTUIPanel):
self.stateless_client.clear_stats([self.port_id])
return "port {0}: cleared stats".format(self.port_id)
+
+
+# streams stats
+class TrexTUIStreamsStats(TrexTUIPanel):
+ def __init__ (self, mng):
+ super(TrexTUIStreamsStats, self).__init__(mng, "sstats")
+
+ self.key_actions = OrderedDict()
+
+ self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
+
+
+ def show (self):
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.SS_COMPAT)
+ # print stats to screen
+ for stat_type, stat_data in stats.iteritems():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type)
+ pass
+
+
+ def get_key_actions (self):
+ return self.key_actions
+
+ def action_clear (self):
+ self.stateless_client.flow_stats.clear_stats()
+
+ return ""
+
+
# log
class TrexTUILog():
def __init__ (self):
@@ -247,10 +277,12 @@ class TrexTUIPanelManager():
self.panels = {}
self.panels['dashboard'] = TrexTUIDashBoard(self)
+ self.panels['sstats'] = TrexTUIStreamsStats(self)
self.key_actions = OrderedDict()
self.key_actions['q'] = {'action': self.action_quit, 'legend': 'quit', 'show': True}
self.key_actions['g'] = {'action': self.action_show_dash, 'legend': 'dashboard', 'show': True}
+ self.key_actions['s'] = {'action': self.action_show_sstats, 'legend': 'streams stats', 'show': True}
for port_id in self.ports:
self.key_actions[str(port_id)] = {'action': self.action_show_port(port_id), 'legend': 'port {0}'.format(port_id), 'show': False}
@@ -352,6 +384,10 @@ class TrexTUIPanelManager():
return action_show_port_x
+ def action_show_sstats (self):
+ self.main_panel = self.panels['sstats']
+ self.init(self.show_log)
+ return ""
# shows a textual top style window
class TrexTUI():
@@ -427,7 +463,7 @@ class TrexTUI():
elif self.state == self.STATE_RECONNECT:
try:
- self.stateless_client.connect("RO")
+ self.stateless_client.connect()
self.state = self.STATE_ACTIVE
except STLError:
self.state = self.STATE_LOST_CONT
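
Note: the new sstats panel follows the TUI's key-to-action dispatch convention: each panel publishes an OrderedDict of single-key entries and the panel manager invokes the matching 'action' callable. A minimal standalone sketch of that pattern (class and method names here are illustrative, not part of the patch):

    from collections import OrderedDict

    class PanelSketch(object):
        """Illustrative panel: maps a keystroke to a handler, as trex_tui.py does."""
        def __init__(self):
            self.key_actions = OrderedDict()
            self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}

        def action_clear(self):
            return "cleared stats"

        def handle_key(self, ch):
            entry = self.key_actions.get(ch)
            return entry['action']() if entry else None

    panel = PanelSketch()
    print(panel.handle_key('c'))    # -> cleared stats
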
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
index 3708834e..fa6e67c3 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
@@ -14,12 +14,14 @@ def rx_example (tx_port, rx_port, burst_size):
try:
pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
-
total_pkts = burst_size
s1 = STLStream(name = 'rx',
packet = pkt,
flow_stats = STLFlowStats(pg_id = 5),
- mode = STLTXSingleBurst(total_pkts = total_pkts, bps_L2 = 250000000))
+ mode = STLTXSingleBurst(total_pkts = total_pkts,
+ #pps = total_pkts
+ percentage = 80
+ ))
# connect to server
c.connect()
@@ -30,38 +32,14 @@ def rx_example (tx_port, rx_port, burst_size):
# add both streams to ports
c.add_streams([s1], ports = [tx_port])
- print "injecting {0} packets on port {1}\n".format(total_pkts, tx_port)
- c.clear_stats()
- c.start(ports = [tx_port])
- c.wait_on_traffic(ports = [tx_port])
-
- # no error check - just an example... should be 5
- flow_stats = c.get_stats()['flow_stats'][5]
-
- tx_pkts = flow_stats['tx_pkts'][tx_port]
- tx_bytes = flow_stats['tx_bytes'][tx_port]
- rx_pkts = flow_stats['rx_pkts'][rx_port]
-
- if tx_pkts != total_pkts:
- print "TX pkts mismatch - got: {0}, expected: {1}".format(tx_pkts, total_pkts)
- passed = False
- return
- else:
- print "TX pkts match - {0}".format(tx_pkts)
+ print "\ninjecting {0} packets on port {1}\n".format(total_pkts, tx_port)
- if tx_bytes != (total_pkts * pkt.get_pkt_len()):
- print "TX bytes mismatch - got: {0}, expected: {1}".format(tx_bytes, (total_pkts * len(pkt)))
- passed = False
- return
- else:
- print "TX bytes match - {0}".format(tx_bytes)
-
- if rx_pkts != total_pkts:
- print "RX pkts mismatch - got: {0}, expected: {1}".format(rx_pkts, total_pkts)
- passed = False
- return
- else:
- print "RX pkts match - {0}".format(rx_pkts)
+ for i in range(0, 10):
+ print "\nStarting iteration: {0}:".format(i)
+ rc = rx_iteration(c, tx_port, rx_port, total_pkts, pkt.get_pkt_len())
+ if not rc:
+ passed = False
+ break
except STLError as e:
@@ -76,7 +54,46 @@ def rx_example (tx_port, rx_port, burst_size):
else:
print "\nTest has failed :-(\n"
+# RX one iteration
+def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
+
+ c.clear_stats()
+
+ c.start(ports = [tx_port])
+ c.wait_on_traffic(ports = [tx_port])
+
+ flow_stats = c.get_stats()['flow_stats'].get(5)
+ if not flow_stats:
+ print "no flow stats available"
+ return False
+
+ tx_pkts = flow_stats['tx_pkts'].get(tx_port, 0)
+ tx_bytes = flow_stats['tx_bytes'].get(tx_port, 0)
+ rx_pkts = flow_stats['rx_pkts'].get(rx_port, 0)
+
+ if tx_pkts != total_pkts:
+ print "TX pkts mismatch - got: {0}, expected: {1}".format(tx_pkts, total_pkts)
+ pprint.pprint(flow_stats)
+ return False
+ else:
+ print "TX pkts match - {0}".format(tx_pkts)
+
+ if tx_bytes != (total_pkts * pkt_len):
+ print "TX bytes mismatch - got: {0}, expected: {1}".format(tx_bytes, (total_pkts * pkt_len))
+ pprint.pprint(flow_stats)
+ return False
+ else:
+ print "TX bytes match - {0}".format(tx_bytes)
+
+ if rx_pkts != total_pkts:
+ print "RX pkts mismatch - got: {0}, expected: {1}".format(rx_pkts, total_pkts)
+ pprint.pprint(flow_stats)
+ return False
+ else:
+ print "RX pkts match - {0}".format(rx_pkts)
+
+ return True
# run the tests
-rx_example(tx_port = 0, rx_port = 3, burst_size = 500000)
+rx_example(tx_port = 1, rx_port = 2, burst_size = 500000)
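
Note: rx_iteration indexes the flow stats by the pg_id given to STLFlowStats and then by port. A hedged illustration of the dictionary shape the checks above rely on (values are made up; the exact server payload may differ):

    flow_stats = {
        5: {                                            # pg_id = 5, as in STLFlowStats above
            'tx_pkts':  {1: 500000, 'total': 500000},   # per TX port, plus an aggregated total
            'tx_bytes': {1: 31500000, 'total': 31500000},
            'rx_pkts':  {2: 500000, 'total': 500000},   # per RX port, plus an aggregated total
        }
    }

    assert flow_stats[5]['tx_pkts'].get(1, 0) == 500000
    assert flow_stats[5]['rx_pkts'].get(2, 0) == 500000
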
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
index cc7691a3..94165614 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
@@ -4,6 +4,7 @@ from trex_stl_lib.api import *
import time
import json
from pprint import pprint
+import argparse
# IMIX test
# it maps the ports to sides
@@ -11,11 +12,11 @@ from pprint import pprint
# and attach it to both sides and inject
# at a certain rate for some time
# finally it checks that all packets arrived
-def imix_test ():
+def imix_test (server):
# create client
- c = STLClient()
+ c = STLClient(server = server)
passed = True
@@ -48,7 +49,7 @@ def imix_test ():
# choose rate and start traffic for 10 seconds on 5 mpps
duration = 10
- mult = "5mpps"
+ mult = "30%"
print "Injecting {0} <--> {1} on total rate of '{2}' for {3} seconds".format(dir_0, dir_1, mult, duration)
c.start(ports = (dir_0 + dir_1), mult = mult, duration = duration, total = True)
@@ -78,9 +79,9 @@ def imix_test ():
print "Packets injected from {0}: {1:,}".format(dir_1, dir_1_opackets)
print "\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_0, lost_0)
- print "packets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_0, lost_0)
+ print "packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_1, lost_1)
- if (lost_0 == 0) and (lost_0 == 0):
+ if (lost_0 <= 0) and (lost_1 <= 0): # less or equal because we might have incoming arps etc.
passed = True
else:
passed = False
@@ -95,10 +96,19 @@ def imix_test ():
if passed:
print "\nTest has passed :-)\n"
+ sys.exit(0)
else:
print "\nTest has failed :-(\n"
+ sys.exit(-1)
+parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic")
+parser.add_argument('-s', '--server',
+ dest='server',
+ help='Remote trex address',
+ default='127.0.0.1',
+ type = str)
+args = parser.parse_args()
# run the tests
-imix_test()
+imix_test(args.server)
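
Note: usage sketch for the new command-line switch (the server address below is illustrative); the same parser can also be exercised directly:

    # python stl_imix.py --server 10.56.100.1
    import argparse

    parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic")
    parser.add_argument('-s', '--server', dest='server', default='127.0.0.1', type=str,
                        help='Remote trex address')
    print(parser.parse_args(['-s', '10.56.100.1']).server)    # 10.56.100.1
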
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
index 36103cae..ae6cb497 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
@@ -178,7 +178,8 @@ class CTRexAsyncClient():
self.connected = True
- rc = self.barrier()
+ # sync all stats data as a baseline from the server
+ rc = self.barrier(baseline = True)
if not rc:
self.disconnect()
return rc
@@ -245,9 +246,11 @@ class CTRexAsyncClient():
name = msg['name']
data = msg['data']
type = msg['type']
+ baseline = msg.get('baseline', False)
+
self.raw_snapshot[name] = data
- self.__dispatch(name, type, data)
+ self.__dispatch(name, type, data, baseline)
# closing of socket must be from the same thread
@@ -268,10 +271,11 @@ class CTRexAsyncClient():
return self.raw_snapshot
# dispatch the message to the right place
- def __dispatch (self, name, type, data):
+ def __dispatch (self, name, type, data, baseline):
+
# stats
if name == "trex-global":
- self.event_handler.handle_async_stats_update(data)
+ self.event_handler.handle_async_stats_update(data, baseline)
# events
elif name == "trex-event":
@@ -282,7 +286,7 @@ class CTRexAsyncClient():
self.handle_async_barrier(type, data)
elif name == "flow_stats":
- self.event_handler.handle_async_rx_stats_event(data)
+ self.event_handler.handle_async_rx_stats_event(data, baseline)
else:
pass
@@ -295,7 +299,7 @@ class CTRexAsyncClient():
# block on barrier for async channel
- def barrier(self, timeout = 5):
+ def barrier(self, timeout = 5, baseline = False):
# set a random key
key = random.getrandbits(32)
@@ -307,7 +311,7 @@ class CTRexAsyncClient():
while not self.async_barrier['ack']:
# inject
- rc = self.stateless_client._transmit("publish_now", params = {'key' : key})
+ rc = self.stateless_client._transmit("publish_now", params = {'key' : key, 'baseline': baseline})
if not rc:
return rc
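
Note: the intent of the 'baseline' flag is to let stats objects seed their reference values from the first post-connect snapshot before accepting regular updates. A condensed sketch of that gating logic (names are illustrative; the real code lives in CTRexStats.update in trex_stl_stats.py below):

    class StatsSketch(object):
        def __init__(self):
            self.has_baseline = False
            self.reference_stats = None
            self.latest_stats = None

        def update(self, snapshot, baseline):
            # ignore any update that arrives before the baseline snapshot
            if not self.has_baseline and not baseline:
                return
            self.latest_stats = snapshot
            if baseline and not self.has_baseline:
                self.reference_stats = dict(snapshot)   # seed the reference once
                self.has_baseline = True

    s = StatsSketch()
    s.update({'opackets': 10}, baseline = False)    # dropped - no baseline yet
    s.update({'opackets': 0},  baseline = True)     # seeds the reference
    s.update({'opackets': 50}, baseline = False)    # accepted from now on
    print(s.latest_stats['opackets'] - s.reference_stats['opackets'])   # 50
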
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
index 130fee2c..c7503ab0 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
@@ -155,12 +155,12 @@ class AsyncEventHandler(object):
pass
- def handle_async_rx_stats_event (self, data):
- self.client.flow_stats.update(data)
+ def handle_async_rx_stats_event (self, data, baseline):
+ self.client.flow_stats.update(data, baseline)
# handles an async stats update from the subscriber
- def handle_async_stats_update(self, dump_data):
+ def handle_async_stats_update(self, dump_data, baseline):
global_stats = {}
port_stats = {}
@@ -182,11 +182,11 @@ class AsyncEventHandler(object):
global_stats[key] = value
# update the general object with the snapshot
- self.client.global_stats.update(global_stats)
+ self.client.global_stats.update(global_stats, baseline)
# update all ports
for port_id, data in port_stats.iteritems():
- self.client.ports[port_id].port_stats.update(data)
+ self.client.ports[port_id].port_stats.update(data, baseline)
# dispatcher for server async events (port started, port stopped and etc.)
@@ -458,10 +458,12 @@ class STLClient(object):
self.server_version,
self.ports)
+ self.flow_stats = trex_stl_stats.CRxStats()
+
self.stats_generator = trex_stl_stats.CTRexInfoGenerator(self.global_stats,
- self.ports)
+ self.ports,
+ self.flow_stats)
- self.flow_stats = trex_stl_stats.CRxStats()
############# private functions - used by the class itself ###########
@@ -736,7 +738,7 @@ class STLClient(object):
# clear stats
- def __clear_stats(self, port_id_list, clear_global):
+ def __clear_stats(self, port_id_list, clear_global, clear_flow_stats):
for port_id in port_id_list:
self.ports[port_id].clear_stats()
@@ -744,6 +746,9 @@ class STLClient(object):
if clear_global:
self.global_stats.clear_stats()
+ if clear_flow_stats:
+ self.flow_stats.clear_stats()
+
self.logger.log_cmd("clearing stats on port(s) {0}:".format(port_id_list))
return RC
@@ -825,6 +830,7 @@ class STLClient(object):
self.ports[port_id].invalidate_stats()
self.global_stats.invalidate()
+ self.flow_stats.invalidate()
return RC_OK()
@@ -1697,7 +1703,7 @@ class STLClient(object):
@__api_check(False)
- def clear_stats (self, ports = None, clear_global = True):
+ def clear_stats (self, ports = None, clear_global = True, clear_flow_stats = True):
"""
clear stats on port(s)
@@ -1708,6 +1714,9 @@ class STLClient(object):
clear_global : bool
clear the global stats
+ clear_flow_stats : bool
+ clear the flow stats
+
:raises:
+ :exc:`STLError`
@@ -1721,7 +1730,7 @@ class STLClient(object):
raise STLArgumentError('clear_global', clear_global)
- rc = self.__clear_stats(ports, clear_global)
+ rc = self.__clear_stats(ports, clear_global, clear_flow_stats)
if not rc:
raise STLError(rc)
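
Note: a short usage sketch of the extended clear_stats() signature (server address and port IDs are illustrative):

    from trex_stl_lib.api import *

    c = STLClient(server = '127.0.0.1')
    c.connect()

    # clear port, global and flow (per-stream) counters - the new default
    c.clear_stats(ports = [0, 1], clear_global = True, clear_flow_stats = True)

    # or keep the global counters and only reset the flow stats
    c.clear_stats(ports = [0, 1], clear_global = False, clear_flow_stats = True)

    c.disconnect()
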
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
index 59a047ec..eac12ebb 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
@@ -1,7 +1,7 @@
import random
import string
import struct
-import socket
+import socket
import json
import yaml
import binascii
@@ -50,13 +50,13 @@ def mac_str_to_num (mac_buffer):
def is_valid_ipv4(ip_addr):
"""
- return buffer in network order
+ return buffer in network order
"""
if type(ip_addr)==str and len(ip_addr) == 4:
return ip_addr
if type(ip_addr)==int :
- ip_addr = socket.inet_ntoa(struct.pack("!I", ip_addr))
+ ip_addr = socket.inet_ntoa(struct.pack("!I", ip_addr))
try:
return socket.inet_pton(socket.AF_INET, ip_addr)
@@ -81,7 +81,7 @@ def is_valid_ipv6(ipv6_addr):
class CTRexScriptsBase(object):
"""
- VM Script base class
+ VM Script base class
"""
def clone (self):
return copy.deepcopy(self)
@@ -105,7 +105,7 @@ class CTRexScFieldRangeValue(CTRexScFieldRangeBase):
"""
range of field value
"""
- def __init__(self, field_name,
+ def __init__(self, field_name,
field_type,
min_value,
max_value
@@ -135,7 +135,7 @@ class CTRexScIpv4SimpleRange(CTRexScFieldRangeBase):
class CTRexScIpv4TupleGen(CTRexScriptsBase):
"""
- range tuple
+ range tuple
"""
FLAGS_ULIMIT_FLOWS =1
@@ -157,7 +157,7 @@ class CTRexScIpv4TupleGen(CTRexScriptsBase):
class CTRexScTrimPacketSize(CTRexScriptsBase):
"""
- trim packet size. field type is CTRexScFieldRangeBase.FILED_TYPES = ["inc","dec","rand"]
+ trim packet size. field type is CTRexScFieldRangeBase.FILED_TYPES = ["inc","dec","rand"]
"""
def __init__(self,field_type="rand",min_pkt_size=None, max_pkt_size=None):
super(CTRexScTrimPacketSize, self).__init__()
@@ -174,7 +174,7 @@ class CTRexScTrimPacketSize(CTRexScriptsBase):
class CTRexScRaw(CTRexScriptsBase):
"""
- raw instructions
+ raw instructions
"""
def __init__(self,list_of_commands=None,split_by_field=None):
super(CTRexScRaw, self).__init__()
@@ -190,7 +190,7 @@ class CTRexScRaw(CTRexScriptsBase):
################################################################################################
-# VM raw instructions
+# VM raw instructions
################################################################################################
class CTRexVmInsBase(object):
@@ -283,7 +283,7 @@ class CTRexVmInsTupleGen(CTRexVmInsBase):
################################################################################################
-#
+#
class CTRexVmEngine(object):
def __init__(self):
@@ -294,7 +294,7 @@ class CTRexVmEngine(object):
self.ins=[]
self.split_by_var = ''
- # return as json
+ # return as json
def get_json (self):
inst_array = [];
# dump it as dict
@@ -352,7 +352,7 @@ class CTRexScapyPktUtl(object):
def _layer_offset(self, name, cnt = 0):
"""
- return offset of layer e.g 'IP',1 will return offfset of layer ip:1
+ return offset of layer e.g 'IP',1 will return offfset of layer ip:1
"""
save_cnt=cnt
for pkt in self.pkt_iter ():
@@ -367,7 +367,7 @@ class CTRexScapyPktUtl(object):
def layer_offset(self, name, cnt = 0):
"""
- return offset of layer e.g 'IP',1 will return offfset of layer ip:1
+ return offset of layer e.g 'IP',1 will return offfset of layer ip:1
"""
save_cnt=cnt
for pkt in self.pkt_iter ():
@@ -381,7 +381,7 @@ class CTRexScapyPktUtl(object):
def get_field_offet(self, layer, layer_cnt, field_name):
"""
- return offset of layer e.g 'IP',1 will return offfset of layer ip:1
+ return offset of layer e.g 'IP',1 will return offfset of layer ip:1
"""
t=self._layer_offset(layer,layer_cnt);
l_offset=t[1];
@@ -397,7 +397,7 @@ class CTRexScapyPktUtl(object):
def get_layer_offet_by_str(self, layer_des):
"""
- return layer offset by string
+ return layer offset by string
:parameters:
@@ -423,14 +423,14 @@ class CTRexScapyPktUtl(object):
def get_field_offet_by_str(self, field_des):
"""
- return field_des (offset,size) layer:cnt.field
- for example
+ return field_des (offset,size) layer:cnt.field
+ for example
802|1Q.vlan get 802.1Q->valn replace | with .
IP.src
IP:0.src (first IP.src like IP.src)
for example IP:1.src for internal IP
- return (offset, size) as tuple
+ return (offset, size) as tuple
"""
@@ -489,19 +489,19 @@ class CTRexVmDescBase(object):
def get_var_ref (self):
'''
virtual function return a ref var name
- '''
+ '''
return None
def get_var_name(self):
'''
virtual function return the varible name if exists
- '''
+ '''
return None
- def compile(self,parent):
+ def compile(self,parent):
'''
virtual function to take parent than has function name_to_offset
- '''
+ '''
pass;
@@ -565,12 +565,12 @@ class CTRexVmDescFlowVar(CTRexVmDescBase):
class CTRexVmDescFixIpv4(CTRexVmDescBase):
def __init__(self, offset):
super(CTRexVmDescFixIpv4, self).__init__()
- self.offset = offset; # could be a name of offset
+ self.offset = offset; # could be a name of offset
def get_obj (self):
return CTRexVmInsFixIpv4(self.offset);
- def compile(self,parent):
+ def compile(self,parent):
if type(self.offset)==str:
self.offset = parent._pkt_layer_offset(self.offset);
@@ -593,7 +593,7 @@ class CTRexVmDescWrFlowVar(CTRexVmDescBase):
def get_obj (self):
return CTRexVmInsWrFlowVar(self.name,self.pkt_offset+self.offset_fixup,self.add_val,self.is_big)
- def compile(self,parent):
+ def compile(self,parent):
if type(self.pkt_offset)==str:
t=parent._name_to_offset(self.pkt_offset)
self.pkt_offset = t[0]
@@ -627,7 +627,7 @@ class CTRexVmDescWrMaskFlowVar(CTRexVmDescBase):
def get_obj (self):
return CTRexVmInsWrMaskFlowVar(self.name,self.pkt_offset+self.offset_fixup,self.pkt_cast_size,self.mask,self.shift,self.add_value,self.is_big)
- def compile(self,parent):
+ def compile(self,parent):
if type(self.pkt_offset)==str:
t=parent._name_to_offset(self.pkt_offset)
self.pkt_offset = t[0]
@@ -680,7 +680,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
When path_relative_to_profile is a True load pcap file from a path relative to the profile
"""
- def __init__(self, pkt = None, pkt_buffer = None, vm = None, path_relative_to_profile = False, build_raw = True, remove_fcs = True):
+ def __init__(self, pkt = None, pkt_buffer = None, vm = None, path_relative_to_profile = False, build_raw = False, remove_fcs = True):
"""
Instantiate a CTRexPktBuilder object
@@ -737,7 +737,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
"""
super(CScapyTRexPktBuilder, self).__init__()
- self.pkt = None # as input
+ self.pkt = None # as input
self.pkt_raw = None # from raw pcap file
self.vm_scripts = [] # list of high level instructions
self.vm_low_level = None
@@ -745,7 +745,8 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
self.metadata=""
self.path_relative_to_profile = path_relative_to_profile
self.remove_fcs = remove_fcs
-
+ self.is_binary_source = pkt_buffer != None
+
if pkt != None and pkt_buffer != None:
raise CTRexPacketBuildException(-15, "packet builder cannot be provided with both pkt and pkt_buffer")
@@ -778,7 +779,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
def get_vm_data(self):
"""
- Dumps the instructions
+ Dumps the instructions
:parameters:
None
@@ -792,7 +793,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
assert self.vm_low_level is not None, 'vm_low_level is None, please use compile()'
- return self.vm_low_level.get_json()
+ return self.vm_low_level.get_json()
def dump_pkt(self, encode = True):
"""
@@ -816,7 +817,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
return {'binary': base64.b64encode(pkt_buf) if encode else pkt_buf,
'meta': self.metadata}
-
+
def dump_pkt_to_pcap(self, file_path):
wrpcap(file_path, self._get_pkt_as_str())
@@ -852,7 +853,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
def set_pcap_file (self, pcap_file):
"""
- load raw pcap file into a buffer. load only the first packet
+ load raw pcap file into a buffer. load only the first packet
:parameters:
pcap_file : file_name
@@ -898,7 +899,9 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
else:
raise CTRexPacketBuildException(-14, "bad packet" )
- def is_def_src_mac (self):
+ def is_default_src_mac (self):
+ if self.is_binary_source:
+ return True
p = self.pkt
if isinstance(p, Packet):
if isinstance(p,Ether):
@@ -906,7 +909,9 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
return False
return True
- def is_def_dst_mac (self):
+ def is_default_dst_mac (self):
+ if self.is_binary_source:
+ return True
p = self.pkt
if isinstance(p, Packet):
if isinstance(p,Ether):
@@ -918,7 +923,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
if self.pkt == None and self.pkt_raw == None:
raise CTRexPacketBuildException(-14, "Packet is empty")
-
+
self.vm_low_level = CTRexVmEngine()
# compile the VM
@@ -935,7 +940,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
raise CTRexPacketBuildException(-14, "Packet is empty")
####################################################
- # private
+ # private
def _get_pcap_file_path (self,pcap_file_name):
@@ -944,7 +949,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
f_path = pcap_file_name
else:
if self.path_relative_to_profile:
- p = self._get_path_relative_to_profile () # loader
+ p = self._get_path_relative_to_profile () # loader
if p :
f_path=os.path.abspath(os.path.join(os.path.dirname(p),pcap_file_name))
@@ -960,7 +965,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
def _compile_raw (self,obj):
- # make sure we have varibles once
+ # make sure we have varibles once
vars={};
# add it add var to dit
@@ -979,17 +984,17 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
var_name = desc.get_var_ref()
if var_name :
if not vars.has_key(var_name):
- raise CTRexPacketBuildException(-11,("variable %s does not exists ") % (var_name) );
+ raise CTRexPacketBuildException(-11,("variable %s does not exists ") % (var_name) );
desc.compile(self);
for desc in obj.commands:
self.vm_low_level.add_ins(desc.get_obj());
# set split_by_var
- if obj.split_by_field :
+ if obj.split_by_field :
assert type(obj.split_by_field)==str, "type of split by var should be string"
#if not vars.has_key(obj.split_by_field):
- # raise CTRexPacketBuildException(-11,("variable %s does not exists. change split_by_var args ") % (var_name) );
+ # raise CTRexPacketBuildException(-11,("variable %s does not exists. change split_by_var args ") % (var_name) );
self.vm_low_level.split_by_var = obj.split_by_field
@@ -1008,12 +1013,11 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
# regular scapy packet
elif not self.pkt:
# should not reach here
- raise CTRexPacketBuildException(-11, 'empty packet')
+ raise CTRexPacketBuildException(-11, 'empty packet')
if self.remove_fcs and self.pkt.lastlayer().name == 'Padding':
self.pkt.lastlayer().underlayer.remove_payload()
- if len(self.pkt) < 60: # simulator can write padding with non-zeros, set it explicit
- self.pkt /= Padding('\x00' * (60 - len(self.pkt)))
+
self.pkt.build()
self.is_pkt_built = True
@@ -1036,7 +1040,7 @@ class CScapyTRexPktBuilder(CTrexPktBuilderInterface):
return str(self.pkt)
if self.pkt_raw:
return self.pkt_raw
- raise CTRexPacketBuildException(-11, 'empty packet');
+ raise CTRexPacketBuildException(-11, 'empty packet');
def _add_tuple_gen(self,tuple_gen):
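
Note: the renamed predicates also gained a short-circuit for binary sources: a builder created from a raw buffer always reports default MACs. A hedged illustration, assuming STLPktBuilder is the scapy builder alias used in the examples above:

    from trex_stl_lib.api import *

    explicit = STLPktBuilder(pkt = Ether(src = '00:01:02:03:04:05')/IP()/UDP())
    print(explicit.is_default_src_mac())    # False - the scapy packet sets its own source MAC

    binary = STLPktBuilder(pkt_buffer = str(Ether()/IP()/UDP()))
    print(binary.is_default_src_mac())      # True - binary source, MACs treated as default
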
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
index c2e318bc..bb877586 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
@@ -13,15 +13,45 @@ import re
import math
import copy
import threading
+import pprint
GLOBAL_STATS = 'g'
PORT_STATS = 'p'
PORT_STATUS = 'ps'
-ALL_STATS_OPTS = {GLOBAL_STATS, PORT_STATS, PORT_STATUS}
+STREAMS_STATS = 's'
+
+ALL_STATS_OPTS = {GLOBAL_STATS, PORT_STATS, PORT_STATUS, STREAMS_STATS}
COMPACT = {GLOBAL_STATS, PORT_STATS}
+SS_COMPAT = {GLOBAL_STATS, STREAMS_STATS}
ExportableStats = namedtuple('ExportableStats', ['raw_data', 'text_table'])
+# deep mrege of dicts dst = src + dst
+def deep_merge_dicts (dst, src):
+ for k, v in src.iteritems():
+ # if not exists - deep copy it
+ if not k in dst:
+ dst[k] = copy.deepcopy(v)
+ else:
+ if isinstance(v, dict):
+ deep_merge_dicts(dst[k], v)
+
+# BPS L1 from pps and BPS L2
+def calc_bps_L1 (bps, pps):
+ if (pps == 0) or (bps == 0):
+ return 0
+
+ factor = bps / (pps * 8.0)
+ return bps * ( 1 + (20 / factor) )
+#
+
+def is_intable (value):
+ try:
+ int(value)
+ return True
+ except ValueError:
+ return False
+
# use to calculate diffs relative to the previous values
# for example, BW
def calculate_diff (samples):
@@ -66,18 +96,23 @@ class CTRexInfoGenerator(object):
STLClient and the ports.
"""
- def __init__(self, global_stats_ref, ports_dict_ref):
+ def __init__(self, global_stats_ref, ports_dict_ref, rx_stats_ref):
self._global_stats = global_stats_ref
self._ports_dict = ports_dict_ref
+ self._rx_stats_ref = rx_stats_ref
def generate_single_statistic(self, port_id_list, statistic_type):
if statistic_type == GLOBAL_STATS:
return self._generate_global_stats()
+
elif statistic_type == PORT_STATS:
return self._generate_port_stats(port_id_list)
- pass
+
elif statistic_type == PORT_STATUS:
return self._generate_port_status(port_id_list)
+
+ elif statistic_type == STREAMS_STATS:
+ return self._generate_streams_stats()
else:
# ignore by returning empty object
return {}
@@ -110,6 +145,90 @@ class CTRexInfoGenerator(object):
return {"global_statistics": ExportableStats(stats_data, stats_table)}
+ def _generate_streams_stats (self):
+
+ streams_keys, sstats_data = self._rx_stats_ref.generate_stats()
+ stream_count = len(streams_keys)
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * stream_count)
+ stats_table.set_cols_width([10] + [17] * stream_count)
+ stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
+
+ stats_table.add_rows([[k] + v
+ for k, v in sstats_data.iteritems()],
+ header=False)
+
+ header = ["PG ID"] + [key for key in streams_keys]
+ stats_table.header(header)
+
+ return {"streams_statistics": ExportableStats(sstats_data, stats_table)}
+
+
+
+ per_stream_stats = OrderedDict([("owner", []),
+ ("state", []),
+ ("--", []),
+ ("Tx bps L2", []),
+ ("Tx bps L1", []),
+ ("Tx pps", []),
+ ("Line Util.", []),
+
+ ("---", []),
+ ("Rx bps", []),
+ ("Rx pps", []),
+
+ ("----", []),
+ ("opackets", []),
+ ("ipackets", []),
+ ("obytes", []),
+ ("ibytes", []),
+ ("tx-bytes", []),
+ ("rx-bytes", []),
+ ("tx-pkts", []),
+ ("rx-pkts", []),
+
+ ("-----", []),
+ ("oerrors", []),
+ ("ierrors", []),
+
+ ]
+ )
+
+ total_stats = CPortStats(None)
+
+ for port_obj in relevant_ports:
+ # fetch port data
+ port_stats = port_obj.generate_port_stats()
+
+ total_stats += port_obj.port_stats
+
+ # populate to data structures
+ return_stats_data[port_obj.port_id] = port_stats
+ self.__update_per_field_dict(port_stats, per_field_stats)
+
+ total_cols = len(relevant_ports)
+ header = ["port"] + [port.port_id for port in relevant_ports]
+
+ if (total_cols > 1):
+ self.__update_per_field_dict(total_stats.generate_stats(), per_field_stats)
+ header += ['total']
+ total_cols += 1
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * total_cols)
+ stats_table.set_cols_width([10] + [17] * total_cols)
+ stats_table.set_cols_dtype(['t'] + ['t'] * total_cols)
+
+ stats_table.add_rows([[k] + v
+ for k, v in per_field_stats.iteritems()],
+ header=False)
+
+ stats_table.header(header)
+
+ return {"streams_statistics": ExportableStats(return_stats_data, stats_table)}
+
+
def _generate_port_stats(self, port_id_list):
relevant_ports = self.__get_relevant_ports(port_id_list)
@@ -131,10 +250,10 @@ class CTRexInfoGenerator(object):
("ipackets", []),
("obytes", []),
("ibytes", []),
- ("tx_bytes", []),
- ("rx_bytes", []),
- ("tx_pkts", []),
- ("rx_pkts", []),
+ ("tx-bytes", []),
+ ("rx-bytes", []),
+ ("tx-pkts", []),
+ ("rx-pkts", []),
("-----", []),
("oerrors", []),
@@ -284,97 +403,94 @@ class CTRexStats(object):
self.last_update_ts = time.time()
self.history = deque(maxlen = 10)
self.lock = threading.Lock()
+ self.has_baseline = False
- def __getitem__(self, item):
- # override this to allow quick and clean access to fields
- if not item in self.latest_stats:
- return "N/A"
-
- # item must exist
- m = re.search('_(([a-z])ps)$', item)
- if m:
- # this is a non-relative item
- unit = m.group(2)
- if unit == "b":
- return self.get(item, format=True, suffix="b/sec")
- elif unit == "p":
- return self.get(item, format=True, suffix="pkt/sec")
- else:
- return self.get(item, format=True, suffix=m.group(1))
-
- m = re.search('^[i|o](a-z+)$', item)
- if m:
- # this is a non-relative item
- type = m.group(1)
- if type == "bytes":
- return self.get_rel(item, format=True, suffix="B")
- elif type == "packets":
- return self.get_rel(item, format=True, suffix="pkts")
- else:
- # do not format with suffix
- return self.get_rel(item, format=True)
-
- # can't match to any known pattern, return N/A
- return "N/A"
+ ######## abstract methods ##########
+ # get stats for user / API
+ def get_stats (self):
+ raise NotImplementedError()
+ # generate format stats (for TUI)
def generate_stats(self):
- # must be implemented by designated classes (such as port/ global stats)
raise NotImplementedError()
- def generate_extended_values (self, snapshot):
+ # called when a snapshot arrives - add more fields
+ def _update (self, snapshot, baseline):
raise NotImplementedError()
- def update(self, snapshot):
-
- # some extended generated values (from base values)
- self.generate_extended_values(snapshot)
-
- # update
- self.latest_stats = snapshot
+ ######## END abstract methods ##########
- with self.lock:
- self.history.append(snapshot)
+ def update(self, snapshot, baseline):
- diff_time = time.time() - self.last_update_ts
+ # no update is valid before baseline
+ if not self.has_baseline and not baseline:
+ return
- # 3 seconds is too much - this is the new reference
- if (not self.reference_stats) or (diff_time > 3):
- self.reference_stats = self.latest_stats
+ # call the underlying method
+ rc = self._update(snapshot)
+ if not rc:
+ return
-
+ # sync one time
+ if not self.has_baseline and baseline:
+ self.reference_stats = copy.deepcopy(self.latest_stats)
+ self.has_baseline = True
- self.last_update_ts = time.time()
+ # save history
+ with self.lock:
+ self.history.append(self.latest_stats)
def clear_stats(self):
- self.reference_stats = self.latest_stats
+ self.reference_stats = copy.deepcopy(self.latest_stats)
def invalidate (self):
self.latest_stats = {}
+
+ def _get (self, src, field, default = None):
+ if isinstance(field, list):
+ # deep
+ value = src
+ for level in field:
+ if not level in value:
+ return default
+ value = value[level]
+ else:
+ # flat
+ if not field in src:
+ return default
+ value = src[field]
+
+ return value
+
def get(self, field, format=False, suffix=""):
- if not field in self.latest_stats:
+ value = self._get(self.latest_stats, field)
+ if value == None:
return "N/A"
- if not format:
- return self.latest_stats[field]
- else:
- return format_num(self.latest_stats[field], suffix)
+
+ return value if not format else format_num(value, suffix)
+
def get_rel(self, field, format=False, suffix=""):
- if not field in self.latest_stats:
+
+ ref_value = self._get(self.reference_stats, field)
+ latest_value = self._get(self.latest_stats, field)
+
+ # latest value is an aggregation - must contain the value
+ if latest_value == None:
return "N/A"
- if not format:
- if not field in self.reference_stats:
- print "REF: " + str(self.reference_stats)
- print "BASE: " + str(self.latest_stats)
+ if ref_value == None:
+ ref_value = 0
+
+ value = latest_value - ref_value
+
+ return value if not format else format_num(value, suffix)
- return (self.latest_stats[field] - self.reference_stats[field])
- else:
- return format_num(self.latest_stats[field] - self.reference_stats[field], suffix)
# get trend for a field
def get_trend (self, field, use_raw = False, percision = 10.0):
@@ -458,18 +574,19 @@ class CGlobalStats(CTRexStats):
return stats
- def generate_extended_values (self, snapshot):
+
+ def _update(self, snapshot):
# L1 bps
bps = snapshot.get("m_tx_bps")
pps = snapshot.get("m_tx_pps")
- if pps > 0:
- avg_pkt_size = bps / (pps * 8.0)
- bps_L1 = bps * ( (avg_pkt_size + 20.0) / avg_pkt_size )
- else:
- bps_L1 = 0.0
+ snapshot['m_tx_bps_L1'] = calc_bps_L1(bps, pps)
+
+
+ # simple...
+ self.latest_stats = snapshot
- snapshot['m_tx_bps_L1'] = bps_L1
+ return True
def generate_stats(self):
@@ -568,20 +685,22 @@ class CPortStats(CTRexStats):
return stats
- def generate_extended_values (self, snapshot):
+
+ def _update(self, snapshot):
+
# L1 bps
bps = snapshot.get("m_total_tx_bps")
pps = snapshot.get("m_total_tx_pps")
- if pps > 0:
- avg_pkt_size = bps / (pps * 8.0)
- bps_L1 = bps * ( (avg_pkt_size + 20.0) / avg_pkt_size )
- else:
- bps_L1 = 0.0
-
+ bps_L1 = calc_bps_L1(bps, pps)
snapshot['m_total_tx_bps_L1'] = bps_L1
snapshot['m_percentage'] = (bps_L1 / self._port_obj.get_speed_bps()) * 100
+ # simple...
+ self.latest_stats = snapshot
+
+ return True
+
def generate_stats(self):
@@ -627,10 +746,10 @@ class CPortStats(CTRexStats):
"obytes" : self.get_rel("obytes"),
"ibytes" : self.get_rel("ibytes"),
- "tx_bytes": self.get_rel("obytes", format = True, suffix = "B"),
- "rx_bytes": self.get_rel("ibytes", format = True, suffix = "B"),
- "tx_pkts": self.get_rel("opackets", format = True, suffix = "pkts"),
- "rx_pkts": self.get_rel("ipackets", format = True, suffix = "pkts"),
+ "tx-bytes": self.get_rel("obytes", format = True, suffix = "B"),
+ "rx-bytes": self.get_rel("ibytes", format = True, suffix = "B"),
+ "tx-pkts": self.get_rel("opackets", format = True, suffix = "pkts"),
+ "rx-pkts": self.get_rel("ipackets", format = True, suffix = "pkts"),
"oerrors" : format_num(self.get_rel("oerrors"),
compact = False,
@@ -643,33 +762,260 @@ class CPortStats(CTRexStats):
}
-class CRxStats(object):
+
+
+# RX stats objects - COMPLEX :-(
+class CRxStats(CTRexStats):
def __init__(self):
- self.flow_stats = {}
+ super(CRxStats, self).__init__()
+
+
+ # calculates a diff between previous snapshot
+ # and current one
+ def calculate_diff_sec (self, current, prev):
+ if not 'ts' in current:
+ raise ValueError("INTERNAL ERROR: RX stats snapshot MUST contain 'ts' field")
+
+ if prev:
+ prev_ts = prev['ts']
+ now_ts = current['ts']
+ diff_sec = (now_ts['value'] - prev_ts['value']) / float(now_ts['freq'])
+ else:
+ diff_sec = 0.0
+
+ return diff_sec
+
+
+ # this is the heart of the complex
+ def process_single_pg (self, current_pg, prev_pg):
+
+ # start with the previous PG
+ output = copy.deepcopy(prev_pg)
+
+ for field in ['tx_pkts', 'tx_bytes', 'rx_pkts', 'rx_bytes']:
+ # is in the first time ? (nothing in prev)
+ if not field in output:
+ output[field] = {}
+
+ # does the current snapshot has this field ?
+ if field in current_pg:
+ for port, pv in current_pg[field].iteritems():
+ if not is_intable(port):
+ continue
+
+ output[field][port] = pv
+
+ # sum up
+ total = None
+ for port, pv in output[field].iteritems():
+ if not is_intable(port):
+ continue
+ if total is None:
+ total = 0
+ total += pv
+
+ output[field]['total'] = total
+
+
+ return output
+
+
+ def process_snapshot (self, current, prev):
+
+ # final output
+ output = {}
+
+ # copy timestamp field
+ output['ts'] = current['ts']
+
+ # aggregate all the PG ids (previous and current)
+ pg_ids = filter(is_intable, set(prev.keys() + current.keys()))
+
+ for pg_id in pg_ids:
+
+ current_pg = current.get(pg_id, {})
+
+ # first time - we do not care
+ if current_pg.get('first_time'):
+ # new value - ignore history
+ output[pg_id] = self.process_single_pg(current_pg, {})
+ self.reference_stats[pg_id] = {}
+
+ # 'dry' B/W
+ self.calculate_bw_for_pg(output[pg_id])
+ else:
+ # aggregate the two values
+ prev_pg = prev.get(pg_id, {})
+ output[pg_id] = self.process_single_pg(current_pg, prev_pg)
+
+ # calculate B/W
+ diff_sec = self.calculate_diff_sec(current, prev)
+ self.calculate_bw_for_pg(output[pg_id], prev_pg, diff_sec)
+
+
+ return output
+
+
+
+ def calculate_bw_for_pg (self, pg_current, pg_prev = None, diff_sec = 0.0):
+
+ # if no previous values - its None
+ if (pg_prev == None) or not (diff_sec > 0):
+ pg_current['tx_pps'] = None
+ pg_current['tx_bps'] = None
+ pg_current['tx_bps_L1'] = None
+ pg_current['rx_pps'] = None
+ pg_current['rx_bps'] = None
+ return
+
+
+ # read the current values
+ now_tx_pkts = pg_current['tx_pkts']['total']
+ now_tx_bytes = pg_current['tx_bytes']['total']
+ now_rx_pkts = pg_current['rx_pkts']['total']
+ now_rx_bytes = pg_current['rx_bytes']['total']
+
+ # prev values
+ prev_tx_pkts = pg_prev['tx_pkts']['total']
+ prev_tx_bytes = pg_prev['tx_bytes']['total']
+ prev_rx_pkts = pg_prev['rx_pkts']['total']
+ prev_rx_bytes = pg_prev['rx_bytes']['total']
+
+ # prev B/W
+ prev_tx_pps = pg_prev['tx_pps']
+ prev_tx_bps = pg_prev['tx_bps']
+ prev_rx_pps = pg_prev['rx_pps']
+ prev_rx_bps = pg_prev['rx_bps']
+
+
+ #assert(now_tx_pkts >= prev_tx_pkts)
+ pg_current['tx_pps'] = self.calc_pps(prev_tx_pps, now_tx_pkts, prev_tx_pkts, diff_sec)
+ pg_current['tx_bps'] = self.calc_bps(prev_tx_bps, now_tx_bytes, prev_tx_bytes, diff_sec)
+ pg_current['rx_pps'] = self.calc_pps(prev_rx_pps, now_rx_pkts, prev_rx_pkts, diff_sec)
+ pg_current['rx_bps'] = self.calc_bps(prev_rx_bps, now_rx_bytes, prev_rx_bytes, diff_sec)
+
+ if pg_current['tx_bps'] != None and pg_current['tx_pps'] != None:
+ pg_current['tx_bps_L1'] = calc_bps_L1(pg_current['tx_bps'], pg_current['tx_pps'])
+ else:
+ pg_current['tx_bps_L1'] = None
+
+
+ def calc_pps (self, prev_bw, now, prev, diff_sec):
+ return self.calc_bw(prev_bw, now, prev, diff_sec, False)
+
+
+ def calc_bps (self, prev_bw, now, prev, diff_sec):
+ return self.calc_bw(prev_bw, now, prev, diff_sec, True)
+
+
+ def calc_bw (self, prev_bw, now, prev, diff_sec, is_bps):
+ # B/W is not valid when the values are None
+ if (now is None) or (prev is None):
+ return None
+
+ # calculate the B/W for current snapshot
+ current_bw = (now - prev) / diff_sec
+ if is_bps:
+ current_bw *= 8
+
+ # previous B/W is None ? ignore it
+ if prev_bw is None:
+ prev_bw = 0
- def update (self, snapshot):
- self.flow_stats = snapshot
+ return ( (0.5 * prev_bw) + (0.5 * current_bw) )
+
+
+ def _update (self, snapshot):
+
+ # generate a new snapshot
+ new_snapshot = self.process_snapshot(snapshot, self.latest_stats)
+
+ #print new_snapshot
+ # advance
+ self.latest_stats = new_snapshot
+
+
+ return True
+
+
+
+ # for API
def get_stats (self):
stats = {}
- for pg_id, pg_id_data in self.flow_stats.iteritems():
- # ignore non pg ID keys
- try:
- pg_id = int(pg_id)
- except ValueError:
+
+ for pg_id, value in self.latest_stats.iteritems():
+ # skip non ints
+ if not is_intable(pg_id):
continue
- # handle pg id
- stats[pg_id] = {}
- for field, per_port_data in pg_id_data.iteritems():
- stats[pg_id][field] = {}
- for port, value in per_port_data.iteritems():
- stats[pg_id][field][int(port)] = value
+ stats[int(pg_id)] = {}
+ for field in ['tx_pkts', 'tx_bytes', 'rx_pkts']:
+ stats[int(pg_id)][field] = {'total': self.get_rel([pg_id, field, 'total'])}
+
+ for port, pv in value[field].iteritems():
+ try:
+ int(port)
+ except ValueError:
+ continue
+ stats[int(pg_id)][field][int(port)] = self.get_rel([pg_id, field, port])
return stats
+
+ def generate_stats (self):
+
+ # for TUI - maximum 4
+ pg_ids = filter(is_intable, self.latest_stats.keys())[:4]
+ cnt = len(pg_ids)
+
+ formatted_stats = OrderedDict([ ('Tx pps', []),
+ ('Tx bps L2', []),
+ ('Tx bps L1', []),
+ ('---', [''] * cnt),
+ ('Rx pps', []),
+ ('Rx bps', []),
+ ('----', [''] * cnt),
+ ('opackets', []),
+ ('ipackets', []),
+ ('obytes', []),
+ ('ibytes', []),
+ ('-----', [''] * cnt),
+ ('tx_pkts', []),
+ ('rx_pkts', []),
+ ('tx_bytes', []),
+ ('rx_bytes', [])
+ ])
+
+
+
+ # maximum 4
+ for pg_id in pg_ids:
+
+ formatted_stats['Tx pps'].append(self.get([pg_id, 'tx_pps'], format = True, suffix = "pps"))
+ formatted_stats['Tx bps L2'].append(self.get([pg_id, 'tx_bps'], format = True, suffix = "bps"))
+
+ formatted_stats['Tx bps L1'].append(self.get([pg_id, 'tx_bps_L1'], format = True, suffix = "bps"))
+
+ formatted_stats['Rx pps'].append(self.get([pg_id, 'rx_pps'], format = True, suffix = "pps"))
+ formatted_stats['Rx bps'].append(self.get([pg_id, 'rx_bps'], format = True, suffix = "bps"))
+
+ formatted_stats['opackets'].append(self.get_rel([pg_id, 'tx_pkts', 'total']))
+ formatted_stats['ipackets'].append(self.get_rel([pg_id, 'rx_pkts', 'total']))
+ formatted_stats['obytes'].append(self.get_rel([pg_id, 'tx_bytes', 'total']))
+ formatted_stats['ibytes'].append(self.get_rel([pg_id, 'rx_bytes', 'total']))
+ formatted_stats['tx_bytes'].append(self.get_rel([pg_id, 'tx_bytes', 'total'], format = True, suffix = "B"))
+ formatted_stats['rx_bytes'].append(self.get_rel([pg_id, 'rx_bytes', 'total'], format = True, suffix = "B"))
+ formatted_stats['tx_pkts'].append(self.get_rel([pg_id, 'tx_pkts', 'total'], format = True, suffix = "pkts"))
+ formatted_stats['rx_pkts'].append(self.get_rel([pg_id, 'rx_pkts', 'total'], format = True, suffix = "pkts"))
+
+
+
+ return pg_ids, formatted_stats
+
if __name__ == "__main__":
pass
+
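
Note: two numeric details worth spelling out: calc_bps_L1 treats bps / (pps * 8) as the average L2 frame size in bytes and adds 20 bytes per packet (preamble plus inter-frame gap), and calc_bw smooths each new sample 50/50 with the previous one. A small worked example (numbers are illustrative):

    def calc_bps_L1(bps, pps):
        # as introduced above: 'factor' is the average L2 frame size in bytes
        if (pps == 0) or (bps == 0):
            return 0
        factor = bps / (pps * 8.0)
        return bps * (1 + (20 / factor))

    # 64-byte frames at 1 Mpps: 512 Mbps on L2, 672 Mbps on L1
    print(calc_bps_L1(512e6, 1e6))          # 672000000.0

    # calc_bw: simple 50/50 smoothing of consecutive samples, e.g. a previous
    # rate of 100 pps and a raw sample of 200 pps give 150 pps
    print(0.5 * 100 + 0.5 * 200)            # 150.0
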
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
index e0b25b1d..d582b499 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
@@ -13,21 +13,30 @@ def stl_map_ports (client, ports = None):
# generate streams
base_pkt = CScapyTRexPktBuilder(pkt = Ether()/IP())
-
+
+ # send something initial to calm down switches with arps etc.
+ stream = STLStream(packet = base_pkt,
+ mode = STLTXSingleBurst(pps = 100000, total_pkts = 1))
+ client.add_streams(stream, ports)
+
+ client.start(ports, mult = "50%")
+ client.wait_on_traffic(ports)
+ client.reset(ports)
+
tx_pkts = {}
pkts = 1
for port in ports:
tx_pkts[pkts] = port
stream = STLStream(packet = base_pkt,
- mode = STLTXSingleBurst(pps = 100000, total_pkts = pkts))
+ mode = STLTXSingleBurst(pps = 100000, total_pkts = pkts * 3))
client.add_streams(stream, [port])
- pkts = pkts * 2
+ pkts *= 2
# inject
client.clear_stats()
- client.start(ports, mult = "1mpps")
+ client.start(ports, mult = "50%")
client.wait_on_traffic(ports)
stats = client.get_stats()
@@ -40,7 +49,7 @@ def stl_map_ports (client, ports = None):
# actual mapping
for port in ports:
- ipackets = stats[port]["ipackets"]
+ ipackets = int(round(stats[port]["ipackets"] / 3.0)) # majority out of 3 to clean random noises
table['map'][port] = None
for pkts in tx_pkts.keys():
@@ -48,7 +57,6 @@ def stl_map_ports (client, ports = None):
tx_port = tx_pkts[pkts]
table['map'][port] = tx_port
-
unmapped = list(ports)
while len(unmapped) > 0:
port_a = unmapped.pop(0)
@@ -57,7 +65,9 @@ def stl_map_ports (client, ports = None):
# if unknown - add to the unknown list
if port_b == None:
table['unknown'].append(port_a)
-
+ # self-loop, due to bug?
+ elif port_a == port_b:
+ continue
# bi-directional ports
elif (table['map'][port_b] == port_a):
unmapped.remove(port_b)
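
Note: the mapping trick is that each port transmits a unique power-of-two burst (times 3), so dividing the received count by 3 and rounding recovers the sender even with a few stray packets such as ARPs. A toy sketch with made-up counters:

    tx_pkts = {1: 0, 2: 1, 4: 2, 8: 3}      # burst size (before the *3) -> transmitting port
    ipackets = {0: 25, 1: 13, 2: 6, 3: 3}   # raw RX counters, illustrative (ports 0<->3, 1<->2)

    mapping = {}
    for rx_port, count in ipackets.items():
        key = int(round(count / 3.0))       # majority out of 3, tolerant to noise
        mapping[rx_port] = tx_pkts.get(key)

    print(mapping)                          # {0: 3, 1: 2, 2: 1, 3: 0}
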
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
index b7368767..2a99be8d 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
@@ -329,7 +329,7 @@ class STLStream(object):
if mac_src_override_by_pkt == None:
int_mac_src_override_by_pkt=0
if packet :
- if packet.is_def_src_mac ()==False:
+ if packet.is_default_src_mac ()==False:
int_mac_src_override_by_pkt=1
else:
@@ -338,7 +338,7 @@ class STLStream(object):
if mac_dst_override_mode == None:
int_mac_dst_override_mode = 0;
if packet :
- if packet.is_def_dst_mac ()==False:
+ if packet.is_default_dst_mac ()==False:
int_mac_dst_override_mode=STLStreamDstMAC_PKT
else:
int_mac_dst_override_mode = int(mac_dst_override_mode);
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
index 649c192a..0390ac9c 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
@@ -34,7 +34,8 @@ PROMISCUOUS_SWITCH = 21
GLOBAL_STATS = 50
PORT_STATS = 51
PORT_STATUS = 52
-STATS_MASK = 53
+STREAMS_STATS = 53
+STATS_MASK = 54
STREAMS_MASK = 60
# ALL_STREAMS = 61
@@ -312,6 +313,10 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'],
{'action': 'store_true',
'help': "Fetch only port status data"}),
+ STREAMS_STATS: ArgumentPack(['-s'],
+ {'action': 'store_true',
+ 'help': "Fetch only streams stats"}),
+
STREAMS_MASK: ArgumentPack(['--streams'],
{"nargs": '+',
'dest':'streams',
@@ -336,7 +341,8 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'],
{'required': True}),
STATS_MASK: ArgumentGroup(MUTEX, [GLOBAL_STATS,
PORT_STATS,
- PORT_STATUS],
+ PORT_STATUS,
+ STREAMS_STATS],
{})
}