path: root/scripts/automation/trex_control_plane/stl
Diffstat (limited to 'scripts/automation/trex_control_plane/stl')
-rw-r--r--  scripts/automation/trex_control_plane/stl/console/__init__.py  0
-rw-r--r--  scripts/automation/trex_control_plane/stl/console/stl_path.py  7
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/console/trex_console.py  889
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/console/trex_root_path.py  15
-rw-r--r--  scripts/automation/trex_control_plane/stl/console/trex_tui.py  1250
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/hlt_udp_simple.py  114
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/examples/rpc_proxy_server.py  167
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py  118
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py  144
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py  110
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_imix.py  126
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py  113
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_path.py  7
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_pcap.py  117
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_pcap_remote.py  123
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_profile.py  58
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py  218
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py  71
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py  60
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_simple_pin_core.py  72
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/examples/using_rpc_proxy.py  149
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py  798
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py  116
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py  188
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py  84
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py  155
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py  14
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py  7
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py  18
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py  440
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py  3370
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py  71
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py  65
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py  1595
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py  284
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_interface.py  43
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py  1698
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py  794
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py  620
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py  1549
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py  78
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py  1346
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py  167
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py  297
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/__init__.py  0
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py  88
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/constants.py  26
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py  144
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py  596
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py  29
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py  195
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py  35
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py  32
53 files changed, 18870 insertions, 0 deletions
diff --git a/scripts/automation/trex_control_plane/stl/console/__init__.py b/scripts/automation/trex_control_plane/stl/console/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/__init__.py
diff --git a/scripts/automation/trex_control_plane/stl/console/stl_path.py b/scripts/automation/trex_control_plane/stl/console/stl_path.py
new file mode 100644
index 00000000..f15c666e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/stl_path.py
@@ -0,0 +1,7 @@
+import sys, os
+
+# FIXME: point this at the right path for trex_stl_lib
+sys.path.insert(0, "../")
+
+STL_PROFILES_PATH = os.path.join(os.pardir, 'profiles')
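+
+# typical usage from a script in this directory (mirrored by the examples
+# package below): import this module first so trex_stl_lib resolves, e.g.
+#
+#   import stl_path
+#   from trex_stl_lib.api import *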
+
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_console.py b/scripts/automation/trex_control_plane/stl/console/trex_console.py
new file mode 100755
index 00000000..b23b5f1f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/trex_console.py
@@ -0,0 +1,889 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Dan Klein, Itay Marom
+Cisco Systems, Inc.
+
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+from __future__ import print_function
+
+import subprocess
+import cmd
+import json
+import ast
+import argparse
+import random
+import readline
+import string
+import os
+import sys
+import tty, termios
+
+try:
+ import stl_path
+except ImportError:
+ from . import stl_path
+from trex_stl_lib.api import *
+
+from trex_stl_lib.utils.text_opts import *
+from trex_stl_lib.utils.common import user_input, get_current_user
+from trex_stl_lib.utils import parsing_opts
+
+try:
+ import trex_tui
+except ImportError:
+ from . import trex_tui
+
+from functools import wraps
+
+__version__ = "2.0"
+
+# console custom logger
+class ConsoleLogger(LoggerApi):
+ def __init__ (self):
+ self.prompt_redraw = None
+
+ def write (self, msg, newline = True):
+ if newline:
+ print(msg)
+ else:
+ print(msg, end=' ')
+
+ def flush (self):
+ sys.stdout.flush()
+
+ # override this for the prompt fix
+ def async_log (self, msg, level = LoggerApi.VERBOSE_REGULAR, newline = True):
+ self.log(msg, level, newline)
+ if ( (self.level >= LoggerApi.VERBOSE_REGULAR) and self.prompt_redraw ):
+ self.prompt_redraw()
+ self.flush()
+
+
+def set_window_always_on_top (title):
+    # we need the GDK module; if it is not available - ignore this command
+ try:
+ if sys.version_info < (3,0):
+ from gtk import gdk
+ else:
+ #from gi.repository import Gdk as gdk
+ return
+
+ except ImportError:
+ return
+
+ # search the window and set it as above
+ root = gdk.get_default_root_window()
+
+ for id in root.property_get('_NET_CLIENT_LIST')[2]:
+ w = gdk.window_foreign_new(id)
+ if w:
+ name = w.property_get('WM_NAME')[2]
+ if name == title:
+ w.set_keep_above(True)
+ gdk.window_process_all_updates()
+ break
+
+
+class TRexGeneralCmd(cmd.Cmd):
+ def __init__(self):
+ cmd.Cmd.__init__(self)
+ # configure history behaviour
+ self._history_file_dir = "/tmp/trex/console/"
+ self._history_file = self.get_history_file_full_path()
+ readline.set_history_length(100)
+ # load history, if any
+ self.load_console_history()
+
+
+ def get_console_identifier(self):
+ return self.__class__.__name__
+
+ def get_history_file_full_path(self):
+ return "{dir}{filename}.hist".format(dir=self._history_file_dir,
+ filename=self.get_console_identifier())
+
+ def load_console_history(self):
+ if os.path.exists(self._history_file):
+ readline.read_history_file(self._history_file)
+ return
+
+ def save_console_history(self):
+ if not os.path.exists(self._history_file_dir):
+ # make the directory available for every user
+ try:
+ original_umask = os.umask(0)
+ os.makedirs(self._history_file_dir, mode = 0o777)
+ finally:
+ os.umask(original_umask)
+
+
+ # os.mknod(self._history_file)
+ readline.write_history_file(self._history_file)
+ return
+
+ def print_history (self):
+
+ length = readline.get_current_history_length()
+
+ for i in range(1, length + 1):
+ cmd = readline.get_history_item(i)
+ print("{:<5} {:}".format(i, cmd))
+
+ def get_history_item (self, index):
+ length = readline.get_current_history_length()
+ if index > length:
+ print(format_text("please select an index between {0} and {1}".format(0, length)))
+ return None
+
+ return readline.get_history_item(index)
+
+
+ def emptyline(self):
+ """Called when an empty line is entered in response to the prompt.
+
+        This override ensures that when an empty line is entered, **nothing happens**.
+ """
+ return
+
+ def completenames(self, text, *ignored):
+ """
+        This override appends a space to every completed command name.
+ """
+ dotext = 'do_'+text
+ return [a[3:]+' ' for a in self.get_names() if a.startswith(dotext)]
+
+
+#
+# main console object
+class TRexConsole(TRexGeneralCmd):
+ """Trex Console"""
+
+ def __init__(self, stateless_client, verbose = False):
+
+ self.stateless_client = stateless_client
+
+ TRexGeneralCmd.__init__(self)
+
+ self.tui = trex_tui.TrexTUI(stateless_client)
+ self.terminal = None
+
+ self.verbose = verbose
+
+ self.intro = "\n-=TRex Console v{ver}=-\n".format(ver=__version__)
+ self.intro += "\nType 'help' or '?' for supported actions\n"
+
+ self.postcmd(False, "")
+
+
+ ################### internal section ########################
+
+ def prompt_redraw (self):
+ self.postcmd(False, "")
+ sys.stdout.write("\n" + self.prompt + readline.get_line_buffer())
+ sys.stdout.flush()
+
+
+ def verify_connected(f):
+ @wraps(f)
+ def wrap(*args):
+ inst = args[0]
+ func_name = f.__name__
+ if func_name.startswith("do_"):
+ func_name = func_name[3:]
+
+ if not inst.stateless_client.is_connected():
+ print(format_text("\n'{0}' cannot be executed on offline mode\n".format(func_name), 'bold'))
+ return
+
+ ret = f(*args)
+ return ret
+
+ return wrap
+
+
+ def get_console_identifier(self):
+ return "{context}_{server}".format(context=get_current_user(),
+ server=self.stateless_client.get_connection_info()['server'])
+
+ def register_main_console_methods(self):
+ main_names = set(self.trex_console.get_names()).difference(set(dir(self.__class__)))
+ for name in main_names:
+ for prefix in 'do_', 'help_', 'complete_':
+ if name.startswith(prefix):
+ self.__dict__[name] = getattr(self.trex_console, name)
+
+ def precmd(self, line):
+ # before doing anything, save history snapshot of the console
+ # this is done before executing the command in case of ungraceful application exit
+ self.save_console_history()
+
+ lines = line.split(';')
+ try:
+ for line in lines:
+ stop = self.onecmd(line)
+ stop = self.postcmd(stop, line)
+ if stop:
+ return "quit"
+
+ return ""
+ except STLError as e:
+ print(e)
+ return ''
+
+
+ def postcmd(self, stop, line):
+ self.prompt = self.stateless_client.generate_prompt(prefix = 'trex')
+ return stop
+
+
+ def default(self, line):
+ print("'{0}' is an unrecognized command. type 'help' or '?' for a list\n".format(line))
+
+ @staticmethod
+ def tree_autocomplete(text):
+ dir = os.path.dirname(text)
+ if dir:
+ path = dir
+ else:
+ path = "."
+
+
+ start_string = os.path.basename(text)
+
+ targets = []
+
+ for x in os.listdir(path):
+ if x.startswith(start_string):
+ y = os.path.join(path, x)
+ if os.path.isfile(y):
+ targets.append(x + ' ')
+ elif os.path.isdir(y):
+ targets.append(x + '/')
+
+ return targets
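+
+    # e.g. given a hypothetical layout with 'stl/imix.py' and 'stl/hlt/',
+    # tree_autocomplete('stl/') returns ['imix.py ', 'hlt/'] - completed
+    # files get a trailing space, directories a trailing '/'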
+
+
+ ####################### shell commands #######################
+ @verify_connected
+ def do_ping (self, line):
+ '''Ping the server\n'''
+ self.stateless_client.ping_line(line)
+
+
+ @verify_connected
+ def do_shutdown (self, line):
+ '''Sends the server a shutdown request\n'''
+ self.stateless_client.shutdown_line(line)
+
+ # set verbose on / off
+ def do_verbose(self, line):
+        '''Show or set verbose mode\n'''
+ if line == "":
+ print("\nverbose is " + ("on\n" if self.verbose else "off\n"))
+
+ elif line == "on":
+ self.verbose = True
+ self.stateless_client.set_verbose("high")
+ print(format_text("\nverbose set to on\n", 'green', 'bold'))
+
+ elif line == "off":
+ self.verbose = False
+ self.stateless_client.set_verbose("normal")
+ print(format_text("\nverbose set to off\n", 'green', 'bold'))
+
+ else:
+ print(format_text("\nplease specify 'on' or 'off'\n", 'bold'))
+
+ # show history
+ def help_history (self):
+ self.do_history("-h")
+
+ def do_shell (self, line):
+ self.do_history(line)
+
+ def do_push (self, line):
+ '''Push a local PCAP file\n'''
+ self.stateless_client.push_line(line)
+
+ def help_push (self):
+ self.do_push("-h")
+
+ def do_portattr (self, line):
+ '''Change/show port(s) attributes\n'''
+ self.stateless_client.set_port_attr_line(line)
+
+ def help_portattr (self):
+ self.do_portattr("-h")
+
+ @verify_connected
+ def do_map (self, line):
+        '''Map the ports topology\n'''
+ ports = self.stateless_client.get_acquired_ports()
+ if not ports:
+ print("No ports acquired\n")
+ return
+
+ with self.stateless_client.logger.supress():
+ table = stl_map_ports(self.stateless_client, ports = ports)
+
+
+ print(format_text('\nAcquired ports topology:\n', 'bold', 'underline'))
+
+ # bi-dir ports
+ print(format_text('Bi-directional ports:\n','underline'))
+ for port_a, port_b in table['bi']:
+ print("port {0} <--> port {1}".format(port_a, port_b))
+
+ print("")
+
+ # unknown ports
+ print(format_text('Mapping unknown:\n','underline'))
+ for port in table['unknown']:
+ print("port {0}".format(port))
+ print("")
+
+
+
+
+ def do_history (self, line):
+ '''Manage the command history\n'''
+
+ item = parsing_opts.ArgumentPack(['item'],
+ {"nargs": '?',
+ 'metavar': 'item',
+ 'type': parsing_opts.check_negative,
+                                          'help': "a history item index",
+ 'default': 0})
+
+ parser = parsing_opts.gen_parser(self.stateless_client,
+ "history",
+ self.do_history.__doc__,
+ item)
+
+ opts = parser.parse_args(line.split())
+ if opts is None:
+ return
+
+ if opts.item == 0:
+ self.print_history()
+ else:
+ cmd = self.get_history_item(opts.item)
+            if cmd is None:
+ return
+
+ print("Executing '{0}'".format(cmd))
+
+ return self.onecmd(cmd)
+
+
+
+ ############### connect
+ def do_connect (self, line):
+        '''Connect to the server and acquire ports\n'''
+
+ self.stateless_client.connect_line(line)
+
+ def help_connect (self):
+ self.do_connect("-h")
+
+ def do_disconnect (self, line):
+ '''Disconnect from the server\n'''
+
+ self.stateless_client.disconnect_line(line)
+
+
+ @verify_connected
+ def do_acquire (self, line):
+ '''Acquire ports\n'''
+
+ self.stateless_client.acquire_line(line)
+
+
+ @verify_connected
+ def do_release (self, line):
+ '''Release ports\n'''
+ self.stateless_client.release_line(line)
+
+ def do_reacquire (self, line):
+        '''Reacquire all the ports under your logged-in user name'''
+ self.stateless_client.reacquire_line(line)
+
+ def help_acquire (self):
+ self.do_acquire("-h")
+
+ def help_release (self):
+ self.do_release("-h")
+
+ def help_reacquire (self):
+ self.do_reacquire("-h")
+
+ ############### start
+
+ def complete_start(self, text, line, begidx, endidx):
+ s = line.split()
+ l = len(s)
+
+ file_flags = parsing_opts.get_flags(parsing_opts.FILE_PATH)
+
+ if (l > 1) and (s[l - 1] in file_flags):
+ return TRexConsole.tree_autocomplete("")
+
+ if (l > 2) and (s[l - 2] in file_flags):
+ return TRexConsole.tree_autocomplete(s[l - 1])
+
+ complete_push = complete_start
+
+ @verify_connected
+ def do_start(self, line):
+ '''Start selected traffic in specified port(s) on TRex\n'''
+
+ self.stateless_client.start_line(line)
+
+
+
+ def help_start(self):
+ self.do_start("-h")
+
+ ############# stop
+ @verify_connected
+ def do_stop(self, line):
+ '''stops port(s) transmitting traffic\n'''
+
+ self.stateless_client.stop_line(line)
+
+ def help_stop(self):
+ self.do_stop("-h")
+
+ ############# update
+ @verify_connected
+ def do_update(self, line):
+        '''Update the speed of port(s) currently transmitting traffic\n'''
+
+ self.stateless_client.update_line(line)
+
+ def help_update (self):
+ self.do_update("-h")
+
+ ############# pause
+ @verify_connected
+ def do_pause(self, line):
+ '''pause port(s) transmitting traffic\n'''
+
+ self.stateless_client.pause_line(line)
+
+ ############# resume
+ @verify_connected
+ def do_resume(self, line):
+ '''resume port(s) transmitting traffic\n'''
+
+ self.stateless_client.resume_line(line)
+
+
+
+ ########## reset
+ @verify_connected
+ def do_reset (self, line):
+ '''force stop all ports\n'''
+ self.stateless_client.reset_line(line)
+
+
+ ######### validate
+ @verify_connected
+ def do_validate (self, line):
+ '''validates port(s) stream configuration\n'''
+
+ self.stateless_client.validate_line(line)
+
+
+ @verify_connected
+ def do_stats(self, line):
+ '''Fetch statistics from TRex server by port\n'''
+ self.stateless_client.show_stats_line(line)
+
+
+ def help_stats(self):
+ self.do_stats("-h")
+
+ @verify_connected
+ def do_streams(self, line):
+        '''Fetch streams information from TRex server by port\n'''
+ self.stateless_client.show_streams_line(line)
+
+
+ def help_streams(self):
+ self.do_streams("-h")
+
+ @verify_connected
+ def do_clear(self, line):
+ '''Clear cached local statistics\n'''
+ self.stateless_client.clear_stats_line(line)
+
+
+ def help_clear(self):
+ self.do_clear("-h")
+
+
+ def help_events (self):
+ self.do_events("-h")
+
+ def do_events (self, line):
+        '''Show events received from the server\n'''
+ self.stateless_client.get_events_line(line)
+
+
+ def complete_profile(self, text, line, begidx, endidx):
+ return self.complete_start(text,line, begidx, endidx)
+
+ def do_profile (self, line):
+ '''shows information about a profile'''
+ self.stateless_client.show_profile_line(line)
+
+ # tui
+ @verify_connected
+ def do_tui (self, line):
+ '''Shows a graphical console\n'''
+ parser = parsing_opts.gen_parser(self.stateless_client,
+ "tui",
+ self.do_tui.__doc__,
+ parsing_opts.XTERM,
+ parsing_opts.LOCKED)
+
+ opts = parser.parse_args(line.split())
+
+ if not opts:
+ return opts
+ if opts.xterm:
+ if not os.path.exists('/usr/bin/xterm'):
+ print(format_text("XTERM does not exists on this machine", 'bold'))
+ return
+
+ info = self.stateless_client.get_connection_info()
+
+ exe = './trex-console --top -t -q -s {0} -p {1} --async_port {2}'.format(info['server'], info['sync_port'], info['async_port'])
+ cmd = ['/usr/bin/xterm', '-geometry', '{0}x{1}'.format(self.tui.MIN_COLS, self.tui.MIN_ROWS), '-sl', '0', '-title', 'trex_tui', '-e', exe]
+
+ # detach child
+ self.terminal = subprocess.Popen(cmd, preexec_fn = os.setpgrp)
+
+ return
+
+
+ try:
+ with self.stateless_client.logger.supress():
+ self.tui.show(self.stateless_client, self.save_console_history, locked = opts.locked)
+
+ except self.tui.ScreenSizeException as e:
+ print(format_text(str(e) + "\n", 'bold'))
+
+
+ def help_tui (self):
+ do_tui("-h")
+
+
+ # quit function
+ def do_quit(self, line):
+ '''Exit the client\n'''
+ return True
+
+
+ def do_help (self, line):
+        '''Show this help screen\n'''
+ if line:
+ try:
+ func = getattr(self, 'help_' + line)
+ except AttributeError:
+ try:
+ doc = getattr(self, 'do_' + line).__doc__
+ if doc:
+ self.stdout.write("%s\n"%str(doc))
+ return
+ except AttributeError:
+ pass
+ self.stdout.write("%s\n"%str(self.nohelp % (line,)))
+ return
+ func()
+ return
+
+ print("\nSupported Console Commands:")
+ print("----------------------------\n")
+
+ cmds = [x[3:] for x in self.get_names() if x.startswith("do_")]
+ hidden = ['EOF', 'q', 'exit', 'h', 'shell']
+ for cmd in cmds:
+ if cmd in hidden:
+ continue
+
+ try:
+ doc = getattr(self, 'do_' + cmd).__doc__
+ if doc:
+ help = str(doc)
+ else:
+ help = "*** Undocumented Function ***\n"
+ except AttributeError:
+ help = "*** Undocumented Function ***\n"
+
+            l = help.splitlines()
+            print("{:<30} {:<30}".format(cmd + " - ", l[0]))
+
+    # a custom cmdloop wrapper
+ def start(self):
+ while True:
+ try:
+ self.cmdloop()
+ break
+ except KeyboardInterrupt as e:
+ if not readline.get_line_buffer():
+ raise KeyboardInterrupt
+ else:
+ print("")
+ self.intro = None
+ continue
+
+ if self.terminal:
+ self.terminal.kill()
+
+ # aliases
+ do_exit = do_EOF = do_q = do_quit
+ do_h = do_history
+
+
+# run a script of commands
+def run_script_file (filename, stateless_client):
+
+    stateless_client.logger.log(format_text("\nRunning script file '{0}'...".format(filename), 'bold'))
+
+ with open(filename) as f:
+ script_lines = f.readlines()
+
+ cmd_table = {}
+
+ # register all the commands
+ cmd_table['start'] = stateless_client.start_line
+ cmd_table['stop'] = stateless_client.stop_line
+ cmd_table['reset'] = stateless_client.reset_line
+
+ for index, line in enumerate(script_lines, start = 1):
+ line = line.strip()
+ if line == "":
+ continue
+ if line.startswith("#"):
+ continue
+
+ sp = line.split(' ', 1)
+ cmd = sp[0]
+ if len(sp) == 2:
+ args = sp[1]
+ else:
+ args = ""
+
+ stateless_client.logger.log(format_text("Executing line {0} : '{1}'\n".format(index, line)))
+
+        if cmd not in cmd_table:
+ print("\n*** Error at line {0} : '{1}'\n".format(index, line))
+ stateless_client.logger.log(format_text("unknown command '{0}'\n".format(cmd), 'bold'))
+ return False
+
+ cmd_table[cmd](args)
+
+ stateless_client.logger.log(format_text("\n[Done]", 'bold'))
+
+ return True
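+
+# a hypothetical batch file for run_script_file - only 'start', 'stop' and
+# 'reset' are registered above; blank lines and '#' comments are skipped:
+#
+#   # load a profile, let it run, then tear down
+#   start -f stl/imix.py
+#   stop
+#   reset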
+
+
+#
+def is_valid_file(filename):
+ if not os.path.isfile(filename):
+ raise argparse.ArgumentTypeError("The file '%s' does not exist" % filename)
+
+ return filename
+
+
+
+def setParserOptions():
+ parser = argparse.ArgumentParser(prog="trex_console.py")
+
+ parser.add_argument("-s", "--server", help = "TRex Server [default is localhost]",
+ default = "localhost",
+ type = str)
+
+ parser.add_argument("-p", "--port", help = "TRex Server Port [default is 4501]\n",
+ default = 4501,
+ type = int)
+
+ parser.add_argument("--async_port", help = "TRex ASync Publisher Port [default is 4500]\n",
+ default = 4500,
+ dest='pub',
+ type = int)
+
+ parser.add_argument("-u", "--user", help = "User Name [default is currently logged in user]\n",
+ default = get_current_user(),
+ type = str)
+
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ action="store_true", help="Switch ON verbose option. Default is: OFF.",
+ default = False)
+
+
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument("-a", "--acquire", dest="acquire",
+ nargs = '+',
+ type = int,
+ help="Acquire ports on connect. default is all available ports",
+ default = None)
+
+ group.add_argument("-r", "--readonly", dest="readonly",
+ action="store_true",
+ help="Starts console in a read only mode",
+ default = False)
+
+
+ parser.add_argument("-f", "--force", dest="force",
+ action="store_true",
+ help="Force acquire the requested ports",
+ default = False)
+
+ parser.add_argument("--batch", dest="batch",
+ nargs = 1,
+ type = is_valid_file,
+ help = "Run the console in a batch mode with file",
+ default = None)
+
+ parser.add_argument("-t", "--tui", dest="tui",
+ action="store_true", help="Starts with TUI mode",
+ default = False)
+
+ parser.add_argument("-x", "--xtui", dest="xtui",
+ action="store_true", help="Starts with XTERM TUI mode",
+ default = False)
+
+ parser.add_argument("--top", dest="top",
+ action="store_true", help="Set the window as always on top",
+ default = False)
+
+ parser.add_argument("-q", "--quiet", dest="quiet",
+ action="store_true", help="Starts with all outputs suppressed",
+ default = False)
+
+ return parser
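+
+# typical invocations (hypothetical host; default ports assumed):
+#
+#   ./trex_console.py -s 10.0.0.1           connect and acquire all free ports
+#   ./trex_console.py -s 10.0.0.1 -t        jump straight into TUI mode
+#   ./trex_console.py -r                    read-only mode, no port acquisition
+#   ./trex_console.py --batch cmds.txt      run a batch file, then open the console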
+
+# a short summary of server info, printed on login
+def show_intro (logger, c):
+ x = c.get_server_system_info()
+ ver = c.get_server_version().get('version', 'N/A')
+
+ # find out which NICs the server has
+ port_types = {}
+ for port in x['ports']:
+ if 'supp_speeds' in port:
+ speed = max(port['supp_speeds']) // 1000
+ else:
+ speed = port['speed']
+ key = (speed, port.get('description', port['driver']))
+ if key not in port_types:
+ port_types[key] = 0
+ port_types[key] += 1
+
+ port_line = ''
+ for k, v in port_types.items():
+ port_line += "{0} x {1}Gbps @ {2}\t".format(v, k[0], k[1])
+
+ logger.log(format_text("\nServer Info:\n", 'underline'))
+ logger.log("Server version: {:>}".format(format_text(ver, 'bold')))
+ logger.log("Server CPU: {:>}".format(format_text("{:>} x {:>}".format(x.get('dp_core_count'), x.get('core_type')), 'bold')))
+ logger.log("Ports count: {:>}".format(format_text(port_line, 'bold')))
+
+
+def main():
+ parser = setParserOptions()
+ options = parser.parse_args()
+
+ if options.xtui:
+ options.tui = True
+
+ # always on top
+ if options.top:
+ set_window_always_on_top('trex_tui')
+
+
+ # Stateless client connection
+ if options.quiet:
+ verbose_level = LoggerApi.VERBOSE_QUIET
+ elif options.verbose:
+ verbose_level = LoggerApi.VERBOSE_HIGH
+ else:
+ verbose_level = LoggerApi.VERBOSE_REGULAR
+
+ # Stateless client connection
+ logger = ConsoleLogger()
+ stateless_client = STLClient(username = options.user,
+ server = options.server,
+ sync_port = options.port,
+ async_port = options.pub,
+ verbose_level = verbose_level,
+ logger = logger)
+
+ # TUI or no acquire will give us READ ONLY mode
+ try:
+ stateless_client.connect()
+ except STLError as e:
+ logger.log("Log:\n" + format_text(e.brief() + "\n", 'bold'))
+ return
+
+ if not options.tui and not options.readonly:
+ try:
+ # acquire all ports
+ stateless_client.acquire(options.acquire, force = options.force)
+ except STLError as e:
+ logger.log("Log:\n" + format_text(e.brief() + "\n", 'bold'))
+
+ logger.log("\n*** Failed to acquire all required ports ***\n")
+ return
+
+ if options.readonly:
+ logger.log(format_text("\nRead only mode - only few commands will be available", 'bold'))
+
+ show_intro(logger, stateless_client)
+
+
+ # a script mode
+ if options.batch:
+ cont = run_script_file(options.batch[0], stateless_client)
+ if not cont:
+ return
+
+ # console
+ try:
+ console = TRexConsole(stateless_client, options.verbose)
+ logger.prompt_redraw = console.prompt_redraw
+
+ # TUI
+ if options.tui:
+ console.do_tui("-x" if options.xtui else "-l")
+
+ else:
+ console.start()
+
+ except KeyboardInterrupt as e:
+ print("\n\n*** Caught Ctrl + C... Exiting...\n\n")
+
+ finally:
+ with stateless_client.logger.supress():
+ stateless_client.disconnect(stop_traffic = False)
+
+if __name__ == '__main__':
+
+ main()
+
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_root_path.py b/scripts/automation/trex_control_plane/stl/console/trex_root_path.py
new file mode 100755
index 00000000..de4ec03b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/trex_root_path.py
@@ -0,0 +1,15 @@
+#!/router/bin/python
+
+import os
+import sys
+
+def add_root_to_path ():
+ """adds trex_control_plane root dir to script path, up to `depth` parent dirs"""
+ root_dirname = 'trex_control_plane'
+ file_path = os.path.dirname(os.path.realpath(__file__))
+
+ components = file_path.split(os.sep)
+ sys.path.append( str.join(os.sep, components[:components.index(root_dirname)+1]) )
+ return
+
+add_root_to_path()
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_tui.py b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
new file mode 100644
index 00000000..d7db6d30
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/trex_tui.py
@@ -0,0 +1,1250 @@
+from __future__ import print_function
+
+import termios
+import sys
+import os
+import time
+import threading
+
+from collections import OrderedDict, deque
+from texttable import ansi_len
+
+
+import datetime
+import readline
+
+
+if sys.version_info > (3,0):
+ from io import StringIO
+else:
+ from cStringIO import StringIO
+
+from trex_stl_lib.utils.text_opts import *
+from trex_stl_lib.utils import text_tables
+from trex_stl_lib import trex_stl_stats
+from trex_stl_lib.utils.filters import ToggleFilter
+
+class TUIQuit(Exception):
+ pass
+
+
+# for STL exceptions
+from trex_stl_lib.api import *
+
+def ascii_split (s):
+ output = []
+
+ lines = s.split('\n')
+ for elem in lines:
+ if ansi_len(elem) > 0:
+ output.append(elem)
+
+ return output
+
+class SimpleBar(object):
+ def __init__ (self, desc, pattern):
+ self.desc = desc
+ self.pattern = pattern
+ self.pattern_len = len(pattern)
+ self.index = 0
+
+ def show (self, buffer):
+ if self.desc:
+ print(format_text("{0} {1}".format(self.desc, self.pattern[self.index]), 'bold'), file = buffer)
+ else:
+ print(format_text("{0}".format(self.pattern[self.index]), 'bold'), file = buffer)
+
+ self.index = (self.index + 1) % self.pattern_len
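+
+# e.g. SimpleBar('status: ', ['|', '/', '-', '\\']) renders a spinner that
+# advances one character per show() call (see TrexTUIPanelManager below)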
+
+
+# base type of a panel
+class TrexTUIPanel(object):
+ def __init__ (self, mng, name):
+
+ self.mng = mng
+ self.name = name
+ self.stateless_client = mng.stateless_client
+ self.is_graph = False
+
+ def show (self, buffer):
+ raise NotImplementedError("must implement this")
+
+ def get_key_actions (self):
+ raise NotImplementedError("must implement this")
+
+
+ def get_name (self):
+ return self.name
+
+
+# dashboard panel
+class TrexTUIDashBoard(TrexTUIPanel):
+
+ FILTER_ACQUIRED = 1
+ FILTER_ALL = 2
+
+ def __init__ (self, mng):
+ super(TrexTUIDashBoard, self).__init__(mng, "dashboard")
+
+ self.ports = self.stateless_client.get_all_ports()
+
+ self.key_actions = OrderedDict()
+
+ self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
+ self.key_actions['p'] = {'action': self.action_pause, 'legend': 'pause', 'show': True, 'color': 'red'}
+ self.key_actions['r'] = {'action': self.action_resume, 'legend': 'resume', 'show': True, 'color': 'blue'}
+
+ self.key_actions['o'] = {'action': self.action_show_owned, 'legend': 'owned ports', 'show': True}
+ self.key_actions['n'] = {'action': self.action_reset_view, 'legend': 'reset view', 'show': True}
+ self.key_actions['a'] = {'action': self.action_show_all, 'legend': 'all ports', 'show': True}
+
+ # register all the ports to the toggle action
+ for port_id in self.ports:
+ self.key_actions[str(port_id)] = {'action': self.action_toggle_port(port_id), 'legend': 'port {0}'.format(port_id), 'show': False}
+
+
+ self.toggle_filter = ToggleFilter(self.ports)
+
+ if self.stateless_client.get_acquired_ports():
+ self.action_show_owned()
+ else:
+ self.action_show_all()
+
+
+ def get_showed_ports (self):
+ return self.toggle_filter.filter_items()
+
+
+ def show (self, buffer):
+ stats = self.stateless_client._get_formatted_stats(self.get_showed_ports())
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type, buffer = buffer)
+
+
+ def get_key_actions (self):
+ allowed = OrderedDict()
+
+
+ allowed['n'] = self.key_actions['n']
+ allowed['o'] = self.key_actions['o']
+ allowed['a'] = self.key_actions['a']
+ for i in self.ports:
+ allowed[str(i)] = self.key_actions[str(i)]
+
+
+ if self.get_showed_ports():
+ allowed['c'] = self.key_actions['c']
+
+ # if not all ports are acquired - no operations
+ if not (set(self.get_showed_ports()) <= set(self.stateless_client.get_acquired_ports())):
+ return allowed
+
+ # if any/some ports can be resumed
+ if set(self.get_showed_ports()) & set(self.stateless_client.get_paused_ports()):
+ allowed['r'] = self.key_actions['r']
+
+ # if any/some ports are transmitting - support those actions
+ if set(self.get_showed_ports()) & set(self.stateless_client.get_transmitting_ports()):
+ allowed['p'] = self.key_actions['p']
+
+
+ return allowed
+
+
+ ######### actions
+ def action_pause (self):
+ try:
+ rc = self.stateless_client.pause(ports = self.get_showed_ports())
+ except STLError:
+ pass
+
+ return ""
+
+
+
+ def action_resume (self):
+ try:
+ self.stateless_client.resume(ports = self.get_showed_ports())
+ except STLError:
+ pass
+
+ return ""
+
+
+ def action_reset_view (self):
+ self.toggle_filter.reset()
+ return ""
+
+ def action_show_owned (self):
+ self.toggle_filter.reset()
+ self.toggle_filter.toggle_items(*self.stateless_client.get_acquired_ports())
+ return ""
+
+ def action_show_all (self):
+ self.toggle_filter.reset()
+ self.toggle_filter.toggle_items(*self.stateless_client.get_all_ports())
+ return ""
+
+ def action_clear (self):
+ self.stateless_client.clear_stats(self.toggle_filter.filter_items())
+ return "cleared all stats"
+
+
+ def action_toggle_port(self, port_id):
+ def action_toggle_port_x():
+ self.toggle_filter.toggle_item(port_id)
+ return ""
+
+ return action_toggle_port_x
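+    # note: action_toggle_port(port_id) returns a zero-argument closure bound
+    # to that port, so each numeric key in key_actions toggles its own port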
+
+
+
+# streams stats
+class TrexTUIStreamsStats(TrexTUIPanel):
+ def __init__ (self, mng):
+ super(TrexTUIStreamsStats, self).__init__(mng, "sstats")
+
+ self.key_actions = OrderedDict()
+
+ self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
+
+
+ def show (self, buffer):
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.SS_COMPAT)
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type, buffer = buffer)
+ pass
+
+
+ def get_key_actions (self):
+ return self.key_actions
+
+ def action_clear (self):
+ self.stateless_client.flow_stats.clear_stats()
+
+ return ""
+
+
+# latency stats
+class TrexTUILatencyStats(TrexTUIPanel):
+ def __init__ (self, mng):
+ super(TrexTUILatencyStats, self).__init__(mng, "lstats")
+ self.key_actions = OrderedDict()
+ self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
+ self.key_actions['h'] = {'action': self.action_toggle_histogram, 'legend': 'histogram toggle', 'show': True}
+ self.is_histogram = False
+
+
+ def show (self, buffer):
+ if self.is_histogram:
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.LH_COMPAT)
+ else:
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.LS_COMPAT)
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ if stat_type == 'latency_statistics':
+ untouched_header = ' (usec)'
+ else:
+ untouched_header = ''
+ text_tables.print_table_with_header(stat_data.text_table, stat_type, untouched_header = untouched_header, buffer = buffer)
+
+ def get_key_actions (self):
+ return self.key_actions
+
+ def action_toggle_histogram (self):
+ self.is_histogram = not self.is_histogram
+ return ""
+
+ def action_clear (self):
+ self.stateless_client.latency_stats.clear_stats()
+ return ""
+
+
+# utilization stats
+class TrexTUIUtilizationStats(TrexTUIPanel):
+ def __init__ (self, mng):
+ super(TrexTUIUtilizationStats, self).__init__(mng, "ustats")
+ self.key_actions = {}
+
+ def show (self, buffer):
+ stats = self.stateless_client._get_formatted_stats(port_id_list = None, stats_mask = trex_stl_stats.UT_COMPAT)
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type, buffer = buffer)
+
+ def get_key_actions (self):
+ return self.key_actions
+
+
+# log
+class TrexTUILog():
+ def __init__ (self):
+ self.log = []
+
+ def add_event (self, msg):
+ self.log.append("[{0}] {1}".format(str(datetime.datetime.now().time()), msg))
+
+ def show (self, buffer, max_lines = 4):
+
+ cut = len(self.log) - max_lines
+ if cut < 0:
+ cut = 0
+
+ print(format_text("\nLog:", 'bold', 'underline'), file = buffer)
+
+ for msg in self.log[cut:]:
+ print(msg, file = buffer)
+
+
+# a predicate that wraps a function as a bool
+class Predicate(object):
+ def __init__ (self, func):
+ self.func = func
+
+ def __nonzero__ (self):
+ return True if self.func() else False
+ def __bool__ (self):
+ return True if self.func() else False
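+
+# e.g. 'show': Predicate(lambda: not locked) is re-evaluated on every bool()
+# check, letting a legend entry appear or disappear as the lock state changes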
+
+
+# Panels manager (contains server panels)
+class TrexTUIPanelManager():
+ def __init__ (self, tui):
+ self.tui = tui
+ self.stateless_client = tui.stateless_client
+ self.ports = self.stateless_client.get_all_ports()
+ self.locked = False
+
+ self.panels = {}
+ self.panels['dashboard'] = TrexTUIDashBoard(self)
+ self.panels['sstats'] = TrexTUIStreamsStats(self)
+ self.panels['lstats'] = TrexTUILatencyStats(self)
+ self.panels['ustats'] = TrexTUIUtilizationStats(self)
+
+ self.key_actions = OrderedDict()
+
+ # we allow console only when ports are acquired
+ self.key_actions['ESC'] = {'action': self.action_none, 'legend': 'console', 'show': Predicate(lambda : not self.locked)}
+
+ self.key_actions['q'] = {'action': self.action_none, 'legend': 'quit', 'show': True}
+ self.key_actions['d'] = {'action': self.action_show_dash, 'legend': 'dashboard', 'show': True}
+ self.key_actions['s'] = {'action': self.action_show_sstats, 'legend': 'streams', 'show': True}
+ self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
+ self.key_actions['u'] = {'action': self.action_show_ustats, 'legend': 'util', 'show': True}
+
+
+ # start with dashboard
+ self.main_panel = self.panels['dashboard']
+
+ # log object
+ self.log = TrexTUILog()
+
+ self.generate_legend()
+
+ self.conn_bar = SimpleBar('status: ', ['|','/','-','\\'])
+ self.dis_bar = SimpleBar('status: ', ['X', ' '])
+ self.show_log = False
+
+
+ def generate_legend (self):
+
+ self.legend = "\n{:<12}".format("browse:")
+
+ for k, v in self.key_actions.items():
+ if v['show']:
+ x = "'{0}' - {1}, ".format(k, v['legend'])
+ if v.get('color'):
+ self.legend += "{:}".format(format_text(x, v.get('color')))
+ else:
+ self.legend += "{:}".format(x)
+
+
+ self.legend += "\n{:<12}".format(self.main_panel.get_name() + ":")
+
+ for k, v in self.main_panel.get_key_actions().items():
+ if v['show']:
+ x = "'{0}' - {1}, ".format(k, v['legend'])
+
+ if v.get('color'):
+ self.legend += "{:}".format(format_text(x, v.get('color')))
+ else:
+ self.legend += "{:}".format(x)
+
+
+ def print_connection_status (self, buffer):
+ if self.tui.get_state() == self.tui.STATE_ACTIVE:
+ self.conn_bar.show(buffer = buffer)
+ else:
+ self.dis_bar.show(buffer = buffer)
+
+ def print_legend (self, buffer):
+ print(format_text(self.legend, 'bold'), file = buffer)
+
+
+ # on window switch or turn on / off of the TUI we call this
+ def init (self, show_log = False, locked = False):
+ self.show_log = show_log
+ self.locked = locked
+ self.generate_legend()
+
+ def show (self, show_legend, buffer):
+ self.main_panel.show(buffer)
+ self.print_connection_status(buffer)
+
+ if show_legend:
+ self.generate_legend()
+ self.print_legend(buffer)
+
+ if self.show_log:
+ self.log.show(buffer)
+
+
+ def handle_key (self, ch):
+ # check for the manager registered actions
+ if ch in self.key_actions:
+ msg = self.key_actions[ch]['action']()
+
+ # check for main panel actions
+ elif ch in self.main_panel.get_key_actions():
+ msg = self.main_panel.get_key_actions()[ch]['action']()
+
+ else:
+ return False
+
+ self.generate_legend()
+ return True
+
+ #if msg == None:
+ # return False
+ #else:
+ # if msg:
+ # self.log.add_event(msg)
+ # return True
+
+
+ # actions
+
+ def action_none (self):
+ return None
+
+ def action_show_dash (self):
+ self.main_panel = self.panels['dashboard']
+ self.init(self.show_log)
+ return ""
+
+ def action_show_port (self, port_id):
+ def action_show_port_x ():
+ self.main_panel = self.panels['port {0}'.format(port_id)]
+ self.init()
+ return ""
+
+ return action_show_port_x
+
+
+ def action_show_sstats (self):
+ self.main_panel = self.panels['sstats']
+ self.init(self.show_log)
+ return ""
+
+
+ def action_show_lstats (self):
+ self.main_panel = self.panels['lstats']
+ self.init(self.show_log)
+ return ""
+
+ def action_show_ustats(self):
+ self.main_panel = self.panels['ustats']
+ self.init(self.show_log)
+ return ""
+
+
+
+# ScreenBuffer renders the next screen on a background thread,
+# avoiding inline delays when reprinting the screen
+class ScreenBuffer():
+ def __init__ (self, redraw_cb):
+ self.snapshot = ''
+ self.lock = threading.Lock()
+
+ self.redraw_cb = redraw_cb
+ self.update_flag = False
+
+
+ def start (self):
+ self.active = True
+ self.t = threading.Thread(target = self.__handler)
+ self.t.setDaemon(True)
+ self.t.start()
+
+ def stop (self):
+ self.active = False
+ self.t.join()
+
+
+ # request an update
+ def update (self):
+ self.update_flag = True
+
+ # fetch the screen, return None if no new screen exists yet
+ def get (self):
+
+ if not self.snapshot:
+ return None
+
+ # we have a snapshot - fetch it
+ with self.lock:
+ x = self.snapshot
+ self.snapshot = None
+ return x
+
+
+ def __handler (self):
+
+ while self.active:
+ if self.update_flag:
+ self.__redraw()
+
+ time.sleep(0.01)
+
+ # redraw the next screen
+ def __redraw (self):
+ buffer = StringIO()
+
+ self.redraw_cb(buffer)
+
+ with self.lock:
+ self.snapshot = buffer
+ self.update_flag = False
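+
+# intended flow (see TrexTUI.draw_screen below): the TUI calls update() when a
+# redraw is due and polls get() every tick; get() hands out the rendered
+# StringIO snapshot once, then None until the worker produces a fresh one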
+
+# a policer class that makes sure redraws do not happen too fast -
+# it filters out fast bursts of redraws
+class RedrawPolicer():
+ def __init__ (self, rate):
+ self.ts = 0
+ self.marked = False
+ self.rate = rate
+ self.force = False
+
+ def mark_for_redraw (self, force = False):
+ self.marked = True
+ if force:
+ self.force = True
+
+ def should_redraw (self):
+ dt = time.time() - self.ts
+ return self.force or (self.marked and (dt > self.rate))
+
+ def reset (self, restart = False):
+ self.ts = time.time()
+ self.marked = restart
+ self.force = False
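+
+# e.g. RedrawPolicer(0.5) lets a marked redraw through at most once per 0.5
+# seconds, unless mark_for_redraw(force = True) bypasses the rate limit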
+
+
+# shows a textual top style window
+class TrexTUI():
+
+ STATE_ACTIVE = 0
+ STATE_LOST_CONT = 1
+ STATE_RECONNECT = 2
+ is_graph = False
+
+ MIN_ROWS = 50
+ MIN_COLS = 111
+
+
+ class ScreenSizeException(Exception):
+ def __init__ (self, cols, rows):
+ msg = "TUI requires console screen size of at least {0}x{1}, current is {2}x{3}".format(TrexTUI.MIN_COLS,
+ TrexTUI.MIN_ROWS,
+ cols,
+ rows)
+ super(TrexTUI.ScreenSizeException, self).__init__(msg)
+
+
+ def __init__ (self, stateless_client):
+ self.stateless_client = stateless_client
+
+ self.tui_global_lock = threading.Lock()
+ self.pm = TrexTUIPanelManager(self)
+ self.sb = ScreenBuffer(self.redraw_handler)
+
+ def redraw_handler (self, buffer):
+ # this is executed by the screen buffer - should be protected against TUI commands
+ with self.tui_global_lock:
+ self.pm.show(show_legend = self.async_keys.is_legend_mode(), buffer = buffer)
+
+ def clear_screen (self, lines = 50):
+ # reposition the cursor
+ sys.stdout.write("\x1b[0;0H")
+
+ # clear all lines
+ for i in range(lines):
+ sys.stdout.write("\x1b[0K")
+ if i < (lines - 1):
+ sys.stdout.write("\n")
+
+ # reposition the cursor
+ sys.stdout.write("\x1b[0;0H")
+
+
+
+ def show (self, client, save_console_history, show_log = False, locked = False):
+
+ rows, cols = os.popen('stty size', 'r').read().split()
+ if (int(rows) < TrexTUI.MIN_ROWS) or (int(cols) < TrexTUI.MIN_COLS):
+ raise self.ScreenSizeException(rows = rows, cols = cols)
+
+ with AsyncKeys(client, save_console_history, self.tui_global_lock, locked) as async_keys:
+ sys.stdout.write("\x1bc")
+ self.async_keys = async_keys
+ self.show_internal(show_log, locked)
+
+
+
+ def show_internal (self, show_log, locked):
+
+ self.pm.init(show_log, locked)
+
+ self.state = self.STATE_ACTIVE
+
+ # create print policers
+ self.full_redraw = RedrawPolicer(0.5)
+ self.keys_redraw = RedrawPolicer(0.05)
+ self.full_redraw.mark_for_redraw()
+
+
+ try:
+ self.sb.start()
+
+ while True:
+ # draw and handle user input
+ status = self.async_keys.tick(self.pm)
+
+ # prepare the next frame
+ self.prepare(status)
+ time.sleep(0.01)
+ self.draw_screen()
+
+ with self.tui_global_lock:
+ self.handle_state_machine()
+
+ except TUIQuit:
+ print("\nExiting TUI...")
+
+ finally:
+ self.sb.stop()
+
+ print("")
+
+
+
+ # handle state machine
+ def handle_state_machine (self):
+ # regular state
+ if self.state == self.STATE_ACTIVE:
+            # if there is no connectivity - move to lost connectivity
+ if not self.stateless_client.async_client.is_alive():
+ self.stateless_client._invalidate_stats(self.pm.ports)
+ self.state = self.STATE_LOST_CONT
+
+
+ # lost connectivity
+ elif self.state == self.STATE_LOST_CONT:
+ # got it back
+ if self.stateless_client.async_client.is_alive():
+ # move to state reconnect
+ self.state = self.STATE_RECONNECT
+
+
+ # restored connectivity - try to reconnect
+ elif self.state == self.STATE_RECONNECT:
+
+ try:
+ self.stateless_client.connect()
+ self.stateless_client.acquire()
+ self.state = self.STATE_ACTIVE
+ except STLError:
+ self.state = self.STATE_LOST_CONT
+
+
+ # logic before printing
+ def prepare (self, status):
+ if status == AsyncKeys.STATUS_REDRAW_ALL:
+ self.full_redraw.mark_for_redraw(force = True)
+
+ elif status == AsyncKeys.STATUS_REDRAW_KEYS:
+ self.keys_redraw.mark_for_redraw()
+
+ if self.full_redraw.should_redraw():
+ self.sb.update()
+ self.full_redraw.reset(restart = True)
+
+ return
+
+
+ # draw once
+ def draw_screen (self):
+
+ # check for screen buffer's new screen
+ x = self.sb.get()
+
+ # we have a new screen to draw
+ if x:
+ self.clear_screen()
+
+ self.async_keys.draw(x)
+ sys.stdout.write(x.getvalue())
+ sys.stdout.flush()
+
+ # maybe we need to redraw the keys
+ elif self.keys_redraw.should_redraw():
+ sys.stdout.write("\x1b[4A")
+ self.async_keys.draw(sys.stdout)
+ sys.stdout.flush()
+
+ # reset the policer for next time
+ self.keys_redraw.reset()
+
+
+
+
+ def get_state (self):
+ return self.state
+
+
+class TokenParser(object):
+ def __init__ (self, seq):
+ self.buffer = list(seq)
+
+ def pop (self):
+ return self.buffer.pop(0)
+
+
+ def peek (self):
+ if not self.buffer:
+ return None
+ return self.buffer[0]
+
+ def next_token (self):
+ if not self.peek():
+ return None
+
+ token = self.pop()
+
+ # special chars
+ if token == '\x1b' and self.peek() == '[':
+ token += self.pop()
+ if self.peek():
+ token += self.pop()
+
+ return token
+
+ def parse (self):
+ tokens = []
+
+ while True:
+ token = self.next_token()
+            if token is None:
+ break
+ tokens.append(token)
+
+ return tokens
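+
+# e.g. TokenParser('a\x1b[Ab').parse() -> ['a', '\x1b[A', 'b'] - an ESC + '['
+# pair plus one following char is folded into a single escape-sequence token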
+
+
+# handles async IO
+class AsyncKeys:
+
+ MODE_LEGEND = 1
+ MODE_CONSOLE = 2
+
+ STATUS_NONE = 0
+ STATUS_REDRAW_KEYS = 1
+ STATUS_REDRAW_ALL = 2
+
+ def __init__ (self, client, save_console_history, tui_global_lock, locked = False):
+ self.tui_global_lock = tui_global_lock
+
+ self.engine_console = AsyncKeysEngineConsole(self, client, save_console_history)
+ self.engine_legend = AsyncKeysEngineLegend(self)
+ self.locked = locked
+
+        # self.locked was already set above - just pick the initial engine
+        if locked:
+            self.engine = self.engine_legend
+        else:
+            self.engine = self.engine_console
+
+ def __enter__ (self):
+ # init termios
+ self.old_settings = termios.tcgetattr(sys.stdin)
+ new_settings = termios.tcgetattr(sys.stdin)
+ new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON) # lflags
+ new_settings[6][termios.VMIN] = 0 # cc
+ new_settings[6][termios.VTIME] = 0 # cc
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
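+        # with ECHO and ICANON cleared the terminal stops echoing and line
+        # buffering; VMIN = VTIME = 0 makes the os.read() polling in tick()
+        # non-blocking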
+
+ # huge buffer - no print without flush
+ sys.stdout = open('/dev/stdout', 'w', TrexTUI.MIN_COLS * TrexTUI.MIN_COLS * 2)
+ return self
+
+ def __exit__ (self, type, value, traceback):
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)
+
+ # restore sys.stdout
+ sys.stdout.close()
+ sys.stdout = sys.__stdout__
+
+
+ def is_legend_mode (self):
+ return self.engine.get_type() == AsyncKeys.MODE_LEGEND
+
+ def is_console_mode (self):
+        return self.engine.get_type() == AsyncKeys.MODE_CONSOLE
+
+ def switch (self):
+ if self.is_legend_mode():
+ self.engine = self.engine_console
+ else:
+ self.engine = self.engine_legend
+
+
+    # parse the buffer into manageable tokens
+ def parse_tokens (self, seq):
+
+ tokens = []
+ chars = list(seq)
+
+ while chars:
+ token = chars.pop(0)
+
+ # special chars
+ if token == '\x1b' and chars[0] == '[':
+ token += chars.pop(0)
+ token += chars.pop(0)
+
+ tokens.append(token)
+
+ return tokens
+
+ def handle_token (self, token, pm):
+ # ESC for switch
+ if token == '\x1b':
+ if not self.locked:
+ self.switch()
+ return self.STATUS_REDRAW_ALL
+
+ # EOF (ctrl + D)
+ if token == '\x04':
+ raise TUIQuit()
+
+ # pass tick to engine
+ return self.engine.tick(token, pm)
+
+
+ def tick (self, pm):
+ rc = self.STATUS_NONE
+
+ # fetch the stdin buffer
+ seq = os.read(sys.stdin.fileno(), 1024).decode()
+ if not seq:
+ return self.STATUS_NONE
+
+ # parse all the tokens from the buffer
+ tokens = TokenParser(seq).parse()
+
+ # process them
+ for token in tokens:
+ token_rc = self.handle_token(token, pm)
+ rc = max(rc, token_rc)
+
+
+ return rc
+
+
+ def draw (self, buffer):
+ self.engine.draw(buffer)
+
+
+
+# Legend engine
+class AsyncKeysEngineLegend:
+    def __init__ (self, async_):    # 'async' is a reserved word in Python 3.7+
+        self.async_ = async_
+
+ def get_type (self):
+        return self.async_.MODE_LEGEND
+
+ def tick (self, seq, pm):
+
+ if seq == 'q':
+ raise TUIQuit()
+
+ # ignore escapes
+ if len(seq) > 1:
+ return AsyncKeys.STATUS_NONE
+
+ rc = pm.handle_key(seq)
+ return AsyncKeys.STATUS_REDRAW_ALL if rc else AsyncKeys.STATUS_NONE
+
+ def draw (self, buffer):
+ pass
+
+
+# console engine
+class AsyncKeysEngineConsole:
+    def __init__ (self, async_, client, save_console_history):
+        self.async_ = async_
+ self.lines = deque(maxlen = 100)
+
+ self.generate_prompt = client.generate_prompt
+ self.save_console_history = save_console_history
+
+ self.ac = {'start' : client.start_line,
+ 'stop' : client.stop_line,
+ 'pause' : client.pause_line,
+ 'clear' : client.clear_stats_line,
+ 'push' : client.push_line,
+ 'resume' : client.resume_line,
+ 'reset' : client.reset_line,
+ 'update' : client.update_line,
+ 'connect' : client.connect_line,
+ 'disconnect' : client.disconnect_line,
+ 'acquire' : client.acquire_line,
+ 'release' : client.release_line,
+ 'quit' : self.action_quit,
+ 'q' : self.action_quit,
+ 'exit' : self.action_quit,
+ 'help' : self.action_help,
+ '?' : self.action_help}
+
+ # fetch readline history and add relevants
+ for i in range(0, readline.get_current_history_length()):
+ cmd = readline.get_history_item(i)
+ if cmd and cmd.split()[0] in self.ac:
+ self.lines.appendleft(CmdLine(cmd))
+
+ # new line
+ self.lines.appendleft(CmdLine(''))
+ self.line_index = 0
+ self.last_status = ''
+
+ def action_quit (self, _):
+ raise TUIQuit()
+
+ def action_help (self, _):
+ return ' '.join([format_text(cmd, 'bold') for cmd in self.ac.keys()])
+
+ def get_type (self):
+        return self.async_.MODE_CONSOLE
+
+
+ def handle_escape_char (self, seq):
+ # up
+ if seq == '\x1b[A':
+ self.line_index = min(self.line_index + 1, len(self.lines) - 1)
+
+ # down
+ elif seq == '\x1b[B':
+ self.line_index = max(self.line_index - 1, 0)
+
+ # left
+ elif seq == '\x1b[D':
+ self.lines[self.line_index].go_left()
+
+ # right
+ elif seq == '\x1b[C':
+ self.lines[self.line_index].go_right()
+
+ # del
+ elif seq == '\x1b[3~':
+ self.lines[self.line_index].del_key()
+
+ # home
+ elif seq == '\x1b[H':
+ self.lines[self.line_index].home_key()
+
+ # end
+ elif seq == '\x1b[F':
+ self.lines[self.line_index].end_key()
+ return True
+
+ # unknown key
+ else:
+ return AsyncKeys.STATUS_NONE
+
+ return AsyncKeys.STATUS_REDRAW_KEYS
+
+
+ def tick (self, seq, _):
+
+ # handle escape chars
+ if len(seq) > 1:
+ return self.handle_escape_char(seq)
+
+ # handle each char
+ for ch in seq:
+ return self.handle_single_key(ch)
+
+
+
+ def handle_single_key (self, ch):
+ # newline
+ if ch == '\n':
+ self.handle_cmd()
+
+ # backspace
+ elif ch == '\x7f':
+ self.lines[self.line_index].backspace()
+
+ # TAB
+ elif ch == '\t':
+ tokens = self.lines[self.line_index].get().split()
+ if not tokens:
+ return
+
+ if len(tokens) == 1:
+ self.handle_tab_names(tokens[0])
+ else:
+ self.handle_tab_files(tokens)
+
+
+ # simple char
+ else:
+ self.lines[self.line_index] += ch
+
+ return AsyncKeys.STATUS_REDRAW_KEYS
+
+
+ # handle TAB key for completing function names
+ def handle_tab_names (self, cur):
+ matching_cmds = [x for x in self.ac if x.startswith(cur)]
+
+        common = os.path.commonprefix(matching_cmds)
+ if common:
+ if len(matching_cmds) == 1:
+ self.lines[self.line_index].set(common + ' ')
+ self.last_status = ''
+ else:
+ self.lines[self.line_index].set(common)
+                self.last_status = 'ambiguous: ' + ' '.join([format_text(cmd, 'bold') for cmd in matching_cmds])
+
+
+ # handle TAB for completing filenames
+ def handle_tab_files (self, tokens):
+
+ # only commands with files
+ if tokens[0] not in {'start', 'push'}:
+ return
+
+        # '-f' with no parameters - no partial path, use the current dir
+ if tokens[-1] == '-f':
+ partial = ''
+ d = '.'
+
+ # got a partial path
+ elif tokens[-2] == '-f':
+ partial = tokens.pop()
+
+ # check for dirs
+ dirname, basename = os.path.dirname(partial), os.path.basename(partial)
+ if os.path.isdir(dirname):
+ d = dirname
+ partial = basename
+ else:
+ d = '.'
+ else:
+ return
+
+ # fetch all dirs and files matching wildcard
+ files = []
+ for x in os.listdir(d):
+ if os.path.isdir(os.path.join(d, x)):
+ files.append(x + '/')
+ elif x.endswith( ('.py', 'yaml', 'pcap', 'cap', 'erf') ):
+ files.append(x)
+
+ # dir might not have the files
+ if not files:
+            self.last_status = format_text('no loadable files under path', 'bold')
+ return
+
+
+ # find all the matching files
+ matching_files = [x for x in files if x.startswith(partial)] if partial else files
+
+ # do we have a longer common than partial ?
+        common = os.path.commonprefix(matching_files)
+ if not common:
+ common = partial
+
+        tokens.append(os.path.join(d, common) if d != '.' else common)
+
+ # reforge the line
+ newline = ' '.join(tokens)
+
+ if len(matching_files) == 1:
+ if os.path.isfile(tokens[-1]):
+ newline += ' '
+
+ self.lines[self.line_index].set(newline)
+ self.last_status = ''
+ else:
+ self.lines[self.line_index].set(newline)
+ self.last_status = ' '.join([format_text(f, 'bold') for f in matching_files[:5]])
+ if len(matching_files) > 5:
+ self.last_status += ' ... [{0} more matches]'.format(len(matching_files) - 5)
+
+
+
+ def split_cmd (self, cmd):
+ s = cmd.split(' ', 1)
+ op = s[0]
+ param = s[1] if len(s) == 2 else ''
+ return op, param
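+    # e.g. split_cmd('start -f imix.py') -> ('start', '-f imix.py')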
+
+
+ def handle_cmd (self):
+
+ cmd = self.lines[self.line_index].get().strip()
+ if not cmd:
+ return
+
+ op, param = self.split_cmd(cmd)
+
+ func = self.ac.get(op)
+ if func:
+            with self.async_.tui_global_lock:
+ func_rc = func(param)
+
+ # take out the empty line
+ empty_line = self.lines.popleft()
+ assert(empty_line.ro_line == '')
+
+ if not self.lines or self.lines[0].ro_line != cmd:
+ self.lines.appendleft(CmdLine(cmd))
+
+ # back in
+ self.lines.appendleft(empty_line)
+ self.line_index = 0
+ readline.add_history(cmd)
+ self.save_console_history()
+
+ # back to readonly
+ for line in self.lines:
+ line.invalidate()
+
+ assert(self.lines[0].modified == False)
+ color = None
+ if not func:
+ self.last_status = "unknown command: '{0}'".format(format_text(cmd.split()[0], 'bold'))
+ else:
+ # internal commands
+ if isinstance(func_rc, str):
+ self.last_status = func_rc
+
+ # RC response
+ else:
+ # success
+ if func_rc:
+ self.last_status = format_text("[OK]", 'green')
+ # errors
+ else:
+ err_msgs = ascii_split(str(func_rc))
+ self.last_status = format_text(err_msgs[0], 'red')
+ if len(err_msgs) > 1:
+                        self.last_status += " [{0} more error messages]".format(len(err_msgs) - 1)
+ color = 'red'
+
+
+
+ # trim too long lines
+ if ansi_len(self.last_status) > TrexTUI.MIN_COLS:
+ self.last_status = format_text(self.last_status[:TrexTUI.MIN_COLS] + "...", color, 'bold')
+
+
+ def draw (self, buffer):
+ buffer.write("\nPress 'ESC' for navigation panel...\n")
+ buffer.write("status: \x1b[0K{0}\n".format(self.last_status))
+ buffer.write("\n{0}\x1b[0K".format(self.generate_prompt(prefix = 'tui')))
+ self.lines[self.line_index].draw(buffer)
+
+
+# a readline alike command line - can be modified during edit
+class CmdLine(object):
+ def __init__ (self, line):
+ self.ro_line = line
+ self.w_line = None
+ self.modified = False
+ self.cursor_index = len(line)
+
+ def get (self):
+ if self.modified:
+ return self.w_line
+ else:
+ return self.ro_line
+
+ def set (self, line, cursor_pos = None):
+ self.w_line = line
+ self.modified = True
+
+ if cursor_pos is None:
+ self.cursor_index = len(self.w_line)
+ else:
+ self.cursor_index = cursor_pos
+
+
+ def __add__ (self, other):
+ assert(0)
+
+
+ def __str__ (self):
+ return self.get()
+
+
+ def __iadd__ (self, other):
+
+ self.set(self.get()[:self.cursor_index] + other + self.get()[self.cursor_index:],
+ cursor_pos = self.cursor_index + len(other))
+
+ return self
+
+
+ def backspace (self):
+ if self.cursor_index == 0:
+ return
+
+ self.set(self.get()[:self.cursor_index - 1] + self.get()[self.cursor_index:],
+ self.cursor_index - 1)
+
+
+ def del_key (self):
+ if self.cursor_index == len(self.get()):
+ return
+
+ self.set(self.get()[:self.cursor_index] + self.get()[self.cursor_index + 1:],
+ self.cursor_index)
+
+ def home_key (self):
+ self.cursor_index = 0
+
+ def end_key (self):
+ self.cursor_index = len(self.get())
+
+ def invalidate (self):
+ self.modified = False
+ self.w_line = None
+ self.cursor_index = len(self.ro_line)
+
+ def go_left (self):
+ self.cursor_index = max(0, self.cursor_index - 1)
+
+ def go_right (self):
+ self.cursor_index = min(len(self.get()), self.cursor_index + 1)
+
+ def draw (self, buffer):
+ buffer.write(self.get())
+ buffer.write('\b' * (len(self.get()) - self.cursor_index))
+
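+# usage sketch of CmdLine editing (cursor-based, readline-like):
+#   line = CmdLine('start')        # read-only history snapshot
+#   line += ' -f profile.py'       # insert at cursor via __iadd__
+#   line.backspace()               # delete the char left of the cursor
+#   line.get()                     # -> 'start -f profile.p'
+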
diff --git a/scripts/automation/trex_control_plane/stl/examples/hlt_udp_simple.py b/scripts/automation/trex_control_plane/stl/examples/hlt_udp_simple.py
new file mode 100644
index 00000000..1f754f0a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/hlt_udp_simple.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+
+"""
+Sample HLTAPI application (for loopback)
+Connect to TRex
+Send UDP packet in specific length
+Each direction has its own IP range
+"""
+
+import sys
+import argparse
+import stl_path
+from trex_stl_lib.api import *
+from trex_stl_lib.trex_stl_hltapi import *
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(usage="""
+ Connect to TRex and send bidirectional continuous traffic
+
+ examples:
+
+ hlt_udp_simple.py --server <hostname/ip>
+
+    hlt_udp_simple.py -s 300 -d 30 --rate-pps 5000000 --src <MAC> --dst <MAC>
+
+ then run the simulator on the output
+    ./stl-sim -f example.yaml -o a.pcap ==> a.pcap includes the packets
+
+ """,
+ description="Example for TRex HLTAPI",
+ epilog=" based on hhaim's stl_run_udp_simple example");
+
+ parser.add_argument("--server",
+ dest="server",
+ help='Remote trex address',
+ default="127.0.0.1",
+ type = str)
+
+ parser.add_argument("-s", "--frame-size",
+ dest="frame_size",
+ help='L2 frame size in bytes without FCS',
+ default=60,
+ type = int,)
+
+ parser.add_argument('-d','--duration',
+ dest='duration',
+                        help='duration in seconds',
+ default=10,
+ type = int,)
+
+ parser.add_argument('--rate-pps',
+ dest='rate_pps',
+ help='speed in pps',
+ default="100")
+
+ parser.add_argument('--src',
+ dest='src_mac',
+ help='src MAC',
+ default='00:50:56:b9:de:75')
+
+ parser.add_argument('--dst',
+ dest='dst_mac',
+ help='dst MAC',
+ default='00:50:56:b9:34:f3')
+
+ args = parser.parse_args();
+
+ hltapi = CTRexHltApi()
+ print('Connecting to TRex')
+ res = hltapi.connect(device = args.server, port_list = [0, 1], reset = True, break_locks = True)
+ check_res(res)
+ ports = list(res['port_handle'].values())
+ if len(ports) < 2:
+ error('Should have at least 2 ports for this test')
+ print('Connected, acquired ports: %s' % ports)
+
+ print('Creating traffic')
+
+ res = hltapi.traffic_config(mode = 'create', bidirectional = True,
+ port_handle = ports[0], port_handle2 = ports[1],
+ frame_size = args.frame_size,
+ mac_src = args.src_mac, mac_dst = args.dst_mac,
+ mac_src2 = args.dst_mac, mac_dst2 = args.src_mac,
+ l3_protocol = 'ipv4',
+ ip_src_addr = '10.0.0.1', ip_src_mode = 'increment', ip_src_count = 254,
+ ip_dst_addr = '8.0.0.1', ip_dst_mode = 'increment', ip_dst_count = 254,
+ l4_protocol = 'udp',
+ udp_dst_port = 12, udp_src_port = 1025,
+ rate_pps = args.rate_pps,
+ )
+ check_res(res)
+
+ print('Starting traffic')
+ res = hltapi.traffic_control(action = 'run', port_handle = ports[:2])
+ check_res(res)
+ wait_with_progress(args.duration)
+
+ print('Stopping traffic')
+ res = hltapi.traffic_control(action = 'stop', port_handle = ports[:2])
+ check_res(res)
+
+ res = hltapi.traffic_stats(mode = 'aggregate', port_handle = ports[:2])
+ check_res(res)
+ print_brief_stats(res)
+
+ print('Removing all streams from port 0')
+ res = hltapi.traffic_config(mode = 'remove', port_handle = ports[0], stream_id = 'all')
+ check_res(res)
+
+ res = hltapi.cleanup_session(port_handle = 'all')
+ check_res(res)
+
+ print('Done')
diff --git a/scripts/automation/trex_control_plane/stl/examples/rpc_proxy_server.py b/scripts/automation/trex_control_plane/stl/examples/rpc_proxy_server.py
new file mode 100755
index 00000000..ad2697d8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/rpc_proxy_server.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+
+import argparse
+import traceback
+import logging
+import sys
+import os
+import json
+import socket
+from functools import partial
+logging.basicConfig(level = logging.FATAL) # keep quiet
+
+import stl_path
+from trex_stl_lib.api import *
+from trex_stl_lib.trex_stl_hltapi import CTRexHltApi, HLT_OK, HLT_ERR
+
+# ext libs
+ext_libs = os.path.join(os.pardir, os.pardir, os.pardir, os.pardir, 'external_libs') # usual package path
+if not os.path.exists(ext_libs):
+ ext_libs = os.path.join(os.pardir, os.pardir, 'external_libs') # client package path
+sys.path.append(os.path.join(ext_libs, 'jsonrpclib-pelix-0.2.5'))
+from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+import yaml
+
+# TODO: refactor this to class
+
+native_client = None
+hltapi_client = None
+
+def OK(res = True):
+    return [True, res]
+
+def ERR(res = 'Unknown error'):
+ return [False, res]
+
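+# the native-side helpers return a [success, payload] pair, e.g.
+#   add(2, 3)    -> [True, 5]
+#   add(2, None) -> [False, '<traceback text>']
+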
+def deunicode_json(data):
+ return yaml.safe_load(json.dumps(data))
+
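+# e.g. on Python 2: deunicode_json({u'key': [u'val']}) -> {'key': ['val']}
+# (a JSON-dump/YAML-load round trip that strips the unicode wrappers)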
+
+### Server functions ###
+
+def add(a, b): # for sanity checks
+ try:
+ return OK(a + b)
+ except:
+ return ERR(traceback.format_exc())
+
+def check_connectivity():
+ return OK()
+
+def native_proxy_init(force = False, *args, **kwargs):
+ global native_client
+ if native_client and not force:
+ return ERR('Native Client is already initiated')
+ try:
+ native_client = STLClient(*args, **kwargs)
+ return OK('Native Client initiated')
+ except:
+ return ERR(traceback.format_exc())
+
+def native_proxy_del():
+ global native_client
+ native_client = None
+ return OK()
+
+def hltapi_proxy_init(force = False, *args, **kwargs):
+ global hltapi_client
+ if hltapi_client and not force:
+ return HLT_ERR('HLTAPI Client is already initiated')
+ try:
+ hltapi_client = CTRexHltApi(*args, **kwargs)
+ return HLT_OK()
+ except:
+ return HLT_ERR(traceback.format_exc())
+
+def hltapi_proxy_del():
+ global hltapi_client
+ hltapi_client = None
+ return HLT_OK()
+
+# any method not listed above can be called by passing its name here
+def native_method(func_name, *args, **kwargs):
+ try:
+ func = getattr(native_client, func_name)
+ return OK(func(*deunicode_json(args), **deunicode_json(kwargs)))
+ except:
+ return ERR(traceback.format_exc())
+
+# any HLTAPI method can be called by passing its name here
+def hltapi_method(func_name, *args, **kwargs):
+ try:
+ func = getattr(hltapi_client, func_name)
+ return func(*deunicode_json(args), **deunicode_json(kwargs))
+ except:
+ return HLT_ERR(traceback.format_exc())
+
+### /Server functions ###
+
+
+def run_server(port = 8095):
+ native_methods = [
+ 'acquire',
+ 'connect',
+ 'disconnect',
+ 'get_stats',
+ 'get_warnings',
+ 'push_remote',
+ 'reset',
+ 'wait_on_traffic',
+ ]
+ hltapi_methods = [
+ 'connect',
+ 'cleanup_session',
+ 'interface_config',
+ 'traffic_config',
+ 'traffic_control',
+ 'traffic_stats',
+ ]
+
+ try:
+ register_socket('trex_stl_rpc_proxy')
+ server = SimpleJSONRPCServer(('0.0.0.0', port))
+ server.register_function(add)
+ server.register_function(check_connectivity)
+ server.register_function(native_proxy_init)
+ server.register_function(native_proxy_del)
+ server.register_function(hltapi_proxy_init)
+ server.register_function(hltapi_proxy_del)
+ server.register_function(native_method)
+ server.register_function(hltapi_method)
+
+ for method in native_methods:
+ server.register_function(partial(native_method, method), method)
+ for method in hltapi_methods:
+ if method in native_methods: # collision in names
+ method_hlt_name = 'hlt_%s' % method
+ else:
+ method_hlt_name = method
+ server.register_function(partial(hltapi_method, method), method_hlt_name)
+ server.register_function(server.funcs.keys, 'get_methods') # should be last
+ print('Started Stateless RPC proxy at port %s' % port)
+ server.serve_forever()
+ except KeyboardInterrupt:
+ print('Done')
+
+# provides a unique way to detect an already running process
+def register_socket(tag):
+ global foo_socket # Without this our lock gets garbage collected
+ foo_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ try:
+ foo_socket.bind('\0%s' % tag)
+ print('Got the socket lock for tag %s.' % tag)
+ except socket.error:
+ print('Error: process with tag %s is already running.' % tag)
+ sys.exit(-1)
+
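+# e.g. a second rpc_proxy_server.py instance fails to bind the same
+# abstract-namespace socket ('\0' + tag) and exits immediately
+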
+### Main ###
+
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description = 'Runs TRex Stateless RPC proxy for usage with any language client.')
+ parser.add_argument('-p', '--port', type=int, default = 8095, dest='port', action = 'store',
+ help = 'Select port on which the stl rpc proxy will run.\nDefault is 8095.')
+ kwargs = vars(parser.parse_args())
+ run_server(**kwargs)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py b/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py
new file mode 100644
index 00000000..9977fa3e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_bi_dir_flows.py
@@ -0,0 +1,118 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+import json
+
+# simple packet creation
+def create_pkt (size, direction):
+
+ ip_range = {'src': {'start': "10.0.0.1", 'end': "10.0.0.254"},
+ 'dst': {'start': "8.0.0.1", 'end': "8.0.0.254"}}
+
+ if (direction == 0):
+ src = ip_range['src']
+ dst = ip_range['dst']
+ else:
+ src = ip_range['dst']
+ dst = ip_range['src']
+
+ vm = [
+ # src
+ STLVmFlowVar(name="src",min_value=src['start'],max_value=src['end'],size=4,op="inc"),
+ STLVmWrFlowVar(fv_name="src",pkt_offset= "IP.src"),
+
+ # dst
+ STLVmFlowVar(name="dst",min_value=dst['start'],max_value=dst['end'],size=4,op="inc"),
+ STLVmWrFlowVar(fv_name="dst",pkt_offset= "IP.dst"),
+
+ # checksum
+ STLVmFixIpv4(offset = "IP")
+ ]
+
+
+ base = Ether()/IP()/UDP()
+ pad = max(0, size-len(base)) * 'x'
+
+ return STLPktBuilder(pkt = base/pad,
+ vm = vm)
+
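+# e.g. create_pkt(64, 0) builds a 64-byte Ether/IP/UDP template whose
+# IP src (10.0.0.1-254) and dst (8.0.0.1-254) are incremented per packet
+# by the VM, with the IPv4 checksum fixed up after each rewrite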
+
+def simple_burst (port_a, port_b, pkt_size, rate):
+
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ # turn this on for some information
+ #c.set_verbose("high")
+
+ # create two streams
+ s1 = STLStream(packet = create_pkt(pkt_size, 0),
+ mode = STLTXCont(pps = 100))
+
+ # second stream with a phase of 1ms (inter stream gap)
+ s2 = STLStream(packet = create_pkt(pkt_size, 1),
+ isg = 1000,
+ mode = STLTXCont(pps = 100))
+
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [port_a, port_b])
+
+ # add both streams to ports
+ c.add_streams(s1, ports = [port_a])
+ c.add_streams(s2, ports = [port_b])
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+        # scale the traffic linearly according to the given rate
+ print("Running {:} on ports {:}, {:} for 10 seconds...".format(rate, port_a, port_b))
+ c.start(ports = [port_a, port_b], mult = rate, duration = 10)
+
+ # block until done
+ c.wait_on_traffic(ports = [port_a, port_b])
+
+ # read the stats after the test
+ stats = c.get_stats()
+
+ print(json.dumps(stats[port_a], indent = 4, separators=(',', ': '), sort_keys = True))
+ print(json.dumps(stats[port_b], indent = 4, separators=(',', ': '), sort_keys = True))
+
+ lost_a = stats[port_a]["opackets"] - stats[port_b]["ipackets"]
+ lost_b = stats[port_b]["opackets"] - stats[port_a]["ipackets"]
+
+ print("\npackets lost from {0} --> {1}: {2} pkts".format(port_a, port_b, lost_a))
+ print("packets lost from {0} --> {1}: {2} pkts".format(port_b, port_a, lost_b))
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+
+ if (lost_a == 0) and (lost_b == 0) and not c.get_warnings():
+ passed = True
+ else:
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+# run the tests
+simple_burst(0, 3, 64, "10gbps")
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py b/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py
new file mode 100644
index 00000000..d8a99479
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_flow_latency_stats.py
@@ -0,0 +1,144 @@
+# Example showing how to define stream for latency measurement, and how to parse the latency information
+
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+import pprint
+
+def rx_example (tx_port, rx_port, burst_size, pps):
+
+ print("\nGoing to inject {0} packets on port {1} - checking RX stats on port {2}\n".format(burst_size, tx_port, rx_port))
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/'at_least_16_bytes_payload_needed')
+ total_pkts = burst_size
+ s1 = STLStream(name = 'rx',
+ packet = pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = 5),
+ mode = STLTXSingleBurst(total_pkts = total_pkts,
+ pps = pps))
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [tx_port, rx_port])
+
+ # add both streams to ports
+ c.add_streams([s1], ports = [tx_port])
+
+ print("\nInjecting {0} packets on port {1}\n".format(total_pkts, tx_port))
+
+ rc = rx_iteration(c, tx_port, rx_port, total_pkts, pkt.get_pkt_len())
+ if not rc:
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest passed :-)\n")
+ else:
+ print("\nTest failed :-(\n")
+
+# RX one iteration
+def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
+
+ c.clear_stats()
+
+ c.start(ports = [tx_port])
+ c.wait_on_traffic(ports = [tx_port])
+
+ stats = c.get_stats()
+ flow_stats = stats['flow_stats'].get(5)
+ global_lat_stats = stats['latency']
+ lat_stats = global_lat_stats.get(5)
+ if not flow_stats:
+ print("no flow stats available")
+ return False
+ if not lat_stats:
+ print("no latency stats available")
+ return False
+
+ tx_pkts = flow_stats['tx_pkts'].get(tx_port, 0)
+ tx_bytes = flow_stats['tx_bytes'].get(tx_port, 0)
+ rx_pkts = flow_stats['rx_pkts'].get(rx_port, 0)
+ drops = lat_stats['err_cntrs']['dropped']
+ ooo = lat_stats['err_cntrs']['out_of_order']
+ dup = lat_stats['err_cntrs']['dup']
+ sth = lat_stats['err_cntrs']['seq_too_high']
+ stl = lat_stats['err_cntrs']['seq_too_low']
+ old_flow = global_lat_stats['global']['old_flow']
+ bad_hdr = global_lat_stats['global']['bad_hdr']
+ lat = lat_stats['latency']
+ jitter = lat['jitter']
+ avg = lat['average']
+ tot_max = lat['total_max']
+ tot_min = lat['total_min']
+ last_max = lat['last_max']
+    hist = lat['histogram']
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+ return False
+
+ print('Error counters: dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl))
+ if old_flow:
+ print ('Packets arriving too late after flow stopped: {0}'.format(old_flow))
+ if bad_hdr:
+ print ('Latency packets with corrupted info: {0}'.format(bad_hdr))
+ print('Latency info:')
+ print(" Maximum latency(usec): {0}".format(tot_max))
+ print(" Minimum latency(usec): {0}".format(tot_min))
+ print(" Maximum latency in last sampling period (usec): {0}".format(last_max))
+ print(" Average latency(usec): {0}".format(avg))
+ print(" Jitter(usec): {0}".format(jitter))
+ print(" Latency distribution histogram:")
+    for sample in sorted(hist.keys()):
+ range_start = sample
+ if range_start == 0:
+ range_end = 10
+ else:
+ range_end = range_start + pow(10, (len(str(range_start))-1))
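+            # e.g. start 100 -> end 200, start 3000 -> end 4000 (decade-wide buckets)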
+ val = hist[sample]
+ print (" Packets with latency between {0} and {1}:{2} ".format(range_start, range_end, val))
+
+ if tx_pkts != total_pkts:
+ print("TX pkts mismatch - got: {0}, expected: {1}".format(tx_pkts, total_pkts))
+ pprint.pprint(flow_stats)
+ return False
+ else:
+ print("TX pkts match - {0}".format(tx_pkts))
+
+    if tx_bytes != (total_pkts * (pkt_len + 4)): # +4 for ethernet CRC
+        print("TX bytes mismatch - got: {0}, expected: {1}".format(tx_bytes, (total_pkts * (pkt_len + 4))))
+ pprint.pprint(flow_stats)
+ return False
+ else:
+ print("TX bytes match - {0}".format(tx_bytes))
+
+ if rx_pkts != total_pkts:
+ print("RX pkts mismatch - got: {0}, expected: {1}".format(rx_pkts, total_pkts))
+ pprint.pprint(flow_stats)
+ return False
+ else:
+ print("RX pkts match - {0}".format(rx_pkts))
+
+ return True
+
+# run the tests
+rx_example(tx_port = 0, rx_port = 1, burst_size = 1000, pps = 1000)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
new file mode 100644
index 00000000..3c630ece
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_flow_stats.py
@@ -0,0 +1,110 @@
+# Example showing how to define stream for getting per flow statistics, and how to parse the received statistics
+
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+import pprint
+
+def rx_example (tx_port, rx_port, burst_size, bw):
+
+ print("\nGoing to inject {0} packets on port {1} - checking RX stats on port {2}\n".format(burst_size, tx_port, rx_port))
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
+ total_pkts = burst_size
+ s1 = STLStream(name = 'rx',
+ packet = pkt,
+ flow_stats = STLFlowStats(pg_id = 5),
+ mode = STLTXSingleBurst(total_pkts = total_pkts,
+ percentage = bw))
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [tx_port, rx_port])
+
+ # add stream to port
+ c.add_streams([s1], ports = [tx_port])
+
+ print("\ngoing to inject {0} packets on port {1}\n".format(total_pkts, tx_port))
+
+ rc = rx_iteration(c, tx_port, rx_port, total_pkts, s1.get_pkt_len())
+ if not rc:
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest passed :-)\n")
+ else:
+ print("\nTest failed :-(\n")
+
+# RX one iteration
+def rx_iteration (c, tx_port, rx_port, total_pkts, pkt_len):
+ ret = True
+
+ c.clear_stats()
+
+ c.start(ports = [tx_port])
+ c.wait_on_traffic(ports = [tx_port])
+
+ global_flow_stats = c.get_stats()['flow_stats']
+ flow_stats = global_flow_stats.get(5)
+ if not flow_stats:
+ print("no flow stats available")
+ return False
+
+ tx_pkts = flow_stats['tx_pkts'].get(tx_port, 0)
+ tx_bytes = flow_stats['tx_bytes'].get(tx_port, 0)
+ rx_pkts = flow_stats['rx_pkts'].get(rx_port, 0)
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+ return False
+
+ if tx_pkts != total_pkts:
+ print("TX pkts mismatch - got: {0}, expected: {1}".format(tx_pkts, total_pkts))
+ pprint.pprint(flow_stats)
+ ret = False
+ else:
+ print("TX pkts match - {0}".format(tx_pkts))
+
+ if tx_bytes != (total_pkts * pkt_len):
+ print("TX bytes mismatch - got: {0}, expected: {1}".format(tx_bytes, (total_pkts * pkt_len)))
+ pprint.pprint(flow_stats)
+ ret = False
+ else:
+ print("TX bytes match - {0}".format(tx_bytes))
+
+ if rx_pkts != total_pkts:
+ print("RX pkts mismatch - got: {0}, expected: {1}".format(rx_pkts, total_pkts))
+ pprint.pprint(flow_stats)
+ ret = False
+ else:
+ print("RX pkts match - {0}".format(rx_pkts))
+
+
+ for field in ['rx_err', 'tx_err']:
+ for port in global_flow_stats['global'][field].keys():
+ if global_flow_stats['global'][field][port] != 0:
+ print ("\n{0} on port {1}: {2} - You should consider increasing rx_delay_ms value in wait_on_traffic"
+ .format(field, port, global_flow_stats['global'][field][port]))
+
+ return ret
+
+# run the tests
+rx_example(tx_port = 0, rx_port = 1, burst_size = 500, bw = 50)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
new file mode 100644
index 00000000..875186ba
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
@@ -0,0 +1,126 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+import json
+from pprint import pprint
+import argparse
+import sys
+
+# IMIX test
+# it maps the ports to sides,
+# then loads a predefined 'IMIX' profile,
+# attaches it to both sides and injects traffic
+# at a certain rate for some time;
+# finally it checks that all packets arrived
+def imix_test (server, mult):
+
+
+ # create client
+ c = STLClient(server = server)
+
+ passed = True
+
+
+ try:
+
+ # connect to server
+ c.connect()
+
+ # take all the ports
+ c.reset()
+
+
+ # map ports - identify the routes
+ table = stl_map_ports(c)
+
+ dir_0 = [x[0] for x in table['bi']]
+ dir_1 = [x[1] for x in table['bi']]
+
+ print("Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1))
+
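+        # e.g. on a loopback setup table['bi'] could be [(0, 1), (2, 3)],
+        # giving dir_0 = [0, 2] and dir_1 = [1, 3]
+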
+ # load IMIX profile
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'imix.py')
+ profile = STLProfile.load_py(profile_file)
+ streams = profile.get_streams()
+
+ # add both streams to ports
+ c.add_streams(streams, ports = dir_0)
+ c.add_streams(streams, ports = dir_1)
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+ # choose rate and start traffic for 10 seconds
+ duration = 10
+ print("Injecting {0} <--> {1} on total rate of '{2}' for {3} seconds".format(dir_0, dir_1, mult, duration))
+
+ c.start(ports = (dir_0 + dir_1), mult = mult, duration = duration, total = True)
+
+ # block until done
+ c.wait_on_traffic(ports = (dir_0 + dir_1))
+
+ # read the stats after the test
+ stats = c.get_stats()
+
+ # use this for debug info on all the stats
+ #pprint(stats)
+
+ # sum dir 0
+ dir_0_opackets = sum([stats[i]["opackets"] for i in dir_0])
+ dir_0_ipackets = sum([stats[i]["ipackets"] for i in dir_0])
+
+ # sum dir 1
+ dir_1_opackets = sum([stats[i]["opackets"] for i in dir_1])
+ dir_1_ipackets = sum([stats[i]["ipackets"] for i in dir_1])
+
+
+ lost_0 = dir_0_opackets - dir_1_ipackets
+ lost_1 = dir_1_opackets - dir_0_ipackets
+
+ print("\nPackets injected from {0}: {1:,}".format(dir_0, dir_0_opackets))
+ print("Packets injected from {0}: {1:,}".format(dir_1, dir_1_opackets))
+
+ print("\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_0, lost_0))
+ print("packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_1, lost_1))
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+
+ if (lost_0 <= 0) and (lost_1 <= 0) and not c.get_warnings(): # less or equal because we might have incoming arps etc.
+ passed = True
+ else:
+ passed = False
+
+
+ except STLError as e:
+ passed = False
+ print(e)
+ sys.exit(1)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic")
+parser.add_argument('-s', '--server',
+ dest='server',
+ help='Remote trex address',
+ default='127.0.0.1',
+ type = str)
+parser.add_argument('-m', '--mult',
+ dest='mult',
+ help='Multiplier of traffic, see Stateless help for more info',
+ default='30%',
+ type = str)
+args = parser.parse_args()
+
+# run the tests
+imix_test(args.server, args.mult)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py
new file mode 100644
index 00000000..956b910a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py
@@ -0,0 +1,113 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+import json
+from pprint import pprint
+import argparse
+
+# IMIX test
+# it maps the ports to sides,
+# then loads a predefined 'IMIX' profile,
+# attaches it to both sides and injects traffic
+# at a certain rate for some time;
+# finally it checks that all packets arrived
+def imix_test (server):
+
+
+ # create client
+ c = STLClient(server = server)
+ passed = True
+
+
+ try:
+
+ # connect to server
+ c.connect()
+
+ # take all the ports
+ c.reset()
+
+ dir_0 = [0]
+ dir_1 = [1]
+
+ print "Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1)
+
+ # load IMIX profile
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'imix.py')
+ profile1 = STLProfile.load_py(profile_file, direction=0)
+ profile2 = STLProfile.load_py(profile_file, direction=1)
+ stream1 = profile1.get_streams()
+ stream2 = profile2.get_streams()
+
+ # add both streams to ports
+ c.add_streams(stream1, ports = dir_0)
+ c.add_streams(stream2, ports = dir_1)
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+        # choose rate and start traffic for 30 seconds at a total rate of '30%'
+        duration = 30
+        mult = "30%"
+        print("Injecting {0} <--> {1} on total rate of '{2}' for {3} seconds".format(dir_0, dir_1, mult, duration))
+
+ c.start(ports = (dir_0 + dir_1), mult = mult, duration = duration, total = True)
+
+ # block until done
+ c.wait_on_traffic(ports = (dir_0 + dir_1))
+
+ # read the stats after the test
+ stats = c.get_stats()
+
+ # use this for debug info on all the stats
+ pprint(stats)
+
+ # sum dir 0
+ dir_0_opackets = sum([stats[i]["opackets"] for i in dir_0])
+ dir_0_ipackets = sum([stats[i]["ipackets"] for i in dir_0])
+
+ # sum dir 1
+ dir_1_opackets = sum([stats[i]["opackets"] for i in dir_1])
+ dir_1_ipackets = sum([stats[i]["ipackets"] for i in dir_1])
+
+
+ lost_0 = dir_0_opackets - dir_1_ipackets
+ lost_1 = dir_1_opackets - dir_0_ipackets
+
+ print "\nPackets injected from {0}: {1:,}".format(dir_0, dir_0_opackets)
+ print "Packets injected from {0}: {1:,}".format(dir_1, dir_1_opackets)
+
+ print "\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_1, lost_0)
+ print "packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_0, lost_1)
+
+ if (lost_0 <= 0) and (lost_1 <= 0): # less or equal because we might have incoming arps etc.
+ passed = True
+ else:
+ passed = False
+
+
+ except STLError as e:
+ passed = False
+        print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print "\nTest has passed :-)\n"
+ else:
+ print "\nTest has failed :-(\n"
+
+parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic")
+parser.add_argument('-s', '--server',
+ dest='server',
+ help='Remote trex address',
+ default='127.0.0.1',
+ type = str)
+args = parser.parse_args()
+
+# run the tests
+imix_test(args.server)
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_path.py b/scripts/automation/trex_control_plane/stl/examples/stl_path.py
new file mode 100644
index 00000000..f190aab1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_path.py
@@ -0,0 +1,7 @@
+import sys, os
+
+# FIXME to the right path for trex_stl_lib
+sys.path.insert(0, "../")
+
+STL_PROFILES_PATH = os.path.join(os.pardir, os.pardir, os.pardir, os.pardir, 'stl')
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py b/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py
new file mode 100644
index 00000000..98af6134
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_pcap.py
@@ -0,0 +1,117 @@
+import stl_path
+from trex_stl_lib.api import *
+import argparse
+import sys
+
+
+def packet_hook_generator (remove_fcs, vlan_id):
+
+ def packet_hook (packet):
+ packet = Ether(packet)
+
+        if vlan_id >= 0 and vlan_id <= 4095:
+ packet_l3 = packet.payload
+ packet = Ether() / Dot1Q(vlan = vlan_id) / packet_l3
+
+ if remove_fcs and packet.lastlayer().name == 'Padding':
+ packet.lastlayer().underlayer.remove_payload()
+
+ return str(packet)
+
+ return packet_hook
+
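+# e.g. packet_hook_generator(remove_fcs = True, vlan_id = 100) returns a hook
+# that re-tags each packet with VLAN 100 and strips a trailing Scapy 'Padding'
+# layer (typically the captured FCS)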
+
+def inject_pcap (pcap_file, server, port, loop_count, ipg_usec, use_vm, remove_fcs, vlan_id):
+
+ # create client
+ c = STLClient(server = server)
+
+    if remove_fcs or vlan_id >= 0:
+ packet_hook = packet_hook_generator(remove_fcs, vlan_id)
+ else:
+ packet_hook = None
+
+ try:
+
+ vm = STLIPRange(dst = {'start': '10.0.0.1', 'end': '10.0.0.254', 'step' : 1}) if use_vm else None
+
+ c.connect()
+ c.reset(ports = [port])
+
+ c.clear_stats()
+ c.push_pcap(pcap_file,
+ ipg_usec = ipg_usec,
+ count = loop_count,
+ vm = vm,
+ packet_hook = packet_hook)
+
+ c.wait_on_traffic()
+
+
+ stats = c.get_stats()
+ opackets = stats[port]['opackets']
+ print("{0} packets were Tx on port {1}\n".format(opackets, port))
+
+ except STLError as e:
+ print(e)
+ sys.exit(1)
+
+ finally:
+ c.disconnect()
+
+
+def setParserOptions():
+ parser = argparse.ArgumentParser(prog="stl_pcap.py")
+
+ parser.add_argument("-f", "--file", help = "pcap file to inject",
+ dest = "pcap",
+ required = True,
+ type = str)
+
+ parser.add_argument("-s", "--server", help = "TRex server address",
+ dest = "server",
+ default = 'localhost',
+ type = str)
+
+ parser.add_argument("-p", "--port", help = "port to inject on",
+ dest = "port",
+ required = True,
+ type = int)
+
+ parser.add_argument("-n", "--number", help = "How many times to inject pcap [default is 1, 0 means forever]",
+ dest = "loop_count",
+ default = 1,
+ type = int)
+
+ parser.add_argument("-i", help = "IPG in usec",
+ dest = "ipg",
+ default = 10.0,
+ type = float)
+
+ parser.add_argument("-x", help = "Iterate over IP dest",
+ dest = "use_vm",
+ default = False,
+ action = "store_true")
+
+ parser.add_argument("-r", "--remove-fcs", help = "Remove FCS if exists. Limited by Scapy capabilities.",
+ dest = "remove",
+ default = False,
+ action = "store_true")
+
+ parser.add_argument("-v", "--vlan", help = "Add VLAN header with this ID. Limited by Scapy capabilities.",
+ dest = "vlan",
+ default = -1,
+ type = int)
+
+ return parser
+
+def main ():
+ parser = setParserOptions()
+ options = parser.parse_args()
+
+ inject_pcap(options.pcap, options.server, options.port, options.loop_count, options.ipg, options.use_vm, options.remove, options.vlan)
+
+# inject pcap
+if __name__ == '__main__':
+ main()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_pcap_remote.py b/scripts/automation/trex_control_plane/stl/examples/stl_pcap_remote.py
new file mode 100644
index 00000000..c47eee31
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_pcap_remote.py
@@ -0,0 +1,123 @@
+import stl_path
+from trex_stl_lib.api import *
+import argparse
+import sys
+import time
+
+
+def inject_pcap (c, pcap_file, port, loop_count, ipg_usec, duration):
+
+ pcap_file = os.path.abspath(pcap_file)
+
+ c.reset(ports = [port])
+ c.push_remote(pcap_file, ports = [port], ipg_usec = ipg_usec, speedup = 1.0, count = loop_count, duration = duration)
+ # assume 100 seconds is enough - but can be more
+ c.wait_on_traffic(ports = [port], timeout = 100)
+
+ stats = c.get_stats()
+ opackets = stats[port]['opackets']
+
+ return opackets
+ #print("{0} packets were Tx on port {1}\n".format(opackets, port))
+
+
+
+def setParserOptions():
+ parser = argparse.ArgumentParser(prog="stl_pcap.py")
+
+ parser.add_argument("-f", "--file", help = "pcap file to inject",
+ dest = "pcap",
+ required = True,
+ type = str)
+
+ parser.add_argument("-s", "--server", help = "TRex server address",
+ dest = "server",
+ default = 'localhost',
+ type = str)
+
+ parser.add_argument("-p", "--port", help = "port to inject on",
+ dest = "port",
+ required = True,
+ type = int)
+
+ parser.add_argument("-n", "--number", help = "How many times to inject pcap [default is 1, 0 means forever]",
+ dest = "loop_count",
+ default = 1,
+ type = int)
+
+ parser.add_argument("-i", help = "IPG in usec",
+ dest = "ipg",
+ default = None,
+ type = float)
+
+ parser.add_argument("-d", help = "duration in seconds",
+ dest = "duration",
+ default = -1,
+ type = float)
+
+ return parser
+
+def sizeof_fmt(num, suffix='B'):
+ for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
+ if abs(num) < 1024.0:
+ return "%3.1f%s%s" % (num, unit, suffix)
+ num /= 1024.0
+ return "%.1f%s%s" % (num, 'Yi', suffix)
+
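+# e.g. sizeof_fmt(1536) -> '1.5KiB', sizeof_fmt(3 * 1024**3) -> '3.0GiB'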
+
+def read_txt_file (filename):
+
+ with open(filename) as f:
+ lines = f.readlines()
+
+ caps = []
+ for raw in lines:
+ raw = raw.rstrip()
+        if not raw or raw[0] == '#':
+ continue
+ ext=os.path.splitext(raw)[1]
+ if ext not in ['.cap', '.pcap', '.erf']:
+ # skip unknown format
+ continue
+
+ caps.append(raw)
+
+ return caps
+
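+# expected .txt format - one capture path per line, '#' lines are comments, e.g.:
+#   # morning batch
+#   caps/dns.pcap
+#   caps/long_run.erf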
+
+def start (args):
+
+ parser = setParserOptions()
+ options = parser.parse_args(args)
+
+ ext = os.path.splitext(options.pcap)[1]
+ if ext == '.txt':
+ caps = read_txt_file(options.pcap)
+ elif ext in ['.cap', '.pcap']:
+ caps = [options.pcap]
+ else:
+ print("unknown file extension for file {0}".format(options.pcap))
+ return
+
+ c = STLClient(server = options.server)
+ try:
+ c.connect()
+ for i, cap in enumerate(caps, start = 1):
+ before = time.time()
+ print ("{:} CAP {:} @ {:} - ".format(i, cap, sizeof_fmt(os.path.getsize(cap)))),
+ injected = inject_pcap(c, cap, options.port, options.loop_count, options.ipg, options.duration)
+ print("took {:.2f} seconds for {:} packets").format(time.time() - before, injected)
+
+ except STLError as e:
+ print(e)
+ return
+
+ finally:
+ c.disconnect()
+
+def main ():
+ start(sys.argv[1:])
+
+# inject pcap
+if __name__ == '__main__':
+ main()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_profile.py b/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
new file mode 100644
index 00000000..16d5238e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
@@ -0,0 +1,58 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+
+def simple ():
+
+ # create client
+ #verbose_level = LoggerApi.VERBOSE_HIGH
+ c = STLClient(verbose_level = LoggerApi.VERBOSE_REGULAR)
+ passed = True
+
+ try:
+ # connect to server
+ c.connect()
+
+ my_ports=[0,1]
+
+ # prepare our ports
+ c.reset(ports = my_ports)
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'hlt', 'udp_1pkt_simple.py')
+
+ try:
+ profile = STLProfile.load(profile_file)
+ except STLError as e:
+ print(format_text("\nError while loading profile '{0}'\n".format(opts.file[0]), 'bold'))
+ print(e.brief() + "\n")
+ return
+
+ print(profile.dump_to_yaml())
+
+ c.remove_all_streams(my_ports)
+
+
+ c.add_streams(profile.get_streams(), ports = my_ports)
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10)
+
+ # block until done
+ c.wait_on_traffic(ports = [0, 1])
+
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+
+# run the tests
+simple()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py b/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py
new file mode 100644
index 00000000..d06414e4
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_run_udp_simple.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+import sys
+import argparse
+"""
+Sample API application,
+Connect to TRex
+Send UDP packet in specific length
+Each direction has its own IP range
+Compare Rx-pkts to TX-pkts assuming ports are loopback
+
+"""
+
+import stl_path
+from trex_stl_lib.api import *
+
+H_VER = "trex-x v0.1 "
+
+class t_global(object):
+ args=None;
+
+
+import time
+import json
+import string
+
+def generate_payload(length):
+ word = ''
+    alphabet_size = len(string.ascii_letters)
+    for i in range(length):
+        word += string.ascii_letters[(i % alphabet_size)]
+ return word
+
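+# e.g. generate_payload(4) -> 'abcd' (the letters repeat cyclically for longer payloads)
+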
+# simple packet creation
+def create_pkt (frame_size = 9000, direction=0):
+
+ ip_range = {'src': {'start': "10.0.0.1", 'end': "10.0.0.254"},
+ 'dst': {'start': "8.0.0.1", 'end': "8.0.0.254"}}
+
+ if (direction == 0):
+ src = ip_range['src']
+ dst = ip_range['dst']
+ else:
+ src = ip_range['dst']
+ dst = ip_range['src']
+
+ vm = [
+ # src
+ STLVmFlowVar(name="src",min_value=src['start'],max_value=src['end'],size=4,op="inc"),
+ STLVmWrFlowVar(fv_name="src",pkt_offset= "IP.src"),
+
+ # dst
+ STLVmFlowVar(name="dst",min_value=dst['start'],max_value=dst['end'],size=4,op="inc"),
+ STLVmWrFlowVar(fv_name="dst",pkt_offset= "IP.dst"),
+
+ # checksum
+ STLVmFixIpv4(offset = "IP")
+ ]
+
+ pkt_base = Ether(src="00:00:00:00:00:01",dst="00:00:00:00:00:02")/IP()/UDP(dport=12,sport=1025)
+ pyld_size = frame_size - len(pkt_base);
+ pkt_pyld = generate_payload(pyld_size)
+
+ return STLPktBuilder(pkt = pkt_base/pkt_pyld,
+ vm = vm)
+
+
+def simple_burst (duration = 10, frame_size = 9000, speed = '1gbps'):
+
+ if (frame_size < 60):
+ frame_size = 60
+
+ pkt_dir_0 = create_pkt (frame_size, 0)
+
+ pkt_dir_1 = create_pkt (frame_size, 1)
+
+ # create client
+ c = STLClient(server = t_global.args.ip)
+
+ passed = True
+
+ try:
+ # turn this on for some information
+ #c.set_verbose("high")
+
+ # create two streams
+ s1 = STLStream(packet = pkt_dir_0,
+ mode = STLTXCont(pps = 100))
+
+ # second stream with a phase of 1ms (inter stream gap)
+ s2 = STLStream(packet = pkt_dir_1,
+ isg = 1000,
+ mode = STLTXCont(pps = 100))
+
+ if t_global.args.debug:
+ STLStream.dump_to_yaml ("example.yaml", [s1,s2]) # export to YAML so you can run it on simulator ./stl-sim -f example.yaml -o o.pcap
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports (my machine has 0 <--> 1 with static route)
+ c.reset(ports = [0, 1])
+
+ # add both streams to ports
+ c.add_streams(s1, ports = [0])
+ c.add_streams(s2, ports = [1])
+
+ # clear the stats before injecting
+ c.clear_stats()
+
+        # choose rate and start traffic for the requested duration
+        print("Running {0} on ports 0, 1 for {1} seconds, UDP frame size {2}...".format(speed, duration, frame_size+4))
+ c.start(ports = [0, 1], mult = speed, duration = duration)
+
+ # block until done
+ c.wait_on_traffic(ports = [0, 1])
+
+ # read the stats after the test
+ stats = c.get_stats()
+
+ #print stats
+ print(json.dumps(stats[0], indent = 4, separators=(',', ': '), sort_keys = True))
+ print(json.dumps(stats[1], indent = 4, separators=(',', ': '), sort_keys = True))
+
+ lost_a = stats[0]["opackets"] - stats[1]["ipackets"]
+ lost_b = stats[1]["opackets"] - stats[0]["ipackets"]
+
+ print("\npackets lost from 0 --> 1: {0} pkts".format(lost_a))
+ print("packets lost from 1 --> 0: {0} pkts".format(lost_b))
+
+ if (lost_a == 0) and (lost_b == 0):
+ passed = True
+ else:
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nPASSED\n")
+ else:
+ print("\nFAILED\n")
+
+def process_options ():
+ parser = argparse.ArgumentParser(usage="""
+ connect to TRex and send burst of packets
+
+ examples
+
+ stl_run_udp_simple.py -s 9001
+
+ stl_run_udp_simple.py -s 9000 -d 2
+
+ stl_run_udp_simple.py -s 3000 -d 3 -m 10mbps
+
+ stl_run_udp_simple.py -s 3000 -d 3 -m 10mbps --debug
+
+ then run the simulator on the output
+ ./stl-sim -f example.yaml -o a.pcap ==> a.pcap include the packet
+
+ """,
+ description="example for TRex api",
+ epilog=" written by hhaim");
+
+ parser.add_argument("-s", "--frame-size",
+ dest="frame_size",
+ help='L2 frame size in bytes without FCS',
+ default=60,
+ type = int,
+ )
+
+ parser.add_argument("--ip",
+ dest="ip",
+                        help='remote TRex IP (default: local)',
+ default="127.0.0.1",
+ type = str
+ )
+
+
+ parser.add_argument('-d','--duration',
+ dest='duration',
+                        help='duration in seconds',
+ default=10,
+ type = int,
+ )
+
+
+ parser.add_argument('-m','--multiplier',
+ dest='mul',
+                         help='speed in gbps/pps, for example 1gbps, 1mbps, 1mpps',
+ default="1mbps"
+ )
+
+ parser.add_argument('--debug',
+ action='store_true',
+                        help='print debug info')
+
+ parser.add_argument('--version', action='version',
+ version=H_VER )
+
+ t_global.args = parser.parse_args();
+ print(t_global.args)
+
+
+
+def main():
+ process_options ()
+ simple_burst(duration = t_global.args.duration,
+ frame_size = t_global.args.frame_size,
+ speed = t_global.args.mul
+ )
+
+if __name__ == "__main__":
+ main()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py b/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py
new file mode 100644
index 00000000..4bd9fd4c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_simple_burst.py
@@ -0,0 +1,71 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+
+def simple_burst (port_a, port_b, pkt_size, burst_size, rate):
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ pkt_base = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()
+ pad = max(0, pkt_size - len(pkt_base)) * 'x'
+ pkt = STLPktBuilder(pkt = pkt_base / pad)
+
+ # create two bursts and link them
+ s1 = STLStream(name = 'A',
+ packet = pkt,
+ mode = STLTXSingleBurst(total_pkts = burst_size),
+ next = 'B')
+
+ s2 = STLStream(name = 'B',
+ self_start = False,
+ packet = pkt,
+ mode = STLTXSingleBurst(total_pkts = burst_size))
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [port_a, port_b])
+
+ # add both streams to ports
+ stream_ids = c.add_streams([s1, s2], ports = [port_a, port_b])
+
+ # run 5 times
+ for i in range(1, 6):
+ c.clear_stats()
+ c.start(ports = [port_a, port_b], mult = rate)
+ c.wait_on_traffic(ports = [port_a, port_b])
+
+ stats = c.get_stats()
+ ipackets = stats['total']['ipackets']
+
+ print("Test iteration {0} - Packets Received: {1} ".format(i, ipackets))
+ # two streams X 2 ports
+ if (ipackets != (burst_size * 2 * 2)):
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+
+ if passed and not c.get_warnings():
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+
+# run the tests
+simple_burst(0, 3, 256, 50000, "80%")
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py b/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
new file mode 100644
index 00000000..1d4ef250
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
@@ -0,0 +1,60 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+
+def simple ():
+
+ # create client
+ #verbose_level = LoggerApi.VERBOSE_HIGH
+ c = STLClient(verbose_level = LoggerApi.VERBOSE_REGULAR)
+ passed = True
+
+ try:
+ # connect to server
+ c.connect()
+
+ my_ports=[0,1]
+
+ # prepare our ports
+ c.reset(ports = my_ports)
+
+ print((" is connected {0}".format(c.is_connected())))
+
+ print((" number of ports {0}".format(c.get_port_count())))
+ print((" acquired_ports {0}".format(c.get_acquired_ports())))
+ # port stats
+ print(c.get_stats(my_ports))
+ # port info
+ print(c.get_port_info(my_ports))
+
+ c.ping()
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'udp_1pkt_simple.py')
+
+ print("start")
+ c.start_line (" -f %s -m 10mpps --port 0 1 " % profile_file)
+ time.sleep(2);
+ c.pause_line("--port 0 1");
+ time.sleep(2);
+ c.resume_line("--port 0 1");
+ time.sleep(2);
+ c.update_line("--port 0 1 -m 5mpps");
+ time.sleep(2);
+ c.stop_line("--port 0 1");
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if passed:
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+
+# run the tests
+simple()
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_simple_pin_core.py b/scripts/automation/trex_control_plane/stl/examples/stl_simple_pin_core.py
new file mode 100644
index 00000000..6e3d5f7f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_simple_pin_core.py
@@ -0,0 +1,72 @@
+import stl_path
+from trex_stl_lib.api import *
+
+import time
+
+def simple_burst (port_a, port_b, pkt_size, burst_size, rate):
+
+ # create client
+ c = STLClient()
+ passed = True
+
+ try:
+ pkt_base = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()
+ pad = max(0, pkt_size - len(pkt_base)) * 'x'
+ pkt = STLPktBuilder(pkt = pkt_base / pad)
+
+ # create two bursts and link them
+ s1 = STLStream(name = 'A',
+ packet = pkt,
+ mode = STLTXSingleBurst(total_pkts = burst_size),
+ next = 'B')
+
+ s2 = STLStream(name = 'B',
+ self_start = False,
+ packet = pkt,
+ mode = STLTXSingleBurst(total_pkts = burst_size))
+
+ # connect to server
+ c.connect()
+
+ # prepare our ports
+ c.reset(ports = [port_a, port_b])
+
+ # add both streams to ports
+ stream_ids = c.add_streams([s1, s2], ports = [port_a, port_b])
+
+ # run 5 times
+ for i in range(1, 6):
+ c.clear_stats()
+ ##
+ c.start(ports = [port_a, port_b], mult = rate, core_mask=STLClient.CORE_MASK_PIN) # better performance
+ c.wait_on_traffic(ports = [port_a, port_b])
+
+ stats = c.get_stats()
+ ipackets = stats['total']['ipackets']
+
+ print("Test iteration {0} - Packets Received: {1} ".format(i, ipackets))
+ # two streams X 2 ports
+ if (ipackets != (burst_size * 2 * 2)):
+ passed = False
+
+ except STLError as e:
+ passed = False
+ print(e)
+
+ finally:
+ c.disconnect()
+
+ if c.get_warnings():
+ print("\n\n*** test had warnings ****\n\n")
+ for w in c.get_warnings():
+ print(w)
+
+ if passed and not c.get_warnings():
+ print("\nTest has passed :-)\n")
+ else:
+ print("\nTest has failed :-(\n")
+
+
+# run the tests
+simple_burst(0, 3, 256, 50000, "80%")
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/using_rpc_proxy.py b/scripts/automation/trex_control_plane/stl/examples/using_rpc_proxy.py
new file mode 100755
index 00000000..d2fcdff3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/examples/using_rpc_proxy.py
@@ -0,0 +1,149 @@
+#!/router/bin/python
+
+import argparse
+import sys
+import os
+from time import sleep
+from pprint import pprint
+
+# ext libs
+ext_libs = os.path.join(os.pardir, os.pardir, os.pardir, os.pardir, 'external_libs')
+sys.path.append(os.path.join(ext_libs, 'jsonrpclib-pelix-0.2.5'))
+import jsonrpclib
+
+def fail(msg):
+ print(msg)
+ sys.exit(1)
+
+def verify(res):
+ if not res[0]:
+ fail(res[1])
+ return res
+
+def verify_hlt(res):
+ if res['status'] == 0:
+ fail(res['log'])
+ return res
+
+### Main ###
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description = 'Use of Stateless through rpc_proxy. (Can be implemented in any language)')
+ parser.add_argument('-s', '--server', type=str, default = 'localhost', dest='server', action = 'store',
+ help = 'Address of rpc proxy.')
+ parser.add_argument('-p', '--port', type=int, default = 8095, dest='port', action = 'store',
+ help = 'Port of rpc proxy.\nDefault is 8095.')
+ parser.add_argument('--master_port', type=int, default = 8091, dest='master_port', action = 'store',
+ help = 'Port of Master daemon.\nDefault is 8091.')
+ args = parser.parse_args()
+
+ server = jsonrpclib.Server('http://%s:%s' % (args.server, args.port))
+ master = jsonrpclib.Server('http://%s:%s' % (args.server, args.master_port))
+
+# Connecting
+
+ try:
+ print('Connecting to STL RPC proxy server')
+ server.check_connectivity()
+ print('Connected')
+ except Exception as e:
+ print('Could not connect to STL RPC proxy server: %s\nTrying to start it from Master daemon.' % e)
+ try:
+ master.check_connectivity()
+ master.start_stl_rpc_proxy()
+ print('Started')
+ except Exception as e:
+ print('Could not start it from Master daemon. Error: %s' % e)
+ sys.exit(-1)
+
+
+# Native API
+
+ print('Initializing Native Client')
+ verify(server.native_proxy_init(server = args.server, force = True))
+
+ print('Connecting to TRex server')
+ verify(server.connect())
+
+ print('Resetting all ports')
+ verify(server.reset())
+
+ print('Getting ports info')
+ res = verify(server.native_method(func_name = 'get_port_info'))
+ print('Ports info is: %s' % res[1])
+ ports = [port['index'] for port in res[1]]
+
+ print('Sending pcap to ports %s' % ports)
+ verify(server.push_remote(pcap_filename = 'stl/sample.pcap'))
+ sleep(3)
+
+ print('Getting stats')
+ res = verify(server.get_stats())
+ pprint(res[1])
+
+ print('Resetting all ports')
+ verify(server.reset())
+
+ imix_path_1 = '../../../../stl/imix.py'
+ imix_path_2 = '../../stl/imix.py'
+ if os.path.exists(imix_path_1):
+ imix_path = imix_path_1
+ elif os.path.exists(imix_path_2):
+ imix_path = imix_path_2
+ else:
+ print('Could not find path of imix profile, skipping')
+ imix_path = None
+
+ if imix_path:
+ print('Adding profile %s' % imix_path)
+ verify(server.native_method(func_name = 'add_profile', filename = imix_path))
+
+ print('Start traffic for 5 sec')
+ verify(server.native_method('start'))
+ sleep(5)
+
+ print('Getting stats')
+ res = verify(server.get_stats())
+ pprint(res[1])
+
+ print('Resetting all ports')
+ verify(server.reset())
+
+ print('Deleting Native Client instance')
+ verify(server.native_proxy_del())
+
+# HLTAPI
+
+ print('Initializing HLTAPI Client')
+ verify_hlt(server.hltapi_proxy_init(force = True))
+ print('HLTAPI Client initiated')
+
+ print('HLTAPI connect')
+ verify_hlt(server.hlt_connect(device = args.server, port_list = ports, reset = True, break_locks = True))
+
+ print('Creating traffic')
+ verify_hlt(server.traffic_config(
+ mode = 'create', bidirectional = True,
+ port_handle = ports[0], port_handle2 = ports[1],
+ frame_size = 100,
+ l3_protocol = 'ipv4',
+ ip_src_addr = '10.0.0.1', ip_src_mode = 'increment', ip_src_count = 254,
+ ip_dst_addr = '8.0.0.1', ip_dst_mode = 'increment', ip_dst_count = 254,
+ l4_protocol = 'udp',
+ udp_dst_port = 12, udp_src_port = 1025,
+ rate_percent = 10, ignore_macs = True,
+ ))
+
+ print('Starting traffic for 5 sec')
+ verify_hlt(server.traffic_control(action = 'run', port_handle = ports[:2]))
+
+ sleep(5)
+ print('Stopping traffic')
+ verify_hlt(server.traffic_control(action = 'stop', port_handle = ports[:2]))
+
+ print('Getting stats')
+ res = verify_hlt(server.traffic_stats(mode = 'aggregate', port_handle = ports[:2]))
+ pprint(res)
+
+ print('Deleting HLTAPI Client instance')
+ verify_hlt(server.hltapi_proxy_del())
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
new file mode 100755
index 00000000..91257596
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
@@ -0,0 +1,798 @@
+
+import os
+import sys
+stl_pathname = os.path.abspath(os.path.join(os.pardir, os.pardir))
+sys.path.append(stl_pathname)
+
+from trex_stl_lib.api import *
+import tempfile
+import hashlib
+import base64
+import numbers
+import inspect
+import json
+from pprint import pprint
+
+# add some layers as an example
+# need to test more
+from scapy.layers.dns import *
+from scapy.layers.dhcp import *
+from scapy.layers.ipsec import *
+from scapy.layers.netflow import *
+from scapy.layers.sctp import *
+from scapy.layers.tftp import *
+
+from scapy.contrib.mpls import *
+from scapy.contrib.igmp import *
+from scapy.contrib.igmpv3 import *
+
+
+
+
+#additional_stl_udp_pkts = os.path.abspath(os.path.join(os.pardir,os.pardir,os.pardir,os.pardir, os.pardir,'stl'))
+#sys.path.append(additional_stl_udp_pkts)
+#from udp_1pkt_vxlan import VXLAN
+#sys.path.remove(additional_stl_udp_pkts)
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+
+
+
+class Scapy_service_api():
+
+ def get_version_handler(self,client_v_major,client_v_minor):
+ """ get_version_handler(self,client_v_major,client_v_minor)
+
+        Gives the client a handler for connecting to and using the server API
+
+ Parameters
+ ----------
+ client_v_major - major number of api version on the client side
+
+ Returns
+ -------
+        Handler (string) to provide when using the server API
+ """
+ pass
+ def get_all(self,client_v_handler):
+ """ get_all(self,client_v_handler)
+
+ Sends all the protocols and fields that Scapy Service supports.
+        It also sends the md5 of the protocol DB and the fields DB, used to check whether the DBs are up to date
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ Dictionary (of protocol DB and scapy fields DB)
+
+ Raises
+ ------
+        Raises an exception when a DB error occurs (i.e. a layer is not loaded properly and has missing components)
+ """
+ pass
+
+ def check_update_of_dbs(self,client_v_handler,db_md5,field_md5):
+ """ check_update_of_dbs(self,client_v_handler,db_md5,field_md5)
+        Checks if the Scapy Service running on the server has a newer version of the databases than the client has
+
+ Parameters
+ ----------
+ db_md5 - The md5 that was delivered with the protocol database that the client owns, when first received at the client
+ field_md5 - The md5 that was delivered with the fields database that the client owns, when first received at the client
+
+ Returns
+ -------
+        True/False according to the databases' versions (determined by their md5)
+
+ Raises
+ ------
+ Raises an exception (ScapyException) when protocol DB/Fields DB is not up to date
+ """
+ pass
+
+
+ def build_pkt(self,client_v_handler,pkt_model_descriptor):
+ """ build_pkt(self,client_v_handler,pkt_model_descriptor) -> Dictionary (of Offsets,Show2 and Buffer)
+
+ Performs calculations on the given packet and returns results for that packet.
+
+ Parameters
+ ----------
+        pkt_model_descriptor - an array of dictionaries describing a network packet
+
+ Returns
+ -------
+ - The packets offsets: each field in every layer is mapped inside the Offsets Dictionary
+ - The Show2: A description of each field and its value in every layer of the packet
+ - The Buffer: The Hexdump of packet encoded in base64
+
+ Raises
+ ------
+        will raise an exception when the Scapy string format is illegal, contains a syntax error, references an
+        unsupported protocol, etc.
+ """
+ pass
+
+
+ def get_tree(self,client_v_handler):
+ """ get_tree(self) -> Dictionary describing an example of hierarchy in layers
+
+ Scapy service holds a tree of layers that can be stacked to a recommended packet
+ according to the hierarchy
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ Returns an example hierarchy tree of layers that can be stacked to a packet
+
+ Raises
+ ------
+ None
+ """
+ pass
+
+ def reconstruct_pkt(self,client_v_handler,binary_pkt,model_descriptor):
+ """ reconstruct_pkt(self,client_v_handler,binary_pkt)
+
+        Makes a valid Scapy packet by applying the changes to the binary packet, and returns all the information returned by build_pkt
+
+ Parameters
+ ----------
+ Source packet in binary_pkt, formatted in "base64" encoding
+ List of changes in model_descriptor
+
+ Returns
+ -------
+ All data provided in build_pkt:
+ show2 - detailed description of the packet
+ buffer - the packet presented in binary
+ offsets - the offset[in bytes] of each field in the packet
+
+ """
+ pass
+
+ def read_pcap(self,client_v_handler,pcap_base64):
+ """ read_pcap(self,client_v_handler,pcap_base64)
+
+ Parses pcap file contents and returns an array with build_pkt information for each packet
+
+ Parameters
+ ----------
+ binary pcap file in base64 encoding
+
+ Returns
+ -------
+ Array of build_pkt(packet)
+ """
+ pass
+
+ def write_pcap(self,client_v_handler,packets_base64):
+ """ write_pcap(self,client_v_handler,packets_base64)
+
+ Writes binary packets to pcap file
+
+ Parameters
+ ----------
+ array of binary packets in base64 encoding
+
+ Returns
+ -------
+ binary pcap file in base64 encoding
+ """
+ pass
+
+ def get_definitions(self,client_v_handler, def_filter):
+ """ get_definitions(self,client_v_handler, def_filter)
+
+ Returns protocols and fields metadata of scapy service
+
+ Parameters
+ ----------
+ def_filter - array of protocol names
+
+ Returns
+ -------
+ definitions for protocols
+ """
+ pass
+
+ def get_payload_classes(self,client_v_handler, pkt_model_descriptor):
+ """ get_payload_classes(self,client_v_handler, pkt_model_descriptor)
+
+ Returns an array of protocol classes, which normally can be used as a payload
+
+ Parameters
+ ----------
+ pkt_model_descriptor - see build_pkt
+
+ Returns
+ -------
+ array of supported protocol classes
+ """
+ pass
+
+def is_python(version):
+ return version == sys.version_info[0]
+
+def is_number(obj):
+ return isinstance(obj, numbers.Number)
+
+def is_string(obj):
+ return type(obj) == str or type(obj).__name__ == 'unicode' # python3 doesn't have unicode type
+
+def is_ascii_str(strval):
+ return strval and all(ord(ch) < 128 for ch in strval)
+
+def is_ascii_bytes(buf):
+ return buf and all(byte < 128 for byte in buf)
+
+def is_ascii(obj):
+ if is_bytes3(obj):
+ return is_ascii_bytes(obj)
+ else:
+ return is_ascii_str(obj)
+
+def is_bytes3(obj):
+ # checks if obj is exactly bytes(always false for python2)
+ return is_python(3) and type(obj) == bytes
+
+def str_to_bytes(strval):
+ return strval.encode("utf8")
+
+def bytes_to_str(buf):
+ return buf.decode("utf8")
+
+def b64_to_bytes(payload_base64):
+ # get bytes from base64 string(unicode)
+ return base64.b64decode(payload_base64)
+
+def bytes_to_b64(buf):
+ # bytes to base64 string(unicode)
+ return base64.b64encode(buf).decode('ascii')
+
+def get_sample_field_val(scapy_layer, fieldId):
+ # get some sample value for the field, to determine the value type
+ # use random or serialized value if default value is None
+ field_desc, current_val = scapy_layer.getfield_and_val(fieldId)
+ if current_val is not None:
+ return current_val
+ try:
+ # try to get some random value to determine type
+ return field_desc.randval()._fix()
+ except:
+ pass
+ try:
+ # try to serialize/deserialize
+ ltype = type(scapy_layer)
+ pkt = ltype(bytes(ltype()))
+ return pkt.getfieldval(fieldId)
+ except:
+ pass
+
+class ScapyException(Exception): pass
+class Scapy_service(Scapy_service_api):
+
+#----------------------------------------------------------------------------------------------------
+ class ScapyFieldDesc:
+ def __init__(self,FieldName,regex='empty'):
+ self.FieldName = FieldName
+ self.regex = regex
+ #default values - should be changed when needed, or added to constructor
+ self.string_input = ""
+ self.string_input_max_len = 1
+ self.integer_input = 0
+ self.integer_input_min = 0
+ self.integer_input_max = 1
+ self.input_array = []
+ self.input_list_max_len = 1
+
+ def stringRegex(self):
+ return self.regex
+#----------------------------------------------------------------------------------------------------
+ def __init__(self):
+ self.Raw = {'Raw':''}
+ self.high_level_protocols = ['Raw']
+ self.transport_protocols = {'TCP':self.Raw,'UDP':self.Raw}
+ self.network_protocols = {'IP':self.transport_protocols ,'ARP':''}
+ self.low_level_protocols = { 'Ether': self.network_protocols }
+ self.regexDB= {'MACField' : self.ScapyFieldDesc('MACField','^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$'),
+ 'IPField' : self.ScapyFieldDesc('IPField','^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$')}
+ self.all_protocols = self._build_lib()
+ self.protocol_tree = {'ALL':{'Ether':{'ARP':{},'IP':{'TCP':{'RAW':'payload'},'UDP':{'RAW':'payload'}}}}}
+ self.version_major = '1'
+ self.version_minor = '01'
+ self.server_v_hashed = self._generate_version_hash(self.version_major,self.version_minor)
+
+
+ def _all_protocol_structs(self):
+ old_stdout = sys.stdout
+ sys.stdout = mystdout = StringIO()
+ ls()
+ sys.stdout = old_stdout
+ all_protocol_data= mystdout.getvalue()
+ return all_protocol_data
+
+ def _protocol_struct(self,protocol):
+ if '_' in protocol:
+ return []
+ if not protocol=='':
+ if protocol not in self.all_protocols:
+ return 'protocol not supported'
+ protocol = eval(protocol)
+ old_stdout = sys.stdout
+ sys.stdout = mystdout = StringIO()
+ ls(protocol)
+ sys.stdout = old_stdout
+ protocol_data= mystdout.getvalue()
+ return protocol_data
+
+ def _build_lib(self):
+ lib = self._all_protocol_structs()
+ lib = lib.splitlines()
+ all_protocols=[]
+ for entry in lib:
+ entry = entry.split(':')
+ all_protocols.append(entry[0].strip())
+ del all_protocols[len(all_protocols)-1]
+ return all_protocols
+
+ def _parse_description_line(self,line):
+ line_arr = [x.strip() for x in re.split(': | = ',line)]
+ return tuple(line_arr)
+
+ def _parse_entire_description(self,description):
+ description = description.split('\n')
+ description_list = [self._parse_description_line(x) for x in description]
+ del description_list[len(description_list)-1]
+ return description_list
+
+ def _get_protocol_details(self,p_name):
+ protocol_str = self._protocol_struct(p_name)
+ if protocol_str=='protocol not supported':
+ return 'protocol not supported'
+ if len(protocol_str) == 0:
+ return []
+ tupled_protocol = self._parse_entire_description(protocol_str)
+ return tupled_protocol
+
+ def _value_from_dict(self, val):
+ # allows building python objects from json
+ if type(val) == type({}):
+ value_type = val['vtype']
+ if value_type == 'EXPRESSION':
+ return eval(val['expr'], {})
+ elif value_type == 'BYTES': # bytes payload(ex Raw.load)
+ return b64_to_bytes(val['base64'])
+ elif value_type == 'OBJECT':
+ return val['value']
+ else:
+ return val # it's better to specify type explicitly
+ elif type(val) == type([]):
+ return [self._value_from_dict(v) for v in val]
+ else:
+ return val
+
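+ # Example (sketch): the 'vtype' wrappers handled above:
+ #   {"vtype": "EXPRESSION", "expr": "[1,2,3]"} -> eval'ed python expression
+ #   {"vtype": "BYTES", "base64": "aGk="}       -> raw bytes payload
+ #   {"vtype": "OBJECT", "value": v}            -> v passed through as-is
+ # plain values are returned unchanged; lists are converted recursively
+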
+ def _field_value_from_def(self, layer, fieldId, val):
+ field_desc = layer.get_field(fieldId)
+ sample_val = get_sample_field_val(layer, fieldId)
+ # extensions for field values
+ if type(val) == type({}):
+ value_type = val['vtype']
+ if value_type == 'UNDEFINED': # clear field value
+ return None
+ elif value_type == 'RANDOM': # random field value
+ return field_desc.randval()
+ elif value_type == 'MACHINE': # internal machine field repr
+ return field_desc.m2i(layer, b64_to_bytes(val['base64']))
+ if is_number(sample_val) and is_string(val):
+ # human-value. guess the type and convert to internal value
+ # seems setfieldval already does this for some fields,
+ # but does not convert strings/hex(0x123) to integers and long
+ val = str(val) # unicode -> str(ascii)
+ # parse str to int/long as a decimal or hex
+ val_constructor = type(sample_val)
+ if len(val) == 0:
+ return None
+ elif re.match(r"^0x[\da-f]+$", val, flags=re.IGNORECASE): # hex
+ return val_constructor(val, 16)
+ elif re.match(r"^\d+L?$", val): # base10
+ return val_constructor(val)
+ # generate recursive field-independent values
+ return self._value_from_dict(val)
+
+ def _print_tree(self):
+ pprint(self.protocol_tree)
+
+ def _get_all_db(self):
+ db = {}
+ for pro in self.all_protocols:
+ details = self._get_protocol_details(pro)
+ db[pro] = details
+ return db
+
+ def _get_all_fields(self):
+ fields = []
+ for pro in self.all_protocols:
+ details = self._get_protocol_details(pro)
+ for i in range(0,len(details),1):
+ if len(details[i]) == 3:
+ fields.append(details[i][1])
+ uniqueFields = list(set(fields))
+ fieldDict = {}
+ for f in uniqueFields:
+ if f in self.regexDB:
+ fieldDict[f] = self.regexDB[f].stringRegex()
+ else:
+ fieldDict[f] = self.ScapyFieldDesc(f).stringRegex()
+ return fieldDict
+
+ def _fully_define(self,pkt):
+ # returns scapy object with all fields initialized
+ rootClass = type(pkt)
+ full_pkt = rootClass(bytes(pkt))
+ full_pkt.build() # this trick initializes offset
+ return full_pkt
+
+ def _bytes_to_value(self, payload_bytes):
+ # generates struct with a value
+ return { "vtype": "BYTES", "base64": bytes_to_b64(payload_bytes) }
+
+ def _pkt_to_field_tree(self,pkt):
+ pkt.build()
+ result = []
+ pcap_struct = self._fully_define(pkt) # structure, which will appear in pcap binary
+ while pkt:
+ layer_id = type(pkt).__name__ # Scapy classname
+ layer_full = self._fully_define(pkt) # current layer recreated from binary to get auto-calculated vals
+ real_layer_id = type(pcap_struct).__name__ if pcap_struct else None
+ valid_struct = True # shows if packet is mapped correctly to the binary representation
+ if not pcap_struct:
+ valid_struct = False
+ elif not issubclass(type(pkt), type(pcap_struct)) and not issubclass(type(pcap_struct), type(pkt)):
+ # structure mismatch. no need to go deeper in pcap_struct
+ valid_struct = False
+ pcap_struct = None
+ fields = []
+ for field_desc in pkt.fields_desc:
+ field_id = field_desc.name
+ ignored = field_id not in layer_full.fields
+ offset = field_desc.offset
+ protocol_offset = pkt.offset
+ field_sz = field_desc.get_size_bytes()
+ # some values are unavailable in pkt (the original model);
+ # in that case they are taken from the fully defined layer (layer_full)
+ fieldval = pkt.getfieldval(field_id)
+ pkt_fieldval_defined = is_string(fieldval) or is_number(fieldval) or is_bytes3(fieldval)
+ if not pkt_fieldval_defined:
+ fieldval = layer_full.getfieldval(field_id)
+ value = None
+ hvalue = None
+ value_base64 = None
+ if is_python(3) and is_bytes3(fieldval):
+ value = self._bytes_to_value(fieldval)
+ if is_ascii_bytes(fieldval):
+ hvalue = bytes_to_str(fieldval)
+ else:
+ # can't be shown as ascii.
+ # also this buffer may not be unicode-compatible(still can try to convert)
+ value = self._bytes_to_value(fieldval)
+ hvalue = '<binary>'
+ elif not is_string(fieldval):
+ # value as is. this can be int,long, or custom object(list/dict)
+ # "nice" human value, i2repr(string) will have quotes, so we have special handling for them
+ hvalue = field_desc.i2repr(pkt, fieldval)
+
+ if is_number(fieldval):
+ value = fieldval
+ if is_string(hvalue) and re.match(r"^\d+L$", hvalue):
+ hvalue = hvalue[:-1] # chop trailing L for long decimal number(python2)
+ else:
+ # fieldval is an object (class / list / dict)
+ # generic serialization/deserialization is needed for proper packet rebuilding from the packet tree;
+ # some classes can not be mapped to json, but we can serialize them
+ # as a python eval expression, as base64 value bytes, or as the field's machine-internal value (m2i)
+ value = {"vtype": "EXPRESSION", "expr": hvalue}
+ if is_python(3) and is_string(fieldval):
+ hvalue = value = fieldval
+ if is_python(2) and is_string(fieldval):
+ if is_ascii(fieldval):
+ hvalue = value = fieldval
+ else:
+ # python2 non-ascii byte buffers
+ # payload contains non-ascii chars, which
+ # sometimes can not be passed as unicode strings
+ value = self._bytes_to_value(fieldval)
+ hvalue = '<binary>'
+ if field_desc.name == 'load':
+ # show Padding(and possible similar classes) as Raw
+ layer_id = 'Raw'
+ field_sz = len(pkt)
+ value = self._bytes_to_value(fieldval)
+ field_data = {
+ "id": field_id,
+ "value": value,
+ "hvalue": hvalue,
+ "offset": offset,
+ "length": field_sz
+ }
+ if ignored:
+ field_data["ignored"] = ignored
+ fields.append(field_data)
+ layer_data = {
+ "id": layer_id,
+ "offset": pkt.offset,
+ "fields": fields,
+ "real_id": real_layer_id,
+ "valid_structure": valid_struct,
+ }
+ result.append(layer_data)
+ pkt = pkt.payload
+ if pcap_struct:
+ pcap_struct = pcap_struct.payload or None
+ return result
+
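+ # Example (sketch): for a packet like Ether()/IP(), the result above is
+ # roughly:
+ #   [ {"id": "Ether", "offset": 0, "real_id": "Ether", "valid_structure": True,
+ #      "fields": [ {"id": "dst", "value": ..., "hvalue": ..., "offset": 0,
+ #                   "length": 6}, ... ]},
+ #     {"id": "IP", ...} ]
+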
+#input: container
+#output: md5 encoded in base64
+ def _get_md5(self,container):
+ container = json.dumps(container)
+ m = hashlib.md5()
+ m.update(str_to_bytes(container))
+ res_md5 = bytes_to_b64(m.digest())
+ return res_md5
+
+ def get_version(self):
+ return {'built_by':'itraviv','version':self.version_major+'.'+self.version_minor}
+
+ def supported_methods(self,method_name='all'):
+ if method_name=='all':
+ methods = {}
+ for f in dir(Scapy_service):
+ if f[0]=='_':
+ continue
+ func = getattr(Scapy_service, f)
+ # python2 exposes unbound methods, python3 exposes plain functions
+ if inspect.ismethod(func) or inspect.isfunction(func):
+ param_list = inspect.getargspec(func)[0]
+ del param_list[0] #deleting the parameter "self" that appears in every method
+ #because the server automatically operates on an instance,
+ #and this can cause confusion
+ methods[f] = (len(param_list), param_list)
+ return methods
+ if method_name in dir(Scapy_service):
+ return True
+ return False
+
+ def _generate_version_hash(self,v_major,v_minor):
+ v_for_hash = v_major+v_minor+v_major+v_minor
+ m = hashlib.md5()
+ m.update(str_to_bytes(v_for_hash))
+ return bytes_to_b64(m.digest())
+
+ def _generate_invalid_version_error(self):
+ error_desc1 = "Provided version handler does not correspond to the server's version.\nUpdate client to latest version.\nServer version:"+self.version_major+"."+self.version_minor
+ return error_desc1
+
+ def _verify_version_handler(self,client_v_handler):
+ return (self.server_v_hashed == client_v_handler)
+
+ def _parse_packet_dict(self,layer,scapy_layers,scapy_layer_names):
+ class_index = scapy_layer_names.index(layer['id'])
+ class_p = scapy_layers[class_index] # class pointer
+ scapy_layer = class_p()
+ if isinstance(scapy_layer, Raw):
+ scapy_layer.load = str_to_bytes("dummy")
+ if 'fields' in layer:
+ self._modify_layer(scapy_layer, layer['fields'])
+ return scapy_layer
+
+ def _packet_model_to_scapy_packet(self,data):
+ layers = Packet.__subclasses__()
+ layer_names = [ layer.__name__ for layer in layers]
+ base_layer = self._parse_packet_dict(data[0],layers,layer_names)
+ for i in range(1,len(data),1):
+ packet_layer = self._parse_packet_dict(data[i],layers,layer_names)
+ base_layer = base_layer/packet_layer
+ return base_layer
+
+ def _pkt_data(self,pkt):
+ if pkt is None:
+ return {'data': [], 'binary': None}
+ data = self._pkt_to_field_tree(pkt)
+ binary = bytes_to_b64(bytes(pkt))
+ res = {'data': data, 'binary': binary}
+ return res
+
+#--------------------------------------------API implementation-------------
+ def get_tree(self,client_v_handler):
+ if not (self._verify_version_handler(client_v_handler)):
+ raise ScapyException(self._generate_invalid_version_error())
+ return self.protocol_tree
+
+ def get_version_handler(self,client_v_major,client_v_minor):
+ v_handle = self._generate_version_hash(client_v_major,client_v_minor)
+ return v_handle
+
+# pkt_descriptor in packet model format (dictionary)
+ def build_pkt(self,client_v_handler,pkt_model_descriptor):
+ if not (self._verify_version_handler(client_v_handler)):
+ raise ScapyException(self._generate_invalid_version_error())
+ pkt = self._packet_model_to_scapy_packet(pkt_model_descriptor)
+ return self._pkt_data(pkt)
+
+ # @deprecated. to be removed
+ def get_all(self,client_v_handler):
+ if not (self._verify_version_handler(client_v_handler)):
+ raise ScapyException(self._generate_invalid_version_error())
+ fields=self._get_all_fields()
+ db=self._get_all_db()
+ fields_md5 = self._get_md5(fields)
+ db_md5 = self._get_md5(db)
+ res = {}
+ res['db'] = db
+ res['fields'] = fields
+ res['db_md5'] = db_md5
+ res['fields_md5'] = fields_md5
+ return res
+
+ def _is_packet_class(self, pkt_class):
+ # returns true for final Packet classes. skips aliases and metaclasses
+ return issubclass(pkt_class, Packet) and pkt_class.name and pkt_class.fields_desc
+
+ def _getDummyPacket(self, pkt_class):
+ if issubclass(pkt_class, Raw):
+ # need to have some payload. otherwise won't appear in the binary chunk
+ return pkt_class(load=str_to_bytes("dummy"))
+ else:
+ return pkt_class()
+
+
+ def _get_payload_classes(self, pkt):
+ # tries to find which subclasses are allowed.
+ # this can take a long time, since it tries to build packets with all subclasses (O(N))
+ pkt_class = type(pkt)
+ allowed_subclasses = []
+ for pkt_subclass in conf.layers:
+ if self._is_packet_class(pkt_subclass):
+ try:
+ pkt_w_payload = pkt_class() / self._getDummyPacket(pkt_subclass)
+ recreated_pkt = pkt_class(bytes(pkt_w_payload))
+ if type(recreated_pkt.lastlayer()) is pkt_subclass:
+ allowed_subclasses.append(pkt_subclass)
+ except Exception as e:
+ # no actions needed on fail, just silently skip
+ pass
+ return allowed_subclasses
+
+ def _get_fields_definition(self, pkt_class):
+ fields = []
+ for field_desc in pkt_class.fields_desc:
+ field_data = {
+ "id": field_desc.name,
+ "name": field_desc.name
+ }
+ if isinstance(field_desc, EnumField):
+ try:
+ field_data["values_dict"] = field_desc.s2i
+ except:
+ # MultiEnumField doesn't have s2i. need better handling
+ pass
+ fields.append(field_data)
+ return fields
+
+ def get_definitions(self,client_v_handler, def_filter):
+ # def_filter is an array of classnames or None
+ all_classes = Packet.__subclasses__() # as an alternative to conf.layers
+ if def_filter:
+ all_classes = [c for c in all_classes if c.__name__ in def_filter]
+ protocols = []
+ for pkt_class in all_classes:
+ if self._is_packet_class(pkt_class):
+ # enumerate all non-abstract Packet classes
+ protocols.append({
+ "id": pkt_class.__name__,
+ "name": pkt_class.name,
+ "fields": self._get_fields_definition(pkt_class)
+ })
+ res = {"protocols": protocols}
+ return res
+
+ def get_payload_classes(self,client_v_handler, pkt_model_descriptor):
+ pkt = self._packet_model_to_scapy_packet(pkt_model_descriptor)
+ return [c.__name__ for c in self._get_payload_classes(pkt)]
+
+#input in string encoded base64
+ def check_update_of_dbs(self,client_v_handler,db_md5,field_md5):
+ if not (self._verify_version_handler(client_v_handler)):
+ raise ScapyException(self._generate_invalid_version_error())
+ fields=self._get_all_fields()
+ db=self._get_all_db()
+ current_db_md5 = self._get_md5(db)
+ current_field_md5 = self._get_md5(fields)
+ res = []
+ if (field_md5 == current_field_md5):
+ if (db_md5 == current_db_md5):
+ return True
+ else:
+ raise ScapyException("Protocol DB is not up to date")
+ else:
+ raise ScapyException("Fields DB is not up to date")
+
+ def _modify_layer(self, scapy_layer, fields):
+ for field in fields:
+ fieldId = str(field['id'])
+ fieldval = self._field_value_from_def(scapy_layer, fieldId, field['value'])
+ if fieldval is not None:
+ scapy_layer.setfieldval(fieldId, fieldval)
+ else:
+ scapy_layer.delfieldval(fieldId)
+
+ def _is_last_layer(self, layer):
+ # can be used to check that a layer has no payload;
+ # if True, the layer.payload is likely NoPayload()
+ return layer is layer.lastlayer()
+
+#input of binary_pkt must be encoded in base64
+ def reconstruct_pkt(self,client_v_handler,binary_pkt,model_descriptor):
+ pkt_bin = b64_to_bytes(binary_pkt)
+ scapy_pkt = Ether(pkt_bin)
+ if not model_descriptor:
+ model_descriptor = []
+ for depth in range(len(model_descriptor)):
+ model_layer = model_descriptor[depth]
+ if model_layer.get('delete') is True:
+ # slice packet from the current item
+ if depth == 0:
+ scapy_pkt = None
+ break
+ else:
+ scapy_pkt[depth-1].payload = None
+ break
+ if depth > 0 and self._is_last_layer(scapy_pkt[depth-1]):
+ # insert new layer(s) from json definition
+ remaining_definitions = model_descriptor[depth:]
+ pkt_to_append = self._packet_model_to_scapy_packet(remaining_definitions)
+ scapy_pkt = scapy_pkt / pkt_to_append
+ break
+ # modify fields of existing stack items
+ scapy_layer = scapy_pkt[depth]
+ if model_layer['id'] != type(scapy_layer).__name__:
+ # TODO: support replacing payload, instead of breaking
+ raise ScapyException("Protocol id inconsistent")
+ if 'fields' in model_layer:
+ self._modify_layer(scapy_layer, model_layer['fields'])
+ return self._pkt_data(scapy_pkt)
+
+ def read_pcap(self,client_v_handler,pcap_base64):
+ pcap_bin = b64_to_bytes(pcap_base64)
+ pcap = []
+ res_packets = []
+ with tempfile.NamedTemporaryFile(mode='w+b') as tmpPcap:
+ tmpPcap.write(pcap_bin)
+ tmpPcap.flush()
+ pcap = rdpcap(tmpPcap.name)
+ for scapy_packet in pcap:
+ res_packets.append(self._pkt_data(scapy_packet))
+ return res_packets
+
+ def write_pcap(self,client_v_handler,packets_base64):
+ packets = [Ether(b64_to_bytes(pkt_b64)) for pkt_b64 in packets_base64]
+ pcap_bin = None
+ with tempfile.NamedTemporaryFile(mode='r+b') as tmpPcap:
+ wrpcap(tmpPcap.name, packets)
+ pcap_bin = tmpPcap.read()
+ return bytes_to_b64(pcap_bin)
+
+
+
+
+#---------------------------------------------------------------------------
+
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py
new file mode 100644
index 00000000..18d32272
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_client.py
@@ -0,0 +1,116 @@
+
+import sys
+import os
+python2_zmq_path = os.path.abspath(os.path.join(os.pardir,os.pardir,os.pardir,os.pardir,
+ os.pardir,'external_libs','pyzmq-14.5.0','python2','fedora18','64bit'))
+sys.path.append(python2_zmq_path)
+
+import zmq
+import json
+from argparse import *
+from pprint import pprint
+
+class Scapy_server_wrapper():
+ def __init__(self,dest_scapy_port=5555,server_ip_address='localhost'):
+ self.server_ip_address = server_ip_address
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.REQ)
+ self.dest_scapy_port = dest_scapy_port
+ self.socket.connect("tcp://"+str(self.server_ip_address)+":"+str(self.dest_scapy_port))
+
+ def call_method(self,method_name,method_params):
+ json_rpc_req = { "jsonrpc":"2.0","method": method_name ,"params": method_params, "id":"1"}
+ request = json.dumps(json_rpc_req)
+ self.socket.send_string(request)
+ # Get the reply.
+ message = self.socket.recv_string()
+ message_parsed = json.loads(message)
+ if 'result' in message_parsed.keys():
+ result = message_parsed['result']
+ else:
+ result = {'error':message_parsed['error']}
+ return result
+
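+ # Example (sketch): call_method('build_pkt', [model]) sends
+ #   {"jsonrpc": "2.0", "method": "build_pkt", "params": [model], "id": "1"}
+ # and returns either the JSON-RPC 'result' member or {'error': ...}
+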
+ def get_all(self):
+ return self.call_method('get_all',[])
+
+ def check_update(self,db_md5,field_md5):
+ result = self.call_method('check_update',[db_md5,field_md5])
+ if result != True:
+ if 'error' in result.keys():
+ if "Fields DB is not up to date" in result['error']['message']:
+ raise Exception("Fields DB is not up to date")
+ if "Protocol DB is not up to date" in result['error']['message']:
+ raise Exception("Protocol DB is not up to date")
+ return result
+
+ def build_pkt(self,pkt_descriptor):
+ return self.call_method('build_pkt',[pkt_descriptor])
+
+ def _get_all_pkt_offsets(self,pkt_desc):
+ return self.call_method('_get_all_pkt_offsets',[pkt_desc])
+
+ def _activate_console(self):
+ context = zmq.Context()
+ # Socket to talk to server
+ print('Connecting:')
+ socket = context.socket(zmq.REQ)
+ socket.connect("tcp://"+str(self.server_ip_address)+":"+str(self.dest_scapy_port))
+ try:
+ print('This is a simple console to communicate with Scapy server.\nInvoke supported_methods (with 1 parameter = all) to see supported commands\n')
+ while True:
+ command = raw_input("enter RPC command [enter quit to exit]:\n")
+ if (command == 'quit'):
+ break
+ parameter_num = 0
+ params = []
+ while True:
+ try:
+ parameter_num = int(raw_input('Enter number of parameters to command:\n'))
+ break
+ except Exception:
+ print('Invalid input. Try again')
+ for i in range(1,parameter_num+1,1):
+ print "input parameter %d:" % i
+ user_parameter = raw_input()
+ params.append(user_parameter)
+ pprint_output = raw_input('pprint the output [y/n]? ')
+ while ((pprint_output!= 'y') and (pprint_output!='n')):
+ pprint_output = raw_input('pprint the output [y/n]? ')
+ json_rpc_req = { "jsonrpc":"2.0","method": command ,"params":params, "id":"1"}
+ request = json.dumps(json_rpc_req)
+ print("Sending request in json format %s " % request)
+ socket.send(request)
+
+ # Get the reply.
+ message = socket.recv()
+ print('received reply:')
+ parsed_message = json.loads(message)
+ if (pprint_output == 'y'):
+ pprint(parsed_message)
+ else:
+ print(message)
+ except KeyboardInterrupt:
+ print('Terminated by Ctrl+C')
+ finally:
+ socket.close()
+ context.destroy()
+
+
+
+if __name__=='__main__':
+ parser = ArgumentParser(description='Example of client module for Scapy server ')
+ parser.add_argument('-p','--dest-scapy-port',type=int, default = 4507, dest='dest_scapy_port',
+ help='Select the port to which this Scapy server client will send.\n default is 4507\n',action='store')
+ parser.add_argument('-s','--server',type=str, default = 'localhost', dest='dest_scapy_ip',
+ help='Remote server IP address .\n default is localhost\n',action='store')
+ parser.add_argument('-c','--console',
+ help='Run a simple client console for the Scapy server.\nrun with \'-s\' and \'-p\' to set the IP and port of the server\n',
+ action='store_true',default = False)
+ args = parser.parse_args()
+ if (args.console):
+ s = Scapy_server_wrapper(args.dest_scapy_port,args.dest_scapy_ip)
+ sys.exit(s._activate_console())
+ else:
+ print('Scapy client: for interactive console re-run with \'-c\', else import as separate module.')
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py
new file mode 100755
index 00000000..6489b36a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py
@@ -0,0 +1,188 @@
+
+import time
+import sys
+import os
+import traceback
+
+stl_pathname = os.path.abspath(os.path.join(os.pardir, os.pardir))
+if stl_pathname not in sys.path:
+ sys.path.append(stl_pathname)
+from trex_stl_lib.api import *
+import zmq
+import inspect
+from scapy_service import *
+from argparse import *
+import socket
+import logging
+import logging.handlers
+
+
+class ParseException(Exception): pass
+class InvalidRequest(Exception): pass
+class MethodNotFound(Exception): pass
+class InvalidParams(Exception): pass
+
+class Scapy_wrapper:
+ def __init__(self):
+ self.scapy_master = Scapy_service()
+
+ def parse_req_msg(self,JSON_req):
+ try:
+ req = json.loads(JSON_req)
+ req_id='null'
+ if (type(req)!= type({})):
+ raise ParseException(req_id)
+ json_rpc_keys = ['jsonrpc','id','method']
+ if ((set(req.keys())!=set(json_rpc_keys)) and (set(req.keys())!=set(json_rpc_keys+['params']))) :
+ if 'id' in req.keys():
+ req_id = req['id']
+ raise InvalidRequest(req_id)
+ req_id = req['id']
+ if (req['method']=='shut_down'):
+ return 'shut_down',[],req_id
+ if not (self.scapy_master.supported_methods(req['method'])):
+ raise MethodNotFound(req_id)
+ scapy_method = getattr(self.scapy_master, req['method'])
+ arg_num_for_method = len(inspect.getargspec(scapy_method)[0])
+ if (arg_num_for_method>1) :
+ if not ('params' in req.keys()):
+ raise InvalidRequest(req_id)
+ params_len = len(req['params'])+1 # +1 because "self" is counted as a parameter of the method
+ if not (params_len==arg_num_for_method):
+ raise InvalidParams(req_id)
+ return req['method'],req['params'],req_id
+ else:
+ return req['method'],[],req_id
+ except ValueError:
+ raise ParseException(req_id)
+
+ def create_error_response(self,error_code,error_msg,req_id):
+ return {"jsonrpc": "2.0", "error": {"code": error_code, "message": error_msg}, "id": req_id}
+
+ def create_success_response(self,result,req_id):
+ return {"jsonrpc": "2.0", "result": result, "id": req_id }
+
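+ # Example (sketch): the two response shapes produced above:
+ #   {"jsonrpc": "2.0", "result": {...}, "id": "1"}
+ #   {"jsonrpc": "2.0", "error": {"code": -32601, "message": "Method not found"}, "id": "1"}
+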
+ def get_exception(self):
+ return sys.exc_info()
+
+
+ def execute(self,method,params):
+ # getattr is safer than eval here, and works for params
+ # whose repr() is not a valid python literal
+ result = getattr(self.scapy_master, method)(*params)
+ return result
+
+
+ def error_handler(self,e,req_id):
+ response = []
+ try:
+ raise e
+ except ParseException as e:
+ response = self.create_error_response(-32700,'Parse error',req_id)
+ except InvalidRequest as e:
+ response = self.create_error_response(-32600,'Invalid Request',req_id)
+ except MethodNotFound as e:
+ response = self.create_error_response(-32601,'Method not found',req_id)
+ except InvalidParams as e:
+ response = self.create_error_response(-32603,'Invalid params',req_id)
+ except SyntaxError as e:
+ response = self.create_error_response(-32097,'SyntaxError',req_id)
+ except Exception as e:
+ if hasattr(e,'message'):
+ response = self.create_error_response(-32098,'Scapy Server: '+str(e.message),req_id)
+ else:
+ response = self.create_error_response(-32096,'Scapy Server: Unknown Error',req_id)
+ finally:
+ return response
+
+class Scapy_server():
+ def __init__(self, args,port=4507):
+ self.scapy_wrapper = Scapy_wrapper()
+ self.port = port
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.REP)
+ self.socket.bind("tcp://*:"+str(port))
+ self.IP_address = socket.gethostbyname(socket.gethostname())
+ self.logger = logging.getLogger('scapy_logger')
+ self.logger.setLevel(logging.INFO)
+ console_h = logging.StreamHandler(sys.__stdout__)
+ formatter = logging.Formatter(fmt='%(asctime)s %(message)s',datefmt='%d-%m-%Y %H:%M:%S')
+ if args.log:
+ logfile_h = logging.FileHandler('scapy_server.log')
+ logfile_h.setLevel(logging.INFO)
+ logfile_h.setFormatter(formatter)
+ self.logger.addHandler(logfile_h)
+ if args.verbose:
+ console_h.setLevel(logging.INFO)
+ else:
+ console_h.setLevel(logging.WARNING)
+ console_h.setFormatter(formatter)
+ self.logger.addHandler(console_h)
+
+
+ def activate(self):
+ self.logger.info('***Scapy Server Started***')
+ self.logger.info('Listening on port: %d' % self.port)
+ self.logger.info('Server IP address: %s' % self.IP_address)
+ try:
+ while True:
+ message = self.socket.recv_string()
+ self.logger.info('Received Message: %s' % message)
+ try:
+ params = []
+ method=''
+ req_id = 'null'
+ method,params,req_id = self.scapy_wrapper.parse_req_msg(message)
+ if (method == 'shut_down'):
+ self.logger.info('Shut down by remote user')
+ result = 'Server shut down command received - server is shutting down'
+ else:
+ result = self.scapy_wrapper.execute(method,params)
+ response = self.scapy_wrapper.create_success_response(result,req_id)
+ except Exception as e:
+ response = self.scapy_wrapper.error_handler(e,req_id)
+ self.logger.info('ERROR %s: %s',response['error']['code'], response['error']['message'])
+ self.logger.info('Exception info: %s' % traceback.format_exc())
+ finally:
+ try:
+ json_response = json.dumps(response)
+ self.logger.info('Sending Message: %s' % json_response)
+ except Exception as e:
+ # rare case when json can not be serialized due to encoding issues
+ # (object is not JSON serializable)
+ self.logger.error('Unexpected Error: %s' % traceback.format_exc())
+ json_response = json.dumps(self.scapy_wrapper.error_handler(e,req_id))
+
+ # Send reply back to client
+ self.socket.send_string(json_response)
+ if (method == 'shut_down'):
+ break
+
+ except KeyboardInterrupt:
+ self.logger.info('Terminated by local user')
+
+ finally:
+ self.socket.close()
+ self.context.destroy()
+
+
+
+ # arg1 is the port number for the server to listen on
+def main(args,port):
+ s = Scapy_server(args,port)
+ s.activate()
+
+if __name__=='__main__':
+
+ parser = ArgumentParser(description=' Runs Scapy Server ')
+ parser.add_argument('-s','--scapy-port',type=int, default = 4507, dest='scapy_port',
+ help='Select the port the Scapy server will listen on.\n default is 4507.',action='store')
+ parser.add_argument('-v','--verbose',help='Print Client-Server Request-Reply information to console.',action='store_true',default = False)
+ parser.add_argument('-l','--log',help='Log every activity of the server to the log file scapy_server.log. The log does not discard older entries; the file size is not limited.',
+ action='store_true',default = False)
+ args = parser.parse_args()
+ port = args.scapy_port
+ sys.exit(main(args,port))
+
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
new file mode 100644
index 00000000..17dd304a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
@@ -0,0 +1,84 @@
+import os
+import sys
+import json
+import base64
+import inspect
+from inspect import getcallargs
+# add paths to scapy_service and trex_stl_lib.api
+sys.path.append(os.path.abspath(os.pardir))
+sys.path.append(os.path.abspath(os.path.join(os.pardir, os.pardir, os.pardir)))
+
+from scapy_service import *
+from scapy.all import *
+
+service = Scapy_service()
+v_handler = service.get_version_handler('1','01')
+
+def pretty_json(obj):
+ return json.dumps(obj, indent=4)
+
+def pprint(obj):
+ print(pretty_json(obj))
+
+def is_verbose():
+ return True
+
+def pass_result(result, *args):
+ # returns result unchanged, but can display debug info if enabled
+ if is_verbose():
+ fargs = (inspect.stack()[-1][4])
+ print(fargs[0])
+ pprint(result)
+ return result
+
+def pass_pkt(result):
+ # returns packet unchanged, but can display debug info if enabled
+ if is_verbose() and result is not None:
+ result.show2()
+ return result
+
+# utility functions for tests
+
+def layer_def(layerId, **layerfields):
+ # test helper method to generate JSON-like protocol definition object for scapy
+ # ex. { "id": "Ether", "fields": [ { "id": "dst", "value": "10:10:10:10:10:10" } ] }
+ res = { "id": layerId }
+ if layerfields:
+ res["fields"] = [ {"id": k, "value": v} for k,v in layerfields.items() ]
+ return res
+
+def get_version_handler():
+ return pass_result(service.get_version_handler("1", "01"))
+
+def build_pkt(model_def):
+ return pass_result(service.build_pkt(v_handler, model_def))
+
+def build_pkt_get_scapy(model_def):
+ return build_pkt_to_scapy(build_pkt(model_def))
+
+def reconstruct_pkt(bytes_b64, model_def):
+ return pass_result(service.reconstruct_pkt(v_handler, bytes_b64, model_def))
+
+def get_definitions(def_filter):
+ return pass_result(service.get_definitions(v_handler, def_filter))
+
+def get_payload_classes(def_filter):
+ return pass_result(service.get_payload_classes(v_handler, def_filter))
+
+def build_pkt_to_scapy(buildpkt_result):
+ return pass_pkt(Ether(b64_to_bytes(buildpkt_result['binary'])))
+
+def fields_to_map(field_array):
+ # [{id, value, hvalue, offset}, ...] to map id -> {value, hvalue, offset}
+ res = {}
+ if field_array:
+ for f in field_array:
+ res[ f["id"] ] = f
+ return res
+
+def adapt_json_protocol_fields(protocols_array):
+ # replaces layer.fields(array) with map for easier access in tests
+ for protocol in protocols_array:
+ # change structure for easier access in tests
+ if protocol.get("fields"):
+ protocol["fields"] = fields_to_map(protocol["fields"])
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
new file mode 100644
index 00000000..9cd473d7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
@@ -0,0 +1,155 @@
+#
+# run with 'nosetests' utility
+
+import tempfile
+import re
+from basetest import *
+
+RE_MAC = "^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$"
+
+TEST_MAC_1 = "10:10:10:10:10:10"
+# Test scapy structure
+TEST_PKT = Ether(dst=TEST_MAC_1)/IP(src='127.0.0.1')/TCP(sport=443)
+
+# Corresponding JSON-like structure
+TEST_PKT_DEF = [
+ layer_def("Ether", dst=TEST_MAC_1),
+ layer_def("IP", dst="127.0.0.1"),
+ layer_def("TCP", sport="443")
+ ]
+
+def test_build_pkt():
+ pkt = build_pkt_get_scapy(TEST_PKT_DEF)
+ assert(pkt[TCP].sport == 443)
+
+def test_build_invalid_structure_pkt():
+ ether_fields = {"dst": TEST_MAC_1, "type": "LOOP"}
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether", **ether_fields),
+ layer_def("IP"),
+ layer_def("TCP", sport=8080)
+ ])
+ assert(pkt[Ether].dst == TEST_MAC_1)
+ assert(isinstance(pkt[Ether].payload, Raw))
+
+def test_reconstruct_pkt():
+ res = reconstruct_pkt(base64.b64encode(bytes(TEST_PKT)), None)
+ pkt = build_pkt_to_scapy(res)
+ assert(pkt[TCP].sport == 443)
+
+def test_layer_del():
+ modif = [
+ {"id": "Ether"},
+ {"id": "IP"},
+ {"id": "TCP", "delete": True},
+ ]
+ res = reconstruct_pkt(base64.b64encode(bytes(TEST_PKT)), modif)
+ pkt = build_pkt_to_scapy(res)
+ assert(not pkt[IP].payload)
+
+def test_layer_field_edit():
+ modif = [
+ {"id": "Ether"},
+ {"id": "IP"},
+ {"id": "TCP", "fields": [{"id": "dport", "value": 777}]},
+ ]
+ res = reconstruct_pkt(base64.b64encode(bytes(TEST_PKT)), modif)
+ pkt = build_pkt_to_scapy(res)
+ assert(pkt[TCP].dport == 777)
+ assert(pkt[TCP].sport == 443)
+
+def test_layer_add():
+ modif = [
+ {"id": "Ether"},
+ {"id": "IP"},
+ {"id": "TCP"},
+ {"id": "Raw", "fields": [{"id": "load", "value": "GET /helloworld HTTP/1.0\n\n"}]},
+ ]
+ res = reconstruct_pkt(base64.b64encode(bytes(TEST_PKT)), modif)
+ pkt = build_pkt_to_scapy(res)
+ assert("GET /helloworld" in str(pkt[TCP].payload.load))
+
+def test_build_Raw():
+ pkt = build_pkt_get_scapy([
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("TCP"),
+ layer_def("Raw", load={"vtype": "BYTES", "base64": bytes_to_b64(b"hi")})
+ ])
+ assert(pkt[Raw].load == b"hi")
+
+def test_get_all():
+ service.get_all(v_handler)
+
+def test_get_definitions_all():
+ get_definitions(None)
+ def_classnames = [pdef['id'] for pdef in get_definitions(None)['protocols']]
+ assert("IP" in def_classnames)
+ assert("Dot1Q" in def_classnames)
+ assert("TCP" in def_classnames)
+
+def test_get_definitions_ether():
+ res = get_definitions(["Ether"])
+ assert(len(res['protocols']) == 1)
+ assert(res['protocols'][0]['id'] == "Ether")
+
+def test_get_payload_classes():
+ eth_payloads = get_payload_classes([{"id":"Ether"}])
+ assert("IP" in eth_payloads)
+ assert("Dot1Q" in eth_payloads)
+ assert("TCP" not in eth_payloads)
+
+def test_pcap_read_and_write():
+ pkts_to_write = [bytes_to_b64(bytes(TEST_PKT))]
+ pcap_b64 = service.write_pcap(v_handler, pkts_to_write)
+ array_pkt = service.read_pcap(v_handler, pcap_b64)
+ pkt = build_pkt_to_scapy(array_pkt[0])
+ assert(pkt[Ether].dst == TEST_MAC_1)
+
+def test_layer_default_value():
+ res = build_pkt([
+ layer_def("Ether", src={"vtype": "UNDEFINED"})
+ ])
+ ether_fields = fields_to_map(res['data'][0]['fields'])
+ assert(re.match(RE_MAC, ether_fields['src']['value']))
+
+def test_layer_random_value():
+ res = build_pkt([
+ layer_def("Ether", src={"vtype": "RANDOM"})
+ ])
+ ether_fields = fields_to_map(res['data'][0]['fields'])
+ assert(re.match(RE_MAC, ether_fields['src']['value']))
+
+def test_layer_wrong_structure():
+ payload = [
+ layer_def("Ether"),
+ layer_def("IP"),
+ layer_def("Raw", load="dummy"),
+ layer_def("Ether"),
+ layer_def("IP"),
+ ]
+ res = build_pkt(payload)
+ pkt = build_pkt_to_scapy(res)
+ assert(type(pkt[0]) is Ether)
+ assert(type(pkt[1]) is IP)
+ assert(isinstance(pkt[2], Raw))
+ assert(not pkt[2].payload)
+ model = res["data"]
+ assert(len(payload) == len(model))
+ # verify same protocol structure as in abstract model
+ # and all fields defined
+ for depth in range(len(payload)):
+ layer_model = model[depth]
+ layer_fields = fields_to_map(layer_model["fields"])
+ assert(payload[depth]["id"] == model[depth]["id"])
+ for field in layer_model["fields"]:
+ required_field_properties = ["value", "hvalue", "offset"]
+ for field_property in required_field_properties:
+ assert(field[field_property] is not None)
+ if (model[depth]["id"] == "Ether"):
+ assert(layer_fields["type"]["hvalue"] == "IPv4")
+ real_structure = [layer["real_id"] for layer in model]
+ valid_structure_flags = [layer["valid_structure"] for layer in model]
+ assert(real_structure == ["Ether", "IP", "Raw", None, None])
+ assert(valid_structure_flags == [True, True, True, False, False])
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py
new file mode 100755
index 00000000..8f7f7b01
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/zmq_for_scapy_server_test.py
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py
new file mode 100644
index 00000000..c6e14df3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/__init__.py
@@ -0,0 +1,7 @@
+import sys
+
+if sys.version_info < (2, 7):
+ print("\n**** TRex STL package requires Python version >= 2.7 ***\n")
+ exit(-1)
+
+from . import trex_stl_ext
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py
new file mode 100644
index 00000000..bd95a20a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/api.py
@@ -0,0 +1,18 @@
+
+# client and exceptions
+from .trex_stl_exceptions import *
+from .trex_stl_client import STLClient, LoggerApi
+
+# streams
+from .trex_stl_streams import *
+
+# packet builder
+from .trex_stl_packet_builder_scapy import *
+from scapy.all import *
+
+
+# simulator
+from .trex_stl_sim import STLSim
+
+# std lib (various lib functions)
+from .trex_stl_std import *
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
new file mode 100644
index 00000000..2c95844b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py
@@ -0,0 +1,440 @@
+#!/router/bin/python
+
+import json
+import threading
+import time
+import datetime
+import zmq
+import re
+import random
+
+from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage
+
+from .utils.text_opts import *
+from .trex_stl_stats import *
+from .trex_stl_types import *
+from .utils.zipmsg import ZippedMsg
+
+# basic async stats class
+class CTRexAsyncStats(object):
+ def __init__ (self):
+ self.ref_point = None
+ self.current = {}
+ self.last_update_ts = datetime.datetime.now()
+
+ def update (self, snapshot):
+
+ #update
+ self.last_update_ts = datetime.datetime.now()
+
+ self.current = snapshot
+
+ if self.ref_point is None:
+ self.ref_point = self.current
+
+ def clear(self):
+ self.ref_point = self.current
+
+
+ def get(self, field, format=False, suffix=""):
+
+ if not field in self.current:
+ return "N/A"
+
+ if not format:
+ return self.current[field]
+ else:
+ return format_num(self.current[field], suffix)
+
+ def get_rel (self, field, format=False, suffix=""):
+ if not field in self.current:
+ return "N/A"
+
+ if not format:
+ return (self.current[field] - self.ref_point[field])
+ else:
+ return format_num(self.current[field] - self.ref_point[field], suffix)
+
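+ # Example (sketch): with a hypothetical counter 'opackets', if the first
+ # snapshot had 100 and the current one has 150, get('opackets') returns
+ # 150 while get_rel('opackets') returns 50 (relative to the ref point,
+ # which clear() resets to the current snapshot)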
+
+ # return true if new data has arrived in the past 2 seconds
+ def is_online (self):
+ delta_ms = (datetime.datetime.now() - self.last_update_ts).total_seconds() * 1000
+ return (delta_ms < 2000)
+
+# describes the general stats provided by TRex
+class CTRexAsyncStatsGeneral(CTRexAsyncStats):
+ def __init__ (self):
+ super(CTRexAsyncStatsGeneral, self).__init__()
+
+
+# per port stats
+class CTRexAsyncStatsPort(CTRexAsyncStats):
+ def __init__ (self):
+ super(CTRexAsyncStatsPort, self).__init__()
+
+ def get_stream_stats (self, stream_id):
+ return None
+
+# stats manager
+class CTRexAsyncStatsManager():
+ def __init__ (self):
+
+ self.general_stats = CTRexAsyncStatsGeneral()
+ self.port_stats = {}
+
+
+ def get_general_stats(self):
+ return self.general_stats
+
+ def get_port_stats (self, port_id):
+
+ if not str(port_id) in self.port_stats:
+ return None
+
+ return self.port_stats[str(port_id)]
+
+
+ def update(self, data):
+ self.__handle_snapshot(data)
+
+ def __handle_snapshot(self, snapshot):
+
+ general_stats = {}
+ port_stats = {}
+
+ # filter the values per port and general
+ for key, value in snapshot.items():
+
+ # match a pattern of ports
+ m = re.search(r'(.*)-([0-8])', key)
+ if m:
+
+ port_id = m.group(2)
+ field_name = m.group(1)
+
+ if not port_id in port_stats:
+ port_stats[port_id] = {}
+
+ port_stats[port_id][field_name] = value
+
+ else:
+ # no port match - general stats
+ general_stats[key] = value
+
+ # update the general object with the snapshot
+ self.general_stats.update(general_stats)
+
+ # update all ports
+ for port_id, data in port_stats.items():
+
+ if not port_id in self.port_stats:
+ self.port_stats[port_id] = CTRexAsyncStatsPort()
+
+ self.port_stats[port_id].update(data)
+
+
+
+
+
+class CTRexAsyncClient():
+ def __init__ (self, server, port, stateless_client):
+
+ self.port = port
+ self.server = server
+
+ self.stateless_client = stateless_client
+
+ self.event_handler = stateless_client.event_handler
+ self.logger = self.stateless_client.logger
+
+ self.raw_snapshot = {}
+
+ self.stats = CTRexAsyncStatsManager()
+
+ self.last_data_recv_ts = 0
+ self.async_barrier = None
+
+ self.monitor = AsyncUtil()
+
+ self.connected = False
+
+ self.zipped = ZippedMsg()
+
+ # connects the async channel
+ def connect (self):
+
+ if self.connected:
+ self.disconnect()
+
+ self.tr = "tcp://{0}:{1}".format(self.server, self.port)
+
+ # Socket to talk to server
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.SUB)
+
+
+ # before running the thread - mark as active
+ self.active = True
+ self.t = threading.Thread(target = self._run)
+
+ # kill this thread on exit and don't add it to the join list
+ self.t.setDaemon(True)
+ self.t.start()
+
+ self.connected = True
+
+ # first barrier - make sure async thread is up
+ rc = self.barrier()
+ if not rc:
+ self.disconnect()
+ return rc
+
+ # second barrier - sync all stats data as a baseline from the server
+ rc = self.barrier(baseline = True)
+ if not rc:
+ self.disconnect()
+ return rc
+
+ return RC_OK()
+
+
+
+
+ # disconnect
+ def disconnect (self):
+ if not self.connected:
+ return
+
+ # mark for join
+ self.active = False
+
+ # signal that the context was destroyed (exit the thread loop)
+ self.context.term()
+
+ # join
+ self.t.join()
+
+ # done
+ self.connected = False
+
+
+ # thread function
+ def _run (self):
+
+ # socket must be created on the same thread
+ self.socket.setsockopt(zmq.SUBSCRIBE, b'')
+ self.socket.setsockopt(zmq.RCVTIMEO, 5000)
+ self.socket.connect(self.tr)
+
+ got_data = False
+
+ self.monitor.reset()
+
+
+ while self.active:
+ try:
+
+ with self.monitor:
+ line = self.socket.recv()
+
+ self.monitor.on_recv_msg(line)
+
+ # try to decompress
+ unzipped = self.zipped.decompress(line)
+ if unzipped:
+ line = unzipped
+
+ line = line.decode()
+
+ self.last_data_recv_ts = time.time()
+
+ # signal once
+ if not got_data:
+ self.event_handler.on_async_alive()
+ got_data = True
+
+
+ # got a timeout - mark as not alive and retry
+ except zmq.Again:
+ # signal once
+ if got_data:
+ self.event_handler.on_async_dead()
+ got_data = False
+
+ continue
+
+ except zmq.ContextTerminated:
+ # outside thread signaled us to exit
+ assert(not self.active)
+ break
+
+ msg = json.loads(line)
+
+ name = msg['name']
+ data = msg['data']
+ type = msg['type']
+ baseline = msg.get('baseline', False)
+
+ self.raw_snapshot[name] = data
+
+ self.__dispatch(name, type, data, baseline)
+
+
+ # closing of socket must be from the same thread
+ self.socket.close(linger = 0)
+
+ def is_thread_alive (self):
+ return self.t.is_alive()
+
+ # did we get info in the last 3 seconds?
+ def is_alive (self):
+ if self.last_data_recv_ts is None:
+ return False
+
+ return ( (time.time() - self.last_data_recv_ts) < 3 )
+
+ def get_stats (self):
+ return self.stats
+
+ def get_raw_snapshot (self):
+ return self.raw_snapshot
+
+ # dispatch the message to the right place
+ def __dispatch (self, name, type, data, baseline):
+
+ # stats
+ if name == "trex-global":
+ self.event_handler.on_async_stats_update(data, baseline)
+
+ # events
+ elif name == "trex-event":
+ self.event_handler.on_async_event(type, data)
+
+ # barriers
+ elif name == "trex-barrier":
+ self.handle_async_barrier(type, data)
+
+ elif name == "flow_stats":
+ self.event_handler.on_async_rx_stats_event(data, baseline)
+
+ elif name == "latency_stats":
+ self.event_handler.on_async_latency_stats_event(data, baseline)
+
+ else:
+ pass
+
+
+ # async barrier handling routine
+ def handle_async_barrier (self, type, data):
+ if self.async_barrier['key'] == type:
+ self.async_barrier['ack'] = True
+
+
+ # block on barrier for async channel
+ def barrier(self, timeout = 5, baseline = False):
+
+ # set a random key
+ key = random.getrandbits(32)
+ self.async_barrier = {'key': key, 'ack': False}
+
+ # expiration time
+ expr = time.time() + timeout
+
+ while not self.async_barrier['ack']:
+
+ # inject
+ rc = self.stateless_client._transmit("publish_now", params = {'key' : key, 'baseline': baseline})
+ if not rc:
+ return rc
+
+ # fast loop
+ for i in range(0, 100):
+ if self.async_barrier['ack']:
+ break
+ time.sleep(0.001)
+
+ if time.time() > expr:
+ return RC_ERR("*** [subscriber] - timeout - no data flow from server at : " + self.tr)
+
+ return RC_OK()
+
+
+# a class to measure util. of async subscriber thread
+class AsyncUtil(object):
+
+ STATE_SLEEP = 1
+ STATE_AWAKE = 2
+
+ def __init__ (self):
+ self.reset()
+
+
+ def reset (self):
+ self.state = self.STATE_AWAKE
+ self.clock = time.time()
+
+ # reset the current interval
+ self.interval = {'ts': time.time(), 'total_sleep': 0, 'total_bits': 0}
+
+ # global counters
+ self.cpu_util = 0
+ self.bps = 0
+
+
+ def on_recv_msg (self, message):
+ self.interval['total_bits'] += len(message) * 8.0
+
+ self._tick()
+
+
+ def __enter__ (self):
+ assert(self.state == self.STATE_AWAKE)
+ self.state = self.STATE_SLEEP
+
+ self.sleep_start_ts = time.time()
+
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ assert(self.state == self.STATE_SLEEP)
+ self.state = self.STATE_AWAKE
+
+ # measure total sleep time for interval
+ self.interval['total_sleep'] += time.time() - self.sleep_start_ts
+
+ self._tick()
+
+ def _tick (self):
+ # how long has the current interval lasted
+ ts = time.time() - self.interval['ts']
+ if ts < 1:
+ return
+
+ # if tick is in the middle of sleep - add the interval and reset
+ if self.state == self.STATE_SLEEP:
+ self.interval['total_sleep'] += time.time() - self.sleep_start_ts
+ self.sleep_start_ts = time.time()
+
+ # add the interval
+ if self.interval['total_sleep'] > 0:
+ # calculate
+ self.cpu_util = self.cpu_util * 0.75 + (float(ts - self.interval['total_sleep']) / ts) * 0.25
+ self.interval['total_sleep'] = 0
+
+
+ if self.interval['total_bits'] > 0:
+ # calculate
+ self.bps = self.bps * 0.75 + ( self.interval['total_bits'] / ts ) * 0.25
+ self.interval['total_bits'] = 0
+
+ # reset the interval's clock
+ self.interval['ts'] = time.time()
+
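+ # Note (sketch): cpu_util and bps above are exponentially weighted moving
+ # averages: new = 0.75 * old + 0.25 * sample, where the cpu sample is the
+ # awake fraction of the interval and the bps sample is bits per interval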
+
+ def get_cpu_util (self):
+ self._tick()
+ return (self.cpu_util * 100)
+
+ def get_bps (self):
+ self._tick()
+ return (self.bps)
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
new file mode 100755
index 00000000..80a4c4dc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
@@ -0,0 +1,3370 @@
+#!/router/bin/python
+
+# for API usage the path name must be full
+from .trex_stl_exceptions import *
+from .trex_stl_streams import *
+
+from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage
+from . import trex_stl_stats
+
+from .trex_stl_port import Port
+from .trex_stl_types import *
+from .trex_stl_async_client import CTRexAsyncClient
+
+from .utils import parsing_opts, text_tables, common
+from .utils.common import list_intersect, list_difference, is_sub_list, PassiveTimer
+from .utils.text_opts import *
+from functools import wraps
+
+from collections import namedtuple
+from yaml import YAMLError
+import time
+import datetime
+import re
+import random
+import json
+import traceback
+
+############################ logger #############################
+############################ #############################
+############################ #############################
+
+# logger API for the client
+class LoggerApi(object):
+ # verbose levels
+ VERBOSE_QUIET = 0
+ VERBOSE_REGULAR = 1
+ VERBOSE_HIGH = 2
+
+ def __init__(self):
+ self.level = LoggerApi.VERBOSE_REGULAR
+
+ # implemented by specific logger
+ def write(self, msg, newline = True):
+ raise Exception("Implement this")
+
+ # implemented by specific logger
+ def flush(self):
+ raise Exception("Implement this")
+
+ def set_verbose (self, level):
+ if level not in range(self.VERBOSE_QUIET, self.VERBOSE_HIGH + 1):
+ raise ValueError("Bad value provided for logger")
+
+ self.level = level
+
+ def get_verbose (self):
+ return self.level
+
+
+ def check_verbose (self, level):
+ return (self.level >= level)
+
+
+ # simple log message with verbose
+ def log (self, msg, level = VERBOSE_REGULAR, newline = True):
+ if not self.check_verbose(level):
+ return
+
+ self.write(msg, newline)
+
+ # logging that comes from async event
+ def async_log (self, msg, level = VERBOSE_REGULAR, newline = True):
+ self.log(msg, level, newline)
+
+
+ def pre_cmd (self, desc):
+ self.log(format_text('\n{:<60}'.format(desc), 'bold'), newline = False)
+ self.flush()
+
+ def post_cmd (self, rc):
+ if rc:
+ self.log(format_text("[SUCCESS]\n", 'green', 'bold'))
+ else:
+ self.log(format_text("[FAILED]\n", 'red', 'bold'))
+
+
+ def log_cmd (self, desc):
+ self.pre_cmd(desc)
+ self.post_cmd(True)
+
+
+ # suppress object getter
+ def supress (self):
+ class Supress(object):
+ def __init__ (self, logger):
+ self.logger = logger
+
+ def __enter__ (self):
+ self.saved_level = self.logger.get_verbose()
+ self.logger.set_verbose(LoggerApi.VERBOSE_QUIET)
+
+ def __exit__ (self, type, value, traceback):
+ self.logger.set_verbose(self.saved_level)
+
+ return Supress(self)
+
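+ # Example (sketch): typical usage of the supress() context manager, with a
+ # hypothetical client call whose output should be muted:
+ #   with logger.supress():
+ #       client.connect()
+ # the previous verbosity level is restored on exit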
+
+
+# default logger - to stdout
+class DefaultLogger(LoggerApi):
+
+ def __init__ (self):
+ super(DefaultLogger, self).__init__()
+
+ def write (self, msg, newline = True):
+ if newline:
+ print(msg)
+ else:
+ # trailing-comma print is python2-only; write directly for both versions
+ sys.stdout.write(str(msg))
+
+ def flush (self):
+ sys.stdout.flush()
+
+
+############################ async event hander #############################
+############################ #############################
+############################ #############################
+
+# an event
+class Event(object):
+
+ def __init__ (self, origin, ev_type, msg):
+ self.origin = origin
+ self.ev_type = ev_type
+ self.msg = msg
+
+ self.ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
+
+ def __str__ (self):
+
+ prefix = "[{:^}][{:^}]".format(self.origin, self.ev_type)
+
+ return "{:<10} - {:18} - {:}".format(self.ts, prefix, format_text(self.msg, 'bold'))
+
+
+# handles different async events given to the client
+class EventsHandler(object):
+
+
+ def __init__ (self, client):
+ self.client = client
+ self.logger = self.client.logger
+
+ self.events = []
+
+ # public functions
+
+ def get_events (self, ev_type_filter = None):
+ if ev_type_filter:
+ return [ev for ev in self.events if ev.ev_type in listify(ev_type_filter)]
+ else:
+ return [ev for ev in self.events]
+
+
+ def clear_events (self):
+ self.events = []
+
+
+ def log_warning (self, msg, show = True):
+ self.__add_event_log('local', 'warning', msg, show)
+
+
+ # events called internally
+
+ def on_async_dead (self):
+ if self.client.connected:
+ msg = 'Lost connection to server'
+ self.__add_event_log('local', 'info', msg, True)
+ self.client.connected = False
+
+
+ def on_async_alive (self):
+ pass
+
+
+
+ def on_async_rx_stats_event (self, data, baseline):
+ self.client.flow_stats.update(data, baseline)
+
+ def on_async_latency_stats_event (self, data, baseline):
+ self.client.latency_stats.update(data, baseline)
+
+ # handles an async stats update from the subscriber
+ def on_async_stats_update(self, dump_data, baseline):
+ global_stats = {}
+ port_stats = {}
+
+ # filter the values per port and general
+ for key, value in dump_data.items():
+ # match a pattern of ports
+ m = re.search(r'(.*)-(\d+)', key)
+ if m:
+ port_id = int(m.group(2))
+ field_name = m.group(1)
+ if port_id in self.client.ports:
+ if not port_id in port_stats:
+ port_stats[port_id] = {}
+ port_stats[port_id][field_name] = value
+ else:
+ continue
+ else:
+ # no port match - general stats
+ global_stats[key] = value
+
+ # update the general object with the snapshot
+ self.client.global_stats.update(global_stats, baseline)
+
+ # update all ports
+ for port_id, data in port_stats.items():
+ self.client.ports[port_id].port_stats.update(data, baseline)
+
+
+
+ # dispatcher for server async events (port started, port stopped and etc.)
+ def on_async_event (self, event_type, data):
+ # DP stopped
+ show_event = False
+
+ # port started
+ if (event_type == 0):
+ port_id = int(data['port_id'])
+ ev = "Port {0} has started".format(port_id)
+ self.__async_event_port_started(port_id)
+
+ # port stopped
+ elif (event_type == 1):
+ port_id = int(data['port_id'])
+ ev = "Port {0} has stopped".format(port_id)
+
+ # call the handler
+ self.__async_event_port_stopped(port_id)
+
+
+ # port paused
+ elif (event_type == 2):
+ port_id = int(data['port_id'])
+ ev = "Port {0} has paused".format(port_id)
+
+ # call the handler
+ self.__async_event_port_paused(port_id)
+
+ # port resumed
+ elif (event_type == 3):
+ port_id = int(data['port_id'])
+ ev = "Port {0} has resumed".format(port_id)
+
+ # call the handler
+ self.__async_event_port_resumed(port_id)
+
+ # port finished traffic
+ elif (event_type == 4):
+ port_id = int(data['port_id'])
+ ev = "Port {0} job done".format(port_id)
+
+ # call the handler
+ self.__async_event_port_job_done(port_id)
+ show_event = True
+
+ # port was acquired - maybe stolen...
+ elif (event_type == 5):
+ session_id = data['session_id']
+
+ port_id = int(data['port_id'])
+ who = data['who']
+ force = data['force']
+
+ # if we hold the port and it was not taken by this session - show it
+ if port_id in self.client.get_acquired_ports() and session_id != self.client.session_id:
+ show_event = True
+
+ # format the thief/us...
+ if session_id == self.client.session_id:
+ user = 'you'
+ elif who == self.client.username:
+                user = 'another session of yours'
+ else:
+ user = "'{0}'".format(who)
+
+ if force:
+ ev = "Port {0} was forcely taken by {1}".format(port_id, user)
+ else:
+ ev = "Port {0} was taken by {1}".format(port_id, user)
+
+            # call the handler in case it's not this session
+ if session_id != self.client.session_id:
+ self.__async_event_port_acquired(port_id, who)
+
+
+ # port was released
+ elif (event_type == 6):
+ port_id = int(data['port_id'])
+ who = data['who']
+ session_id = data['session_id']
+
+ if session_id == self.client.session_id:
+ user = 'you'
+ elif who == self.client.username:
+                user = 'another session of yours'
+ else:
+ user = "'{0}'".format(who)
+
+ ev = "Port {0} was released by {1}".format(port_id, user)
+
+            # call the handler in case it's not this session
+ if session_id != self.client.session_id:
+ self.__async_event_port_released(port_id)
+
+ elif (event_type == 7):
+ port_id = int(data['port_id'])
+ ev = "port {0} job failed".format(port_id)
+ show_event = True
+
+ # port attr changed
+ elif (event_type == 8):
+ port_id = int(data['port_id'])
+ if data['attr'] == self.client.ports[port_id].attr:
+ return # false alarm
+ old_info = self.client.ports[port_id].get_info()
+ self.__async_event_port_attr_changed(port_id, data['attr'])
+ new_info = self.client.ports[port_id].get_info()
+ ev = "port {0} attributes changed".format(port_id)
+ for key, old_val in old_info.items():
+ new_val = new_info[key]
+ if old_val != new_val:
+ ev += '\n {key}: {old} -> {new}'.format(
+ key = key,
+ old = old_val.lower() if type(old_val) is str else old_val,
+ new = new_val.lower() if type(new_val) is str else new_val)
+ show_event = True
+
+ # server stopped
+ elif (event_type == 100):
+ ev = "Server has stopped"
+ self.__async_event_server_stopped()
+ show_event = True
+
+
+ else:
+ # unknown event - ignore
+ return
+
+
+ self.__add_event_log('server', 'info', ev, show_event)
+
+
+ # private functions
+
+    # in rare cases, events may arrive for a non-existent port
+    # (server was re-run with a different config)
+ def __async_event_port_job_done (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_job_done()
+
+ def __async_event_port_stopped (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_stopped()
+
+
+ def __async_event_port_started (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_started()
+
+ def __async_event_port_paused (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_paused()
+
+
+ def __async_event_port_resumed (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_resumed()
+
+ def __async_event_port_acquired (self, port_id, who):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_acquired(who)
+
+ def __async_event_port_released (self, port_id):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_released()
+
+ def __async_event_server_stopped (self):
+ self.client.connected = False
+
+ def __async_event_port_attr_changed (self, port_id, attr):
+ if port_id in self.client.ports:
+ self.client.ports[port_id].async_event_port_attr_changed(attr)
+
+ # add event to log
+ def __add_event_log (self, origin, ev_type, msg, show = False):
+
+ event = Event(origin, ev_type, msg)
+ self.events.append(event)
+ if show:
+ self.logger.async_log("\n\n{0}".format(str(event)))
+
+
+
+
+
+############################ RPC layer #############################
+############################ #############################
+############################ #############################
+
+class CCommLink(object):
+ """Describes the connectivity of the stateless client method"""
+ def __init__(self, server="localhost", port=5050, virtual=False, client = None):
+ self.virtual = virtual
+ self.server = server
+ self.port = port
+ self.rpc_link = JsonRpcClient(self.server, self.port, client)
+
+ @property
+ def is_connected(self):
+ if not self.virtual:
+ return self.rpc_link.connected
+ else:
+ return True
+
+ def get_server (self):
+ return self.server
+
+ def get_port (self):
+ return self.port
+
+ def connect(self):
+ if not self.virtual:
+ return self.rpc_link.connect()
+
+ def disconnect(self):
+ if not self.virtual:
+ return self.rpc_link.disconnect()
+
+ def transmit(self, method_name, params = None, api_class = 'core'):
+ if self.virtual:
+ self._prompt_virtual_tx_msg()
+ _, msg = self.rpc_link.create_jsonrpc_v2(method_name, params, api_class)
+ print(msg)
+ return
+ else:
+ return self.rpc_link.invoke_rpc_method(method_name, params, api_class)
+
+ def transmit_batch(self, batch_list):
+ if self.virtual:
+ self._prompt_virtual_tx_msg()
+ print([msg
+ for _, msg in [self.rpc_link.create_jsonrpc_v2(command.method, command.params, command.api_class)
+ for command in batch_list]])
+ else:
+ batch = self.rpc_link.create_batch()
+ for command in batch_list:
+ batch.add(command.method, command.params, command.api_class)
+ # invoke the batch
+ return batch.invoke()
+
+ def _prompt_virtual_tx_msg(self):
+ print("Transmitting virtually over tcp://{server}:{port}".format(server=self.server,
+ port=self.port))
+
+
+
+############################ client #############################
+############################ #############################
+############################ #############################
+
+class STLClient(object):
+ """TRex Stateless client object - gives operations per TRex/user"""
+
+ # different modes for attaching traffic to ports
+ CORE_MASK_SPLIT = 1
+ CORE_MASK_PIN = 2
+
+ def __init__(self,
+ username = common.get_current_user(),
+ server = "localhost",
+ sync_port = 4501,
+ async_port = 4500,
+ verbose_level = LoggerApi.VERBOSE_QUIET,
+ logger = None,
+ virtual = False):
+ """
+ Configure the connection settings
+
+ :parameters:
+ username : string
+ the user name, for example imarom
+
+ server : string
+ the server name or ip
+
+ sync_port : int
+ the RPC port
+
+ async_port : int
+ the ASYNC port
+
+ .. code-block:: python
+
+ # Example
+
+ # connect to local TRex server
+ c = STLClient()
+
+ # connect to remote server trex-remote-server
+ c = STLClient(server = "trex-remote-server" )
+
+ c = STLClient(server = "10.0.0.10" )
+
+ # verbose mode
+ c = STLClient(server = "10.0.0.10", verbose_level = LoggerApi.VERBOSE_HIGH )
+
+ # change user name
+ c = STLClient(username = "root",server = "10.0.0.10", verbose_level = LoggerApi.VERBOSE_HIGH )
+
+ c.connect()
+
+ c.disconnect()
+
+ """
+
+ self.username = username
+
+ # init objects
+ self.ports = {}
+ self.server_version = {}
+ self.system_info = {}
+ self.session_id = random.getrandbits(32)
+ self.connected = False
+
+ # API classes
+ self.api_vers = [ {'type': 'core', 'major': 2, 'minor': 3 } ]
+ self.api_h = {'core': None}
+
+ # logger
+ self.logger = DefaultLogger() if not logger else logger
+
+ # initial verbose
+ self.logger.set_verbose(verbose_level)
+
+ # low level RPC layer
+ self.comm_link = CCommLink(server,
+ sync_port,
+ virtual,
+ self)
+
+ # async event handler manager
+ self.event_handler = EventsHandler(self)
+
+ # async subscriber level
+ self.async_client = CTRexAsyncClient(server,
+ async_port,
+ self)
+
+
+
+
+ # stats
+ self.connection_info = {"username": username,
+ "server": server,
+ "sync_port": sync_port,
+ "async_port": async_port,
+ "virtual": virtual}
+
+
+ self.global_stats = trex_stl_stats.CGlobalStats(self.connection_info,
+ self.server_version,
+ self.ports,
+ self.event_handler)
+
+ self.flow_stats = trex_stl_stats.CRxStats(self.ports)
+
+ self.latency_stats = trex_stl_stats.CLatencyStats(self.ports)
+
+ self.util_stats = trex_stl_stats.CUtilStats(self)
+
+ self.xstats = trex_stl_stats.CXStats(self)
+
+ self.stats_generator = trex_stl_stats.CTRexInfoGenerator(self.global_stats,
+ self.ports,
+ self.flow_stats,
+ self.latency_stats,
+ self.util_stats,
+ self.xstats,
+ self.async_client.monitor)
+
+
+
+
+ ############# private functions - used by the class itself ###########
+
+ # some preprocessing for port argument
+ def __ports (self, port_id_list):
+
+ # none means all
+        if port_id_list is None:
+ return range(0, self.get_port_count())
+
+ # always list
+ if isinstance(port_id_list, int):
+ port_id_list = [port_id_list]
+
+ if not isinstance(port_id_list, list):
+ raise ValueError("Bad port id list: {0}".format(port_id_list))
+
+ for port_id in port_id_list:
+            if not isinstance(port_id, int) or (port_id < 0) or (port_id >= self.get_port_count()):
+ raise ValueError("Bad port id {0}".format(port_id))
+
+ return port_id_list
+
+
+ # sync ports
+ def __sync_ports (self, port_id_list = None, force = False):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].sync())
+
+ return rc
+
+ # acquire ports, if port_list is none - get all
+ def __acquire (self, port_id_list = None, force = False, sync_streams = True):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].acquire(force, sync_streams))
+
+ return rc
+
+ # release ports
+ def __release (self, port_id_list = None):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].release())
+
+ return rc
+
+
+ def __add_streams(self, stream_list, port_id_list = None):
+
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].add_streams(stream_list))
+
+ return rc
+
+
+
+ def __remove_streams(self, stream_id_list, port_id_list = None):
+
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].remove_streams(stream_id_list))
+
+ return rc
+
+
+
+ def __remove_all_streams(self, port_id_list = None):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].remove_all_streams())
+
+ return rc
+
+
+ def __get_stream(self, stream_id, port_id, get_pkt = False):
+
+ return self.ports[port_id].get_stream(stream_id)
+
+
+ def __get_all_streams(self, port_id, get_pkt = False):
+
+ return self.ports[port_id].get_all_streams()
+
+
+ def __get_stream_id_list(self, port_id):
+
+ return self.ports[port_id].get_stream_id_list()
+
+
+ def __start (self,
+ multiplier,
+ duration,
+ port_id_list,
+ force,
+ core_mask):
+
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].start(multiplier,
+ duration,
+ force,
+ core_mask[port_id]))
+
+ return rc
+
+
+ def __resume (self, port_id_list = None, force = False):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].resume())
+
+ return rc
+
+ def __pause (self, port_id_list = None, force = False):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].pause())
+
+ return rc
+
+
+ def __stop (self, port_id_list = None, force = False):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].stop(force))
+
+ return rc
+
+
+ def __update (self, mult, port_id_list = None, force = False):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].update(mult, force))
+
+ return rc
+
+
+ def __push_remote (self, pcap_filename, port_id_list, ipg_usec, speedup, count, duration, is_dual):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+
+ # for dual, provide the slave handler as well
+ slave_handler = self.ports[port_id ^ 0x1].handler if is_dual else ""
+
+ rc.add(self.ports[port_id].push_remote(pcap_filename,
+ ipg_usec,
+ speedup,
+ count,
+ duration,
+ is_dual,
+ slave_handler))
+
+ return rc
+
+
+ def __validate (self, port_id_list = None):
+ port_id_list = self.__ports(port_id_list)
+
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].validate())
+
+ return rc
+
+
+ def __set_port_attr (self, port_id_list = None, attr_dict = None):
+
+ port_id_list = self.__ports(port_id_list)
+ rc = RC()
+
+ for port_id in port_id_list:
+ rc.add(self.ports[port_id].set_attr(attr_dict))
+
+ return rc
+
+
+
+ # connect to server
+ def __connect(self):
+
+ # first disconnect if already connected
+ if self.is_connected():
+ self.__disconnect()
+
+ # clear this flag
+ self.connected = False
+
+ # connect sync channel
+ self.logger.pre_cmd("Connecting to RPC server on {0}:{1}".format(self.connection_info['server'], self.connection_info['sync_port']))
+ rc = self.comm_link.connect()
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ return rc
+
+
+ # API sync
+ rc = self._transmit("api_sync", params = {'api_vers': self.api_vers}, api_class = None)
+ if not rc:
+ return rc
+
+ # decode
+ for api in rc.data()['api_vers']:
+ self.api_h[ api['type'] ] = api['api_h']
+
+
+ # version
+ rc = self._transmit("get_version")
+ if not rc:
+ return rc
+
+ self.server_version = rc.data()
+ self.global_stats.server_version = rc.data()
+
+ # cache system info
+ rc = self._transmit("get_system_info")
+ if not rc:
+ return rc
+
+ self.system_info = rc.data()
+ self.global_stats.system_info = rc.data()
+
+ # cache supported commands
+ rc = self._transmit("get_supported_cmds")
+ if not rc:
+ return rc
+
+ self.supported_cmds = sorted(rc.data())
+
+ # create ports
+ for port_id in range(self.system_info["port_count"]):
+ info = self.system_info['ports'][port_id]
+
+ self.ports[port_id] = Port(port_id,
+ self.username,
+ self.comm_link,
+ self.session_id,
+ info)
+
+
+ # sync the ports
+ rc = self.__sync_ports()
+ if not rc:
+ return rc
+
+
+ # connect async channel
+ self.logger.pre_cmd("Connecting to publisher server on {0}:{1}".format(self.connection_info['server'], self.connection_info['async_port']))
+ rc = self.async_client.connect()
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ return rc
+
+ self.connected = True
+
+ return RC_OK()
+
+
+    # disconnect from the server
+ def __disconnect(self, release_ports = True):
+ # release any previous acquired ports
+ if self.is_connected() and release_ports:
+ self.__release(self.get_acquired_ports())
+
+ self.comm_link.disconnect()
+ self.async_client.disconnect()
+
+ self.connected = False
+
+ return RC_OK()
+
+
+ # clear stats
+ def __clear_stats(self, port_id_list, clear_global, clear_flow_stats, clear_latency_stats, clear_xstats):
+
+ # we must be sync with the server
+ self.async_client.barrier()
+
+ for port_id in port_id_list:
+ self.ports[port_id].clear_stats()
+
+ if clear_global:
+ self.global_stats.clear_stats()
+
+ if clear_flow_stats:
+ self.flow_stats.clear_stats()
+
+ if clear_latency_stats:
+ self.latency_stats.clear_stats()
+
+ if clear_xstats:
+ self.xstats.clear_stats()
+
+ self.logger.log_cmd("Clearing stats on port(s) {0}:".format(port_id_list))
+
+ return RC
+
+
+ # get stats
+ def __get_stats (self, port_id_list):
+ stats = {}
+
+ stats['global'] = self.global_stats.get_stats()
+
+ total = {}
+ for port_id in port_id_list:
+ port_stats = self.ports[port_id].get_stats()
+ stats[port_id] = port_stats
+
+ for k, v in port_stats.items():
+ if not k in total:
+ total[k] = v
+ else:
+ total[k] += v
+
+ stats['total'] = total
+
+ stats['flow_stats'] = self.flow_stats.get_stats()
+ stats['latency'] = self.latency_stats.get_stats()
+
+ return stats
+
+
+ def __decode_core_mask (self, ports, core_mask):
+
+ # predefined modes
+ if isinstance(core_mask, int):
+ if core_mask not in [self.CORE_MASK_PIN, self.CORE_MASK_SPLIT]:
+ raise STLError("'core_mask' can be either CORE_MASK_PIN, CORE_MASK_SPLIT or a list of masks")
+
+ decoded_mask = {}
+ for port in ports:
+ # a pin mode was requested and we have
+ # the second port from the group in the start list
+ if (core_mask == self.CORE_MASK_PIN) and ( (port ^ 0x1) in ports ):
+                decoded_mask[port] = 0x55555555 if (port % 2) == 0 else 0xAAAAAAAA
+ else:
+ decoded_mask[port] = None
+
+ return decoded_mask
+
+ # list of masks
+ elif isinstance(core_mask, list):
+ if len(ports) != len(core_mask):
+ raise STLError("'core_mask' list must be the same length as 'ports' list")
+
+ decoded_mask = {}
+ for i, port in enumerate(ports):
+ decoded_mask[port] = core_mask[i]
+
+ return decoded_mask
+
+
+
+ ############ functions used by other classes but not users ##############
+
+ def _validate_port_list (self, port_id_list):
+        # listify a single int
+ if isinstance(port_id_list, int):
+ port_id_list = [port_id_list]
+
+ # should be a list
+ if not isinstance(port_id_list, list):
+ raise STLTypeError('port_id_list', type(port_id_list), list)
+
+ if not port_id_list:
+ raise STLError('No ports provided')
+
+ valid_ports = self.get_all_ports()
+ for port_id in port_id_list:
+ if not port_id in valid_ports:
+ raise STLError("Port ID '{0}' is not a valid port ID - valid values: {1}".format(port_id, valid_ports))
+
+ return port_id_list
+
+
+ # transmit request on the RPC link
+ def _transmit(self, method_name, params = None, api_class = 'core'):
+ return self.comm_link.transmit(method_name, params, api_class)
+
+ # transmit batch request on the RPC link
+ def _transmit_batch(self, batch_list):
+ return self.comm_link.transmit_batch(batch_list)
+
+ # stats
+ def _get_formatted_stats(self, port_id_list, stats_mask = trex_stl_stats.COMPACT):
+
+ stats_opts = common.list_intersect(trex_stl_stats.ALL_STATS_OPTS, stats_mask)
+
+ stats_obj = OrderedDict()
+ for stats_type in stats_opts:
+ stats_obj.update(self.stats_generator.generate_single_statistic(port_id_list, stats_type))
+
+ return stats_obj
+
+ def _get_streams(self, port_id_list, streams_mask=set()):
+
+ streams_obj = self.stats_generator.generate_streams_info(port_id_list, streams_mask)
+
+ return streams_obj
+
+
+ def _invalidate_stats (self, port_id_list):
+ for port_id in port_id_list:
+ self.ports[port_id].invalidate_stats()
+
+ self.global_stats.invalidate()
+ self.flow_stats.invalidate()
+
+ return RC_OK()
+
+
+ # remove all RX filters in a safe manner
+ def _remove_rx_filters (self, ports, rx_delay_ms):
+
+ # get the enabled RX ports
+ rx_ports = [port_id for port_id in ports if self.ports[port_id].has_rx_enabled()]
+
+ if not rx_ports:
+ return RC_OK()
+
+        # block while any RX-configured port has not yet had its delay expire
+ while any([not self.ports[port_id].has_rx_delay_expired(rx_delay_ms) for port_id in rx_ports]):
+ time.sleep(0.01)
+
+ # remove RX filters
+ rc = RC()
+ for port_id in rx_ports:
+ rc.add(self.ports[port_id].remove_rx_filters())
+
+ return rc
+
+
+ #################################
+ # ------ private methods ------ #
+ @staticmethod
+ def __get_mask_keys(ok_values={True}, **kwargs):
+ masked_keys = set()
+ for key, val in kwargs.items():
+ if val in ok_values:
+ masked_keys.add(key)
+ return masked_keys
+
+ @staticmethod
+ def __filter_namespace_args(namespace, ok_values):
+ return {k: v for k, v in namespace.__dict__.items() if k in ok_values}
+
+
+ # API decorator - double wrap because of argument
+ def __api_check(connected = True):
+
+ def wrap (f):
+ @wraps(f)
+ def wrap2(*args, **kwargs):
+ client = args[0]
+
+ func_name = f.__name__
+
+ # check connection
+ if connected and not client.is_connected():
+ raise STLStateError(func_name, 'disconnected')
+
+ try:
+ ret = f(*args, **kwargs)
+ except KeyboardInterrupt as e:
+ raise STLError("Interrupted by a keyboard signal (probably ctrl + c)")
+
+ return ret
+ return wrap2
+
+ return wrap
+
+
+
+ ############################ API #############################
+ ############################ #############################
+ ############################ #############################
+ def __enter__ (self):
+ self.connect()
+ self.acquire(force = True)
+ self.reset()
+ return self
+
+ def __exit__ (self, type, value, traceback):
+ if self.get_active_ports():
+ self.stop(self.get_active_ports())
+ self.disconnect()
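+
+    # a minimal usage sketch of the context-manager form (assumes a local
+    # TRex server and a 'profile' object already built by the caller):
+    #
+    #   with STLClient() as c:          # connect + force acquire + reset
+    #       c.add_streams(profile.get_streams(), ports = [0])
+    #       c.start(ports = [0], mult = "1mpps", duration = 10)
+    #       c.wait_on_traffic()
+    #   # on exit - any active ports are stopped and the client disconnects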
+
+ ############################ Getters #############################
+ ############################ #############################
+ ############################ #############################
+
+
+ # return verbose level of the logger
+ def get_verbose (self):
+ """
+        Get the verbose level
+
+ :parameters:
+ none
+
+ :return:
+            The current verbose level
+
+ :raises:
+ None
+
+ """
+ return self.logger.get_verbose()
+
+    # are all the ports acquired by this client ?
+    def is_all_ports_acquired (self):
+        """
+        Checks whether all the ports are acquired by this client
+
+ :parameters:
+ None
+
+ :return:
+ Returns True if all ports are acquired
+
+ :raises:
+ None
+
+ """
+
+ return (self.get_all_ports() == self.get_acquired_ports())
+
+
+ # is the client connected ?
+ def is_connected (self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+            True if the client is connected to the server, False otherwise
+
+ :raises:
+ None
+
+ """
+
+ return self.connected and self.comm_link.is_connected
+
+
+ # get connection info
+ def get_connection_info (self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+ Connection dict
+
+ :raises:
+ None
+
+ """
+
+ return self.connection_info
+
+
+ # get supported commands by the server
+ def get_server_supported_cmds(self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+            List of commands supported by the server
+
+ :raises:
+ None
+
+ """
+
+ return self.supported_cmds
+
+ # get server version
+ def get_server_version(self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+            Server version dict
+
+ :raises:
+ None
+
+ """
+
+ return self.server_version
+
+ # get server system info
+ def get_server_system_info(self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+            Server system info dict
+
+ :raises:
+ None
+
+ """
+
+ return self.system_info
+
+ # get port count
+ def get_port_count(self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+            Number of ports
+
+ :raises:
+ None
+
+ """
+
+ return len(self.ports)
+
+
+ # returns the port object
+ def get_port (self, port_id):
+ port = self.ports.get(port_id, None)
+        if port is not None:
+ return port
+ else:
+ raise STLArgumentError('port id', port_id, valid_values = self.get_all_ports())
+
+
+ # get all ports as IDs
+ def get_all_ports (self):
+ """
+
+ :parameters:
+ None
+
+ :return:
+            List of all port IDs
+
+ :raises:
+ None
+
+ """
+
+ return list(self.ports)
+
+ # get all acquired ports
+ def get_acquired_ports(self):
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_acquired()]
+
+ # get all active ports (TX or pause)
+ def get_active_ports(self, owned = True):
+ if owned:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_active() and port_obj.is_acquired()]
+ else:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_active()]
+
+
+ # get paused ports
+ def get_paused_ports (self, owned = True):
+ if owned:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_paused() and port_obj.is_acquired()]
+ else:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_paused()]
+
+
+ # get all TX ports
+ def get_transmitting_ports (self, owned = True):
+ if owned:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_transmitting() and port_obj.is_acquired()]
+ else:
+ return [port_id
+ for port_id, port_obj in self.ports.items()
+ if port_obj.is_transmitting()]
+
+
+ # get stats
+ def get_stats (self, ports = None, sync_now = True):
+ """
+ Return dictionary containing statistics information gathered from the server.
+
+ :parameters:
+
+            ports - List of ports on which to retrieve stats.
+                    If None, assume the request is for all acquired ports.
+
+            sync_now - Boolean - If True, issue a call to the server to get the latest stats, and wait for the result to arrive. Otherwise, return the last stats saved in the client cache.
+                           The downside of passing True is a slight delay (a few tens of msec) in getting the result. For practical uses, the value should be True.
+ :return:
+ Statistics dictionary of dictionaries with the following format:
+
+ =============================== ===============
+ key Meaning
+ =============================== ===============
+            :ref:`numbers (0,1,..)<total>`  Statistics per port number
+ :ref:`total <total>` Sum of port statistics
+ :ref:`flow_stats <flow_stats>` Per flow statistics
+ :ref:`global <global>` Global statistics
+ :ref:`latency <latency>` Per flow statistics regarding flow latency
+ =============================== ===============
+
+ Below is description of each of the inner dictionaries.
+
+ .. _total:
+
+ **total** and per port statistics contain dictionary with following format.
+
+ Most of the bytes counters (unless specified otherwise) are in L2 layer, including the Ethernet FCS. e.g. minimum packet size is 64 bytes
+
+ =============================== ===============
+ key Meaning
+ =============================== ===============
+ ibytes Number of input bytes
+ ierrors Number of input errors
+ ipackets Number of input packets
+ obytes Number of output bytes
+ oerrors Number of output errors
+ opackets Number of output packets
+ rx_bps Receive bytes per second rate (L2 layer)
+ rx_pps Receive packet per second rate
+ tx_bps Transmit bytes per second rate (L2 layer)
+ tx_pps Transmit packet per second rate
+ =============================== ===============
+
+ .. _flow_stats:
+
+ **flow_stats** contains :ref:`global dictionary <flow_stats_global>`, and dictionaries per packet group id (pg id). See structures below.
+
+            **per pg_id flow stat** dictionaries have the following structure:
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+ rx_bps Received bytes per second rate
+ rx_bps_l1 Received bytes per second rate, including layer one
+ rx_bytes Total number of received bytes
+ rx_pkts Total number of received packets
+ rx_pps Received packets per second
+ tx_bps Transmit bytes per second rate
+ tx_bps_l1 Transmit bytes per second rate, including layer one
+ tx_bytes Total number of sent bytes
+ tx_pkts Total number of sent packets
+ tx_pps Transmit packets per second rate
+ ================= ===============
+
+ .. _flow_stats_global:
+
+ **global flow stats** dictionary has the following structure:
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+ rx_err Number of flow statistics packets received that we could not associate to any pg_id. This can happen if latency on the used setup is large. See :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
+ tx_err Number of flow statistics packets transmitted that we could not associate to any pg_id. This is never expected. If you see this different than 0, please report.
+ ================= ===============
+
+ .. _global:
+
+ **global**
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+            bw_per_core       Estimated byte rate TRex can support per core. This is calculated by extrapolation of the current rate and the load on the transmitting cores.
+            cpu_util          Estimate of the average utilization percentage of the transmitting cores
+            queue_full        Total number of packets transmitted while the NIC TX queue was full. The packets will be transmitted eventually, but will create high CPU% due to polling the queue. This usually indicates that the rate we are trying to transmit is too high for this port.
+ rx_cpu_util Estimate of the utilization percentage of the core handling RX traffic. Too high value of this CPU utilization could cause drop of latency streams.
+ rx_drop_bps Received bytes per second drop rate
+ rx_bps Received bytes per second rate
+ rx_pps Received packets per second rate
+ tx_bps Transmit bytes per second rate
+ tx_pps Transmit packets per second rate
+ ================= ===============
+
+ .. _latency:
+
+ **latency** contains :ref:`global dictionary <lat_stats_global>`, and dictionaries per packet group id (pg id). Each one with the following structure.
+
+            **per pg_id latency stat** dictionaries have the following structure:
+
+ =========================== ===============
+ key Meaning
+ =========================== ===============
+            :ref:`err_cntrs<err-cntrs>` Counters describing errors that occurred with this pg id
+ :ref:`latency<lat_inner>` Information regarding packet latency
+ =========================== ===============
+
+ Following are the inner dictionaries of latency
+
+ .. _err-cntrs:
+
+ **err-cntrs**
+
+ ================= ===============
+ key Meaning (see better explanation below the table)
+ ================= ===============
+ dropped How many packets were dropped (estimation)
+ dup How many packets were duplicated.
+            out_of_order      How many packets we received out of order.
+            seq_too_high      How many times we saw a packet with a sequence number that was too high.
+            seq_too_low       How many times we saw a packet with a sequence number that was too low.
+ ================= ===============
+
+            For calculating packet error events, we add a sequence number to each packet's payload. We decide what went wrong only according to the sequence number
+            of the last packet received and that of the previous packet. 'seq_too_low' and 'seq_too_high' count events we see. 'dup', 'out_of_order' and 'dropped'
+            are heuristics we apply to try to understand what happened. They will be accurate in common error scenarios.
+            We describe a few scenarios below to help understand this.
+
+ Scenario 1: Received packet with seq num 10, and another one with seq num 10. We increment 'dup' and 'seq_too_low' by 1.
+
+            Scenario 2: Received packet with seq num 10 and then packet with seq num 15. We assume 4 packets were dropped, and increment 'dropped' by 4, and 'seq_too_high' by 1.
+            We expect the next packet to arrive with sequence number 16.
+
+            Scenario 2, continued: Received packet with seq num 11. We increment 'seq_too_low' by 1. We increment 'out_of_order' by 1. We *decrement* 'dropped' by 1.
+ (We assume here that one of the packets we considered as dropped before, actually arrived out of order).
+
+
+ .. _lat_inner:
+
+ **latency**
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+            average           Average latency over the stream lifetime (usec). A low-pass filter is applied to the last window average. It is computed each sampling period by the following formula: <average> = <prev average>/2 + <last sampling period average>/2
+ histogram Dictionary describing logarithmic distribution histogram of packet latencies. Keys in the dictionary represent range of latencies (in usec). Values are the total number of packets received in this latency range. For example, an entry {100:13} would mean that we saw 13 packets with latency in the range between 100 and 200 usec.
+ jitter Jitter of latency samples, computed as described in :rfc:`3550#appendix-A.8`
+ last_max Maximum latency measured between last two data reads from server (0.5 sec window).
+ total_max Maximum latency measured over the stream lifetime (in usec).
+ total_min Minimum latency measured over the stream lifetime (in usec).
+ ================= ===============
+
+ .. _lat_stats_global:
+
+ **global latency stats** dictionary has the following structure:
+
+ ================= ===============
+ key Meaning
+ ================= ===============
+ old_flow Number of latency statistics packets received that we could not associate to any pg_id. This can happen if latency on the used setup is large. See :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
+            bad_hdr           Number of latency packets received with bad latency data. This can happen because of garbage packets in the network, or if the DUT causes packet corruption.
+ ================= ===============
+
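+            A minimal usage sketch (assumes a connected client ``c`` with ports 0 and 1
+            acquired; the exact keys present depend on the running configuration):
+
+            .. code-block:: python
+
+                stats = c.get_stats(ports = [0, 1])
+
+                # global counters
+                print(stats['global']['tx_bps'])
+
+                # per port counters
+                print(stats[0]['opackets'])
+
+                # aggregation of the queried ports
+                print(stats['total']['ipackets'])
+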
+ :raises:
+ None
+
+ """
+ # by default use all acquired ports
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ # check async barrier
+        if not isinstance(sync_now, bool):
+ raise STLArgumentError('sync_now', sync_now)
+
+
+ # if the user requested a barrier - use it
+ if sync_now:
+ rc = self.async_client.barrier()
+ if not rc:
+ raise STLError(rc)
+
+ return self.__get_stats(ports)
+
+
+ def get_events (self, ev_type_filter = None):
+ """
+ returns all the logged events
+
+ :parameters:
+ ev_type_filter - 'info', 'warning' or a list of those
+ default: no filter
+
+ :return:
+ logged events
+
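+            A short sketch (assumes a connected client ``c``):
+
+            .. code-block:: python
+
+                # fetch only the warnings logged so far
+                warnings = c.get_events(ev_type_filter = 'warning')
+
+                for ev in warnings:
+                    print(ev)
+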
+ :raises:
+ None
+
+ """
+ return self.event_handler.get_events(ev_type_filter)
+
+
+ def get_warnings (self):
+ """
+        returns all the logged warning events
+
+ :parameters:
+ None
+
+ :return:
+            logged warning events
+
+ :raises:
+ None
+
+ """
+ return self.get_events(ev_type_filter = 'warning')
+
+
+ def get_info (self):
+ """
+        returns all the logged info events
+
+ :parameters:
+ None
+
+ :return:
+            logged info events
+
+ :raises:
+ None
+
+ """
+ return self.get_events(ev_type_filter = 'info')
+
+
+ # get port(s) info as a list of dicts
+ @__api_check(True)
+ def get_port_info (self, ports = None):
+
+ ports = ports if ports is not None else self.get_all_ports()
+ ports = self._validate_port_list(ports)
+
+ return [self.ports[port_id].get_info() for port_id in ports]
+
+
+ ############################ Commands #############################
+ ############################ #############################
+ ############################ #############################
+
+
+ def set_verbose (self, level):
+ """
+ Sets verbose level
+
+ :parameters:
+ level : str
+ "high"
+ "low"
+ "normal"
+
+ :raises:
+ None
+
+ """
+ modes = {'low' : LoggerApi.VERBOSE_QUIET, 'normal': LoggerApi.VERBOSE_REGULAR, 'high': LoggerApi.VERBOSE_HIGH}
+
+        if level not in modes:
+ raise STLArgumentError('level', level)
+
+ self.logger.set_verbose(modes[level])
+
+
+ @__api_check(False)
+ def connect (self):
+ """
+
+ Connects to the TRex server
+
+ :parameters:
+ None
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ rc = self.__connect()
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(False)
+ def disconnect (self, stop_traffic = True, release_ports = True):
+ """
+ Disconnects from the server
+
+ :parameters:
+ stop_traffic : bool
+ Attempts to stop traffic before disconnecting.
+ release_ports : bool
+ Attempts to release all the acquired ports.
+
+ """
+
+ # try to stop ports but do nothing if not possible
+ if stop_traffic:
+ try:
+ self.stop()
+ except STLError:
+ pass
+
+
+ self.logger.pre_cmd("Disconnecting from server at '{0}':'{1}'".format(self.connection_info['server'],
+ self.connection_info['sync_port']))
+ rc = self.__disconnect(release_ports)
+ self.logger.post_cmd(rc)
+
+
+
+ @__api_check(True)
+ def acquire (self, ports = None, force = False, sync_streams = True):
+ """
+ Acquires ports for executing commands
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ force : bool
+ Force acquire the ports.
+
+ sync_streams: bool
+ sync with the server about the configured streams
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ # by default use all ports
+ ports = ports if ports is not None else self.get_all_ports()
+ ports = self._validate_port_list(ports)
+
+ if force:
+ self.logger.pre_cmd("Force acquiring ports {0}:".format(ports))
+ else:
+ self.logger.pre_cmd("Acquiring ports {0}:".format(ports))
+
+ rc = self.__acquire(ports, force, sync_streams)
+
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ # cleanup
+ self.__release(ports)
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def release (self, ports = None):
+ """
+ Release ports
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ self.logger.pre_cmd("Releasing ports {0}:".format(ports))
+ rc = self.__release(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ @__api_check(True)
+ def ping(self):
+ """
+ Pings the server
+
+ :parameters:
+ None
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ self.logger.pre_cmd("Pinging the server on '{0}' port '{1}': ".format(self.connection_info['server'],
+ self.connection_info['sync_port']))
+ rc = self._transmit("ping", api_class = None)
+
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ @__api_check(True)
+ def server_shutdown (self, force = False):
+ """
+ Sends the server a request for total shutdown
+
+ :parameters:
+ force - shutdown server even if some ports are owned by another
+ user
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ self.logger.pre_cmd("Sending shutdown request for the server")
+
+ rc = self._transmit("shutdown", params = {'force': force, 'user': self.username})
+
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def get_active_pgids(self):
+ """
+ Get active group IDs
+
+ :parameters:
+ None
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ self.logger.pre_cmd( "Getting active packet group ids")
+
+ rc = self._transmit("get_active_pgids")
+
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ @__api_check(True)
+ def get_util_stats(self):
+ """
+ Get utilization stats:
+ History of TRex CPU utilization per thread (list of lists)
+ MBUFs memory consumption per CPU socket.
+
+ :parameters:
+ None
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+ self.logger.pre_cmd('Getting Utilization stats')
+ return self.util_stats.get_stats()
+
+ @__api_check(True)
+    def get_xstats(self, port_id):
+        """
+ Get extended stats of port: all the counters as dict.
+
+ :parameters:
+ port_id: int
+
+ :returns:
+ Dict with names of counters as keys and values of uint64. Actual keys may vary per NIC.
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+ self.logger.pre_cmd('Getting xstats')
+ return self.xstats.get_stats(port_id)
+
+
+ @__api_check(True)
+ def reset(self, ports = None):
+ """
+ Force acquire ports, stop the traffic, remove all streams and clear stats
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_all_ports()
+ ports = self._validate_port_list(ports)
+
+ # force take the port and ignore any streams on it
+ self.acquire(ports, force = True, sync_streams = False)
+ self.stop(ports, rx_delay_ms = 0)
+ self.remove_all_streams(ports)
+ self.clear_stats(ports)
+
+
+ @__api_check(True)
+ def remove_all_streams (self, ports = None):
+ """
+ remove all streams from port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ self.logger.pre_cmd("Removing all streams from port(s) {0}:".format(ports))
+ rc = self.__remove_all_streams(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def add_streams (self, streams, ports = None):
+ """
+ Add a list of streams to port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+ streams: list
+ Streams to attach (or profile)
+
+ :returns:
+ List of stream IDs in order of the stream list
+
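+            A minimal sketch (assumes a connected client ``c`` with port 0 acquired;
+            the stream below relies on the library's default test packet):
+
+            .. code-block:: python
+
+                # a single continuous stream at 1000 pps
+                stream = STLStream(mode = STLTXCont(pps = 1000))
+
+                stream_ids = c.add_streams(stream, ports = [0])
+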
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ if isinstance(streams, STLProfile):
+ streams = streams.get_streams()
+
+ # transform single stream
+ if not isinstance(streams, list):
+ streams = [streams]
+
+ # check streams
+ if not all([isinstance(stream, STLStream) for stream in streams]):
+ raise STLArgumentError('streams', streams)
+
+ self.logger.pre_cmd("Attaching {0} streams to port(s) {1}:".format(len(streams), ports))
+ rc = self.__add_streams(streams, ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ # return the stream IDs
+ return rc.data()
+
+ @__api_check(True)
+ def add_profile(self, filename, ports = None, **kwargs):
+ """ | Add streams from profile by its type. Supported types are:
+ | .py
+ | .yaml
+ | .pcap file that converted to profile automatically
+
+ :parameters:
+ filename : string
+ filename (with path) of the profile
+ ports : list
+ list of ports to add the profile (default: all acquired)
+ kwargs : dict
+ forward those key-value pairs to the profile (tunables)
+
+ :returns:
+ List of stream IDs in order of the stream list
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ validate_type('filename', filename, basestring)
+ profile = STLProfile.load(filename, **kwargs)
+ return self.add_streams(profile.get_streams(), ports)
+
+
+ @__api_check(True)
+ def remove_streams (self, stream_id_list, ports = None):
+ """
+ Remove a list of streams from ports
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+ stream_id_list: list
+ Stream id list to remove
+
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ # transform single stream
+ if not isinstance(stream_id_list, list):
+ stream_id_list = [stream_id_list]
+
+ # check streams
+ for stream_id in stream_id_list:
+ validate_type('stream_id', stream_id, int)
+
+ # remove streams
+ self.logger.pre_cmd("Removing {0} streams from port(s) {1}:".format(len(stream_id_list), ports))
+ rc = self.__remove_streams(stream_id_list, ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+
+ @__api_check(True)
+ def start (self,
+ ports = None,
+ mult = "1",
+ force = False,
+ duration = -1,
+ total = False,
+ core_mask = CORE_MASK_SPLIT):
+ """
+ Start traffic on port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ mult : str
+ Multiplier in a form of pps, bps, or line util in %
+ Examples: "5kpps", "10gbps", "85%", "32mbps"
+
+ force : bool
+ If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
+ True: Force start
+ False: Do not force start
+
+ duration : int
+ Limit the run time (seconds)
+ -1 = unlimited
+
+ total : bool
+ Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
+ True: Divide bandwidth among the ports
+ False: Duplicate
+
+            core_mask: CORE_MASK_SPLIT, CORE_MASK_PIN or a list of masks (one per port)
+                Determines the allocation of cores per port
+                In CORE_MASK_SPLIT, all the traffic will be divided equally between all the cores
+                associated with each port
+                In CORE_MASK_PIN, for each pair of dual ports (a group that shares the same cores),
+                the cores will be split in half, with each half pinned to one of the ports
+
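+            A usage sketch (assumes a connected client ``c`` with streams already
+            attached to ports 0 and 1):
+
+            .. code-block:: python
+
+                # 10 gbps total, divided between the two ports, for 15 seconds
+                c.start(ports = [0, 1], mult = "10gbps", total = True, duration = 15)
+
+                # pin the cores of each dual-port pair instead of splitting them
+                c.start(ports = [0, 1], core_mask = STLClient.CORE_MASK_PIN)
+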
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('mult', mult, basestring)
+ validate_type('force', force, bool)
+ validate_type('duration', duration, (int, float))
+ validate_type('total', total, bool)
+ validate_type('core_mask', core_mask, (int, list))
+
+ # verify link status
+ ports_link_down = [port_id for port_id in ports if self.ports[port_id].attr.get('link',{}).get('up') == False]
+ if not force and ports_link_down:
+ raise STLError("Port(s) %s - link DOWN - check the connection or specify 'force'" % ports_link_down)
+
+ #########################
+ # decode core mask argument
+ decoded_mask = self.__decode_core_mask(ports, core_mask)
+ #######################
+
+ # verify multiplier
+ mult_obj = parsing_opts.decode_multiplier(mult,
+ allow_update = False,
+ divide_count = len(ports) if total else 1)
+ if not mult_obj:
+ raise STLArgumentError('mult', mult)
+
+
+ # verify ports are stopped or force stop them
+ active_ports = list(set(self.get_active_ports()).intersection(ports))
+ if active_ports:
+ if not force:
+ raise STLError("Port(s) {0} are active - please stop them or specify 'force'".format(active_ports))
+ else:
+ rc = self.stop(active_ports)
+ if not rc:
+ raise STLError(rc)
+
+
+ # start traffic
+ self.logger.pre_cmd("Starting traffic on port(s) {0}:".format(ports))
+ rc = self.__start(mult_obj, duration, ports, force, decoded_mask)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def stop (self, ports = None, rx_delay_ms = 10):
+ """
+ Stop port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+            rx_delay_ms : int
+                Time to wait (in milliseconds) until RX filters are removed.
+                This value should reflect the time it takes for packets
+                which were transmitted to arrive at the destination.
+                After this time, the RX filters will be removed.
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ if ports is None:
+ ports = self.get_active_ports()
+ if not ports:
+ return
+
+ ports = self._validate_port_list(ports)
+
+ self.logger.pre_cmd("Stopping traffic on port(s) {0}:".format(ports))
+ rc = self.__stop(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ # remove any RX filters
+ rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def update (self, ports = None, mult = "1", total = False, force = False):
+ """
+ Update traffic on port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ mult : str
+ Multiplier in a form of pps, bps, or line util in %
+ Can also specify +/-
+ Examples: "5kpps+", "10gbps-", "85%", "32mbps", "20%+"
+
+ force : bool
+ If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
+ True: Force start
+ False: Do not force start
+
+ total : bool
+ Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
+ True: Divide bandwidth among the ports
+ False: Duplicate
+
+
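+            A short sketch (assumes ports 0 and 1 are already transmitting):
+
+            .. code-block:: python
+
+                # rescale to 50% of line rate
+                c.update(ports = [0, 1], mult = "50%")
+
+                # add 1 gbps on top of the current rate
+                c.update(ports = [0, 1], mult = "1gbps+")
+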
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_active_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('mult', mult, basestring)
+ validate_type('force', force, bool)
+ validate_type('total', total, bool)
+
+ # verify multiplier
+ mult_obj = parsing_opts.decode_multiplier(mult,
+ allow_update = True,
+ divide_count = len(ports) if total else 1)
+ if not mult_obj:
+ raise STLArgumentError('mult', mult)
+
+
+ # call low level functions
+ self.logger.pre_cmd("Updating traffic on port(s) {0}:".format(ports))
+ rc = self.__update(mult_obj, ports, force)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+
+ @__api_check(True)
+ def pause (self, ports = None):
+ """
+ Pause traffic on port(s). Works only for ports that are active, and only if all streams are in Continuous mode.
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_transmitting_ports()
+ ports = self._validate_port_list(ports)
+
+ self.logger.pre_cmd("Pausing traffic on port(s) {0}:".format(ports))
+ rc = self.__pause(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ @__api_check(True)
+ def resume (self, ports = None):
+ """
+ Resume traffic on port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_paused_ports()
+ ports = self._validate_port_list(ports)
+
+
+ self.logger.pre_cmd("Resume traffic on port(s) {0}:".format(ports))
+ rc = self.__resume(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def push_remote (self,
+ pcap_filename,
+ ports = None,
+ ipg_usec = None,
+ speedup = 1.0,
+ count = 1,
+ duration = -1,
+ is_dual = False):
+ """
+        Push a PCAP file that resides on the server.
+        The path must be a full path accessible to the server.
+
+ :parameters:
+ pcap_filename : str
+ PCAP file name in full path and accessible to the server
+
+ ports : list
+ Ports on which to execute the command
+
+ ipg_usec : float
+ Inter-packet gap in microseconds
+
+ speedup : float
+                A factor to adjust IPG. Effectively, IPG = IPG / speedup
+
+ count: int
+ How many times to transmit the cap
+
+ duration: float
+ Limit runtime by duration in seconds
+
+            is_dual: bool
+                Inject from both directions.
+                Requires an ERF file with per-packet direction metadata.
+                Also requires that all the ports are in master mode,
+                with their adjacent ports as slaves.
+
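+            A usage sketch (assumes a connected client ``c``; the file path is
+            hypothetical and must exist on the server machine):
+
+            .. code-block:: python
+
+                # replay the capture 3 times at twice the original speed
+                c.push_remote("/tmp/sample.pcap", ports = [0], speedup = 2.0, count = 3)
+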
+ :raises:
+ + :exc:`STLError`
+
+ """
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('pcap_filename', pcap_filename, basestring)
+ validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
+ validate_type('speedup', speedup, (float, int))
+ validate_type('count', count, int)
+ validate_type('duration', duration, (float, int))
+ validate_type('is_dual', is_dual, bool)
+
+ # for dual mode check that all are masters
+ if is_dual:
+ if not pcap_filename.endswith('erf'):
+ raise STLError("dual mode: only ERF format is supported for dual mode")
+
+ for port in ports:
+ master = port
+ slave = port ^ 0x1
+
+ if slave in ports:
+ raise STLError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))
+
+ if not slave in self.get_acquired_ports():
+ raise STLError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
+
+
+ self.logger.pre_cmd("Pushing remote PCAP on port(s) {0}:".format(ports))
+ rc = self.__push_remote(pcap_filename, ports, ipg_usec, speedup, count, duration, is_dual)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def push_pcap (self,
+ pcap_filename,
+ ports = None,
+ ipg_usec = None,
+ speedup = 1.0,
+ count = 1,
+ duration = -1,
+ force = False,
+ vm = None,
+ packet_hook = None,
+ is_dual = False):
+ """
+        Push a local PCAP to the server.
+        This is equivalent to loading a PCAP file into a profile
+        and attaching the profile to port(s).
+
+        File size is limited to 1MB unless 'force' is specified.
+
+ :parameters:
+ pcap_filename : str
+ PCAP filename (accessible locally)
+
+ ports : list
+ Ports on which to execute the command
+
+ ipg_usec : float
+ Inter-packet gap in microseconds
+
+ speedup : float
+                A factor to adjust IPG. Effectively, IPG = IPG / speedup
+
+ count: int
+ How many times to transmit the cap
+
+ duration: float
+ Limit runtime by duration in seconds
+
+ force: bool
+ Ignore file size limit - push any file size to the server
+
+ vm: list of VM instructions
+ VM instructions to apply for every packet
+
+ packet_hook : Callable or function
+ Will be applied to every packet
+
+            is_dual: bool
+                Inject from both directions.
+                Requires an ERF file with per-packet direction metadata.
+                Also requires that all the ports are in master mode,
+                with their adjacent ports as slaves.
+
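+            A usage sketch (assumes a connected client ``c``; the local file path
+            is hypothetical):
+
+            .. code-block:: python
+
+                # convert the local capture to streams and transmit it once on port 0
+                c.push_pcap("sample.pcap", ports = [0], ipg_usec = 10.0)
+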
+ :raises:
+ + :exc:`STLError`
+
+ """
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('pcap_filename', pcap_filename, basestring)
+ validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
+ validate_type('speedup', speedup, (float, int))
+ validate_type('count', count, int)
+ validate_type('duration', duration, (float, int))
+ validate_type('vm', vm, (list, type(None)))
+ validate_type('is_dual', is_dual, bool)
+
+
+ # no support for > 1MB PCAP - use push remote
+ if not force and os.path.getsize(pcap_filename) > (1024 * 1024):
+ raise STLError("PCAP size of {:} is too big for local push - consider using remote push or provide 'force'".format(format_num(os.path.getsize(pcap_filename), suffix = 'B')))
+
+ if is_dual:
+ for port in ports:
+ master = port
+ slave = port ^ 0x1
+
+ if slave in ports:
+ raise STLError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))
+
+ if not slave in self.get_acquired_ports():
+ raise STLError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
+
+ # regular push
+ if not is_dual:
+
+ # create the profile from the PCAP
+ try:
+ self.logger.pre_cmd("Converting '{0}' to streams:".format(pcap_filename))
+ profile = STLProfile.load_pcap(pcap_filename,
+ ipg_usec,
+ speedup,
+ count,
+ vm = vm,
+ packet_hook = packet_hook)
+                self.logger.post_cmd(RC_OK())
+ except STLError as e:
+ self.logger.post_cmd(RC_ERR(e))
+ raise
+
+
+ self.remove_all_streams(ports = ports)
+ id_list = self.add_streams(profile.get_streams(), ports)
+
+ return self.start(ports = ports, duration = duration)
+
+ else:
+
+ # create a dual profile
+ split_mode = 'MAC'
+
+ try:
+ self.logger.pre_cmd("Analyzing '{0}' for dual ports based on {1}:".format(pcap_filename, split_mode))
+ profile_a, profile_b = STLProfile.load_pcap(pcap_filename,
+ ipg_usec,
+ speedup,
+ count,
+ vm = vm,
+ packet_hook = packet_hook,
+ split_mode = split_mode)
+
+ self.logger.post_cmd(RC_OK())
+
+ except STLError as e:
+ self.logger.post_cmd(RC_ERR(e))
+ raise
+
+ all_ports = ports + [p ^ 0x1 for p in ports]
+
+ self.remove_all_streams(ports = all_ports)
+
+ for port in ports:
+ master = port
+ slave = port ^ 0x1
+
+ self.add_streams(profile_a.get_streams(), master)
+ self.add_streams(profile_b.get_streams(), slave)
+
+ return self.start(ports = all_ports, duration = duration)
+
+
+
+
+
+ @__api_check(True)
+ def validate (self, ports = None, mult = "1", duration = -1, total = False):
+ """
+ Validate port(s) configuration
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ mult : str
+ Multiplier in a form of pps, bps, or line util in %
+ Examples: "5kpps", "10gbps", "85%", "32mbps"
+
+ duration : int
+ Limit the run time (seconds)
+ -1 = unlimited
+
+ total : bool
+ Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
+ True: Divide bandwidth among the ports
+ False: Duplicate
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ validate_type('mult', mult, basestring)
+ validate_type('duration', duration, (int, float))
+ validate_type('total', total, bool)
+
+
+ # verify multiplier
+ mult_obj = parsing_opts.decode_multiplier(mult,
+ allow_update = True,
+ divide_count = len(ports) if total else 1)
+ if not mult_obj:
+ raise STLArgumentError('mult', mult)
+
+ self.logger.pre_cmd("Validating streams on port(s) {0}:".format(ports))
+ rc = self.__validate(ports)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ for port in ports:
+ self.ports[port].print_profile(mult_obj, duration)
+
+
+ @__api_check(False)
+ def clear_stats (self, ports = None, clear_global = True, clear_flow_stats = True, clear_latency_stats = True, clear_xstats = True):
+ """
+ Clear stats on port(s)
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ clear_global : bool
+ Clear the global stats
+
+ clear_flow_stats : bool
+ Clear the flow stats
+
+ clear_latency_stats : bool
+ Clear the latency stats
+
+ clear_xstats : bool
+ Clear the extended stats
+
+ :raises:
+ + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_all_ports()
+ ports = self._validate_port_list(ports)
+
+ # verify clear global
+ if not type(clear_global) is bool:
+ raise STLArgumentError('clear_global', clear_global)
+
+ rc = self.__clear_stats(ports, clear_global, clear_flow_stats, clear_latency_stats, clear_xstats)
+ if not rc:
+ raise STLError(rc)
+
+
+
+ @__api_check(True)
+ def is_traffic_active (self, ports = None):
+ """
+        Return whether the specified port(s) have active traffic
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+
+ :raises:
+            + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ return set(self.get_active_ports()).intersection(ports)
+
+
+
+ @__api_check(True)
+ def wait_on_traffic (self, ports = None, timeout = None, rx_delay_ms = 10):
+ """
+ .. _wait_on_traffic:
+
+ Block until traffic on specified port(s) has ended
+
+ :parameters:
+ ports : list
+ Ports on which to execute the command
+
+ timeout : int
+ timeout in seconds
+ default will be blocking
+
+ rx_delay_ms : int
+ Time to wait (in milliseconds) after last packet was sent, until RX filters used for
+ measuring flow statistics and latency are removed.
+ This value should reflect the time it takes packets which were transmitted to arrive
+ to the destination.
+ After this time, RX filters will be removed, and packets arriving for per flow statistics feature and latency flows will be counted as errors.
+
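+            A typical flow (sketch, assuming streams were already attached to port 0):
+
+            .. code-block:: python
+
+                c.start(ports = [0], mult = "1mpps", duration = 10)
+
+                # block until the run above finishes (or 60 seconds pass)
+                c.wait_on_traffic(ports = [0], timeout = 60)
+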
+ :raises:
+ + :exc:`STLTimeoutError` - in case timeout has expired
+            + :exc:`STLError`
+
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+
+ timer = PassiveTimer(timeout)
+
+ # wait while any of the required ports are active
+ while set(self.get_active_ports()).intersection(ports):
+
+ # make sure ASYNC thread is still alive - otherwise we will be stuck forever
+ if not self.async_client.is_thread_alive():
+ raise STLError("subscriber thread is dead")
+
+ time.sleep(0.01)
+ if timer.has_expired():
+ raise STLTimeoutError(timeout)
+
+ # remove any RX filters
+ rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
+ if not rc:
+ raise STLError(rc)
+
+
+ @__api_check(True)
+ def set_port_attr (self, ports = None, promiscuous = None, link_up = None, led_on = None, flow_ctrl = None):
+ """
+ Set port attributes
+
+        :parameters:
+            ports : list
+                Ports on which to execute the command
+
+            promiscuous : bool
+                Enable or disable promiscuous mode
+
+            link_up : bool
+                Bring the link up or down
+
+            led_on : bool
+                Turn the port LED on or off
+
+            flow_ctrl : int
+                0: disable all, 1: enable TX side, 2: enable RX side, 3: full enable
+
+        :raises:
+            + :exc:`STLError`
+
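+        :usage:
+            Illustrative sketch (assumes ``c`` is a connected STLClient with port 0 acquired)::
+
+                # enable promiscuous mode and full flow control on port 0
+                c.set_port_attr(ports = [0], promiscuous = True, flow_ctrl = 3)
+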
+ """
+
+ ports = ports if ports is not None else self.get_acquired_ports()
+ ports = self._validate_port_list(ports)
+
+ # check arguments
+ validate_type('promiscuous', promiscuous, (bool, type(None)))
+ validate_type('link_up', link_up, (bool, type(None)))
+ validate_type('led_on', led_on, (bool, type(None)))
+ validate_type('flow_ctrl', flow_ctrl, (int, type(None)))
+
+ # build attributes
+ attr_dict = {}
+ if promiscuous is not None:
+ attr_dict['promiscuous'] = {'enabled': promiscuous}
+ if link_up is not None:
+ attr_dict['link_status'] = {'up': link_up}
+ if led_on is not None:
+ attr_dict['led_status'] = {'on': led_on}
+ if flow_ctrl is not None:
+ attr_dict['flow_ctrl_mode'] = {'mode': flow_ctrl}
+
+ # no attributes to set
+ if not attr_dict:
+ return
+
+ self.logger.pre_cmd("Applying attributes on port(s) {0}:".format(ports))
+ rc = self.__set_port_attr(ports, attr_dict)
+ self.logger.post_cmd(rc)
+
+ if not rc:
+ raise STLError(rc)
+
+ def clear_events (self):
+ """
+ Clear all events
+
+ :parameters:
+ None
+
+ :raises:
+ None
+
+ """
+ self.event_handler.clear_events()
+
+
+ ############################ Line #############################
+ ############################ Commands #############################
+ ############################ #############################
+
+ # console decorator
+ def __console(f):
+ @wraps(f)
+ def wrap(*args):
+ client = args[0]
+
+ time1 = time.time()
+
+ try:
+ rc = f(*args)
+ except STLError as e:
+ client.logger.log("Log:\n" + format_text(e.brief() + "\n", 'bold'))
+ return RC_ERR(e.brief())
+
+ # if got true - print time
+ if rc:
+ delta = time.time() - time1
+ client.logger.log(format_time(delta) + "\n")
+
+ return rc
+
+ return wrap
+
+ @__console
+ def ping_line (self, line):
+        '''Pings the server'''
+ self.ping()
+ return RC_OK()
+
+ @__console
+ def shutdown_line (self, line):
+        '''Shut down the server'''
+ parser = parsing_opts.gen_parser(self,
+ "shutdown",
+ self.shutdown_line.__doc__,
+ parsing_opts.FORCE)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+ self.server_shutdown(force = opts.force)
+ return RC_OK()
+
+ @__console
+ def connect_line (self, line):
+        '''Connects to the TRex server and acquires ports'''
+ parser = parsing_opts.gen_parser(self,
+ "connect",
+ self.connect_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.FORCE)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_all_ports())
+ if not opts:
+ return opts
+
+ self.connect()
+ self.acquire(ports = opts.ports, force = opts.force)
+
+ return RC_OK()
+
+
+ @__console
+ def acquire_line (self, line):
+ '''Acquire ports\n'''
+
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "acquire",
+ self.acquire_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.FORCE)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_all_ports())
+ if not opts:
+ return opts
+
+ # filter out all the already owned ports
+ ports = list_difference(opts.ports, self.get_acquired_ports())
+ if not ports:
+ msg = "acquire - all of port(s) {0} are already acquired".format(opts.ports)
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+ self.acquire(ports = ports, force = opts.force)
+
+ return RC_OK()
+
+
+ #
+ @__console
+ def release_line (self, line):
+ '''Release ports\n'''
+
+ parser = parsing_opts.gen_parser(self,
+ "release",
+ self.release_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports())
+ if not opts:
+ return opts
+
+ ports = list_intersect(opts.ports, self.get_acquired_ports())
+ if not ports:
+ if not opts.ports:
+ msg = "release - no acquired ports"
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+ else:
+ msg = "release - none of port(s) {0} are acquired".format(opts.ports)
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+
+ self.release(ports = ports)
+
+ return RC_OK()
+
+
+ @__console
+ def reacquire_line (self, line):
+        '''Reacquire all the ports under your username which are not acquired by your session'''
+
+ parser = parsing_opts.gen_parser(self,
+ "reacquire",
+ self.reacquire_line.__doc__)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+        # find all the un-owned ports under your username
+ my_unowned_ports = list_difference([k for k, v in self.ports.items() if v.get_owner() == self.username], self.get_acquired_ports())
+ if not my_unowned_ports:
+ msg = "reacquire - no unowned ports under '{0}'".format(self.username)
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ self.acquire(ports = my_unowned_ports, force = True)
+ return RC_OK()
+
+
+ @__console
+ def disconnect_line (self, line):
+ self.disconnect()
+
+
+ @__console
+ def reset_line (self, line):
+ '''Reset ports - if no ports are provided all acquired ports will be reset'''
+
+ parser = parsing_opts.gen_parser(self,
+ "reset",
+ self.reset_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ self.reset(ports = opts.ports)
+
+ return RC_OK()
+
+
+
+ @__console
+ def start_line (self, line):
+ '''Start selected traffic on specified ports on TRex\n'''
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "start",
+ self.start_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.TOTAL,
+ parsing_opts.FORCE,
+ parsing_opts.FILE_PATH,
+ parsing_opts.DURATION,
+ parsing_opts.TUNABLES,
+ parsing_opts.MULTIPLIER_STRICT,
+ parsing_opts.DRY_RUN,
+ parsing_opts.CORE_MASK_GROUP)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ # core mask
+ if opts.core_mask is not None:
+ core_mask = opts.core_mask
+ else:
+ core_mask = self.CORE_MASK_PIN if opts.pin_cores else self.CORE_MASK_SPLIT
+
+ # just for sanity - will be checked on the API as well
+ self.__decode_core_mask(opts.ports, core_mask)
+
+ active_ports = list_intersect(self.get_active_ports(), opts.ports)
+ if active_ports:
+ if not opts.force:
+ msg = "Port(s) {0} are active - please stop them or add '--force'\n".format(active_ports)
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+ else:
+ self.stop(active_ports)
+
+
+ # process tunables
+ if type(opts.tunables) is dict:
+ tunables = opts.tunables
+ else:
+ tunables = {}
+
+
+ # remove all streams
+ self.remove_all_streams(opts.ports)
+
+ # pack the profile
+ try:
+ for port in opts.ports:
+
+ profile = STLProfile.load(opts.file[0],
+ direction = tunables.get('direction', port % 2),
+ port_id = port,
+ **tunables)
+
+ self.add_streams(profile.get_streams(), ports = port)
+
+ except STLError as e:
+            error = 'Unknown error.'
+            for err_line in e.brief().split('\n'):
+                if err_line:
+                    error = err_line
+ msg = format_text("\nError loading profile '{0}'".format(opts.file[0]), 'bold')
+ self.logger.log(msg + '\n')
+ self.logger.log(e.brief() + "\n")
+ return RC_ERR("%s: %s" % (msg, error))
+
+
+ if opts.dry:
+ self.validate(opts.ports, opts.mult, opts.duration, opts.total)
+ else:
+
+ self.start(opts.ports,
+ opts.mult,
+ opts.force,
+ opts.duration,
+ opts.total,
+ core_mask)
+
+ return RC_OK()
+
+
+
+ @__console
+ def stop_line (self, line):
+ '''Stop active traffic on specified ports on TRex\n'''
+ parser = parsing_opts.gen_parser(self,
+ "stop",
+ self.stop_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_active_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+
+ # find the relevant ports
+ ports = list_intersect(opts.ports, self.get_active_ports())
+ if not ports:
+ if not opts.ports:
+ msg = 'stop - no active ports'
+ else:
+ msg = 'stop - no active traffic on ports {0}'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ # call API
+ self.stop(ports)
+
+ return RC_OK()
+
+
+ @__console
+ def update_line (self, line):
+        '''Update the multiplier of currently active traffic on specified port(s)\n'''
+ parser = parsing_opts.gen_parser(self,
+ "update",
+ self.update_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.MULTIPLIER,
+ parsing_opts.TOTAL,
+ parsing_opts.FORCE)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_active_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+
+ # find the relevant ports
+ ports = list_intersect(opts.ports, self.get_active_ports())
+ if not ports:
+ if not opts.ports:
+ msg = 'update - no active ports'
+ else:
+ msg = 'update - no active traffic on ports {0}'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ self.update(ports, opts.mult, opts.total, opts.force)
+
+ return RC_OK()
+
+
+ @__console
+ def pause_line (self, line):
+ '''Pause active traffic on specified ports on TRex\n'''
+ parser = parsing_opts.gen_parser(self,
+ "pause",
+ self.pause_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_transmitting_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ # check for already paused case
+ if opts.ports and is_sub_list(opts.ports, self.get_paused_ports()):
+ msg = 'pause - all of port(s) {0} are already paused'.format(opts.ports)
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ # find the relevant ports
+ ports = list_intersect(opts.ports, self.get_transmitting_ports())
+ if not ports:
+ if not opts.ports:
+ msg = 'pause - no transmitting ports'
+ else:
+ msg = 'pause - none of ports {0} are transmitting'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+ self.pause(ports)
+
+ return RC_OK()
+
+
+ @__console
+ def resume_line (self, line):
+ '''Resume active traffic on specified ports on TRex\n'''
+ parser = parsing_opts.gen_parser(self,
+ "resume",
+ self.resume_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_paused_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ # find the relevant ports
+ ports = list_intersect(opts.ports, self.get_paused_ports())
+ if not ports:
+ if not opts.ports:
+ msg = 'resume - no paused ports'
+ else:
+ msg = 'resume - none of ports {0} are paused'.format(opts.ports)
+
+ self.logger.log(msg)
+ return RC_ERR(msg)
+
+
+ self.resume(ports)
+
+ # true means print time
+ return RC_OK()
+
+
+ @__console
+ def clear_stats_line (self, line):
+ '''Clear cached local statistics\n'''
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "clear",
+ self.clear_stats_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split())
+
+ if not opts:
+ return opts
+
+ self.clear_stats(opts.ports)
+
+ return RC_OK()
+
+
+ @__console
+ def show_stats_line (self, line):
+ '''Get statistics from TRex server by port\n'''
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "stats",
+ self.show_stats_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.STATS_MASK)
+
+ opts = parser.parse_args(line.split())
+
+ if not opts:
+ return opts
+
+ # determine stats mask
+ mask = self.__get_mask_keys(**self.__filter_namespace_args(opts, trex_stl_stats.ALL_STATS_OPTS))
+        if not mask:
+            # no filter was given - default to the compact stats view
+            mask = trex_stl_stats.COMPACT
+
+ stats_opts = common.list_intersect(trex_stl_stats.ALL_STATS_OPTS, mask)
+
+ stats = self._get_formatted_stats(opts.ports, mask)
+
+
+ # print stats to screen
+ for stat_type, stat_data in stats.items():
+ text_tables.print_table_with_header(stat_data.text_table, stat_type)
+
+
+ @__console
+ def show_streams_line(self, line):
+ '''Get stream statistics from TRex server by port\n'''
+ # define a parser
+ parser = parsing_opts.gen_parser(self,
+ "streams",
+ self.show_streams_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.STREAMS_MASK)
+
+ opts = parser.parse_args(line.split())
+
+ if not opts:
+ return opts
+
+ streams = self._get_streams(opts.ports, set(opts.streams))
+ if not streams:
+ self.logger.log(format_text("No streams found with desired filter.\n", "bold", "magenta"))
+
+ else:
+ # print stats to screen
+ for stream_hdr, port_streams_data in streams.items():
+ text_tables.print_table_with_header(port_streams_data.text_table,
+ header= stream_hdr.split(":")[0] + ":",
+ untouched_header= stream_hdr.split(":")[1])
+
+
+
+
+ @__console
+ def validate_line (self, line):
+ '''Validates port(s) stream configuration\n'''
+
+ parser = parsing_opts.gen_parser(self,
+ "validate",
+ self.validate_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+ self.validate(opts.ports)
+
+
+
+
+ @__console
+ def push_line (self, line):
+ '''Push a pcap file '''
+
+ parser = parsing_opts.gen_parser(self,
+ "push",
+ self.push_line.__doc__,
+ parsing_opts.FILE_PATH,
+ parsing_opts.REMOTE_FILE,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.COUNT,
+ parsing_opts.DURATION,
+ parsing_opts.IPG,
+ parsing_opts.SPEEDUP,
+ parsing_opts.FORCE,
+ parsing_opts.DUAL)
+
+ opts = parser.parse_args(line.split(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ active_ports = list(set(self.get_active_ports()).intersection(opts.ports))
+
+ if active_ports:
+ if not opts.force:
+ msg = "Port(s) {0} are active - please stop them or add '--force'\n".format(active_ports)
+ self.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+ else:
+ self.stop(active_ports)
+
+
+ if opts.remote:
+ self.push_remote(opts.file[0],
+ ports = opts.ports,
+ ipg_usec = opts.ipg_usec,
+ speedup = opts.speedup,
+ count = opts.count,
+ duration = opts.duration,
+ is_dual = opts.dual)
+
+ else:
+ self.push_pcap(opts.file[0],
+ ports = opts.ports,
+ ipg_usec = opts.ipg_usec,
+ speedup = opts.speedup,
+ count = opts.count,
+ duration = opts.duration,
+ force = opts.force,
+ is_dual = opts.dual)
+
+
+
+ return RC_OK()
+
+
+
+ @__console
+ def set_port_attr_line (self, line):
+ '''Sets port attributes '''
+
+ parser = parsing_opts.gen_parser(self,
+ "port_attr",
+ self.set_port_attr_line.__doc__,
+ parsing_opts.PORT_LIST_WITH_ALL,
+ parsing_opts.PROMISCUOUS,
+ parsing_opts.LINK_STATUS,
+ parsing_opts.LED_STATUS,
+ parsing_opts.FLOW_CTRL,
+ parsing_opts.SUPPORTED,
+ )
+
+ opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
+ if not opts:
+ return opts
+
+ opts.prom = parsing_opts.ON_OFF_DICT.get(opts.prom)
+ opts.link = parsing_opts.UP_DOWN_DICT.get(opts.link)
+ opts.led = parsing_opts.ON_OFF_DICT.get(opts.led)
+ opts.flow_ctrl = parsing_opts.FLOW_CTRL_DICT.get(opts.flow_ctrl)
+
+ # if no attributes - fall back to printing the status
+        if all(opt is None for opt in [opts.prom, opts.link, opts.led, opts.flow_ctrl, opts.supp]):
+ self.show_stats_line("--ps --port {0}".format(' '.join(str(port) for port in opts.ports)))
+ return
+
+ if opts.supp:
+ info = self.ports[0].get_info() # assume for now all ports are same
+ print('')
+ print('Supported attributes for current NICs:')
+ print(' Promiscuous: yes')
+ print(' Link status: %s' % info['link_change_supported'])
+ print(' LED status: %s' % info['led_change_supported'])
+ print(' Flow control: %s' % info['fc_supported'])
+ print('')
+ else:
+ return self.set_port_attr(opts.ports, opts.prom, opts.link, opts.led, opts.flow_ctrl)
+
+
+ @__console
+ def show_profile_line (self, line):
+ '''Shows profile information'''
+
+ parser = parsing_opts.gen_parser(self,
+ "port",
+ self.show_profile_line.__doc__,
+ parsing_opts.FILE_PATH)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+ info = STLProfile.get_info(opts.file[0])
+
+ self.logger.log(format_text('\nProfile Information:\n', 'bold'))
+
+ # general info
+ self.logger.log(format_text('\nGeneral Information:', 'underline'))
+ self.logger.log('Filename: {:^12}'.format(opts.file[0]))
+ self.logger.log('Stream count: {:^12}'.format(info['stream_count']))
+
+ # specific info
+ profile_type = info['type']
+ self.logger.log(format_text('\nSpecific Information:', 'underline'))
+
+ if profile_type == 'python':
+ self.logger.log('Type: {:^12}'.format('Python Module'))
+            self.logger.log('Tunables: {:^12}'.format(str(['{0} = {1}'.format(k, v) for k, v in info['tunables'].items()])))
+
+ elif profile_type == 'yaml':
+ self.logger.log('Type: {:^12}'.format('YAML'))
+
+ elif profile_type == 'pcap':
+ self.logger.log('Type: {:^12}'.format('PCAP file'))
+
+ self.logger.log("")
+
+
+ @__console
+ def get_events_line (self, line):
+        '''Shows events received from the server\n'''
+
+ x = [parsing_opts.ArgumentPack(['-c','--clear'],
+ {'action' : "store_true",
+ 'default': False,
+ 'help': "clear the events log"}),
+
+ parsing_opts.ArgumentPack(['-i','--info'],
+ {'action' : "store_true",
+ 'default': False,
+ 'help': "show info events"}),
+
+ parsing_opts.ArgumentPack(['-w','--warn'],
+ {'action' : "store_true",
+ 'default': False,
+ 'help': "show warning events"}),
+
+ ]
+
+
+ parser = parsing_opts.gen_parser(self,
+ "events",
+ self.get_events_line.__doc__,
+ *x)
+
+ opts = parser.parse_args(line.split())
+ if not opts:
+ return opts
+
+
+ ev_type_filter = []
+
+ if opts.info:
+ ev_type_filter.append('info')
+
+ if opts.warn:
+ ev_type_filter.append('warning')
+
+ if not ev_type_filter:
+ ev_type_filter = None
+
+ events = self.get_events(ev_type_filter)
+ for ev in events:
+ self.logger.log(ev)
+
+ if opts.clear:
+ self.clear_events()
+
+ def generate_prompt (self, prefix = 'trex'):
+ if not self.is_connected():
+ return "{0}(offline)>".format(prefix)
+
+ elif not self.get_acquired_ports():
+ return "{0}(read-only)>".format(prefix)
+
+ elif self.is_all_ports_acquired():
+ return "{0}>".format(prefix)
+
+ else:
+ return "{0} {1}>".format(prefix, self.get_acquired_ports())
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py
new file mode 100644
index 00000000..2ca92cb8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_exceptions.py
@@ -0,0 +1,71 @@
+import os
+import sys
+import traceback
+
+from .utils.text_opts import *
+
+try:
+ basestring
+except NameError:
+ basestring = str
+
+# basic error for API
+class STLError(Exception):
+ def __init__ (self, msg):
+ self.msg = str(msg)
+ self.tb = traceback.extract_stack()
+
+ def __str__ (self):
+
+ fname = os.path.split(self.tb[-2][0])[1]
+ lineno = self.tb[-2][1]
+ func = self.tb[-2][2]
+ src = self.tb[-2][3]
+
+ s = "\n******\n"
+ s += "Error at {0}:{1} - '{2}'\n\n".format(format_text(fname, 'bold'), format_text(lineno, 'bold'), format_text(src.strip(), 'bold'))
+ s += "specific error:\n\n{0}\n".format(format_text(self.msg, 'bold'))
+
+ return s
+
+ def brief (self):
+ return self.msg
+
+
+# raised when the client state is invalid for operation
+class STLStateError(STLError):
+ def __init__ (self, op, state):
+ self.msg = "Operation '{0}' is not valid while '{1}'".format(op, state)
+ self.tb = traceback.extract_stack()
+
+# port state error
+class STLPortStateError(STLError):
+ def __init__ (self, port, op, state):
+ self.msg = "Operation '{0}' on port(s) '{1}' is not valid while port(s) '{2}'".format(op, port, state)
+ self.tb = traceback.extract_stack()
+
+# raised when argument value is not valid for operation
+class STLArgumentError(STLError):
+ def __init__ (self, name, got, valid_values = None, extended = None):
+ self.tb = traceback.extract_stack()
+ self.msg = "Argument: '{0}' invalid value: '{1}'".format(name, got)
+ if valid_values:
+ self.msg += " - valid values are '{0}'".format(valid_values)
+
+ if extended:
+ self.msg += "\n{0}".format(extended)
+
+# raised when argument type is not valid for operation
+class STLTypeError(STLError):
+ def __init__ (self, arg_name, arg_type, valid_types):
+ self.tb = traceback.extract_stack()
+ self.msg = "Argument: '%s' invalid type: '%s', expecting type(s): %s." % (arg_name, arg_type.__name__,
+ [t.__name__ for t in valid_types] if isinstance(valid_types, tuple) else valid_types.__name__)
+
+# raised when timeout occurs
+class STLTimeoutError(STLError):
+ def __init__ (self, timeout):
+ self.tb = traceback.extract_stack()
+ self.msg = "Timeout: operation took more than '{0}' seconds".format(timeout)
+
+
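+# Typical consumption pattern (illustrative sketch, not part of this module;
+# 'client' stands for any object exposing the STL client API). Note that
+# STLTimeoutError subclasses STLError, so it must be caught first:
+#
+#     try:
+#         client.wait_on_traffic(timeout = 60)
+#     except STLTimeoutError as e:
+#         print(e.brief())    # short, single-line message
+#     except STLError as e:
+#         print(e)            # formatted message including source location
+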
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
new file mode 100644
index 00000000..306302dc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
@@ -0,0 +1,65 @@
+import sys
+import os
+import warnings
+import platform
+
+# if not set - set it to default
+TREX_STL_EXT_PATH = os.environ.get('TREX_STL_EXT_PATH')
+
+# take default
+if not TREX_STL_EXT_PATH or not os.path.exists(TREX_STL_EXT_PATH):
+ CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ TREX_STL_EXT_PATH = os.path.normpath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, 'external_libs'))
+if not os.path.exists(TREX_STL_EXT_PATH):
+ # ../../../../external_libs
+ TREX_STL_EXT_PATH = os.path.normpath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, os.pardir, os.pardir, 'external_libs'))
+if not os.path.exists(TREX_STL_EXT_PATH):
+ raise Exception('Could not determine path of external_libs, try setting TREX_STL_EXT_PATH variable')
+
+# the modules required
+# py-dep requires python2/python3 directories
+# arch-dep requires cel59/fedora and 32bit/64bit directories
+CLIENT_UTILS_MODULES = [ {'name': 'texttable-0.8.4'},
+ {'name': 'pyyaml-3.11', 'py-dep': True},
+ {'name': 'scapy-2.3.1', 'py-dep': True},
+ {'name': 'pyzmq-14.5.0', 'py-dep': True, 'arch-dep': True}
+ ]
+
+
+def generate_module_path (module, is_python3, is_64bit, is_cel):
+ platform_path = [module['name']]
+
+ if module.get('py-dep'):
+ platform_path.append('python3' if is_python3 else 'python2')
+
+ if module.get('arch-dep'):
+ platform_path.append('cel59' if is_cel else 'fedora18')
+ platform_path.append('64bit' if is_64bit else '32bit')
+
+ return os.path.normcase(os.path.join(TREX_STL_EXT_PATH, *platform_path))
+
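+# For example (illustrative), the spec {'name': 'pyzmq-14.5.0', 'py-dep': True, 'arch-dep': True}
+# resolves on 64-bit Python 3 over Fedora to:
+#     <TREX_STL_EXT_PATH>/pyzmq-14.5.0/python3/fedora18/64bit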
+
+def import_module_list(modules_list):
+
+ # platform data
+ is_64bit = platform.architecture()[0] == '64bit'
+ is_python3 = (sys.version_info >= (3, 0))
+ is_cel = os.path.exists('/etc/system-profile')
+
+ # regular modules
+ for p in modules_list:
+ full_path = generate_module_path(p, is_python3, is_64bit, is_cel)
+
+ if not os.path.exists(full_path):
+ print("Unable to find required module library: '{0}'".format(p['name']))
+ print("Please provide the correct path using TREX_STL_EXT_PATH variable")
+ print("current path used: '{0}'".format(full_path))
+ exit(1)
+
+ sys.path.insert(1, full_path)
+
+
+
+
+
+import_module_list(CLIENT_UTILS_MODULES)
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py
new file mode 100755
index 00000000..464869aa
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_hltapi.py
@@ -0,0 +1,1595 @@
+#!/router/bin/python
+
+'''
+Supported functions/arguments/defaults:
+'''
+
+connect_kwargs = {
+ 'device': 'localhost', # ip or hostname of TRex
+ 'port_list': None, # list of ports
+ 'username': 'TRexUser',
+ 'reset': True,
+ 'break_locks': False,
+}
+
+cleanup_session_kwargs = {
+ 'maintain_lock': False, # release ports at the end or not
+ 'port_list': None,
+ 'port_handle': None,
+}
+
+traffic_config_kwargs = {
+ 'mode': None, # ( create | modify | remove | reset )
+    'split_by_cores': 'split', # ( split | duplicate | single ) TRex extension: split = split traffic by cores, duplicate = duplicate traffic for all cores, single = run only with single core (not implemented yet)
+    'load_profile': None, # TRex extension: path to filename with stream profile (stream builder parameters will be ignored, limitation: modify)
+    'consistent_random': False, # TRex extension: False (default) = random sequence will be different every run, True = random sequence will be same every run
+    'ignore_macs': False, # TRex extension: True = use MACs from server configuration, no MAC VM (workaround for lack of ARP)
+    'disable_flow_stats': False, # TRex extension: True = don't use flow stats for this stream (workaround for limitation on type of packet for flow_stats)
+    'flow_stats_id': None, # TRex extension: uint, for use of STLHltStream, specifies id for flow stats (see stateless manual for flow_stats details)
+ 'port_handle': None,
+ 'port_handle2': None,
+ 'bidirectional': False,
+ # stream builder parameters
+ 'transmit_mode': 'continuous', # ( continuous | multi_burst | single_burst )
+ 'rate_pps': None,
+ 'rate_bps': None,
+ 'rate_percent': 10,
+ 'stream_id': None,
+ 'name': None,
+    'direction': 0, # TRex extension: 1 = exchange sources and destinations, 0 = do nothing
+ 'pkts_per_burst': 1,
+ 'burst_loop_count': 1,
+ 'inter_burst_gap': 12,
+ 'length_mode': 'fixed', # ( auto | fixed | increment | decrement | random | imix )
+ 'l3_imix1_size': 64,
+ 'l3_imix1_ratio': 7,
+ 'l3_imix2_size': 570,
+ 'l3_imix2_ratio': 4,
+ 'l3_imix3_size': 1518,
+ 'l3_imix3_ratio': 1,
+ 'l3_imix4_size': 9230,
+ 'l3_imix4_ratio': 0,
+ #L2
+ 'frame_size': 64,
+ 'frame_size_min': 64,
+ 'frame_size_max': 64,
+ 'frame_size_step': 1,
+ 'l2_encap': 'ethernet_ii', # ( ethernet_ii | ethernet_ii_vlan )
+ 'mac_src': '00:00:01:00:00:01',
+ 'mac_dst': '00:00:00:00:00:00',
+ 'mac_src2': '00:00:01:00:00:01',
+ 'mac_dst2': '00:00:00:00:00:00',
+ 'mac_src_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'mac_src_step': 1,
+ 'mac_src_count': 1,
+ 'mac_dst_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'mac_dst_step': 1,
+ 'mac_dst_count': 1,
+ 'mac_src2_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'mac_src2_step': 1,
+ 'mac_src2_count': 1,
+ 'mac_dst2_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'mac_dst2_step': 1,
+ 'mac_dst2_count': 1,
+ # vlan options below can have multiple values for nested Dot1Q headers
+ 'vlan_user_priority': 1,
+ 'vlan_priority_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'vlan_priority_count': 1,
+ 'vlan_priority_step': 1,
+ 'vlan_id': 0,
+ 'vlan_id_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'vlan_id_count': 1,
+ 'vlan_id_step': 1,
+ 'vlan_cfi': 1,
+ 'vlan_protocol_tag_id': None,
+ #L3, general
+ 'l3_protocol': None, # ( ipv4 | ipv6 )
+ 'l3_length_min': 110,
+ 'l3_length_max': 238,
+ 'l3_length_step': 1,
+ #L3, IPv4
+ 'ip_precedence': 0,
+ 'ip_tos_field': 0,
+ 'ip_mbz': 0,
+ 'ip_delay': 0,
+ 'ip_throughput': 0,
+ 'ip_reliability': 0,
+ 'ip_cost': 0,
+ 'ip_reserved': 0,
+ 'ip_dscp': 0,
+ 'ip_cu': 0,
+ 'l3_length': None,
+ 'ip_id': 0,
+ 'ip_fragment_offset': 0,
+ 'ip_ttl': 64,
+ 'ip_checksum': None,
+ 'ip_src_addr': '0.0.0.0',
+ 'ip_dst_addr': '192.0.0.1',
+ 'ip_src_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'ip_src_step': 1, # ip or number
+ 'ip_src_count': 1,
+ 'ip_dst_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'ip_dst_step': 1, # ip or number
+ 'ip_dst_count': 1,
+ #L3, IPv6
+ 'ipv6_traffic_class': 0,
+ 'ipv6_flow_label': 0,
+ 'ipv6_length': None,
+ 'ipv6_next_header': None,
+ 'ipv6_hop_limit': 64,
+ 'ipv6_src_addr': 'fe80:0:0:0:0:0:0:12',
+ 'ipv6_dst_addr': 'fe80:0:0:0:0:0:0:22',
+ 'ipv6_src_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'ipv6_src_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number
+ 'ipv6_src_count': 1,
+ 'ipv6_dst_mode': 'fixed', # ( fixed | increment | decrement | random )
+ 'ipv6_dst_step': 1, # we are changing only 32 lowest bits; can be ipv6 or number
+ 'ipv6_dst_count': 1,
+ #L4, TCP
+ 'l4_protocol': None, # ( tcp | udp )
+ 'tcp_src_port': 1024,
+ 'tcp_dst_port': 80,
+ 'tcp_seq_num': 1,
+ 'tcp_ack_num': 1,
+ 'tcp_data_offset': 5,
+ 'tcp_fin_flag': 0,
+ 'tcp_syn_flag': 0,
+ 'tcp_rst_flag': 0,
+ 'tcp_psh_flag': 0,
+ 'tcp_ack_flag': 0,
+ 'tcp_urg_flag': 0,
+ 'tcp_window': 4069,
+ 'tcp_checksum': None,
+ 'tcp_urgent_ptr': 0,
+ 'tcp_src_port_mode': 'increment', # ( increment | decrement | random )
+ 'tcp_src_port_step': 1,
+ 'tcp_src_port_count': 1,
+ 'tcp_dst_port_mode': 'increment', # ( increment | decrement | random )
+ 'tcp_dst_port_step': 1,
+ 'tcp_dst_port_count': 1,
+ # L4, UDP
+ 'udp_src_port': 1024,
+ 'udp_dst_port': 80,
+ 'udp_length': None,
+ 'udp_dst_port_mode': 'increment', # ( increment | decrement | random )
+ 'udp_src_port_step': 1,
+ 'udp_src_port_count': 1,
+ 'udp_src_port_mode': 'increment', # ( increment | decrement | random )
+ 'udp_dst_port_step': 1,
+ 'udp_dst_port_count': 1,
+}
+
+traffic_control_kwargs = {
+ 'action': None, # ( clear_stats | run | stop | sync_run | poll | reset )
+ 'port_handle': None,
+}
+
+traffic_stats_kwargs = {
+ 'mode': 'aggregate', # ( all | aggregate | streams )
+ 'port_handle': None,
+}
+
+
+import sys
+import os
+import socket
+import copy
+from collections import defaultdict
+
+from .api import *
+from .trex_stl_types import *
+from .utils.common import get_number
+
+class HLT_ERR(dict):
+ def __init__(self, log = 'Unknown error', **kwargs):
+ dict.__init__(self, {'status': 0})
+ if type(log) is dict:
+ dict.update(self, log)
+ elif type(log) is str and not log.startswith('[ERR]'):
+ self['log'] = '[ERR] ' + log
+ else:
+ self['log'] = log
+ dict.update(self, kwargs)
+
+class HLT_OK(dict):
+ def __init__(self, init_dict = {}, **kwargs):
+ dict.__init__(self, {'status': 1, 'log': None})
+ dict.update(self, init_dict)
+ dict.update(self, kwargs)
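+
+# Every HLT call below returns one of these dicts; illustrative shapes:
+#     HLT_OK(port_handle = {0: 0}) -> {'status': 1, 'log': None, 'port_handle': {0: 0}}
+#     HLT_ERR('no ports')          -> {'status': 0, 'log': '[ERR] no ports'}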
+
+def merge_kwargs(default_kwargs, user_kwargs):
+ kwargs = copy.deepcopy(default_kwargs)
+ for key, value in user_kwargs.items():
+ if key in kwargs:
+ kwargs[key] = value
+ elif key in ('save_to_yaml', 'save_to_pcap', 'pg_id'): # internal arguments
+ kwargs[key] = value
+ else:
+ print("Warning: provided parameter '%s' is not supported" % key)
+ return kwargs
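+
+# e.g. (illustrative): merge_kwargs({'mode': None, 'rate_percent': 10}, {'mode': 'create'})
+# returns {'mode': 'create', 'rate_percent': 10}; unknown keys are dropped with a warning.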
+
+# change MACs from formats 01-23-45-67-89-10 or 0123.4567.8910 or {01 23 45 67 89 10} to Scapy format 01:23:45:67:89:10
+def correct_macs(kwargs):
+ list_of_mac_args = ['mac_src', 'mac_dst', 'mac_src2', 'mac_dst2']
+ list_of_mac_steps = ['mac_src_step', 'mac_dst_step', 'mac_src2_step', 'mac_dst2_step']
+ for mac_arg in list_of_mac_args + list_of_mac_steps:
+ if mac_arg in kwargs:
+ mac_value = kwargs[mac_arg]
+ if is_integer(mac_value) and mac_arg in list_of_mac_steps: # step can be number
+ continue
+ if type(mac_value) is not str: raise STLError('Argument %s should be str' % mac_arg)
+ mac_value = mac_value.replace('{', '').replace('}', '').strip().replace('-', ' ').replace(':', ' ').replace('.', ' ')
+ if mac_value[4] == ' ' and mac_value[9] == ' ':
+ mac_value = ' '.join([mac_value[0:2], mac_value[2:7], mac_value[7:12], mac_value[12:14]])
+ mac_value = ':'.join(mac_value.split())
+ try:
+ mac2str(mac_value) # verify we are ok
+ kwargs[mac_arg] = mac_value
+ except:
+ raise STLError('Incorrect MAC %s=%s, please use 01:23:45:67:89:10 or 01-23-45-67-89-10 or 0123.4567.8910 or {01 23 45 67 89 10}' % (mac_arg, kwargs[mac_arg]))
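+
+# e.g. (illustrative): {'mac_src': '0123.4567.8910'} becomes {'mac_src': '01:23:45:67:89:10'};
+# '01-23-45-67-89-10' and '{01 23 45 67 89 10}' normalize to the same form.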
+
+def is_true(input):
+    return input in (True, 'True', 'true', 1, '1', 'enable', 'Enable', 'Yes', 'yes', 'y', 'Y', 'enabled', 'Enabled')
+
+def error(err = None):
+ if not err:
+        raise Exception('Unknown exception, inspect the traceback')
+ if type(err) is str and not err.startswith('[ERR]'):
+ err = '[ERR] ' + err
+ print(err)
+ sys.exit(1)
+
+def check_res(res):
+ if res['status'] == 0:
+ error('Encountered error:\n%s' % res['log'])
+ return res
+
+def print_brief_stats(res):
+ title_str = ' '*3
+ tx_str = 'TX:'
+ rx_str = 'RX:'
+ for port_id, stat in res.items():
+ if type(port_id) is not int:
+ continue
+ title_str += ' '*10 + 'Port%s' % port_id
+ tx_str += '%15s' % res[port_id]['aggregate']['tx']['total_pkts']
+ rx_str += '%15s' % res[port_id]['aggregate']['rx']['total_pkts']
+ print(title_str)
+ print(tx_str)
+ print(rx_str)
+
+def wait_with_progress(seconds):
+ for i in range(0, seconds):
+ time.sleep(1)
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ print('')
+
+# dict of streams per port
+# hlt_history = False: holds list of stream_id per port
+# hlt_history = True: act as dictionary (per port) stream_id -> hlt arguments used for build
+class CStreamsPerPort(defaultdict):
+ def __init__(self, hlt_history = False):
+ self.hlt_history = hlt_history
+ if self.hlt_history:
+ defaultdict.__init__(self, dict)
+ else:
+ defaultdict.__init__(self, list)
+
+ def get_stream_list(self, ports_list = None):
+ if self.hlt_history:
+ if ports_list is None:
+ ports_list = self.keys()
+ elif not isinstance(ports_list, list):
+ ports_list = [ports_list]
+ ret = {}
+ for port in ports_list:
+ ret[port] = self[port].keys()
+ return ret
+ else:
+ return self
+
+ # add to stream_id list per port, no HLT args, res = HLT result
+ def add_streams_from_res(self, res):
+ if self.hlt_history: raise STLError('CStreamsPerPort: this object is not meant for HLT history, try init with hlt_history = False')
+ if not isinstance(res, dict): raise STLError('CStreamsPerPort: res should be dict')
+ if res.get('status') != 1: raise STLError('CStreamsPerPort: res has status %s' % res.get('status'))
+ res_streams = res.get('stream_id')
+ if not isinstance(res_streams, dict):
+ raise STLError('CStreamsPerPort: stream_id in res should be dict')
+ for port, port_stream_ids in res_streams.items():
+ if type(port_stream_ids) is not list:
+ port_stream_ids = [port_stream_ids]
+ self[port].extend(port_stream_ids)
+
+ # save HLT args to modify streams later
+ def save_stream_args(self, ports_list, stream_id, stream_hlt_args):
+ if stream_id is None: raise STLError('CStreamsPerPort: no stream_id in stream')
+ if stream_hlt_args.get('load_profile'): return # can't modify profiles, don't save
+ if not self.hlt_history: raise STLError('CStreamsPerPort: this object works only with HLT history, try init with hlt_history = True')
+ if not is_integer(stream_id): raise STLError('CStreamsPerPort: stream_id should be number')
+ if not isinstance(stream_hlt_args, dict): raise STLError('CStreamsPerPort: stream_hlt_args should be dict')
+ if not isinstance(ports_list, list):
+ ports_list = [ports_list]
+ for port in ports_list:
+ if stream_id not in self[port]:
+ self[port][stream_id] = {}
+ self[port][stream_id].update(stream_hlt_args)
+
+ def remove_stream(self, ports_list, stream_id):
+ if not isinstance(ports_list, list):
+ ports_list = [ports_list]
+        if not is_integer(stream_id):
+            raise STLError('CStreamsPerPort: stream_id should be number')
+ for port in ports_list:
+ if port not in self:
+ raise STLError('CStreamsPerPort: port %s not defined' % port)
+ if stream_id not in self[port]:
+                raise STLError('CStreamsPerPort: stream_id %s not found at port %s' % (stream_id, port))
+ if self.hlt_history:
+ del self[port][stream_id]
+ else:
+ self[port].pop(stream_id)
+
+class CTRexHltApi(object):
+
+ def __init__(self, verbose = 0):
+ self.trex_client = None
+ self.verbose = verbose
+ self._last_pg_id = 0 # pg_id acts as stream_handle
+ self._streams_history = {} # streams in format of HLT arguments for modify later
+ self._native_handle_by_pg_id = {} # pg_id -> native handle + port
+ self._pg_id_by_id = {} # stream_id -> pg_id
+ self._pg_id_by_name = {} # name -> pg_id
+
+###########################
+# Session functions #
+###########################
+
+ def connect(self, **user_kwargs):
+ kwargs = merge_kwargs(connect_kwargs, user_kwargs)
+ device = kwargs['device']
+ try:
+ device = socket.gethostbyname(device) # work with ip
+        except: # name resolution can fail transiently - give it another try
+ try:
+ device = socket.gethostbyname(device)
+ except Exception as e:
+ return HLT_ERR('Could not translate hostname "%s" to IP: %s' % (device, e))
+
+ try:
+ self.trex_client = STLClient(kwargs['username'], device, verbose_level = self.verbose)
+ except Exception as e:
+ return HLT_ERR('Could not init stateless client %s: %s' % (device, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ try:
+ self.trex_client.connect()
+ except Exception as e:
+ self.trex_client = None
+ return HLT_ERR('Could not connect to device %s: %s' % (device, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ # connection successfully created with server, try acquiring ports of TRex
+ try:
+ port_list = self._parse_port_list(kwargs['port_list'])
+ self.trex_client.acquire(ports = port_list, force = kwargs['break_locks'])
+ for port in port_list:
+ self._native_handle_by_pg_id[port] = {}
+ except Exception as e:
+ self.trex_client = None
+ return HLT_ERR('Could not acquire ports %s: %s' % (port_list, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ # arrived here, all desired ports were successfully acquired
+ if kwargs['reset']:
+ # remove all port traffic configuration from TRex
+ try:
+ self.trex_client.stop(ports = port_list)
+ self.trex_client.reset(ports = port_list)
+ except Exception as e:
+ self.trex_client = None
+                return HLT_ERR('Error in reset traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ self._streams_history = CStreamsPerPort(hlt_history = True)
+ return HLT_OK(port_handle = dict([(port_id, port_id) for port_id in port_list]))
+
+ def cleanup_session(self, **user_kwargs):
+ kwargs = merge_kwargs(cleanup_session_kwargs, user_kwargs)
+ if not kwargs['maintain_lock']:
+ # release taken ports
+ port_list = kwargs['port_list'] or kwargs['port_handle'] or 'all'
+ try:
+ if port_list == 'all':
+ port_list = self.trex_client.get_acquired_ports()
+ else:
+ port_list = self._parse_port_list(port_list)
+ except Exception as e:
+                return HLT_ERR('Unable to determine which ports to release: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+ try:
+ self.trex_client.stop(port_list)
+ except Exception as e:
+ return HLT_ERR('Unable to stop traffic %s: %s' % (port_list, e if isinstance(e, STLError) else traceback.format_exc()))
+ try:
+ self.trex_client.remove_all_streams(port_list)
+ except Exception as e:
+ return HLT_ERR('Unable to remove all streams %s: %s' % (port_list, e if isinstance(e, STLError) else traceback.format_exc()))
+ try:
+ self.trex_client.release(port_list)
+ except Exception as e:
+ return HLT_ERR('Unable to release ports %s: %s' % (port_list, e if isinstance(e, STLError) else traceback.format_exc()))
+ try:
+ self.trex_client.disconnect(stop_traffic = False, release_ports = False)
+ except Exception as e:
+ return HLT_ERR('Error disconnecting: %s' % e)
+ self.trex_client = None
+ return HLT_OK()
+
+ def interface_config(self, port_handle, mode='config'):
+ if not self.trex_client:
+ return HLT_ERR('Connect first')
+ ALLOWED_MODES = ['config', 'modify', 'destroy']
+ if mode not in ALLOWED_MODES:
+ return HLT_ERR('Mode must be one of the following values: %s' % ALLOWED_MODES)
+ # pass this function for now...
+ return HLT_ERR('interface_config not implemented yet')
+
+
+###########################
+# Traffic functions #
+###########################
+
+ def traffic_config(self, **user_kwargs):
+ if not self.trex_client:
+ return HLT_ERR('Connect first')
+ try:
+ correct_macs(user_kwargs)
+ except Exception as e:
+ return HLT_ERR(e if isinstance(e, STLError) else traceback.format_exc())
+ kwargs = merge_kwargs(traffic_config_kwargs, user_kwargs)
+ stream_id = kwargs['stream_id']
+ mode = kwargs['mode']
+ pg_id = kwargs['flow_stats_id']
+ port_handle = port_list = self._parse_port_list(kwargs['port_handle'])
+
+ ALLOWED_MODES = ['create', 'modify', 'remove', 'enable', 'disable', 'reset']
+ if mode not in ALLOWED_MODES:
+ return HLT_ERR('Mode must be one of the following values: %s' % ALLOWED_MODES)
+
+ if mode == 'reset':
+ try:
+ self.trex_client.remove_all_streams(port_handle)
+ for port in port_handle:
+ if port in self._streams_history:
+ del self._streams_history[port]
+ return HLT_OK()
+ except Exception as e:
+ return HLT_ERR('Could not reset streams at ports %s: %s' % (port_handle, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ if mode == 'remove':
+ if stream_id is None:
+ return HLT_ERR('Please specify stream_id to remove.')
+ if stream_id == 'all':
+ try:
+ self.trex_client.remove_all_streams(port_handle)
+ for port in port_handle:
+ if port in self._streams_history:
+ del self._streams_history[port]
+ except Exception as e:
+ return HLT_ERR('Could not remove all streams at ports %s: %s' % (port_handle, e if isinstance(e, STLError) else traceback.format_exc()))
+ else:
+ try:
+ self._remove_stream(stream_id, port_handle)
+ except Exception as e:
+ return HLT_ERR('Could not remove streams with specified by %s, error: %s' % (stream_id, e if isinstance(e, STLError) else traceback.format_exc()))
+ return HLT_OK()
+
+ #if mode == 'enable':
+ # stream_id = kwargs.get('stream_id')
+ # if stream_id is None:
+ # return HLT_ERR('Please specify stream_id to enable.')
+ # if stream_id not in self._streams_history:
+ # return HLT_ERR('This stream_id (%s) was not used before, please create new.' % stream_id)
+ # self._streams_history[stream_id].update(kwargs) # <- the modification
+
+ if mode == 'modify': # we remove stream and create new one with same stream_id
+ pg_id = kwargs.get('stream_id')
+ if pg_id is None:
+ return HLT_ERR('Please specify stream_id to modify.')
+
+ if len(port_handle) > 1:
+ for port in port_handle:
+ try:
+ user_kwargs['port_handle'] = port
+ res = self.traffic_config(**user_kwargs)
+ if res['status'] == 0:
+ return HLT_ERR('Error during modify of stream: %s' % res['log'])
+ except Exception as e:
+ return HLT_ERR('Could not remove stream(s) %s from port(s) %s: %s' % (stream_id, port_handle, e if isinstance(e, STLError) else traceback.format_exc()))
+ return HLT_OK()
+ else:
+ if type(port_handle) is list:
+ port = port_handle[0]
+ else:
+ port = port_handle
+ if port not in self._streams_history:
+ return HLT_ERR('Port %s was not used/acquired' % port)
+ if pg_id not in self._streams_history[port]:
+ return HLT_ERR('This stream_id (%s) was not used before at port %s, please create new.' % (stream_id, port))
+ new_kwargs = {}
+ new_kwargs.update(self._streams_history[port][pg_id])
+ new_kwargs.update(user_kwargs)
+ user_kwargs = new_kwargs
+ try:
+ self._remove_stream(pg_id, [port])
+ except Exception as e:
+ return HLT_ERR('Could not remove stream(s) %s from port(s) %s: %s' % (stream_id, port_handle, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ if mode == 'create' or mode == 'modify':
+ # create a new stream with desired attributes, starting by creating packet
+ if is_true(kwargs['bidirectional']): # two streams with opposite directions
+ del user_kwargs['bidirectional']
+ stream_per_port = {}
+ save_to_yaml = user_kwargs.get('save_to_yaml')
+ bidirect_err = 'When using bidirectional flag, '
+ if len(port_handle) != 1:
+ return HLT_ERR(bidirect_err + 'port_handle should be single port handle.')
+ port_handle = port_handle[0]
+ port_handle2 = kwargs['port_handle2']
+ if (type(port_handle2) is list and len(port_handle2) > 1) or port_handle2 is None:
+ return HLT_ERR(bidirect_err + 'port_handle2 should be single port handle.')
+ try:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_bi1.yaml')
+ res1 = self.traffic_config(**user_kwargs)
+ if res1['status'] == 0:
+ raise STLError('Could not create bidirectional stream 1: %s' % res1['log'])
+ stream_per_port[port_handle] = res1['stream_id']
+                kwargs['direction'] = 1 - kwargs['direction'] # flip direction for the reverse stream
+ correct_direction(user_kwargs, kwargs)
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_bi2.yaml')
+ user_kwargs['port_handle'] = port_handle2
+ res2 = self.traffic_config(**user_kwargs)
+ if res2['status'] == 0:
+ raise STLError('Could not create bidirectional stream 2: %s' % res2['log'])
+ stream_per_port[port_handle2] = res2['stream_id']
+ except Exception as e:
+                return HLT_ERR('Could not generate bidirectional traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+ if mode == 'create':
+ return HLT_OK(stream_id = stream_per_port)
+ else:
+ return HLT_OK()
+
+ try:
+ if not pg_id:
+ pg_id = self._get_available_pg_id()
+ if kwargs['load_profile']:
+ stream_obj = STLProfile.load_py(kwargs['load_profile'], direction = kwargs['direction'])
+ else:
+ user_kwargs['pg_id'] = pg_id
+ stream_obj = STLHltStream(**user_kwargs)
+ except Exception as e:
+            return HLT_ERR('Could not create stream: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ # try adding the stream per ports
+ try:
+ for port in port_handle:
+ stream_id_arr = self.trex_client.add_streams(streams = stream_obj,
+ ports = port)
+ self._streams_history.save_stream_args(port, pg_id, user_kwargs)
+ if type(stream_id_arr) is not list:
+ stream_id_arr = [stream_id_arr]
+ self._native_handle_by_pg_id[port][pg_id] = stream_id_arr
+ except Exception as e:
+            return HLT_ERR('Could not add stream to ports: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+ if mode == 'create':
+ return HLT_OK(stream_id = pg_id)
+ else:
+ return HLT_OK()
+
+        return HLT_ERR("traffic_config: mode '%s' fell through - not implemented or missing a return" % mode)
+
+ def traffic_control(self, **user_kwargs):
+ if not self.trex_client:
+ return HLT_ERR('Connect first')
+ kwargs = merge_kwargs(traffic_control_kwargs, user_kwargs)
+ action = kwargs['action']
+ port_handle = kwargs['port_handle']
+ ALLOWED_ACTIONS = ['clear_stats', 'run', 'stop', 'sync_run', 'poll', 'reset']
+ if action not in ALLOWED_ACTIONS:
+ return HLT_ERR('Action must be one of the following values: {actions}'.format(actions=ALLOWED_ACTIONS))
+
+ if action == 'run':
+ try:
+ self.trex_client.start(ports = port_handle)
+ except Exception as e:
+                return HLT_ERR('Could not start traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action == 'sync_run': # (clear_stats + run)
+ try:
+ self.trex_client.clear_stats(ports = port_handle)
+ self.trex_client.start(ports = port_handle)
+ except Exception as e:
+                return HLT_ERR('Unable to do sync_run: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action == 'stop':
+ try:
+ self.trex_client.stop(ports = port_handle)
+ except Exception as e:
+                return HLT_ERR('Could not stop traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action == 'reset':
+ try:
+ self.trex_client.reset(ports = port_handle)
+ for port in port_handle:
+ if port in self._streams_history:
+ del self._streams_history[port]
+ except Exception as e:
+                return HLT_ERR('Could not reset traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action == 'clear_stats':
+ try:
+ self.trex_client.clear_stats(ports = port_handle)
+ except Exception as e:
+                return HLT_ERR('Could not clear stats: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ elif action != 'poll': # at poll just return 'stopped' status
+ return HLT_ERR("Action '%s' is not supported yet on TRex" % action)
+
+ try:
+ is_traffic_active = self.trex_client.is_traffic_active(ports = port_handle)
+ except Exception as e:
+            return HLT_ERR('Unable to determine ports status: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ return HLT_OK(stopped = not is_traffic_active)
+
+ def traffic_stats(self, **user_kwargs):
+ if not self.trex_client:
+ return HLT_ERR('Connect first')
+ kwargs = merge_kwargs(traffic_stats_kwargs, user_kwargs)
+ mode = kwargs['mode']
+ port_handle = kwargs['port_handle']
+ if type(port_handle) is not list:
+ port_handle = [port_handle]
+ ALLOWED_MODES = ['aggregate', 'streams', 'all']
+ if mode not in ALLOWED_MODES:
+ return HLT_ERR("'mode' must be one of the following values: %s" % ALLOWED_MODES)
+ hlt_stats_dict = dict([(port, {}) for port in port_handle])
+ try:
+ stats = self.trex_client.get_stats(port_handle)
+ if mode in ('all', 'aggregate'):
+ for port_id in port_handle:
+ port_stats = stats[port_id]
+ if is_integer(port_id):
+ hlt_stats_dict[port_id]['aggregate'] = {
+ 'tx': {
+ 'pkt_bit_rate': port_stats.get('tx_bps', 0),
+ 'pkt_byte_count': port_stats.get('obytes', 0),
+ 'pkt_count': port_stats.get('opackets', 0),
+ 'pkt_rate': port_stats.get('tx_pps', 0),
+ 'total_pkt_bytes': port_stats.get('obytes', 0),
+ 'total_pkt_rate': port_stats.get('tx_pps', 0),
+ 'total_pkts': port_stats.get('opackets', 0),
+ },
+ 'rx': {
+ 'pkt_bit_rate': port_stats.get('rx_bps', 0),
+ 'pkt_byte_count': port_stats.get('ibytes', 0),
+ 'pkt_count': port_stats.get('ipackets', 0),
+ 'pkt_rate': port_stats.get('rx_pps', 0),
+ 'total_pkt_bytes': port_stats.get('ibytes', 0),
+ 'total_pkt_rate': port_stats.get('rx_pps', 0),
+ 'total_pkts': port_stats.get('ipackets', 0),
+ }
+ }
+ if mode in ('all', 'streams'):
+ for pg_id, pg_stats in stats['flow_stats'].items():
+ for port_id in port_handle:
+ if 'stream' not in hlt_stats_dict[port_id]:
+ hlt_stats_dict[port_id]['stream'] = {}
+ hlt_stats_dict[port_id]['stream'][pg_id] = {
+ 'tx': {
+ 'total_pkts': pg_stats['tx_pkts'].get(port_id, 0),
+ 'total_pkt_bytes': pg_stats['tx_bytes'].get(port_id, 0),
+ 'total_pkts_bytes': pg_stats['tx_bytes'].get(port_id, 0),
+ 'total_pkt_bit_rate': pg_stats['tx_bps'].get(port_id, 0),
+ 'total_pkt_rate': pg_stats['tx_pps'].get(port_id, 0),
+ 'line_rate_percentage': pg_stats['tx_line_util'].get(port_id, 0),
+ },
+ 'rx': {
+ 'total_pkts': pg_stats['rx_pkts'].get(port_id, 0),
+ 'total_pkt_bytes': pg_stats['rx_bytes'].get(port_id, 0),
+ 'total_pkts_bytes': pg_stats['rx_bytes'].get(port_id, 0),
+ 'total_pkt_bit_rate': pg_stats['rx_bps'].get(port_id, 0),
+ 'total_pkt_rate': pg_stats['rx_pps'].get(port_id, 0),
+ 'line_rate_percentage': pg_stats['rx_line_util'].get(port_id, 0),
+ },
+ }
+ except Exception as e:
+        return HLT_ERR('Could not retrieve stats: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+ return HLT_OK(hlt_stats_dict)
+
+ # timeout = maximal time to wait
+ def wait_on_traffic(self, port_handle = None, timeout = 60):
+ try:
+ self.trex_client.wait_on_traffic(port_handle, timeout)
+ except Exception as e:
+            return HLT_ERR('Unable to run wait_on_traffic: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+###########################
+# Private functions #
+###########################
+
+ def _get_available_pg_id(self):
+ pg_id = self._last_pg_id
+ used_pg_ids = self.trex_client.get_stats()['flow_stats'].keys()
+ for i in range(65535):
+ pg_id += 1
+ if pg_id not in used_pg_ids:
+ self._last_pg_id = pg_id
+ return pg_id
+ if pg_id == 65535:
+ pg_id = 0
+ raise STLError('Could not find free pg_id in range [1, 65535].')
+
+ # remove streams from given port(s).
+ # stream_id can be:
+ # * int - exact stream_id value
+ # * list - list of stream_id values or strings (see below)
+ # * string - exact stream_id value, mix of ranges/list separated by comma: 2, 4-13
+ def _remove_stream(self, stream_id, port_handle):
+ stream_num = get_number(stream_id)
+ if stream_num is not None: # exact value of int or str
+ for port in port_handle:
+ native_handles = self._native_handle_by_pg_id[port][stream_num]
+ self.trex_client.remove_streams(native_handles, port) # actual remove
+ del self._native_handle_by_pg_id[port][stream_num]
+ del self._streams_history[port][stream_num]
+ return
+ if type(stream_id) is list: # list of values/strings
+ for each_stream_id in stream_id:
+ self._remove_stream(each_stream_id, port_handle) # recurse
+ return
+ if type(stream_id) is str: # range or list in string
+ if ',' in stream_id:
+ for each_stream_id_element in stream_id.split(','):
+ self._remove_stream(each_stream_id_element, port_handle) # recurse
+ return
+ if '-' in stream_id:
+ stream_id_min, stream_id_max = stream_id.split('-', 1)
+ stream_id_min = get_number(stream_id_min)
+ stream_id_max = get_number(stream_id_max)
+ if stream_id_min is None:
+ raise STLError('_remove_stream: wrong range param %s' % stream_id_min)
+ if stream_id_max is None:
+ raise STLError('_remove_stream: wrong range param %s' % stream_id_max)
+ if stream_id_max < stream_id_min:
+ raise STLError('_remove_stream: right range param is smaller than left one: %s-%s' % (stream_id_min, stream_id_max))
+            for each_stream_id in range(stream_id_min, stream_id_max + 1):
+ self._remove_stream(each_stream_id, port_handle) # recurse
+ return
+ raise STLError('_remove_stream: wrong stream_id param %s' % stream_id)
+
+ @staticmethod
+ def _parse_port_list(port_list):
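+        # e.g. (illustrative): '0 1 3' -> [0, 1, 3], [2, '3'] -> [2, 3], 1 -> [1]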
+ if type(port_list) is str:
+ return [int(port) for port in port_list.strip().split()]
+ elif type(port_list) is list:
+ return [int(port) for port in port_list]
+ elif is_integer(port_list):
+ return [int(port_list)]
+ raise STLError('port_list should be string with ports, list, or single number')
+
+def STLHltStream(**user_kwargs):
+ kwargs = merge_kwargs(traffic_config_kwargs, user_kwargs)
+ # verify rate is given by at most one arg
+ rate_args = set(['rate_pps', 'rate_bps', 'rate_percent'])
+ intersect_rate_args = list(rate_args & set(user_kwargs.keys()))
+ if len(intersect_rate_args) > 1:
+ raise STLError('More than one rate argument specified: %s' % intersect_rate_args)
+ try:
+ rate_key = intersect_rate_args[0]
+ except IndexError:
+ rate_key = 'rate_percent'
+    if rate_key == 'rate_percent' and float(kwargs['rate_percent']) > 100:
+ raise STLError('rate_percent should not exceed 100%')
+
+ if kwargs['length_mode'] == 'imix': # several streams with given length
+ streams_arr = []
+ user_kwargs['length_mode'] = 'fixed'
+ if kwargs['l3_imix1_size'] < 32 or kwargs['l3_imix2_size'] < 32 or kwargs['l3_imix3_size'] < 32 or kwargs['l3_imix4_size'] < 32:
+ raise STLError('l3_imix*_size should be at least 32')
+ save_to_yaml = kwargs.get('save_to_yaml')
+ total_rate = float(kwargs[rate_key])
+ if rate_key == 'rate_pps': # ratio in packets as is
+ imix1_weight = kwargs['l3_imix1_ratio']
+ imix2_weight = kwargs['l3_imix2_ratio']
+ imix3_weight = kwargs['l3_imix3_ratio']
+ imix4_weight = kwargs['l3_imix4_ratio']
+        elif rate_key == 'rate_bps': # ratio dependent on L2 size too
+ imix1_weight = kwargs['l3_imix1_ratio'] * kwargs['l3_imix1_size']
+ imix2_weight = kwargs['l3_imix2_ratio'] * kwargs['l3_imix2_size']
+ imix3_weight = kwargs['l3_imix3_ratio'] * kwargs['l3_imix3_size']
+ imix4_weight = kwargs['l3_imix4_ratio'] * kwargs['l3_imix4_size']
+ elif rate_key == 'rate_percent': # ratio dependent on L1 size too
+ imix1_weight = kwargs['l3_imix1_ratio'] * (kwargs['l3_imix1_size'] + 20)
+ imix2_weight = kwargs['l3_imix2_ratio'] * (kwargs['l3_imix2_size'] + 20)
+ imix3_weight = kwargs['l3_imix3_ratio'] * (kwargs['l3_imix3_size'] + 20)
+ imix4_weight = kwargs['l3_imix4_ratio'] * (kwargs['l3_imix4_size'] + 20)
+ total_weight = float(imix1_weight + imix2_weight + imix3_weight + imix4_weight)
+ if total_weight == 0:
+ raise STLError('Used length_mode imix, but all the ratios are 0')
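+        # e.g. (illustrative) with the default ratios 7:4:1:0 under rate_percent, the L1
+        # weights are 7*(64+20)=588, 4*(570+20)=2360, 1*(1518+20)=1538 (total 4486), so the
+        # 64B stream gets ~13.1% of the requested rate, 570B ~52.6% and 1518B ~34.3%.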
+ if kwargs['l3_imix1_ratio'] > 0:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_imix1.yaml')
+ user_kwargs['frame_size'] = kwargs['l3_imix1_size']
+ user_kwargs[rate_key] = total_rate * imix1_weight / total_weight
+ streams_arr.append(STLHltStream(**user_kwargs))
+ if kwargs['l3_imix2_ratio'] > 0:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_imix2.yaml')
+ user_kwargs['frame_size'] = kwargs['l3_imix2_size']
+ user_kwargs[rate_key] = total_rate * imix2_weight / total_weight
+ streams_arr.append(STLHltStream(**user_kwargs))
+ if kwargs['l3_imix3_ratio'] > 0:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_imix3.yaml')
+ user_kwargs['frame_size'] = kwargs['l3_imix3_size']
+ user_kwargs[rate_key] = total_rate * imix3_weight / total_weight
+ streams_arr.append(STLHltStream(**user_kwargs))
+ if kwargs['l3_imix4_ratio'] > 0:
+ if save_to_yaml and type(save_to_yaml) is str:
+ user_kwargs['save_to_yaml'] = save_to_yaml.replace('.yaml', '_imix4.yaml')
+ user_kwargs['frame_size'] = kwargs['l3_imix4_size']
+ user_kwargs[rate_key] = total_rate * imix4_weight / total_weight
+ streams_arr.append(STLHltStream(**user_kwargs))
+ return streams_arr
+
+ # packet generation
+ packet = generate_packet(**user_kwargs)
+
+ # stream generation
+ try:
+ rate_types_dict = {'rate_pps': 'pps', 'rate_bps': 'bps_L2', 'rate_percent': 'percentage'}
+ rate_stateless = {rate_types_dict[rate_key]: float(kwargs[rate_key])}
+ transmit_mode = kwargs['transmit_mode']
+ pkts_per_burst = kwargs['pkts_per_burst']
+ if transmit_mode == 'continuous':
+ transmit_mode_obj = STLTXCont(**rate_stateless)
+ elif transmit_mode == 'single_burst':
+ transmit_mode_obj = STLTXSingleBurst(total_pkts = pkts_per_burst, **rate_stateless)
+ elif transmit_mode == 'multi_burst':
+ transmit_mode_obj = STLTXMultiBurst(total_pkts = pkts_per_burst, count = int(kwargs['burst_loop_count']),
+ ibg = kwargs['inter_burst_gap'], **rate_stateless)
+ else:
+ raise STLError('transmit_mode %s not supported/implemented' % transmit_mode)
+ except Exception as e:
+ raise STLError('Could not create transmit_mode object %s: %s' % (transmit_mode, e if isinstance(e, STLError) else traceback.format_exc()))
+
+ try:
+ if kwargs['l3_protocol'] == 'ipv4' and not kwargs['disable_flow_stats']:
+ pg_id = kwargs.get('pg_id', kwargs.get('flow_stats_id'))
+ else:
+ pg_id = None
+ stream = STLStream(packet = packet,
+ random_seed = 1 if is_true(kwargs['consistent_random']) else 0,
+ #enabled = True,
+ #self_start = True,
+ flow_stats = STLFlowStats(pg_id) if pg_id else None,
+ mode = transmit_mode_obj,
+ )
+ except Exception as e:
+ raise STLError('Could not create stream: %s' % (e if isinstance(e, STLError) else traceback.format_exc()))
+
+ debug_filename = kwargs.get('save_to_yaml')
+ if type(debug_filename) is str:
+ print('saving to %s' % debug_filename)
+ stream.dump_to_yaml(debug_filename)
+ return stream
+
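+# Hedged usage sketch (added, not part of the original module); the argument
+# values are hypothetical:
+def _example_hlt_udp_stream():
+ # one continuous UDP stream at 1000 pps; rate args are mutually exclusive
+ return STLHltStream(l3_protocol = 'ipv4', l4_protocol = 'udp',
+ ip_src_addr = '16.0.0.1', ip_dst_addr = '48.0.0.1',
+ rate_pps = 1000, length_mode = 'fixed', frame_size = 64)
+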
+packet_cache = LRU_cache(maxlen = 20)
+
+def generate_packet(**user_kwargs):
+ correct_macs(user_kwargs)
+ if repr(user_kwargs) in packet_cache:
+ return packet_cache[repr(user_kwargs)]
+ kwargs = merge_kwargs(traffic_config_kwargs, user_kwargs)
+ correct_sizes(kwargs) # we are producing the packet - 4 bytes fcs
+ correct_direction(kwargs, kwargs)
+
+ vm_cmds = []
+ vm_variables_cache = {} # we will keep in cache same variables (inc/dec, var size in bytes, number of steps, step)
+ fix_ipv4_checksum = False
+
+ ### L2 ###
+ if kwargs['l2_encap'] in ('ethernet_ii', 'ethernet_ii_vlan'):
+ #fields_desc = [ MACField("dst","00:00:00:01:00:00"),
+ # MACField("src","00:00:00:02:00:00"),
+ # XShortEnumField("type", 0x9000, ETHER_TYPES) ]
+ if kwargs['ignore_macs']: # workaround for lack of ARP
+ kwargs['mac_src'] = None
+ kwargs['mac_dst'] = None
+ kwargs['mac_src_mode'] = 'fixed'
+ kwargs['mac_dst_mode'] = 'fixed'
+ ethernet_kwargs = {}
+ if kwargs['mac_src']:
+ ethernet_kwargs['src'] = kwargs['mac_src']
+ if kwargs['mac_dst']:
+ ethernet_kwargs['dst'] = kwargs['mac_dst']
+ l2_layer = Ether(**ethernet_kwargs)
+
+ # Eth VM, change only 32 lsb
+ if kwargs['mac_src_mode'] != 'fixed':
+ count = int(kwargs['mac_src_count']) - 1
+ if count < 0:
+ raise STLError('mac_src_count has to be at least 1')
+ if count > 0 or kwargs['mac_src_mode'] == 'random':
+ mac_src = ipv4_str_to_num(mac2str(kwargs['mac_src'])[2:]) # take only 32 lsb
+
+ step = kwargs['mac_src_step']
+
+ if type(step) is str:
+ step = ipv4_str_to_num(mac2str(step)[2:]) # take only 32 lsb
+
+ if step < 1:
+ raise STLError('mac_src_step has to be at least 1')
+
+ if kwargs['mac_src_mode'] == 'increment':
+ add_val = mac_src - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['mac_src_mode'] == 'decrement':
+ add_val = mac_src - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['mac_src_mode'] == 'random':
+ add_val = 0
+ var_name = 'mac_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('mac_src_mode %s is not supported' % kwargs['mac_src_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'Ethernet.src', offset_fixup = 2, add_val = add_val))
+
+ if kwargs['mac_dst_mode'] != 'fixed':
+ count = int(kwargs['mac_dst_count']) - 1
+ if count < 0:
+ raise STLError('mac_dst_count has to be at least 1')
+ if count > 0 or kwargs['mac_dst_mode'] == 'random':
+ mac_dst = ipv4_str_to_num(mac2str(kwargs['mac_dst'])[2:]) # take only 32 lsb
+ step = kwargs['mac_dst_step']
+
+ if type(step) is str:
+ step = ipv4_str_to_num(mac2str(step)[2:]) # take only 32 lsb
+
+ if step < 1:
+ raise STLError('mac_dst_step has to be at least 1')
+
+ if kwargs['mac_dst_mode'] == 'increment':
+ add_val = mac_dst - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['mac_dst_mode'] == 'decrement':
+ add_val = mac_dst - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['mac_dst_mode'] == 'random':
+ add_val = 0
+ var_name = 'mac_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('mac_dst_mode %s is not supported' % kwargs['mac_dst_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'Ethernet.dst', offset_fixup = 2, add_val = add_val))
+
+ if kwargs['l2_encap'] == 'ethernet_ii_vlan' or (kwargs['l2_encap'] == 'ethernet_ii' and vlan_in_args(user_kwargs)):
+ #fields_desc = [ BitField("prio", 0, 3),
+ # BitField("id", 0, 1),
+ # BitField("vlan", 1, 12),
+ # XShortEnumField("type", 0x0000, ETHER_TYPES) ]
+ for i, vlan_kwargs in enumerate(split_vlan_args(kwargs)):
+ vlan_id = int(vlan_kwargs['vlan_id'])
+ dot1q_kwargs = {'prio': int(vlan_kwargs['vlan_user_priority']),
+ 'vlan': vlan_id,
+ 'id': int(vlan_kwargs['vlan_cfi'])}
+ vlan_protocol_tag_id = vlan_kwargs['vlan_protocol_tag_id']
+ if vlan_protocol_tag_id is not None:
+ if type(vlan_protocol_tag_id) is str:
+ vlan_protocol_tag_id = int(vlan_protocol_tag_id, 16)
+ dot1q_kwargs['type'] = vlan_protocol_tag_id
+ l2_layer /= Dot1Q(**dot1q_kwargs)
+
+ # vlan VM
+ vlan_id_mode = vlan_kwargs['vlan_id_mode']
+ if vlan_id_mode != 'fixed':
+ count = int(vlan_kwargs['vlan_id_count']) - 1
+ if count < 0:
+ raise STLError('vlan_id_count has to be at least 1')
+ if count > 0 or vlan_id_mode == 'random':
+ var_name = 'vlan_id%s' % i
+ step = int(vlan_kwargs['vlan_id_step'])
+ if step < 1:
+ raise STLError('vlan_id_step has to be at least 1')
+ if vlan_id_mode == 'increment':
+ add_val = vlan_id - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+ min_value = 0x7fff,
+ max_value = 0x7fff + count * step))
+ vm_variables_cache[var_name] = True
+ elif vlan_id_mode == 'decrement':
+ add_val = vlan_id - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+ min_value = 0x7fff - count * step,
+ max_value = 0x7fff))
+ vm_variables_cache[var_name] = True
+ elif vlan_id_mode == 'random':
+ add_val = 0
+ var_name = 'vlan_id_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('vlan_id_mode %s is not supported' % vlan_id_mode)
+ vm_cmds.append(STLVmWrMaskFlowVar(fv_name = var_name, pkt_offset = '802|1Q:%s.vlan' % i,
+ pkt_cast_size = 2, mask = 0xfff, add_value = add_val))
+ else:
+ raise NotImplementedError("l2_encap does not support the desired encapsulation '%s'" % kwargs['l2_encap'])
+ base_pkt = l2_layer
+
+ ### L3 ###
+ if kwargs['l3_protocol'] is None:
+ l3_layer = None
+ elif kwargs['l3_protocol'] == 'ipv4':
+ #fields_desc = [ BitField("version" , 4 , 4),
+ # BitField("ihl", None, 4),
+ # XByteField("tos", 0),
+ # ShortField("len", None),
+ # ShortField("id", 1),
+ # FlagsField("flags", 0, 3, ["MF","DF","evil"]),
+ # BitField("frag", 0, 13),
+ # ByteField("ttl", 64),
+ # ByteEnumField("proto", 0, IP_PROTOS),
+ # XShortField("chksum", None),
+ # Emph(IPField("src", "16.0.0.1")),
+ # Emph(IPField("dst", "48.0.0.1")),
+ # PacketListField("options", [], IPOption, length_from=lambda p:p.ihl*4-20) ]
+ ip_tos = get_TOS(user_kwargs, kwargs)
+ if ip_tos < 0 or ip_tos > 255:
+ raise STLError('TOS %s is not in range 0-255' % ip_tos)
+ l3_layer = IP(tos = ip_tos,
+ #len = kwargs['l3_length'], don't let user create corrupt packets
+ id = kwargs['ip_id'],
+ frag = kwargs['ip_fragment_offset'],
+ ttl = kwargs['ip_ttl'],
+ chksum = kwargs['ip_checksum'],
+ src = kwargs['ip_src_addr'],
+ dst = kwargs['ip_dst_addr'],
+ )
+ # IPv4 VM
+ if kwargs['ip_src_mode'] != 'fixed':
+ count = int(kwargs['ip_src_count']) - 1
+ if count < 0:
+ raise STLError('ip_src_count has to be at least 1')
+ if count > 0 or kwargs['ip_src_mode'] == 'random':
+ fix_ipv4_checksum = True
+ ip_src_addr = kwargs['ip_src_addr']
+ if type(ip_src_addr) is str:
+ ip_src_addr = ipv4_str_to_num(is_valid_ipv4(ip_src_addr))
+ step = kwargs['ip_src_step']
+ if type(step) is str:
+ step = ipv4_str_to_num(is_valid_ipv4(step))
+
+ if step < 1:
+ raise STLError('ip_src_step has to be at least 1')
+
+ if kwargs['ip_src_mode'] == 'increment':
+ add_val = ip_src_addr - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ip_src_mode'] == 'decrement':
+ add_val = ip_src_addr - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ip_src_mode'] == 'random':
+ add_val = 0
+ var_name = 'ip_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('ip_src_mode %s is not supported' % kwargs['ip_src_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IP.src', add_val = add_val))
+
+ if kwargs['ip_dst_mode'] != 'fixed':
+ count = int(kwargs['ip_dst_count']) - 1
+ if count < 0:
+ raise STLError('ip_dst_count has to be at least 1')
+ if count > 0 or kwargs['ip_dst_mode'] == 'random':
+ fix_ipv4_checksum = True
+ ip_dst_addr = kwargs['ip_dst_addr']
+ if type(ip_dst_addr) is str:
+ ip_dst_addr = ipv4_str_to_num(is_valid_ipv4(ip_dst_addr))
+ step = kwargs['ip_dst_step']
+
+ if type(step) is str:
+ step = ipv4_str_to_num(is_valid_ipv4(step))
+
+ if step < 1:
+ raise STLError('ip_dst_step has to be at least 1')
+
+ if kwargs['ip_dst_mode'] == 'increment':
+ add_val = ip_dst_addr - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ip_dst_mode'] == 'decrement':
+ add_val = ip_dst_addr - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ip_dst_mode'] == 'random':
+ add_val = 0
+ var_name = 'ip_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('ip_dst_mode %s is not supported' % kwargs['ip_dst_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IP.dst', add_val = add_val))
+
+ elif kwargs['l3_protocol'] == 'ipv6':
+ #fields_desc = [ BitField("version" , 6 , 4),
+ # BitField("tc", 0, 8), #TODO: IPv6, ByteField ?
+ # BitField("fl", 0, 20),
+ # ShortField("plen", None),
+ # ByteEnumField("nh", 59, ipv6nh),
+ # ByteField("hlim", 64),
+ # IP6Field("dst", "::2"),
+ # #SourceIP6Field("src", "dst"), # dst is for src @ selection
+ # IP6Field("src", "::1") ]
+ ipv6_kwargs = {'tc': kwargs['ipv6_traffic_class'],
+ 'fl': kwargs['ipv6_flow_label'],
+ 'plen': kwargs['ipv6_length'],
+ 'hlim': kwargs['ipv6_hop_limit'],
+ 'src': kwargs['ipv6_src_addr'],
+ 'dst': kwargs['ipv6_dst_addr']}
+ if kwargs['ipv6_next_header'] is not None:
+ ipv6_kwargs['nh'] = kwargs['ipv6_next_header']
+ l3_layer = IPv6(**ipv6_kwargs)
+
+ # IPv6 VM, change only 32 lsb
+ if kwargs['ipv6_src_mode'] != 'fixed':
+ count = int(kwargs['ipv6_src_count']) - 1
+ if count < 0:
+ raise STLError('ipv6_src_count has to be at least 1')
+ if count > 0 or kwargs['ipv6_src_mode'] == 'random':
+ ipv6_src_addr_num = ipv4_str_to_num(is_valid_ipv6(kwargs['ipv6_src_addr'])[-4:])
+ step = kwargs['ipv6_src_step']
+
+ if type(step) is str: # convert ipv6 step to number
+ step = ipv4_str_to_num(is_valid_ipv6(step)[-4:])
+
+ if step < 1:
+ raise STLError('ipv6_src_step has to be at least 1')
+
+ if kwargs['ipv6_src_mode'] == 'increment':
+ add_val = ipv6_src_addr_num - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ipv6_src_mode'] == 'decrement':
+ add_val = ipv6_src_addr_num - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ipv6_src_mode'] == 'random':
+ add_val = 0
+ var_name = 'ipv6_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('ipv6_src_mode %s is not supported' % kwargs['ipv6_src_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IPv6.src', offset_fixup = 12, add_val = add_val))
+
+ if kwargs['ipv6_dst_mode'] != 'fixed':
+ count = int(kwargs['ipv6_dst_count']) - 1
+ if count < 0:
+ raise STLError('ipv6_dst_count has to be at least 1')
+ if count > 0 or kwargs['ipv6_dst_mode'] == 'random':
+ ipv6_dst_addr_num = ipv4_str_to_num(is_valid_ipv6(kwargs['ipv6_dst_addr'])[-4:])
+ step = kwargs['ipv6_dst_step']
+
+ if type(step) is str: # convert ipv6 step to number
+ step = ipv4_str_to_num(is_valid_ipv6(step)[-4:])
+
+ if step < 1:
+ raise STLError('ipv6_dst_step has to be at least 1')
+
+ if kwargs['ipv6_dst_mode'] == 'increment':
+ add_val = ipv6_dst_addr_num - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('inc', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'inc', step = step,
+ min_value = 0x7fffffff,
+ max_value = 0x7fffffff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ipv6_dst_mode'] == 'decrement':
+ add_val = ipv6_dst_addr_num - 0x7fffffff
+ var_name = '%s_%s_%s_%s' % ('dec', 4, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'dec', step = step,
+ min_value = 0x7fffffff - count * step,
+ max_value = 0x7fffffff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['ipv6_dst_mode'] == 'random':
+ add_val = 0
+ var_name = 'ipv6_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 4, op = 'random', max_value = 0xffffffff))
+ else:
+ raise STLError('ipv6_dst_mode %s is not supported' % kwargs['ipv6_dst_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'IPv6.dst', offset_fixup = 12, add_val = add_val))
+
+ elif kwargs['l3_protocol'] is not None:
+ raise NotImplementedError("l3_protocol '%s' is not supported by TRex yet." % kwargs['l3_protocol'])
+ if l3_layer is not None:
+ base_pkt /= l3_layer
+
+ ### L4 ###
+ l4_layer = None
+ if kwargs['l4_protocol'] == 'tcp':
+ assert kwargs['l3_protocol'] in ('ipv4', 'ipv6'), 'TCP must be over ipv4/ipv6'
+ #fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
+ # ShortEnumField("dport", 80, TCP_SERVICES),
+ # IntField("seq", 0),
+ # IntField("ack", 0),
+ # BitField("dataofs", None, 4),
+ # BitField("reserved", 0, 4),
+ # FlagsField("flags", 0x2, 8, "FSRPAUEC"),
+ # ShortField("window", 8192),
+ # XShortField("chksum", None),
+ # ShortField("urgptr", 0),
+ # TCPOptionsField("options", {}) ]
+
+ tcp_flags = (('F' if kwargs['tcp_fin_flag'] else '') +
+ ('S' if kwargs['tcp_syn_flag'] else '') +
+ ('R' if kwargs['tcp_rst_flag'] else '') +
+ ('P' if kwargs['tcp_psh_flag'] else '') +
+ ('A' if kwargs['tcp_ack_flag'] else '') +
+ ('U' if kwargs['tcp_urg_flag'] else ''))
+
+ l4_layer = TCP(sport = kwargs['tcp_src_port'],
+ dport = kwargs['tcp_dst_port'],
+ seq = kwargs['tcp_seq_num'],
+ ack = kwargs['tcp_ack_num'],
+ dataofs = kwargs['tcp_data_offset'],
+ flags = tcp_flags,
+ window = kwargs['tcp_window'],
+ chksum = kwargs['tcp_checksum'],
+ urgptr = kwargs['tcp_urgent_ptr'],
+ )
+ # TCP VM
+ if kwargs['tcp_src_port_mode'] != 'fixed':
+ count = int(kwargs['tcp_src_port_count']) - 1
+ if count < 0:
+ raise STLError('tcp_src_port_count has to be at least 1')
+ if count > 0 or kwargs['tcp_src_port_mode'] == 'random':
+ fix_ipv4_checksum = True
+ step = kwargs['tcp_src_port_step']
+ if step < 1:
+ raise STLError('tcp_src_port_step has to be at least 1')
+ if kwargs['tcp_src_port_mode'] == 'increment':
+ add_val = kwargs['tcp_src_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+ min_value = 0x7fff,
+ max_value = 0x7fff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['tcp_src_port_mode'] == 'decrement':
+ add_val = kwargs['tcp_src_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+ min_value = 0x7fff - count * step,
+ max_value = 0x7fff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['tcp_src_port_mode'] == 'random':
+ add_val = 0
+ var_name = 'tcp_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('tcp_src_port_mode %s is not supported' % kwargs['tcp_src_port_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'TCP.sport', add_val = add_val))
+
+ if kwargs['tcp_dst_port_mode'] != 'fixed':
+ count = int(kwargs['tcp_dst_port_count']) - 1
+ if count < 0:
+ raise STLError('tcp_dst_port_count has to be at least 1')
+ if count > 0 or kwargs['tcp_dst_port_mode'] == 'random':
+ fix_ipv4_checksum = True
+ step = kwargs['tcp_dst_port_step']
+ if step < 1:
+ raise STLError('tcp_dst_port_step has to be at least 1')
+ if kwargs['tcp_dst_port_mode'] == 'increment':
+ add_val = kwargs['tcp_dst_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+ min_value = 0x7fff,
+ max_value = 0x7fff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['tcp_dst_port_mode'] == 'decrement':
+ add_val = kwargs['tcp_dst_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+ min_value = 0x7fff - count * step,
+ max_value = 0x7fff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['tcp_dst_port_mode'] == 'random':
+ add_val = 0
+ var_name = 'tcp_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('tcp_dst_port_mode %s is not supported' % kwargs['tcp_dst_port_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'TCP.dport', add_val = add_val))
+
+ elif kwargs['l4_protocol'] == 'udp':
+ assert kwargs['l3_protocol'] in ('ipv4', 'ipv6'), 'UDP must be over ipv4/ipv6'
+ #fields_desc = [ ShortEnumField("sport", 53, UDP_SERVICES),
+ # ShortEnumField("dport", 53, UDP_SERVICES),
+ # ShortField("len", None),
+ # XShortField("chksum", None), ]
+ l4_layer = UDP(sport = kwargs['udp_src_port'],
+ dport = kwargs['udp_dst_port'],
+ len = kwargs['udp_length'], chksum = None)
+ # UDP VM
+ if kwargs['udp_src_port_mode'] != 'fixed':
+ count = int(kwargs['udp_src_port_count']) - 1
+ if count < 0:
+ raise STLError('udp_src_port_count has to be at least 1')
+ if count > 0 or kwargs['udp_src_port_mode'] == 'random':
+ fix_ipv4_checksum = True
+ step = kwargs['udp_src_port_step']
+ if step < 1:
+ raise STLError('udp_src_port_step has to be at least 1')
+ if kwargs['udp_src_port_mode'] == 'increment':
+ add_val = kwargs['udp_src_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+ min_value = 0x7fff,
+ max_value = 0x7fff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['udp_src_port_mode'] == 'decrement':
+ add_val = kwargs['udp_src_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+ min_value = 0x7fff - count * step,
+ max_value = 0x7fff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['udp_src_port_mode'] == 'random':
+ add_val = 0
+ var_name = 'udp_src_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('udp_src_port_mode %s is not supported' % kwargs['udp_src_port_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'UDP.sport', add_val = add_val))
+
+ if kwargs['udp_dst_port_mode'] != 'fixed':
+ count = int(kwargs['udp_dst_port_count']) - 1
+ if count < 0:
+ raise STLError('udp_dst_port_count has to be at least 1')
+ if count > 0 or kwargs['udp_dst_port_mode'] == 'random':
+ fix_ipv4_checksum = True
+ step = kwargs['udp_dst_port_step']
+ if step < 1:
+ raise STLError('udp_dst_port_step has to be at least 1')
+ if kwargs['udp_dst_port_mode'] == 'increment':
+ add_val = kwargs['udp_dst_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('inc', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'inc', step = step,
+ min_value = 0x7fff,
+ max_value = 0x7fff + count * step))
+ vm_variables_cache[var_name] = True
+ elif kwargs['udp_dst_port_mode'] == 'decrement':
+ add_val = kwargs['udp_dst_port'] - 0x7fff
+ var_name = '%s_%s_%s_%s' % ('dec', 2, count, step)
+ if var_name not in vm_variables_cache:
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'dec', step = step,
+ min_value = 0x7fff - count * step,
+ max_value = 0x7fff))
+ vm_variables_cache[var_name] = True
+ elif kwargs['udp_dst_port_mode'] == 'random':
+ add_val = 0
+ var_name = 'udp_dst_random'
+ vm_cmds.append(STLVmFlowVar(name = var_name, size = 2, op = 'random', max_value = 0xffff))
+ else:
+ raise STLError('udp_dst_port_mode %s is not supported' % kwargs['udp_dst_port_mode'])
+ vm_cmds.append(STLVmWrFlowVar(fv_name = var_name, pkt_offset = 'UDP.dport', add_val = add_val))
+ elif kwargs['l4_protocol'] is not None:
+ raise NotImplementedError("l4_protocol '%s' is not supported by TRex yet." % kwargs['l4_protocol'])
+ if l4_layer is not None:
+ base_pkt /= l4_layer
+
+ trim_dict = {'increment': 'inc', 'decrement': 'dec', 'random': 'random'}
+ length_mode = kwargs['length_mode']
+ if length_mode == 'auto':
+ payload_len = 0
+ elif length_mode == 'fixed':
+ if 'frame_size' in user_kwargs: # L2 has higher priority over L3
+ payload_len = kwargs['frame_size'] - len(base_pkt)
+ elif 'l3_length' in user_kwargs:
+ payload_len = kwargs['l3_length'] - (len(base_pkt) - len(l2_layer))
+ else: # default
+ payload_len = kwargs['frame_size'] - len(base_pkt)
+ elif length_mode == 'imix':
+ raise STLError("length_mode 'imix' should be treated at stream creating level.")
+ elif length_mode in trim_dict:
+ if 'frame_size_min' in user_kwargs or 'frame_size_max' in user_kwargs: # size is determined by L2, higher priority over L3 size
+ if kwargs['frame_size_min'] < 44 or kwargs['frame_size_max'] < 44:
+ raise STLError('frame_size_min and frame_size_max should be at least 44')
+ if kwargs['frame_size_min'] > kwargs['frame_size_max']:
+ raise STLError('frame_size_min is bigger than frame_size_max')
+ if kwargs['frame_size_min'] != kwargs['frame_size_max']:
+ fix_ipv4_checksum = True
+ vm_cmds.append(STLVmFlowVar(name = 'pkt_len', size = 2, op = trim_dict[length_mode], step = kwargs['frame_size_step'],
+ min_value = kwargs['frame_size_min'],
+ max_value = kwargs['frame_size_max']))
+ vm_cmds.append(STLVmTrimPktSize('pkt_len'))
+ payload_len = kwargs['frame_size_max'] - len(base_pkt)
+ else: # size is determined by L3
+ if kwargs['l3_length_min'] < 40 or kwargs['l3_length_max'] < 40:
+ raise STLError('l3_length_min and l3_length_max should be at least 40')
+ if kwargs['l3_length_min'] > kwargs['l3_length_max']:
+ raise STLError('l3_length_min is bigger than l3_length_max')
+ if kwargs['l3_length_min'] != kwargs['l3_length_max']:
+ fix_ipv4_checksum = True
+ vm_cmds.append(STLVmFlowVar(name = 'pkt_len', size = 2, op = trim_dict[length_mode], step = kwargs['l3_length_step'],
+ min_value = kwargs['l3_length_min'] + len(l2_layer),
+ max_value = kwargs['l3_length_max'] + len(l2_layer)))
+ payload_len = kwargs['l3_length_max'] + len(l2_layer) - len(base_pkt)
+ vm_cmds.append(STLVmTrimPktSize('pkt_len'))
+
+ if (l3_layer and l3_layer.name == 'IP'):
+ vm_cmds.append(STLVmWrFlowVar(fv_name = 'pkt_len', pkt_offset = 'IP.len', add_val = -len(l2_layer)))
+ if (l4_layer and l4_layer.name == 'UDP'):
+ vm_cmds.append(STLVmWrFlowVar(fv_name = 'pkt_len', pkt_offset = 'UDP.len', add_val = -len(l2_layer) - len(l3_layer)))
+ else:
+ raise STLError('length_mode should be one of the following: %s' % (['auto', 'fixed'] + list(trim_dict.keys())))
+
+ if payload_len < 0:
+ raise STLError('Packet headers are longer than the size defined by frame_size* or l3_length* (resulting payload size is %s)' % payload_len)
+ base_pkt /= '!' * payload_len
+
+ pkt = STLPktBuilder()
+ pkt.set_packet(base_pkt)
+ if fix_ipv4_checksum and l3_layer.name == 'IP' and kwargs['ip_checksum'] is None:
+ vm_cmds.append(STLVmFixIpv4(offset = 'IP'))
+ if vm_cmds:
+ split_by_field = None
+ if kwargs['split_by_cores'] == 'split':
+ max_length = 0
+ for cmd in vm_cmds:
+ if isinstance(cmd, STLVmFlowVar):
+ if cmd.op not in ('inc', 'dec'):
+ continue
+ length = float(cmd.max_value - cmd.min_value) / cmd.step
+ if cmd.name == 'ip_src' and length > 7: # priority is to split by ip_src
+ split_by_field = 'ip_src'
+ break
+ if length > max_length:
+ max_length = length
+ split_by_field = cmd.name
+ elif kwargs['split_by_cores'] == 'single':
+ raise STLError("split_by_cores 'single' not implemented yet")
+ elif kwargs['split_by_cores'] != 'duplicate':
+ raise STLError("split_by_cores '%s' is not supported" % kwargs['split_by_cores'])
+ pkt.add_command(STLScVmRaw(vm_cmds, split_by_field))
+
+ # debug (only the base packet, without VM)
+ debug_filename = kwargs.get('save_to_pcap')
+ if type(debug_filename) is str:
+ pkt.dump_pkt_to_pcap(debug_filename)
+ packet_cache[repr(user_kwargs)] = pkt
+ return pkt
+
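+# Note (added): generate_packet memoizes on repr(user_kwargs) via packet_cache,
+# so repeated calls with identical HLT arguments reuse the same builder object.
+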
+def get_TOS(user_kwargs, kwargs):
+ TOS0 = set(['ip_precedence', 'ip_tos_field', 'ip_mbz'])
+ TOS1 = set(['ip_precedence', 'ip_delay', 'ip_throughput', 'ip_reliability', 'ip_cost', 'ip_reserved'])
+ TOS2 = set(['ip_dscp', 'ip_cu'])
+ user_args = set(user_kwargs.keys())
+ if user_args & (TOS1 - TOS0) and user_args & (TOS0 - TOS1):
+ raise STLError('You have mixed %s and %s TOS parameters' % (TOS0, TOS1))
+ if user_args & (TOS2 - TOS0) and user_args & (TOS0 - TOS2):
+ raise STLError('You have mixed %s and %s TOS parameters' % (TOS0, TOS2))
+ if user_args & (TOS2 - TOS1) and user_args & (TOS1 - TOS2):
+ raise STLError('You have mixed %s and %s TOS parameters' % (TOS1, TOS2))
+ if user_args & (TOS0 - TOS1 - TOS2):
+ return (kwargs['ip_precedence'] << 5) + (kwargs['ip_tos_field'] << 2) + kwargs['ip_mbz']
+ if user_args & (TOS1 - TOS2):
+ return (kwargs['ip_precedence'] << 5) + (kwargs['ip_delay'] << 4) + (kwargs['ip_throughput'] << 3) + (kwargs['ip_reliability'] << 2) + (kwargs['ip_cost'] << 1) + kwargs['ip_reserved']
+ return (kwargs['ip_dscp'] << 2) + kwargs['ip_cu']
+
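+# Hedged sketch (added) of the three supported TOS encodings, values hypothetical:
+# {'ip_dscp': 46} -> (46 << 2) + ip_cu == 184 for ip_cu == 0
+# {'ip_precedence': 5, 'ip_tos_field': 2, 'ip_mbz': 0} -> (5 << 5) + (2 << 2) + 0 == 168
+# Mixing parameters from different encodings raises STLError.
+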
+def vlan_in_args(user_kwargs):
+ for arg in user_kwargs:
+ if arg.startswith('vlan_'):
+ return True
+ return False
+
+def split_vlan_arg(vlan_arg):
+ if type(vlan_arg) is list:
+ return vlan_arg
+ if is_integer(vlan_arg) or vlan_arg is None:
+ return [vlan_arg]
+ if type(vlan_arg) is str:
+ return vlan_arg.replace('{', '').replace('}', '').strip().split()
+ raise STLError('vlan argument invalid (expecting list, int, long, str, None): %s' % vlan_arg)
+
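+# Examples (added): split_vlan_arg('{10 20}') and split_vlan_arg('10 20') both
+# give ['10', '20'] (e.g. outer and inner QinQ tags); split_vlan_arg(10) gives [10].
+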
+def split_vlan_args(kwargs):
+ vlan_args_dict = {}
+ for arg, value in kwargs.items():
+ if arg.startswith('vlan_'):
+ vlan_args_dict[arg] = split_vlan_arg(value)
+ dot1q_headers_count = max([len(x) for x in vlan_args_dict.values()])
+ vlan_args_per_header = [{} for _ in range(dot1q_headers_count)]
+ for arg, value in vlan_args_dict.items():
+ for i in range(dot1q_headers_count):
+ if len(value) > i:
+ vlan_args_per_header[i][arg] = value[i]
+ else:
+ vlan_args_per_header[i][arg] = traffic_config_kwargs[arg]
+ return vlan_args_per_header
+
+def correct_direction(user_kwargs, kwargs):
+ if kwargs['direction'] == 0:
+ return
+ user_kwargs['mac_src'] = kwargs['mac_src2']
+ user_kwargs['mac_dst'] = kwargs['mac_dst2']
+ if kwargs['l3_protocol'] == 'ipv4':
+ for arg in kwargs.keys():
+ if 'ip_src_' in arg:
+ dst_arg = 'ip_dst_' + arg[7:]
+ user_kwargs[arg], user_kwargs[dst_arg] = kwargs[dst_arg], kwargs[arg]
+ elif kwargs['l3_protocol'] == 'ipv6':
+ for arg in kwargs.keys():
+ if 'ipv6_src_' in arg:
+ dst_arg = 'ipv6_dst_' + arg[9:]
+ user_kwargs[arg], user_kwargs[dst_arg] = kwargs[dst_arg], kwargs[arg]
+
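+# Example of the swap (added): with direction == 1 and l3_protocol == 'ipv4',
+# every ip_src_*/ip_dst_* pair is exchanged and mac_src2/mac_dst2 become the
+# L2 addresses of the generated packet.
+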
+# we produce packets without fcs, so need to reduce produced sizes
+def correct_sizes(kwargs):
+ for arg, value in kwargs.items():
+ if is_integer(value):
+ if arg.endswith(('_length', '_size', '_size_min', '_size_max', '_length_min', '_length_max')):
+ kwargs[arg] -= 4
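+
+# Hedged self-check of the FCS adjustment above (added, illustrative only):
+def _example_correct_sizes():
+ kwargs = {'frame_size': 64, 'l3_length': 46, 'name': 'x'}
+ correct_sizes(kwargs)
+ assert kwargs == {'frame_size': 60, 'l3_length': 42, 'name': 'x'}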
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
new file mode 100644
index 00000000..1461fcec
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
@@ -0,0 +1,284 @@
+#!/router/bin/python
+
+import zmq
+import json
+import re
+from collections import namedtuple
+import zlib
+import struct
+
+from .trex_stl_types import *
+from .utils.common import random_id_gen
+from .utils.zipmsg import ZippedMsg
+
+class bcolors:
+ BLUE = '\033[94m'
+ GREEN = '\033[32m'
+ YELLOW = '\033[93m'
+ RED = '\033[31m'
+ MAGENTA = '\033[35m'
+ ENDC = '\033[0m'
+ BOLD = '\033[1m'
+ UNDERLINE = '\033[4m'
+
+# helper class describing a batch of RPC messages
+class BatchMessage(object):
+ def __init__ (self, rpc_client):
+ self.rpc_client = rpc_client
+ self.batch_list = []
+
+ def add (self, method_name, params = None, api_class = 'core'):
+
+ id, msg = self.rpc_client.create_jsonrpc_v2(method_name, params, api_class, encode = False)
+ self.batch_list.append(msg)
+
+ def invoke(self, block = False):
+ if not self.rpc_client.connected:
+ return RC_ERR("Not connected to server")
+
+ msg = json.dumps(self.batch_list)
+
+ return self.rpc_client.send_msg(msg)
+
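+# Hedged usage sketch (added), assuming an already-connected JsonRpcClient `rpc`;
+# method names are illustrative:
+#
+# batch = rpc.create_batch()
+# batch.add('ping')
+# batch.add('ping')
+# rc = batch.invoke() # returns an RC aggregating per-call results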
+
+# JSON RPC v2.0 client
+class JsonRpcClient(object):
+
+ def __init__ (self, default_server, default_port, client):
+ self.client_api = client.api_h
+ self.logger = client.logger
+ self.connected = False
+
+ # default values
+ self.port = default_port
+ self.server = default_server
+
+ self.id_gen = random_id_gen()
+ self.zipper = ZippedMsg()
+
+ def get_connection_details (self):
+ rc = {}
+ rc['server'] = self.server
+ rc['port'] = self.port
+
+ return rc
+
+ # pretty print for JSON
+ def pretty_json (self, json_str, use_colors = True):
+ pretty_str = json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True)
+
+ if not use_colors:
+ return pretty_str
+
+ try:
+ # int numbers
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*[^.])',r'\1{0}\2{1}'.format(bcolors.BLUE, bcolors.ENDC), pretty_str)
+ # float
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*\.[0-9]+)',r'\1{0}\2{1}'.format(bcolors.MAGENTA, bcolors.ENDC), pretty_str)
+ # strings
+
+ pretty_str = re.sub(r'([ ]*:[ ]+)("[^"]*")',r'\1{0}\2{1}'.format(bcolors.RED, bcolors.ENDC), pretty_str)
+ pretty_str = re.sub(r"('[^']*')", r'{0}\1{1}'.format(bcolors.MAGENTA, bcolors.RED), pretty_str)
+ except Exception:
+ pass
+
+ return pretty_str
+
+ def verbose_msg (self, msg):
+ self.logger.log("\n\n[verbose] " + msg, level = self.logger.VERBOSE_HIGH)
+
+
+ # batch messages
+ def create_batch (self):
+ return BatchMessage(self)
+
+ def create_jsonrpc_v2 (self, method_name, params = None, api_class = 'core', encode = True):
+ msg = {}
+ msg["jsonrpc"] = "2.0"
+ msg["method"] = method_name
+ msg["id"] = next(self.id_gen)
+
+ msg["params"] = params if params is not None else {}
+
+ # if this RPC has an API class - add its handler
+ if api_class:
+ msg["params"]["api_h"] = self.client_api[api_class]
+
+
+ if encode:
+ return msg["id"], json.dumps(msg)
+ else:
+ return msg["id"], msg
+
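+ # For reference (added): an encoded request looks like
+ # {"jsonrpc": "2.0", "id": ..., "method": "ping", "params": {"api_h": ...}}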
+
+ def invoke_rpc_method (self, method_name, params = None, api_class = 'core'):
+ if not self.connected:
+ return RC_ERR("Not connected to server")
+
+ id, msg = self.create_jsonrpc_v2(method_name, params, api_class)
+
+ return self.send_msg(msg)
+
+
+ def send_msg (self, msg):
+ # print before
+ if self.logger.check_verbose(self.logger.VERBOSE_HIGH):
+ self.verbose_msg("Sending Request To Server:\n\n" + self.pretty_json(msg) + "\n")
+
+ # encode string to buffer
+ buffer = msg.encode()
+
+ if self.zipper.check_threshold(buffer):
+ response = self.send_raw_msg(self.zipper.compress(buffer))
+ if response:
+ response = self.zipper.decompress(response)
+ else:
+ response = self.send_raw_msg(buffer)
+
+ if not response:
+ return response
+
+ # return to string
+ response = response.decode()
+
+ # print after
+ if self.logger.check_verbose(self.logger.VERBOSE_HIGH):
+ self.verbose_msg("Server Response:\n\n" + self.pretty_json(response) + "\n")
+
+ # process response (batch and regular)
+ try:
+ response_json = json.loads(response)
+ except (TypeError, ValueError):
+ return RC_ERR("*** [RPC] - Failed to decode response from server")
+
+ if isinstance(response_json, list):
+ return self.process_batch_response(response_json)
+ else:
+ return self.process_single_response(response_json)
+
+
+
+ # low level send of string message
+ def send_raw_msg (self, msg):
+
+ tries = 0
+ while True:
+ try:
+ self.socket.send(msg)
+ break
+ except zmq.Again:
+ tries += 1
+ if tries > 5:
+ self.disconnect()
+ return RC_ERR("*** [RPC] - Failed to send message to server")
+
+
+ tries = 0
+ while True:
+ try:
+ response = self.socket.recv()
+ break
+ except zmq.Again:
+ tries += 1
+ if tries > 5:
+ self.disconnect()
+ return RC_ERR("*** [RPC] - Failed to get server response from {0}".format(self.transport))
+
+
+ return response
+
+
+
+ # process a single response from the server
+ def process_single_response (self, response_json):
+
+ if (response_json.get("jsonrpc") != "2.0"):
+ return RC_ERR("Malformed Response ({0})".format(str(response_json)))
+
+ # error reported by server
+ if ("error" in response_json):
+ if "specific_err" in response_json["error"]:
+ return RC_ERR(response_json["error"]["specific_err"])
+ else:
+ return RC_ERR(response_json["error"]["message"])
+
+
+ # if no error there should be a result
+ if ("result" not in response_json):
+ return RC_ERR("Malformed Response ({0})".format(str(response_json)))
+
+ return RC_OK(response_json["result"])
+
+
+
+ # process a batch response
+ def process_batch_response (self, response_json):
+ rc_batch = RC()
+
+ for single_response in response_json:
+ rc = self.process_single_response(single_response)
+ rc_batch.add(rc)
+
+ return rc_batch
+
+
+ def disconnect (self):
+ if self.connected:
+ self.socket.close(linger = 0)
+ self.context.destroy(linger = 0)
+ self.connected = False
+ return RC_OK()
+ else:
+ return RC_ERR("Not connected to server")
+
+
+ def connect(self, server = None, port = None):
+ if self.connected:
+ self.disconnect()
+
+ self.context = zmq.Context()
+
+ self.server = (server if server else self.server)
+ self.port = (port if port else self.port)
+
+ # Socket to talk to server
+ self.transport = "tcp://{0}:{1}".format(self.server, self.port)
+
+ self.socket = self.context.socket(zmq.REQ)
+ try:
+ self.socket.connect(self.transport)
+ except zmq.error.ZMQError as e:
+ return RC_ERR("ZMQ Error: Bad server or port name: " + str(e))
+
+ self.socket.setsockopt(zmq.SNDTIMEO, 10000)
+ self.socket.setsockopt(zmq.RCVTIMEO, 10000)
+
+ self.connected = True
+
+ rc = self.invoke_rpc_method('ping', api_class = None)
+ if not rc:
+ self.connected = False
+ return rc
+
+ return RC_OK()
+
+
+ def reconnect(self):
+ # reconnect using the current server and port values
+ return self.connect()
+
+
+ def is_connected(self):
+ return self.connected
+
+ def __del__(self):
+ self.logger.log("Shutting down RPC client\n")
+ if hasattr(self, "context"):
+ self.context.destroy(linger=0)
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_interface.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_interface.py
new file mode 100644
index 00000000..b6e7c026
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_interface.py
@@ -0,0 +1,43 @@
+
+# base object class for a packet builder
+class CTrexPktBuilderInterface(object):
+
+ def compile (self):
+ """
+ Compiles the packet and VM
+ """
+ raise Exception("implement me")
+
+
+ def dump_pkt(self):
+ """
+ Dumps the packet as a decimal array of bytes (each item is a value between 0 and 255)
+
+ :parameters:
+ None
+
+ :return:
+ + packet representation as array of bytes
+
+ :raises:
+ + :exc:`CTRexPktBuilder.EmptyPacketError`, in case packet is empty.
+
+ """
+
+ raise Exception("implement me")
+
+
+ def get_vm_data(self):
+ """
+ Dumps the instructions
+
+ :parameters:
+ None
+
+ :return:
+ + json object of instructions
+
+ """
+
+ raise Exception("implement me")
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
new file mode 100755
index 00000000..dc06f9fb
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
@@ -0,0 +1,1698 @@
+import random
+import string
+import struct
+import socket
+import json
+import yaml
+import binascii
+import base64
+import inspect
+import copy
+
+from .trex_stl_packet_builder_interface import CTrexPktBuilderInterface
+from .trex_stl_types import *
+from scapy.all import *
+
+class CTRexPacketBuildException(Exception):
+ """
+ This is the general Packet Building error exception class.
+ """
+ def __init__(self, code, message):
+ self.code = code
+ self.message = message
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __repr__(self):
+ return u"[errcode:%r] %r" % (self.code, self.message)
+
+################################################################################################
+
+def safe_ord (c):
+ if type(c) is str:
+ return ord(c)
+ elif type(c) is int:
+ return c
+ else:
+ raise TypeError("Cannot convert: {0} of type: {1}".format(c, type(c)))
+
+def _buffer_to_num(str_buffer):
+ validate_type('str_buffer', str_buffer, bytes)
+ res=0
+ for i in str_buffer:
+ res = res << 8
+ res += safe_ord(i)
+ return res
+
+
+def ipv4_str_to_num (ipv4_buffer):
+ validate_type('ipv4_buffer', ipv4_buffer, bytes)
+ assert len(ipv4_buffer)==4, 'Size of ipv4_buffer is not 4'
+ return _buffer_to_num(ipv4_buffer)
+
+def mac_str_to_num (mac_buffer):
+ validate_type('mac_buffer', mac_buffer, bytes)
+ assert len(mac_buffer)==6, 'Size of mac_buffer is not 6'
+ return _buffer_to_num(mac_buffer)
+
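+# Examples (added): ipv4_str_to_num(b'\x10\x00\x00\x01') == 0x10000001 and
+# mac_str_to_num(b'\x00\x00\x00\x01\x00\x00') == 65536.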
+
+def is_valid_ipv4(ip_addr):
+ """
+ Return buffer in network order
+ """
+ if type(ip_addr) == bytes and len(ip_addr) == 4:
+ return ip_addr
+
+ if type(ip_addr)== int:
+ ip_addr = socket.inet_ntoa(struct.pack("!I", ip_addr))
+
+ try:
+ return socket.inet_pton(socket.AF_INET, ip_addr)
+ except AttributeError: # no inet_pton here, sorry
+ return socket.inet_aton(ip_addr)
+ except socket.error: # not a valid address
+ raise CTRexPacketBuildException(-10,"Not valid ipv4 format");
+
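+# Examples (added): is_valid_ipv4('16.0.0.1'), is_valid_ipv4(0x10000001) and
+# is_valid_ipv4(b'\x10\x00\x00\x01') all return the network-order buffer
+# b'\x10\x00\x00\x01'; an invalid address raises CTRexPacketBuildException.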
+
+def is_valid_ipv6(ipv6_addr):
+ """
+ Return buffer in network order
+ """
+ if type(ipv6_addr) == bytes and len(ipv6_addr) == 16:
+ return ipv6_addr
+ try:
+ return socket.inet_pton(socket.AF_INET6, ipv6_addr)
+ except AttributeError: # no inet_pton here, sorry
+ raise CTRexPacketBuildException(-10, 'No inet_pton function available')
+ except:
+ raise CTRexPacketBuildException(-10, 'Not a valid ipv6 format')
+
+class CTRexScriptsBase(object):
+ """
+ VM Script base class
+ """
+ def clone (self):
+ return copy.deepcopy(self)
+
+
+class CTRexScFieldRangeBase(CTRexScriptsBase):
+
+ FILED_TYPES = ['inc', 'dec', 'rand']
+
+ def __init__(self, field_name,
+ field_type
+ ):
+ super(CTRexScFieldRangeBase, self).__init__()
+ self.field_name =field_name
+ self.field_type =field_type
+ if not self.field_type in CTRexScFieldRangeBase.FILED_TYPES :
+ raise CTRexPacketBuildException(-12, 'Field type should be in %s' % CTRexScFieldRangeBase.FILED_TYPES)
+
+
+class CTRexScFieldRangeValue(CTRexScFieldRangeBase):
+ """
+ Range of field values
+ """
+ def __init__(self, field_name,
+ field_type,
+ min_value,
+ max_value
+ ):
+ super(CTRexScFieldRangeValue, self).__init__(field_name,field_type)
+ self.min_value =min_value;
+ self.max_value =max_value;
+ if min_value > max_value:
+ raise CTRexPacketBuildException(-12, 'Invalid range: min is greater than max.');
+ if min_value == max_value:
+ raise CTRexPacketBuildException(-13, "Invalid range: min value is equal to max value.");
+
+
+class CTRexScIpv4SimpleRange(CTRexScFieldRangeBase):
+ """
+ Range of ipv4 ip
+ """
+ def __init__(self, field_name, field_type, min_ip, max_ip):
+ super(CTRexScIpv4SimpleRange, self).__init__(field_name,field_type)
+ self.min_ip = min_ip
+ self.max_ip = max_ip
+ mmin=ipv4_str_to_num (is_valid_ipv4(min_ip))
+ mmax=ipv4_str_to_num (is_valid_ipv4(max_ip))
+ if mmin > mmax :
+ raise CTRexPacketBuildException(-11, 'CTRexScIpv4SimpleRange m_min ip is bigger than max');
+
+
+class CTRexScIpv4TupleGen(CTRexScriptsBase):
+ """
+ Range tuple
+ """
+ FLAGS_ULIMIT_FLOWS =1
+
+ def __init__(self, min_ipv4, max_ipv4, num_flows=100000, min_port=1025, max_port=65535, flags=0):
+ super(CTRexScIpv4TupleGen, self).__init__()
+ self.min_ip = min_ipv4
+ self.max_ip = max_ipv4
+ mmin=ipv4_str_to_num (is_valid_ipv4(min_ipv4))
+ mmax=ipv4_str_to_num (is_valid_ipv4(max_ipv4))
+ if mmin > mmax :
+ raise CTRexPacketBuildException(-11, 'CTRexScIpv4SimpleRange m_min ip is bigger than max');
+
+ self.num_flows=num_flows;
+
+ self.min_port =min_port
+ self.max_port =max_port
+ self.flags = flags
+
+
+class CTRexScTrimPacketSize(CTRexScriptsBase):
+ """
+ Trim packet size. Field type is CTRexScFieldRangeBase.FILED_TYPES = ["inc","dec","rand"]
+ """
+ def __init__(self,field_type="rand",min_pkt_size=None, max_pkt_size=None):
+ super(CTRexScTrimPacketSize, self).__init__()
+ self.field_type = field_type
+ self.min_pkt_size = min_pkt_size
+ self.max_pkt_size = max_pkt_size
+ if max_pkt_size != None and min_pkt_size !=None :
+ if min_pkt_size == max_pkt_size:
+ raise CTRexPacketBuildException(-11, 'CTRexScTrimPacketSize min_pkt_size is the same as max_pkt_size ');
+
+ if min_pkt_size > max_pkt_size:
+ raise CTRexPacketBuildException(-11, 'CTRexScTrimPacketSize min_pkt_size is bigger than max_pkt_size ');
+
+
+class STLScVmRaw(CTRexScriptsBase):
+ """
+ Raw instructions
+ """
+ def __init__(self,list_of_commands=None,split_by_field=None,cache_size=None):
+ """
+ Include a list of a basic instructions objects.
+
+ :parameters:
+ list_of_commands : list
+ list of instructions
+
+ split_by_field : string
+ by which field to split to threads
+
+ cache_size : uint16_t
+ If bigger than zero, field-engine results will be cached; this speeds up the program at the cost of limiting the number of distinct packets to the cache size. The cache size is limited to the pool size.
+
+ The following example splits the generated traffic by "ip_src" variable.
+
+ .. code-block:: python
+
+ # Split by
+
+ # TCP SYN
+ base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")
+
+
+ # vm
+ vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src",
+ min_value="16.0.0.0",
+ max_value="16.0.0.254",
+ size=4, op="inc"),
+
+
+ STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ),
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+ ]
+ ,split_by_field = "ip_src",
+ cache_size = 1000
+ )
+
+ """
+
+ super(STLScVmRaw, self).__init__()
+ self.split_by_field = split_by_field
+ self.cache_size = cache_size
+
+ if list_of_commands==None:
+ self.commands =[]
+ else:
+ self.commands = list_of_commands
+
+ def add_cmd (self,cmd):
+ self.commands.append(cmd)
+
+
+
+################################################################################################
+# VM raw instructions
+################################################################################################
+
+class CTRexVmInsBase(object):
+ """
+ Instruction base
+ """
+ def __init__(self, ins_type):
+ self.type = ins_type
+ validate_type('ins_type', ins_type, str)
+
+class CTRexVmInsFixIpv4(CTRexVmInsBase):
+ def __init__(self, offset):
+ super(CTRexVmInsFixIpv4, self).__init__("fix_checksum_ipv4")
+ self.pkt_offset = offset
+ validate_type('offset', offset, int)
+
+class CTRexVmInsFixHwCs(CTRexVmInsBase):
+ L4_TYPE_UDP = 11
+ L4_TYPE_TCP = 13
+
+ def __init__(self, l2_len,l3_len,l4_type):
+ super(CTRexVmInsFixHwCs, self).__init__("fix_checksum_hw")
+ self.l2_len = l2_len
+ validate_type('l2_len', l2_len, int)
+ self.l3_len = l3_len
+ validate_type('l3_len', l3_len, int)
+ self.l4_type = l4_type
+ validate_type('l4_type', l4_type, int)
+
+
+
+class CTRexVmInsFlowVar(CTRexVmInsBase):
+ #TBD add more validation tests
+
+ OPERATIONS =['inc', 'dec', 'random']
+ VALID_SIZES =[1, 2, 4, 8]
+
+ def __init__(self, fv_name, size, op, init_value, min_value, max_value,step):
+ super(CTRexVmInsFlowVar, self).__init__("flow_var")
+ self.name = fv_name;
+ validate_type('fv_name', fv_name, str)
+ self.size = size
+ self.op = op
+ self.init_value = init_value
+ validate_type('init_value', init_value, int)
+ assert init_value >= 0, 'init_value (%s) is negative' % init_value
+ self.min_value=min_value
+ validate_type('min_value', min_value, int)
+ assert min_value >= 0, 'min_value (%s) is negative' % min_value
+ self.max_value=max_value
+ validate_type('max_value', max_value, int)
+ assert max_value >= 0, 'max_value (%s) is negative' % max_value
+ self.step=step
+ validate_type('step', step, int)
+ assert step >= 0, 'step (%s) is negative' % step
+
+class CTRexVmInsFlowVarRandLimit(CTRexVmInsBase):
+ #TBD add more validation tests
+
+ VALID_SIZES =[1, 2, 4, 8]
+
+ def __init__(self, fv_name, size, limit, seed, min_value, max_value):
+ super(CTRexVmInsFlowVarRandLimit, self).__init__("flow_var_rand_limit")
+ self.name = fv_name;
+ validate_type('fv_name', fv_name, str)
+ self.size = size
+ self.limit=limit
+ validate_type('limit', limit, int)
+ assert limit >= 0, 'limit (%s) is negative' % limit
+ self.seed=seed
+ validate_type('seed', seed, int)
+ self.min_value=min_value
+ validate_type('min_value', min_value, int)
+ assert min_value >= 0, 'min_value (%s) is negative' % min_value
+ self.max_value=max_value
+ validate_type('max_value', max_value, int)
+ assert max_value >= 0, 'max_value (%s) is negative' % max_value
+
+
+class CTRexVmInsWrFlowVar(CTRexVmInsBase):
+ def __init__(self, fv_name, pkt_offset, add_value=0, is_big_endian=True):
+ super(CTRexVmInsWrFlowVar, self).__init__("write_flow_var")
+ self.name = fv_name
+ validate_type('fv_name', fv_name, str)
+ self.pkt_offset = pkt_offset
+ validate_type('pkt_offset', pkt_offset, int)
+ self.add_value = add_value
+ validate_type('add_value', add_value, int)
+ self.is_big_endian = is_big_endian
+ validate_type('is_big_endian', is_big_endian, bool)
+
+class CTRexVmInsWrMaskFlowVar(CTRexVmInsBase):
+ def __init__(self, fv_name, pkt_offset,pkt_cast_size,mask,shift,add_value, is_big_endian=True):
+ super(CTRexVmInsWrMaskFlowVar, self).__init__("write_mask_flow_var")
+ self.name = fv_name
+ validate_type('fv_name', fv_name, str)
+ self.pkt_offset = pkt_offset
+ validate_type('pkt_offset', pkt_offset, int)
+ self.pkt_cast_size = pkt_cast_size
+ validate_type('pkt_cast_size', pkt_cast_size, int)
+ self.mask = mask
+ validate_type('mask', mask, int)
+ self.shift = shift
+ validate_type('shift', shift, int)
+ self.add_value =add_value
+ validate_type('add_value', add_value, int)
+ self.is_big_endian = is_big_endian
+ validate_type('is_big_endian', is_big_endian, bool)
+
+class CTRexVmInsTrimPktSize(CTRexVmInsBase):
+ def __init__(self,fv_name):
+ super(CTRexVmInsTrimPktSize, self).__init__("trim_pkt_size")
+ self.name = fv_name
+ validate_type('fv_name', fv_name, str)
+
+class CTRexVmInsTupleGen(CTRexVmInsBase):
+ def __init__(self, fv_name, ip_min, ip_max, port_min, port_max, limit_flows, flags=0):
+ super(CTRexVmInsTupleGen, self).__init__("tuple_flow_var")
+ self.name =fv_name
+ validate_type('fv_name', fv_name, str)
+ self.ip_min = ip_min;
+ self.ip_max = ip_max;
+ self.port_min = port_min;
+ self.port_max = port_max;
+ self.limit_flows = limit_flows;
+ self.flags =flags;
+
+
+################################################################################################
+#
+class CTRexVmEngine(object):
+
+ def __init__(self):
+ """
+ Include a list of instructions.
+ """
+ super(CTRexVmEngine, self).__init__()
+ self.ins=[]
+ self.split_by_var = ''
+ self.cache_size = 0
+
+
+ # return as json
+ def get_json (self):
+ inst_array = [];
+ # dump it as dict
+ for obj in self.ins:
+ inst_array.append(obj.__dict__);
+
+ d={'instructions': inst_array, 'split_by_var': self.split_by_var};
+ if self.cache_size >0 :
+ d['cache']=self.cache_size
+ return d
+
+ def add_ins (self,ins):
+ #assert issubclass(ins, CTRexVmInsBase)
+ self.ins.append(ins);
+
+ def dump (self):
+ cnt=0;
+ for obj in self.ins:
+ print("ins",cnt)
+ cnt = cnt +1
+ print(obj.__dict__)
+
+ def dump_bjson (self):
+ print(json.dumps(self.get_json(), sort_keys=True, indent=4))
+
+ def dump_as_yaml (self):
+ print(yaml.dump(self.get_json(), default_flow_style=False))
+
+
+
+################################################################################################
+
+class CTRexScapyPktUtl(object):
+
+ def __init__(self, scapy_pkt):
+ self.pkt = scapy_pkt
+
+ def pkt_iter (self):
+ p = self.pkt
+ while True:
+ yield p
+ p = p.payload
+ if p is None or isinstance(p, NoPayload):
+ break
+
+ def get_list_iter(self):
+ l=list(self.pkt_iter())
+ return l
+
+
+ def get_pkt_layers(self):
+ """
+ Return string 'IP:UDP:TCP'
+ """
+ l=self.get_list_iter ();
+ l1=map(lambda p: p.name,l );
+ return ":".join(l1);
+
+ def _layer_offset(self, name, cnt = 0):
+ """
+ Return (layer, offset) for the given layer name and index. Example: ('IP', 1) refers to the second IP layer.
+ """
+ save_cnt=cnt
+ for pkt in self.pkt_iter ():
+ if pkt.name == name:
+ if cnt==0:
+ return (pkt, pkt.offset)
+ else:
+ cnt=cnt -1
+
+ raise CTRexPacketBuildException(-11,("no layer %s-%d" % (name, save_cnt)));
+
+
+ def layer_offset(self, name, cnt = 0):
+ """
+ Return the offset of the given layer. Example: ('IP', 1) returns the offset of the second IP layer.
+ """
+ save_cnt=cnt
+ for pkt in self.pkt_iter ():
+ if pkt.name == name:
+ if cnt==0:
+ return pkt.offset
+ else:
+ cnt=cnt -1
+
+ raise CTRexPacketBuildException(-11,("no layer %s-%d" % (name, save_cnt)));
+
+ def get_field_offet(self, layer, layer_cnt, field_name):
+ """
+ Return (offset, size) of a field within the given layer. Example: ('IP', 1, 'src') refers to the src field of the second IP layer.
+ """
+ t=self._layer_offset(layer,layer_cnt);
+ l_offset=t[1];
+ layer_pkt=t[0]
+
+ #layer_pkt.dump_fields_offsets ()
+
+ for f in layer_pkt.fields_desc:
+ if f.name == field_name:
+ return (l_offset+f.offset,f.get_size_bytes ());
+
+ raise CTRexPacketBuildException(-11, "No layer %s-%d." % (name, save_cnt, field_name));
+
+ def get_layer_offet_by_str(self, layer_des):
+ """
+ Return the layer offset by string description.
+
+ :parameters:
+
+ 'IP' or 'IP:0' - first IP layer
+ 'IP:1' - second IP layer
+
+ Returns the offset.
+
+
+ """
+ l1=layer_des.split(":")
+ layer=""
+ layer_cnt=0;
+
+ if len(l1)==1:
+ layer=l1[0];
+ else:
+ layer=l1[0];
+ layer_cnt=int(l1[1]);
+
+ return self.layer_offset(layer, layer_cnt)
+
+
+
+ def get_field_offet_by_str(self, field_des):
+ """
+ Return (offset, size) for a field description of the form layer[:cnt].field
+ Examples:
+ 802|1Q.vlan - the vlan field of 802.1Q (replace '.' inside a layer name with '|')
+ IP.src - src field of the first IP layer (same as IP:0.src)
+ IP:1.src - src field of the inner (second) IP layer
+
+ Return (offset, size) as a tuple.
+
+
+ """
+
+ s=field_des.split(".");
+ if len(s)!=2:
+ raise CTRexPacketBuildException(-11, ("Field desription should be layer:cnt.field Example: IP.src or IP:1.src"));
+
+
+ layer_ex = s[0].replace("|",".")
+ field = s[1]
+
+ l1=layer_ex.split(":")
+ layer=""
+ layer_cnt=0;
+
+ if len(l1)==1:
+ layer=l1[0];
+ else:
+ layer=l1[0];
+ layer_cnt=int(l1[1]);
+
+ return self.get_field_offet(layer,layer_cnt,field)
+
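+ # Example (added): for Ether()/IP()/UDP(), get_field_offet_by_str('IP.src')
+ # returns (26, 4): 14 bytes of Ethernet plus the 12-byte offset of src in IP.
+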
+ def has_IPv4 (self):
+ return self.pkt.has_layer("IP");
+
+ def has_IPv6 (self):
+ return self.pkt.has_layer("IPv6");
+
+ def has_UDP (self):
+ return self.pkt.has_layer("UDP");
+
+################################################################################################
+
+class CTRexVmDescBase(object):
+ """
+ Instruction base
+ """
+ def __init__(self):
+ pass;
+
+ def get_obj(self):
+ return self;
+
+ def get_json(self):
+ return self.get_obj().__dict__
+
+ def dump_bjson(self):
+ print(json.dumps(self.get_json(), sort_keys=True, indent=4))
+
+ def dump_as_yaml(self):
+ print(yaml.dump(self.get_json(), default_flow_style=False))
+
+
+ def get_var_ref (self):
+ '''
+ Virtual function; returns the name of a referenced variable, if any.
+ '''
+ return None
+
+ def get_var_name(self):
+ '''
+ Virtual function; returns the variable name if it exists.
+ '''
+ return None
+
+ def compile(self,parent):
+ '''
+ Virtual function; takes a parent that provides a name_to_offset function.
+ '''
+ pass;
+
+
+def valid_fv_size (size):
+ if not (size in CTRexVmInsFlowVar.VALID_SIZES):
+ raise CTRexPacketBuildException(-11,("Flow var has invalid size %d ") % size );
+
+def valid_fv_ops (op):
+ if not (op in CTRexVmInsFlowVar.OPERATIONS):
+ raise CTRexPacketBuildException(-11,("Flow var has invalid op %s ") % op );
+
+def get_max_by_size (size):
+ d={
+ 1:((1<<8) -1),
+ 2:((1<<16)-1),
+ 4:((1<<32)-1),
+ 8:0xffffffffffffffff
+ };
+ return d[size]
+
+def convert_val (val):
+ if is_integer(val):
+ return val
+ if type(val) == str:
+ return ipv4_str_to_num (is_valid_ipv4(val))
+ raise CTRexPacketBuildException(-11,("init val invalid %s ") % val );
+
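+# Examples (added): convert_val(5) == 5; convert_val('16.0.0.1') == 0x10000001;
+# other types raise CTRexPacketBuildException.
+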
+def check_for_int (val):
+ validate_type('val', val, int)
+
+
+class STLVmFlowVar(CTRexVmDescBase):
+
+ def __init__(self, name, init_value=None, min_value=0, max_value=255, size=4, step=1,op="inc"):
+ """
+ Flow variable instruction. Allocates a variable on a stream context. The size argument determines the variable size.
+ The operation can be "inc", "dec", or "random".
+ For increment and decrement operations, a "step" size can be set.
+ For all operations, the initialization value and the minimum and maximum values can be set.
+
+ :parameters:
+ name : string
+ Name of the stream variable
+
+ init_value : int
+ Init value of the variable. If not specified, it will be min_value
+
+ min_value : int
+ Min value
+
+ max_value : int
+ Max value
+
+ size : int
+ Number of bytes of the variable. Possible values: 1,2,4,8 for uint8_t, uint16_t, uint32_t, uint64_t
+
+ step : int
+ Step in case of "inc" or "dec" operations
+
+ op : string
+ Possible values: "inc", "dec", "random"
+
+ .. code-block:: python
+
+ # Example1
+
+ # input
+ STLVmFlowVar(min_value=0, max_value=3, size=1,op="inc")
+
+ # output 0,1,2,3,0,1,2,3 ..
+
+ # input
+ STLVmFlowVar(min_value=0, max_value=3, size=1,op="dec")
+
+ # output 3,2,1,0,3,2,1,0 .. (init value defaults to max_value for "dec")
+
+
+ # input
+ STLVmFlowVar(min_value=0, max_value=3, size=1,op="random")
+
+ # output 1,1,2,3,1,2,1,0 ..
+
+ # input
+ STLVmFlowVar(min_value=0, max_value=10, size=1,op="inc",step=3)
+
+ # output 0,3,6,9,0,3,6,9,0..
+
+
+ """
+ super(STLVmFlowVar, self).__init__()
+ self.name = name;
+ validate_type('name', name, str)
+ self.size =size
+ valid_fv_size(size)
+ self.op =op
+ valid_fv_ops (op)
+
+ # choose default value for init val
+ if init_value == None:
+ init_value = max_value if op == "dec" else min_value
+
+ self.init_value = convert_val (init_value)
+ self.min_value = convert_val (min_value);
+ self.max_value = convert_val (max_value)
+ self.step = convert_val (step)
+
+ if self.min_value > self.max_value :
+ raise CTRexPacketBuildException(-11,("max %d is lower than min %d ") % (self.max_value,self.min_value) );
+
+ def get_obj (self):
+ return CTRexVmInsFlowVar(self.name,self.size,self.op,self.init_value,self.min_value,self.max_value,self.step);
+
+ def get_var_name(self):
+ return [self.name]
+
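+# A minimal sketch (an editor's illustration, not part of the original API):
+# a typical pairing of STLVmFlowVar with STLVmWrFlowVar and STLVmFixIpv4
+# (both defined later in this file; resolved at call time) to sweep the IPv4
+# source address. The variable name "ip_src" is hypothetical.
+def _example_flow_var_vm ():
+ return STLScVmRaw([ STLVmFlowVar(name = "ip_src",
+ min_value = "16.0.0.1",
+ max_value = "16.0.0.254",
+ size = 4, op = "inc"),
+ STLVmWrFlowVar(fv_name = "ip_src", pkt_offset = "IP.src"),
+ STLVmFixIpv4(offset = "IP") ])
+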
+class STLVmFlowVarRepetableRandom(CTRexVmDescBase):
+
+ def __init__(self, name, size=4, limit=100, seed=None, min_value=0, max_value=None):
+ """
+ Flow variable instruction for repeatable random values with a limit on the number of distinct values generated. Allocates memory on a stream context.
+ The size argument determines the variable size; it can be 1, 2, 4 or 8 bytes.
+
+ :parameters:
+ name : string
+ Name of the stream variable
+
+ size : int
+ Number of bytes of the variable. Possible values: 1,2,4,8 for uint8_t, uint16_t, uint32_t, uint64_t
+
+ limit : int
+ The number of distinct random values generated before the sequence repeats
+
+ seed : int
+ For deterministic result, you can set this to a uint16_t number
+
+ min_value : int
+ Min value
+
+ max_value : int
+ Max value
+
+
+ .. code-block:: python
+
+ # Example1
+
+ # input: 1 byte of random, with a limit of 5 distinct values
+ STLVmFlowVarRepetableRandom("var1",size=1,limit=5)
+
+ # output 255,1,7,129,8, ==> repeat 255,1,7,129,8
+
+ STLVmFlowVarRepetableRandom("var1",size=4,limit=100,min_value=0x12345678, max_value=0x32345678)
+
+
+ """
+ super(STLVmFlowVarRepetableRandom, self).__init__()
+ self.name = name;
+ validate_type('name', name, str)
+ self.size =size
+ valid_fv_size(size)
+ self.limit =limit
+
+ if seed == None:
+ self.seed = random.randint(1, 32000)
+ else:
+ self.seed = seed
+
+ self.min_value = convert_val (min_value);
+
+ if max_value == None :
+ self.max_value = get_max_by_size (self.size)
+ else:
+ self.max_value = convert_val (max_value)
+
+ if self.min_value > self.max_value :
+ raise CTRexPacketBuildException(-11,("max %d is lower than min %d ") % (self.max_value,self.min_value) );
+
+ def get_obj (self):
+ return CTRexVmInsFlowVarRandLimit(self.name, self.size, self.limit, self.seed, self.min_value, self.max_value);
+
+ def get_var_name(self):
+ return [self.name]
+
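+# A minimal sketch (an editor's illustration, not part of the original API):
+# drawing source ports from a bounded pool of 50 repeatable random values and
+# writing them to UDP.sport. The variable name "sport" is hypothetical.
+def _example_repeatable_random_vm ():
+ return STLScVmRaw([ STLVmFlowVarRepetableRandom("sport", size = 2, limit = 50,
+ min_value = 1025, max_value = 65535),
+ STLVmWrFlowVar(fv_name = "sport", pkt_offset = "UDP.sport") ])
+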
+class STLVmFixChecksumHw(CTRexVmDescBase):
+ def __init__(self, l3_offset,l4_offset,l4_type):
+ """
+ Fix the IPv4 header checksum and the TCP/UDP checksum using hardware assist.
+ Use this if the packet header or the data payload has changed and the checksums need to be fixed.
+ This instruction works on NICs that support this hardware offload.
+
+ For fixing only the IPv4 header checksum, use STLVmFixIpv4. This instruction should be used when both L3 and L4 need to be fixed.
+
+ Examples of supported packets:
+
+ Ether()/(IPv4|IPv6)/(UDP|TCP)
+ SomeTunnel()/(IPv4|IPv6)/(UDP|TCP)
+
+
+ :parameters:
+ l3_offset : offset in bytes
+ **IPv4/IPv6 header** offset from packet start. It is **not** the offset of the checksum field itself.
+ For a Scapy packet, this can be a string in the format IP[:id], e.g. "IP" or "IP:1".
+
+ l4_offset : offset in bytes to UDP/TCP header
+
+ l4_type : CTRexVmInsFixHwCs.L4_TYPE_UDP or CTRexVmInsFixHwCs.L4_TYPE_TCP
+
+ see full example stl/syn_attack_fix_cs_hw.py
+
+ .. code-block:: python
+
+ # Example2
+
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ # by offset
+ STLVmFixChecksumHw(l3_offset=14,l4_offset=14+20,l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
+
+ # in case of scapy packet can be defined by header name
+ STLVmFixChecksumHw(l3_offset="IP",l4_offset="UDP",l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
+
+ # the string for the second "IP" header in the packet is IP:1
+ STLVmFixChecksumHw(l3_offset="IP:1",l4_offset="UDP",l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
+
+ """
+
+ super(STLVmFixChecksumHw, self).__init__()
+ self.l3_offset = l3_offset; # could be a name of offset
+ self.l4_offset = l4_offset; # could be a name of offset
+ self.l4_type = l4_type
+
+
+ def get_obj (self):
+ return CTRexVmInsFixHwCs(self.l2_len,self.l3_len,self.l4_type);
+
+ def compile(self,parent):
+ if type(self.l3_offset)==str:
+ self.l2_len = parent._pkt_layer_offset(self.l3_offset);
+ else:
+ self.l2_len = self.l3_offset; # a numeric offset is used as-is
+ if type(self.l4_offset)==str:
+ self.l4_offset = parent._pkt_layer_offset(self.l4_offset);
+
+ assert self.l4_offset >= self.l2_len+8, 'l4_offset should be at least 8 bytes beyond l3_offset'
+ self.l3_len = self.l4_offset - self.l2_len;
+
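+# A minimal sketch (an editor's illustration, not part of the original API):
+# rewriting UDP.sport with a flow variable and letting the NIC fix both the
+# IPv4 and UDP checksums in hardware. Layer names are resolved to offsets at
+# compile time; CTRexVmInsFixHwCs is assumed available in this module, as in
+# the docstring above.
+def _example_hw_checksum_vm ():
+ return STLScVmRaw([ STLVmFlowVar(name = "sport", min_value = 1025,
+ max_value = 65535, size = 2, op = "inc"),
+ STLVmWrFlowVar(fv_name = "sport", pkt_offset = "UDP.sport"),
+ STLVmFixChecksumHw(l3_offset = "IP", l4_offset = "UDP",
+ l4_type = CTRexVmInsFixHwCs.L4_TYPE_UDP) ])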
+
+class STLVmFixIpv4(CTRexVmDescBase):
+ def __init__(self, offset):
+ """
+ Fix IPv4 header checksum. Use this if the packet header has changed and it is necessary to change the checksum.
+
+ :parameters:
+ offset : uint16_t or string
+ **IPv4 header** offset from packet start. It is **not** the offset of the checksum field itself.
+ For a Scapy packet, this can be a string in the format IP[:id], e.g. "IP" or "IP:1".
+
+ .. code-block:: python
+
+ # Example2
+
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ # by offset
+ STLVmFixIpv4(offset=14)
+
+ # in case of scapy packet can be defined by header name
+ STLVmFixIpv4(offset="IP")
+
+ # string for second "IP" header in the packet is IP:1
+ STLVmFixIpv4(offset="IP:1")
+
+ """
+
+ super(STLVmFixIpv4, self).__init__()
+ self.offset = offset; # could be a name of offset
+
+ def get_obj (self):
+ return CTRexVmInsFixIpv4(self.offset);
+
+ def compile(self,parent):
+ if type(self.offset)==str:
+ self.offset = parent._pkt_layer_offset(self.offset);
+
+class STLVmWrFlowVar(CTRexVmDescBase):
+ def __init__(self, fv_name, pkt_offset, offset_fixup=0, add_val=0, is_big=True):
+ """
+ Write a stream variable into a packet field.
+ The write position is determined by the packet offset + offset fixup. The size of the write is determined by the stream variable.
+ Example: Offset 10, fixup 0, variable size 4. This function writes at 10, 11, 12, and 13.
+
+ For information about changing the write size, offset, or fixup, see the `STLVmWrMaskFlowVar` command.
+ The field can be given by name in the following format: ``header[:id].field``.
+
+
+ :parameters:
+ fv_name : string
+ Stream variable to write to a packet offset.
+
+ pkt_offset : string or int
+ Name of the field or offset in bytes from packet start.
+
+ offset_fixup : int
+ Number of bytes to move forward. If negative, move backward.
+
+ add_val : int
+ Value to add to the stream variable before writing it to the packet field. Can be used as a constant offset.
+
+ is_big : bool
+ How to write the variable to the packet. True=big-endian, False=little-endian
+
+ .. code-block:: python
+
+ # Example3
+
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+
+ # write to ip.src offset
+ STLVmWrFlowVar (fv_name="tuple", pkt_offset= "IP.src" )
+
+ # packet offset given directly as a number
+ STLVmWrFlowVar (fv_name="tuple", pkt_offset= 26 )
+
+ # add l3_len_fix before writing fv_rand into IP.len field
+ STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "IP.len", add_val=l3_len_fix)
+
+ """
+
+ super(STLVmWrFlowVar, self).__init__()
+ self.name =fv_name
+ validate_type('fv_name', fv_name, str)
+ self.offset_fixup =offset_fixup
+ validate_type('offset_fixup', offset_fixup, int)
+ self.pkt_offset =pkt_offset
+ self.add_val =add_val
+ validate_type('add_val', add_val, int)
+ self.is_big =is_big;
+ validate_type('is_big', is_big, bool)
+
+ def get_var_ref (self):
+ return self.name
+
+ def get_obj (self):
+ return CTRexVmInsWrFlowVar(self.name,self.pkt_offset+self.offset_fixup,self.add_val,self.is_big)
+
+ def compile(self,parent):
+ if type(self.pkt_offset)==str:
+ t=parent._name_to_offset(self.pkt_offset)
+ self.pkt_offset = t[0]
+
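+# Illustrative note (editor's addition): for tunneled packets the
+# "header[:id].field" notation selects a specific occurrence of a layer, e.g.
+#
+#   STLVmWrFlowVar(fv_name = "ip_src", pkt_offset = "IP:1.src")  # inner IP header
+#
+# where "ip_src" is a hypothetical variable defined earlier in the same VM.
+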
+class STLVmWrMaskFlowVar(CTRexVmDescBase):
+ def __init__(self, fv_name, pkt_offset, pkt_cast_size=1, mask=0xff, shift=0, add_value=0, offset_fixup=0, is_big=True):
+
+ """
+ Write a stream variable into a packet field with some operations.
+ Using this instruction, the variable size and the field can have different sizes.
+
+ Pseudocode of this instruction::
+
+ uint32_t val=(cast_to_size)rd_from_variable("name") # read flow-var
+ val+=m_add_value # add value
+
+ if (m_shift>0) { # shift
+ val=val<<m_shift
+ }else{
+ if (m_shift<0) {
+ val=val>>(-m_shift)
+ }
+ }
+
+ pkt_val=rd_from_pkt(pkt_offset) # RMW to the packet
+ pkt_val = (pkt_val & ~m_mask) | (val & m_mask)
+ wr_to_pkt(pkt_offset,pkt_val)
+
+
+ :parameters:
+ fv_name : string
+ The stream variable name to write to a packet field
+
+ pkt_cast_size : uint8_t
+ The size in bytes of the packet field
+
+
+ mask : uint32_t
+ Mask for the write. Bits set to 1 are written; bits set to 0 are left unchanged.
+
+ shift : uint8_t
+ Number of bits to shift. Positive values shift left; negative values shift right.
+
+ pkt_offset : string or int
+ Name of the field, or offset in bytes from packet start.
+
+ offset_fixup : int
+ Number of bytes to move forward. If negative, move backward.
+
+ add_value : int
+ Value to add to the stream variable before writing it to the packet field. Can be used as a constant offset.
+
+ is_big : bool
+ How to write the variable to the packet. True=big-endian, False=little-endian
+
+ Example 1 - Cast from uint16_t (var) to uint8_t (pkt)::
+
+
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src",
+ min_value=1,
+ max_value=30,
+ size=2,
+ op="dec",step=1),
+ STLVmWrMaskFlowVar(fv_name="mac_src",
+ pkt_offset= 11,
+ pkt_cast_size=1,
+ mask=0xff) # mask command ->write it as one byte
+ ]
+ )
+
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+
+ Example 2 - Change MSB of uint16_t variable::
+
+
+ vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src",
+ min_value=1,
+ max_value=30,
+ size=2, op="dec",step=1),
+ STLVmWrMaskFlowVar(fv_name="mac_src",
+ pkt_offset= 10,
+ pkt_cast_size=2,
+ mask=0xff00,
+ shift=8) # take the var, shift it left by 8 (x256), write only the MSB
+ ]
+ )
+
+
+
+ Example 3 - Every 2 packets, change the MAC (shift right)::
+
+ vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src",
+ min_value=1,
+ max_value=30,
+ size=2, op="dec",step=1),
+ STLVmWrMaskFlowVar(fv_name="mac_src",
+ pkt_offset= 10,
+ pkt_cast_size=1,
+ mask=0x1,
+ shift=-1) # take mac_src>>1 and write only the LSB; the written bit changes once every two packets
+ ]
+ )
+
+
+ """
+
+ super(STLVmWrMaskFlowVar, self).__init__()
+ self.name =fv_name
+ validate_type('fv_name', fv_name, str)
+ self.offset_fixup =offset_fixup
+ validate_type('offset_fixup', offset_fixup, int)
+ self.pkt_offset =pkt_offset
+ self.pkt_cast_size =pkt_cast_size
+ validate_type('pkt_cast_size', pkt_cast_size, int)
+ if not (pkt_cast_size in [1,2,4]):
+ raise CTRexPacketBuildException(-10,"not valid cast size");
+
+ self.mask = mask
+ validate_type('mask', mask, int)
+ self.shift = shift
+ validate_type('shift', shift, int)
+ self.add_value = add_value
+ validate_type('add_value', add_value, int)
+
+ self.is_big =is_big;
+ validate_type('is_big', is_big, bool)
+
+ def get_var_ref (self):
+ return self.name
+
+ def get_obj (self):
+ return CTRexVmInsWrMaskFlowVar(self.name,self.pkt_offset+self.offset_fixup,self.pkt_cast_size,self.mask,self.shift,self.add_value,self.is_big)
+
+ def compile(self,parent):
+ if type(self.pkt_offset)==str:
+ t=parent._name_to_offset(self.pkt_offset)
+ self.pkt_offset = t[0]
+
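+# Worked example of the pseudocode above (editor's illustration): with a
+# variable value of 0x1234, shift=8, mask=0xff00 and a current packet value of
+# 0xaabb at pkt_offset:
+#   val     = 0x1234 << 8 = 0x123400
+#   pkt_val = (0xaabb & ~0xff00) | (0x123400 & 0xff00) = 0x00bb | 0x3400 = 0x34bb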
+
+class STLVmTrimPktSize(CTRexVmDescBase):
+ """
+ Trim the packet size according to the value of a stream variable. This instruction only changes the total packet size; it does not repair the fields to match the new size.
+
+
+ :parameters:
+ fv_name : string
+ Stream variable name. The value of this variable is the new total packet size.
+
+
+ For Example::
+
+ def create_stream (self):
+ # pkt
+ p_l2 = Ether();
+ p_l3 = IP(src="16.0.0.1",dst="48.0.0.1")
+ p_l4 = UDP(dport=12,sport=1025)
+ pyld_size = max(0, self.max_pkt_size_l3 - len(p_l3/p_l4));
+ base_pkt = p_l2/p_l3/p_l4/('\x55'*(pyld_size))
+
+ l3_len_fix =-(len(p_l2));
+ l4_len_fix =-(len(p_l2/p_l3));
+
+
+ # vm
+ vm = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", min_value=64,
+ max_value=len(base_pkt),
+ size=2, op="inc"),
+
+ STLVmTrimPktSize("fv_rand"), # change total packet size <<<
+
+ STLVmWrFlowVar(fv_name="fv_rand",
+ pkt_offset= "IP.len",
+ add_val=l3_len_fix), # fix ip len
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+
+ STLVmWrFlowVar(fv_name="fv_rand",
+ pkt_offset= "UDP.len",
+ add_val=l4_len_fix) # fix udp len
+ ]
+ )
+
+ pkt = STLPktBuilder(pkt = base_pkt,
+ vm = vm)
+
+ return STLStream(packet = pkt,
+ mode = STLTXCont())
+
+
+ """
+
+ def __init__(self,fv_name):
+ super(STLVmTrimPktSize, self).__init__()
+ self.name = fv_name
+ validate_type('fv_name', fv_name, str)
+
+ def get_var_ref (self):
+ return self.name
+
+ def get_obj (self):
+ return CTRexVmInsTrimPktSize(self.name)
+
+
+
+class STLVmTupleGen(CTRexVmDescBase):
+ def __init__(self,name, ip_min="0.0.0.1", ip_max="0.0.0.10", port_min=1025, port_max=65535, limit_flows=100000, flags=0):
+ """
+ Generate a struct with two dependent variables: ``var_name.ip`` as uint32_t and ``var_name.port`` as uint16_t.
+ When the ip variable reaches its maximum value, the port variable is incremented and the ip wraps back to its minimum.
+
+ For:
+
+ * ip_min = 10.0.0.1
+ * ip_max = 10.0.0.5
+ * port_min = 1025
+ * port_max = 1028
+ * limit_flows = 10
+
+ The result:
+
+ +------------+------------+-----------+
+ | ip | port | flow_id |
+ +============+============+===========+
+ | 10.0.0.1 | 1025 | 1 |
+ +------------+------------+-----------+
+ | 10.0.0.2 | 1025 | 2 |
+ +------------+------------+-----------+
+ | 10.0.0.3 | 1025 | 3 |
+ +------------+------------+-----------+
+ | 10.0.0.4 | 1025 | 4 |
+ +------------+------------+-----------+
+ | 10.0.0.5 | 1025 | 5 |
+ +------------+------------+-----------+
+ | 10.0.0.1 | 1026 | 6 |
+ +------------+------------+-----------+
+ | 10.0.0.2 | 1026 | 7 |
+ +------------+------------+-----------+
+ | 10.0.0.3 | 1026 | 8 |
+ +------------+------------+-----------+
+ | 10.0.0.4 | 1026 | 9 |
+ +------------+------------+-----------+
+ | 10.0.0.5 | 1026 | 10 |
+ +------------+------------+-----------+
+ | 10.0.0.1 | 1025 | 1 |
+ +------------+------------+-----------+
+
+
+ :parameters:
+ name : string
+ Name of the stream struct.
+
+ ip_min : string or int
+ Min value of the ip value. Number or IPv4 format.
+
+ ip_max : string or int
+ Max value of the ip value. Number or IPv4 format.
+
+ port_min : int
+ Min value of port variable.
+
+ port_max : int
+ Max value of port variable.
+
+ limit_flows : int
+ Limit of number of flows.
+
+ flags : int
+
+ ="0.0.0.10", port_min=1025, port_max=65535, limit_flows=100000, flags=0
+
+ .. code-block:: python
+
+ # Example5
+
+ def create_stream (self):
+ # pkt
+ p_l2 = Ether();
+ p_l3 = IP(src="16.0.0.1",dst="48.0.0.1")
+ p_l4 = UDP(dport=12,sport=1025)
+ pyld_size = max(0, self.max_pkt_size_l3 - len(p_l3/p_l4));
+ base_pkt = p_l2/p_l3/p_l4/('\x55'*(pyld_size))
+
+ l3_len_fix =-(len(p_l2));
+ l4_len_fix =-(len(p_l2/p_l3));
+
+
+ # vm
+ vm = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", min_value=64,
+ max_value=len(base_pkt),
+ size=2, op="inc"),
+
+ STLVmTrimPktSize("fv_rand"), # change total packet size <<<
+
+ STLVmWrFlowVar(fv_name="fv_rand",
+ pkt_offset= "IP.len",
+ add_val=l3_len_fix), # fix ip len
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+
+ STLVmWrFlowVar(fv_name="fv_rand",
+ pkt_offset= "UDP.len",
+ add_val=l4_len_fix) # fix udp len
+ ]
+ )
+
+ pkt = STLPktBuilder(pkt = base_pkt,
+ vm = vm)
+
+ return STLStream(packet = pkt,
+ mode = STLTXCont())
+
+
+ """
+
+ super(STLVmTupleGen, self).__init__()
+ self.name = name
+ validate_type('name', name, str)
+ self.ip_min = convert_val(ip_min);
+ self.ip_max = convert_val(ip_max);
+ self.port_min = port_min;
+ check_for_int (port_min)
+ self.port_max = port_max;
+ check_for_int(port_max)
+ self.limit_flows = limit_flows;
+ check_for_int(limit_flows)
+ self.flags =flags;
+ check_for_int(flags)
+
+ def get_var_name(self):
+ return [self.name+".ip",self.name+".port"]
+
+ def get_obj (self):
+ return CTRexVmInsTupleGen(self.name, self.ip_min, self.ip_max, self.port_min, self.port_max, self.limit_flows, self.flags);
+
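+# A minimal sketch (an editor's illustration, not part of the original API):
+# generating the dependent (ip, port) tuples from the table above and writing
+# them into the packet; "tuple.ip" and "tuple.port" are the struct members
+# created by the generator.
+def _example_tuple_gen_vm ():
+ return STLScVmRaw([ STLVmTupleGen(name = "tuple", ip_min = "16.0.0.1",
+ ip_max = "16.0.0.5", port_min = 1025,
+ port_max = 1028, limit_flows = 10),
+ STLVmWrFlowVar(fv_name = "tuple.ip", pkt_offset = "IP.src"),
+ STLVmWrFlowVar(fv_name = "tuple.port", pkt_offset = "UDP.sport"),
+ STLVmFixIpv4(offset = "IP") ])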
+
+################################################################################################
+
+class STLPktBuilder(CTrexPktBuilderInterface):
+
+ def __init__(self, pkt = None, pkt_buffer = None, vm = None, path_relative_to_profile = False, build_raw = False, remove_fcs = True):
+ """
+
+ This class defines a method for building a template packet and Field Engine using the Scapy package.
+ Using this class the user can also define how TRex will handle the packet by specifying the Field engine settings.
+ The pkt can be a Scapy pkt or pcap file name.
+ If using a pcap file, and path_relative_to_profile is True, then the function loads the pcap file from a path relative to the profile.
+
+
+ .. code-block:: python
+
+ # Example6
+
+ # packet is scapy
+ STLPktBuilder( pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/(10*'x') )
+
+
+ # packet is taken from pcap file relative to python
+ STLPktBuilder( pkt ="stl/yaml/udp_64B_no_crc.pcap")
+
+ # packet is taken from pcap file relative to profile file
+ STLPktBuilder( pkt ="stl/yaml/udp_64B_no_crc.pcap",
+ path_relative_to_profile = True )
+
+
+ vm = STLScVmRaw( [ STLVmTupleGen ( ip_min="16.0.0.1", ip_max="16.0.0.2",
+ port_min=1025, port_max=65535,
+ name="tuple"), # define tuple gen
+
+ STLVmWrFlowVar (fv_name="tuple.ip", pkt_offset= "IP.src" ), # write ip to packet IP.src
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+ STLVmWrFlowVar (fv_name="tuple.port", pkt_offset= "UDP.sport" ) #write udp.port
+ ]
+ )
+
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ STLPktBuilder(pkt = base_pkt/pad, vm= vm)
+
+
+ :parameters:
+
+ pkt : string or Scapy object
+ A Scapy packet object or a pcap file name.
+
+ pkt_buffer : bytes
+ Packet as buffer.
+
+ vm : list or an instance of :class:`trex_stl_lib.trex_stl_packet_builder_scapy.STLScVmRaw`
+ List of instructions to manipulate packet fields.
+
+ path_relative_to_profile : bool
+ If pkt is a pcap file, determines whether to load it relative to profile file.
+
+ build_raw : bool
+ If a buffer is specified (by pkt_buffer), determines whether to also build a Scapy object from it. Useful when field offsets must be resolved by name via Scapy.
+
+ remove_fcs : bool
+ If a buffer is specified (by pkt_buffer), determines whether to remove FCS.
+
+
+
+ """
+ super(STLPktBuilder, self).__init__()
+
+ validate_type('pkt', pkt, (type(None), str, Packet))
+ validate_type('pkt_buffer', pkt_buffer, (type(None), bytes))
+
+ self.pkt = None # as input
+ self.pkt_raw = None # from raw pcap file
+ self.vm_scripts = [] # list of high level instructions
+ self.vm_low_level = None
+ self.is_pkt_built = False
+ self.metadata=""
+ self.path_relative_to_profile = path_relative_to_profile
+ self.remove_fcs = remove_fcs
+ self.is_binary_source = pkt_buffer != None
+
+
+ if pkt != None and pkt_buffer != None:
+ raise CTRexPacketBuildException(-15, "Packet builder cannot be provided with both pkt and pkt_buffer.")
+
+ # process packet
+ if pkt != None:
+ self.set_packet(pkt)
+
+ elif pkt_buffer != None:
+ self.set_pkt_as_str(pkt_buffer)
+
+ # process VM
+ if vm != None:
+ if not isinstance(vm, (STLScVmRaw, list)):
+ raise CTRexPacketBuildException(-14, "Bad value for variable vm.")
+
+ self.add_command(vm if isinstance(vm, STLScVmRaw) else STLScVmRaw(vm))
+
+ # raw source build to see MAC presence/ fields offset by name in VM
+ if build_raw and self.pkt_raw and not self.pkt:
+ self.__lazy_build_packet()
+
+ # if we have packet and VM - compile now
+ if (self.pkt or self.pkt_raw) and (self.vm_scripts):
+ self.compile()
+
+
+ def dump_vm_data_as_yaml(self):
+ print(yaml.dump(self.get_vm_data(), default_flow_style=False))
+
+ def get_vm_data(self):
+ """
+ Dumps the instructions
+
+ :parameters:
+ None
+
+ :return:
+ + json object of instructions
+
+ :raises:
+ + :exc:`AssertionError`, in case VM is not compiled (is None).
+ """
+
+ assert self.vm_low_level is not None, 'vm_low_level is None, please use compile()'
+
+ return self.vm_low_level.get_json()
+
+ def dump_pkt(self, encode = True):
+ """
+ Dumps the packet as an array of bytes (each item a value in the range 0-255)
+
+ :parameters:
+ encode : bool
+ Encode using base64. (disable for debug)
+
+ Default: **True**
+
+ :return:
+ + packet representation as array of bytes
+
+ :raises:
+ + :exc:`CTRexPacketBuildException`, in case the packet is empty.
+
+ """
+ pkt_buf = self._get_pkt_as_str()
+ return {'binary': base64.b64encode(pkt_buf).decode() if encode else pkt_buf,
+ 'meta': self.metadata}
+
+
+ def dump_pkt_to_pcap(self, file_path):
+ wrpcap(file_path, self._get_pkt_as_str())
+
+ def add_command (self, script):
+ self.vm_scripts.append(script.clone());
+
+ def dump_scripts (self):
+ self.vm_low_level.dump_as_yaml()
+
+ def dump_as_hex (self):
+ pkt_buf = self._get_pkt_as_str()
+ print(hexdump(pkt_buf))
+
+ def pkt_layers_desc (self):
+ """
+ Return the layer description in this format: IP:TCP:Payload
+
+ """
+ pkt_buf = self._get_pkt_as_str()
+ return self.pkt_layers_desc_from_buffer(pkt_buf)
+
+ @staticmethod
+ def pkt_layers_desc_from_buffer (pkt_buf):
+ scapy_pkt = Ether(pkt_buf);
+ pkt_utl = CTRexScapyPktUtl(scapy_pkt);
+ return pkt_utl.get_pkt_layers()
+
+
+ def set_pkt_as_str (self, pkt_buffer):
+ validate_type('pkt_buffer', pkt_buffer, bytes)
+ self.pkt_raw = pkt_buffer
+
+
+ def set_pcap_file (self, pcap_file):
+ """
+ Load raw pcap file into a buffer. Loads only the first packet.
+
+ :parameters:
+ pcap_file : file_name
+
+ :raises:
+ + :exc:`CTRexPacketBuildException`, if the pcap file contains no packets.
+
+ """
+ f_path = self._get_pcap_file_path (pcap_file)
+
+ p=RawPcapReader(f_path)
+ was_set = False
+
+ for pkt in p:
+ was_set=True;
+ self.pkt_raw = pkt[0]
+ break
+ if not was_set :
+ raise CTRexPacketBuildException(-14, "No buffer inside the pcap file {0}".format(f_path))
+
+ def to_pkt_dump(self):
+ p = self.pkt
+ if p and isinstance(p, Packet):
+ p.show2();
+ hexdump(p);
+ return;
+ p = self.pkt_raw;
+ if p:
+ scapy_pkt = Ether(p);
+ scapy_pkt.show2();
+ hexdump(p);
+
+
+ def set_packet (self, pkt):
+ """
+ Set the packet. Accepts a Scapy packet object or a pcap file name.
+
+ Example::
+
+ pkt =Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/('x'*10)
+
+ """
+ if isinstance(pkt, Packet):
+ self.pkt = pkt;
+ else:
+ if isinstance(pkt, str):
+ self.set_pcap_file(pkt)
+ else:
+ raise CTRexPacketBuildException(-14, "bad packet" )
+
+ def is_default_src_mac (self):
+ if self.is_binary_source:
+ return True
+ p = self.pkt
+ if isinstance(p, Packet):
+ if isinstance(p,Ether):
+ if 'src' in p.fields :
+ return False
+ return True
+
+ def is_default_dst_mac (self):
+ if self.is_binary_source:
+ return True
+ p = self.pkt
+ if isinstance(p, Packet):
+ if isinstance(p,Ether):
+ if 'dst' in p.fields :
+ return False
+ return True
+
+ def compile (self):
+ if self.pkt == None and self.pkt_raw == None:
+ raise CTRexPacketBuildException(-14, "Packet is empty")
+
+
+ self.vm_low_level = CTRexVmEngine()
+
+ # compile the VM
+ for sc in self.vm_scripts:
+ if isinstance(sc, STLScVmRaw):
+ self._compile_raw(sc)
+
+ def get_pkt_len (self):
+ if self.pkt:
+ return len(self.pkt)
+ elif self.pkt_raw:
+ return len(self.pkt_raw)
+ else:
+ raise CTRexPacketBuildException(-14, "Packet is empty")
+
+ ####################################################
+ # private
+
+
+ def _get_pcap_file_path (self,pcap_file_name):
+ f_path = pcap_file_name
+ if os.path.isabs(pcap_file_name):
+ f_path = pcap_file_name
+ else:
+ if self.path_relative_to_profile:
+ p = self._get_path_relative_to_profile () # loader
+ if p :
+ f_path=os.path.abspath(os.path.join(os.path.dirname(p),pcap_file_name))
+
+ return f_path
+
+
+ def _get_path_relative_to_profile (self):
+ p = inspect.stack()
+ for obj in p:
+ if obj[3]=='get_streams':
+ return obj[1]
+ return None
+
+ def _compile_raw (self,obj):
+
+ # make sure each variable is defined only once
+ vars={};
+
+ # add each var to the dict
+ for desc in obj.commands:
+ var_names = desc.get_var_name()
+
+ if var_names :
+ for var_name in var_names:
+ if var_name in vars:
+ raise CTRexPacketBuildException(-11,("Variable %s defined twice ") % (var_name) );
+ else:
+ vars[var_name]=1
+
+ # check that every write references an existing variable
+ for desc in obj.commands:
+ var_name = desc.get_var_ref()
+ if var_name :
+ if not var_name in vars:
+ raise CTRexPacketBuildException(-11,("Variable %s does not exist ") % (var_name) );
+ desc.compile(self);
+
+ for desc in obj.commands:
+ self.vm_low_level.add_ins(desc.get_obj());
+
+ # set split_by_var
+ if obj.split_by_field :
+ validate_type('obj.split_by_field', obj.split_by_field, str)
+ self.vm_low_level.split_by_var = obj.split_by_field
+
+ #set cache size
+ if obj.cache_size :
+ validate_type('obj.cache_size', obj.cache_size, int)
+ self.vm_low_level.cache_size = obj.cache_size
+
+
+
+ # lazy packet build only on demand
+ def __lazy_build_packet (self):
+ # already built? bail out
+ if self.is_pkt_built:
+ return
+
+ # for buffer, promote to a scapy packet
+ if self.pkt_raw:
+ self.pkt = Ether(self.pkt_raw)
+ self.pkt_raw = None
+
+ # regular scapy packet
+ elif not self.pkt:
+ # should not reach here
+ raise CTRexPacketBuildException(-11, 'Empty packet')
+
+ if self.remove_fcs and self.pkt.lastlayer().name == 'Padding':
+ self.pkt.lastlayer().underlayer.remove_payload()
+
+ self.pkt.build()
+ self.is_pkt_built = True
+
+ def _pkt_layer_offset (self,layer_name):
+
+ self.__lazy_build_packet()
+
+ p_utl=CTRexScapyPktUtl(self.pkt);
+ return p_utl.get_layer_offet_by_str(layer_name)
+
+ def _name_to_offset(self,field_name):
+
+ self.__lazy_build_packet()
+
+ p_utl=CTRexScapyPktUtl(self.pkt);
+ return p_utl.get_field_offet_by_str(field_name)
+
+ def _get_pkt_as_str(self):
+
+ if self.pkt:
+ return bytes(self.pkt)
+
+ if self.pkt_raw:
+ return self.pkt_raw
+
+ raise CTRexPacketBuildException(-11, 'Empty packet');
+
+ def _add_tuple_gen(self,tuple_gen):
+
+ pass;
+
+
+def STLIPRange (src = None,
+ dst = None,
+ fix_chksum = True):
+
+ vm = []
+
+ if src:
+ vm += [
+ STLVmFlowVar(name="src", min_value = src['start'], max_value = src['end'], size = 4, op = "inc", step = src['step']),
+ STLVmWrFlowVar(fv_name="src",pkt_offset= "IP.src")
+ ]
+
+ if dst:
+ vm += [
+ STLVmFlowVar(name="dst", min_value = dst['start'], max_value = dst['end'], size = 4, op = "inc", step = dst['step']),
+ STLVmWrFlowVar(fv_name="dst",pkt_offset= "IP.dst")
+ ]
+
+ if fix_chksum:
+ vm.append( STLVmFixIpv4(offset = "IP"))
+
+
+ return vm
+
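+# A minimal usage sketch (editor's illustration): STLIPRange expects dicts
+# with 'start', 'end' and 'step' keys and returns a ready-to-use instruction
+# list; base_pkt below is a hypothetical Scapy packet.
+#
+#   vm = STLIPRange(src = {'start': '16.0.0.1', 'end': '16.0.0.254', 'step': 1})
+#   builder = STLPktBuilder(pkt = base_pkt, vm = vm)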
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
new file mode 100644
index 00000000..cec3761f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
@@ -0,0 +1,794 @@
+
+from collections import namedtuple, OrderedDict
+
+from .trex_stl_packet_builder_scapy import STLPktBuilder
+from .trex_stl_streams import STLStream
+from .trex_stl_types import *
+from . import trex_stl_stats
+from .utils.constants import FLOW_CTRL_DICT_REVERSED
+
+import base64
+import copy
+from datetime import datetime, timedelta
+
+StreamOnPort = namedtuple('StreamOnPort', ['compiled_stream', 'metadata'])
+
+########## utility ############
+def mult_to_factor (mult, max_bps_l2, max_pps, line_util):
+ if mult['type'] == 'raw':
+ return mult['value']
+
+ if mult['type'] == 'bps':
+ return mult['value'] / max_bps_l2
+
+ if mult['type'] == 'pps':
+ return mult['value'] / max_pps
+
+ if mult['type'] == 'percentage':
+ return mult['value'] / line_util
+
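+# Worked example (editor's illustration): for mult = {'type': 'bps', 'value': 1e9}
+# on a profile whose peak rate is 4e9 bps L2, the factor is 1e9 / 4e9 = 0.25,
+# i.e. the traffic is scaled to a quarter of the profile's base rate.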
+
+# describes a single port
+class Port(object):
+ STATE_DOWN = 0
+ STATE_IDLE = 1
+ STATE_STREAMS = 2
+ STATE_TX = 3
+ STATE_PAUSE = 4
+ STATE_PCAP_TX = 5
+
+ MASK_ALL = ((1 << 64) - 1)
+
+ PortState = namedtuple('PortState', ['state_id', 'state_name'])
+ STATES_MAP = {STATE_DOWN: "DOWN",
+ STATE_IDLE: "IDLE",
+ STATE_STREAMS: "IDLE",
+ STATE_TX: "TRANSMITTING",
+ STATE_PAUSE: "PAUSE",
+ STATE_PCAP_TX : "TRANSMITTING"}
+
+
+ def __init__ (self, port_id, user, comm_link, session_id, info):
+ self.port_id = port_id
+ self.state = self.STATE_IDLE
+ self.handler = None
+ self.comm_link = comm_link
+ self.transmit = comm_link.transmit
+ self.transmit_batch = comm_link.transmit_batch
+ self.user = user
+
+ self.info = dict(info)
+
+ self.streams = {}
+ self.profile = None
+ self.session_id = session_id
+ self.attr = {}
+
+ self.port_stats = trex_stl_stats.CPortStats(self)
+
+ self.next_available_id = 1
+ self.tx_stopped_ts = None
+ self.has_rx_streams = False
+
+ self.owner = ''
+ self.last_factor_type = None
+
+ # decorator to verify port is up
+ def up(func):
+ def func_wrapper(*args):
+ port = args[0]
+
+ if not port.is_up():
+ return port.err("{0} - port is down".format(func.__name__))
+
+ return func(*args)
+
+ return func_wrapper
+
+ # decorator to verify port is owned (up and acquired)
+ def owned(func):
+ def func_wrapper(*args):
+ port = args[0]
+
+ if not port.is_up():
+ return port.err("{0} - port is down".format(func.__name__))
+
+ if not port.is_acquired():
+ return port.err("{0} - port is not owned".format(func.__name__))
+
+ return func(*args)
+
+ return func_wrapper
+
+
+ # decorator to verify port is writeable (up, owned, and in a writeable state)
+ def writeable(func):
+ def func_wrapper(*args, **kwargs):
+ port = args[0]
+
+ if not port.is_up():
+ return port.err("{0} - port is down".format(func.__name__))
+
+ if not port.is_acquired():
+ return port.err("{0} - port is not owned".format(func.__name__))
+
+ if not port.is_writeable():
+ return port.err("{0} - port is not in a writeable state".format(func.__name__))
+
+ return func(*args, **kwargs)
+
+ return func_wrapper
+
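+ # Illustrative note (editor's addition): the three decorators above form a
+ # strictness ladder - @up < @owned < @writeable - and each one short-circuits
+ # the wrapped method with an RC_ERR before any RPC reaches the server, e.g.
+ #
+ # @writeable
+ # def add_streams (self, streams_list): # runs only when up, acquired and writeable
+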
+
+
+ def err(self, msg):
+ return RC_ERR("port {0} : {1}\n".format(self.port_id, msg))
+
+ def ok(self, data = ""):
+ return RC_OK(data)
+
+ def get_speed_bps (self):
+ return (self.info['speed'] * 1000 * 1000 * 1000)
+
+ def get_formatted_speed (self):
+ return "{0} Gbps".format(self.info['speed'])
+
+ def is_acquired(self):
+ return (self.handler != None)
+
+ def is_up (self):
+ return (self.state != self.STATE_DOWN)
+
+ def is_active(self):
+ return (self.state == self.STATE_TX ) or (self.state == self.STATE_PAUSE) or (self.state == self.STATE_PCAP_TX)
+
+ def is_transmitting (self):
+ return (self.state == self.STATE_TX) or (self.state == self.STATE_PCAP_TX)
+
+ def is_paused (self):
+ return (self.state == self.STATE_PAUSE)
+
+ def is_writeable (self):
+ # operations on port can be done on state idle or state streams
+ return ((self.state == self.STATE_IDLE) or (self.state == self.STATE_STREAMS))
+
+ def get_owner (self):
+ if self.is_acquired():
+ return self.user
+ else:
+ return self.owner
+
+ def __allocate_stream_id (self):
+ id = self.next_available_id
+ self.next_available_id += 1
+ return id
+
+
+ # take the port
+ @up
+ def acquire(self, force = False, sync_streams = True):
+ params = {"port_id": self.port_id,
+ "user": self.user,
+ "session_id": self.session_id,
+ "force": force}
+
+ rc = self.transmit("acquire", params)
+ if not rc:
+ return self.err(rc.err())
+
+ self.handler = rc.data()
+
+ if sync_streams:
+ return self.sync_streams()
+ else:
+ return self.ok()
+
+
+ # sync all the streams with the server
+ @up
+ def sync_streams (self):
+ params = {"port_id": self.port_id}
+
+ rc = self.transmit("get_all_streams", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ for k, v in rc.data()['streams'].items():
+ self.streams[k] = {'next_id': v['next_stream_id'],
+ 'pkt' : base64.b64decode(v['packet']['binary']),
+ 'mode' : v['mode']['type'],
+ 'rate' : STLStream.get_rate_from_field(v['mode']['rate'])}
+ return self.ok()
+
+ # release the port
+ @up
+ def release(self):
+ params = {"port_id": self.port_id,
+ "handler": self.handler}
+
+ rc = self.transmit("release", params)
+
+ if rc.good():
+
+ self.handler = None
+ self.owner = ''
+
+ return self.ok()
+ else:
+ return self.err(rc.err())
+
+
+
+ @up
+ def sync(self):
+
+ params = {"port_id": self.port_id}
+
+ rc = self.transmit("get_port_status", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ # sync the port
+ port_state = rc.data()['state']
+
+ if port_state == "DOWN":
+ self.state = self.STATE_DOWN
+ elif port_state == "IDLE":
+ self.state = self.STATE_IDLE
+ elif port_state == "STREAMS":
+ self.state = self.STATE_STREAMS
+ elif port_state == "TX":
+ self.state = self.STATE_TX
+ elif port_state == "PAUSE":
+ self.state = self.STATE_PAUSE
+ elif port_state == "PCAP_TX":
+ self.state = self.STATE_PCAP_TX
+ else:
+ raise Exception("port {0}: bad state received from server '{1}'".format(self.port_id, port_state))
+
+ self.owner = rc.data()['owner']
+
+ self.next_available_id = int(rc.data()['max_stream_id']) + 1
+
+ # attributes
+ self.attr = rc.data()['attr']
+ if 'speed' in rc.data():
+ self.info['speed'] = rc.data()['speed'] // 1000
+
+ return self.ok()
+
+
+
+ # add streams
+ @writeable
+ def add_streams (self, streams_list):
+
+ # listify
+ streams_list = streams_list if isinstance(streams_list, list) else [streams_list]
+
+ lookup = {}
+
+ # allocate IDs
+ for stream in streams_list:
+
+ # allocate stream id
+ stream_id = stream.get_id() if stream.get_id() is not None else self.__allocate_stream_id()
+ if stream_id in self.streams:
+ return self.err('Stream ID: {0} already exists'.format(stream_id))
+
+ # name
+ name = stream.get_name() if stream.get_name() is not None else id(stream)
+ if name in lookup:
+ return self.err("multiple streams with duplicate name: '{0}'".format(name))
+ lookup[name] = stream_id
+
+ batch = []
+ for stream in streams_list:
+
+ name = stream.get_name() if stream.get_name() is not None else id(stream)
+ stream_id = lookup[name]
+ next_id = -1
+
+ next = stream.get_next()
+ if next:
+ if not next in lookup:
+ return self.err("stream dependency error - unable to find '{0}'".format(next))
+ next_id = lookup[next]
+
+ stream_json = stream.to_json()
+ stream_json['next_stream_id'] = next_id
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "stream_id": stream_id,
+ "stream": stream_json}
+
+ cmd = RpcCmdData('add_stream', params, 'core')
+ batch.append(cmd)
+
+
+ rc = self.transmit_batch(batch)
+
+ ret = RC()
+ for i, single_rc in enumerate(rc):
+ if single_rc.rc:
+ stream_id = batch[i].params['stream_id']
+ next_id = batch[i].params['stream']['next_stream_id']
+ self.streams[stream_id] = {'next_id' : next_id,
+ 'pkt' : streams_list[i].get_pkt(),
+ 'mode' : streams_list[i].get_mode(),
+ 'rate' : streams_list[i].get_rate(),
+ 'has_flow_stats' : streams_list[i].has_flow_stats()}
+
+ ret.add(RC_OK(data = stream_id))
+
+ self.has_rx_streams = self.has_rx_streams or streams_list[i].has_flow_stats()
+
+ else:
+ ret.add(RC(*single_rc))
+
+ self.state = self.STATE_STREAMS if (len(self.streams) > 0) else self.STATE_IDLE
+
+ return ret if ret else self.err(str(ret))
+
+
+
+ # remove stream from port
+ @writeable
+ def remove_streams (self, stream_id_list):
+
+ # single element to list
+ stream_id_list = stream_id_list if isinstance(stream_id_list, list) else [stream_id_list]
+
+ # verify existence
+ for stream_id in stream_id_list:
+ if stream_id not in self.streams:
+ return self.err("stream {0} does not exist".format(stream_id))
+
+ batch = []
+
+ for stream_id in stream_id_list:
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "stream_id": stream_id}
+
+ cmd = RpcCmdData('remove_stream', params, 'core')
+ batch.append(cmd)
+
+
+ rc = self.transmit_batch(batch)
+ for i, single_rc in enumerate(rc):
+ if single_rc:
+ id = batch[i].params['stream_id']
+ del self.streams[id]
+
+ self.state = self.STATE_STREAMS if (len(self.streams) > 0) else self.STATE_IDLE
+
+ # recheck if any RX stats streams present on the port
+ self.has_rx_streams = any([stream['has_flow_stats'] for stream in self.streams.values()])
+
+ return self.ok() if rc else self.err(rc.err())
+
+
+ # remove all the streams
+ @writeable
+ def remove_all_streams (self):
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("remove_all_streams", params)
+ if not rc:
+ return self.err(rc.err())
+
+ self.streams = {}
+
+ self.state = self.STATE_IDLE
+ self.has_rx_streams = False
+
+ return self.ok()
+
+
+ # get a specific stream
+ def get_stream (self, stream_id):
+ if stream_id in self.streams:
+ return self.streams[stream_id]
+ else:
+ return None
+
+ def get_all_streams (self):
+ return self.streams
+
+
+ @writeable
+ def start (self, mul, duration, force, mask):
+
+ if self.state == self.STATE_IDLE:
+ return self.err("unable to start traffic - no streams attached to port")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "mul": mul,
+ "duration": duration,
+ "force": force,
+ "core_mask": mask if mask is not None else self.MASK_ALL}
+
+ # must set this before transmitting to avoid a race with the async response
+ last_state = self.state
+ self.state = self.STATE_TX
+
+ rc = self.transmit("start_traffic", params)
+
+ if rc.bad():
+ self.state = last_state
+ return self.err(rc.err())
+
+ # save this for TUI
+ self.last_factor_type = mul['type']
+
+ return self.ok()
+
+
+ # stop traffic
+ # with force ignores the cached state and sends the command
+ @owned
+ def stop (self, force = False):
+
+ # if not active and not force - nothing to do
+ if not self.is_active() and not force:
+ return self.ok()
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("stop_traffic", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.state = self.STATE_STREAMS
+ self.last_factor_type = None
+
+ # timestamp for last tx
+ self.tx_stopped_ts = datetime.now()
+
+ return self.ok()
+
+
+ # return True if port has any stream configured with RX stats
+ def has_rx_enabled (self):
+ return self.has_rx_streams
+
+
+ # return true if rx_delay_ms has passed since the last port stop
+ def has_rx_delay_expired (self, rx_delay_ms):
+ assert(self.has_rx_enabled())
+
+ # if active - it's not safe to remove RX filters
+ if self.is_active():
+ return False
+
+ # either no timestamp present or time has already passed
+ return not self.tx_stopped_ts or (datetime.now() - self.tx_stopped_ts) > timedelta(milliseconds = rx_delay_ms)
+
+
+ @writeable
+ def remove_rx_filters (self):
+ assert(self.has_rx_enabled())
+
+ if self.state == self.STATE_IDLE:
+ return self.ok()
+
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("remove_rx_filters", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ return self.ok()
+
+ @owned
+ def pause (self):
+
+ if (self.state == self.STATE_PCAP_TX) :
+ return self.err("pause is not supported during PCAP TX")
+
+ if (self.state != self.STATE_TX) :
+ return self.err("port is not transmitting")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("pause_traffic", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.state = self.STATE_PAUSE
+
+ return self.ok()
+
+ @owned
+ def resume (self):
+
+ if (self.state != self.STATE_PAUSE) :
+ return self.err("port is not in pause mode")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ # only valid state after stop
+
+ rc = self.transmit("resume_traffic", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.state = self.STATE_TX
+
+ return self.ok()
+
+ @owned
+ def update (self, mul, force):
+
+ if (self.state == self.STATE_PCAP_TX) :
+ return self.err("update is not supported during PCAP TX")
+
+ if (self.state != self.STATE_TX) :
+ return self.err("port is not transmitting")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "mul": mul,
+ "force": force}
+
+ rc = self.transmit("update_traffic", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ # save this for TUI
+ self.last_factor_type = mul['type']
+
+ return self.ok()
+
+ @owned
+ def validate (self):
+
+ if (self.state == self.STATE_IDLE):
+ return self.err("no streams attached to port")
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("validate", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.profile = rc.data()
+
+ return self.ok()
+
+
+ @owned
+ def set_attr (self, attr_dict):
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "attr": attr_dict}
+
+ rc = self.transmit("set_port_attr", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+
+ #self.attr.update(attr_dict)
+
+ return self.ok()
+
+ @writeable
+ def push_remote (self, pcap_filename, ipg_usec, speedup, count, duration, is_dual, slave_handler):
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id,
+ "pcap_filename": pcap_filename,
+ "ipg_usec": ipg_usec if ipg_usec is not None else -1,
+ "speedup": speedup,
+ "count": count,
+ "duration": duration,
+ "is_dual": is_dual,
+ "slave_handler": slave_handler}
+
+ rc = self.transmit("push_remote", params)
+ if rc.bad():
+ return self.err(rc.err())
+
+ self.state = self.STATE_PCAP_TX
+ return self.ok()
+
+
+ def get_profile (self):
+ return self.profile
+
+
+ def print_profile (self, mult, duration):
+ if not self.get_profile():
+ return
+
+ rate = self.get_profile()['rate']
+ graph = self.get_profile()['graph']
+
+ print(format_text("Profile Map Per Port\n", 'underline', 'bold'))
+
+ factor = mult_to_factor(mult, rate['max_bps_l2'], rate['max_pps'], rate['max_line_util'])
+
+ print("Profile max BPS L2 (base / req): {:^12} / {:^12}".format(format_num(rate['max_bps_l2'], suffix = "bps"),
+ format_num(rate['max_bps_l2'] * factor, suffix = "bps")))
+
+ print("Profile max BPS L1 (base / req): {:^12} / {:^12}".format(format_num(rate['max_bps_l1'], suffix = "bps"),
+ format_num(rate['max_bps_l1'] * factor, suffix = "bps")))
+
+ print("Profile max PPS (base / req): {:^12} / {:^12}".format(format_num(rate['max_pps'], suffix = "pps"),
+ format_num(rate['max_pps'] * factor, suffix = "pps"),))
+
+ print("Profile line util. (base / req): {:^12} / {:^12}".format(format_percentage(rate['max_line_util']),
+ format_percentage(rate['max_line_util'] * factor)))
+
+
+ # duration
+ exp_time_base_sec = graph['expected_duration'] / (1000 * 1000)
+ exp_time_factor_sec = exp_time_base_sec / factor
+
+ # user configured a duration
+ if duration > 0:
+ if exp_time_factor_sec > 0:
+ exp_time_factor_sec = min(exp_time_factor_sec, duration)
+ else:
+ exp_time_factor_sec = duration
+
+
+ print("Duration (base / req): {:^12} / {:^12}".format(format_time(exp_time_base_sec),
+ format_time(exp_time_factor_sec)))
+ print("\n")
+
+ # generate port info
+ def get_info (self):
+ info = dict(self.info)
+
+ info['status'] = self.get_port_state_name()
+
+ if 'link' in self.attr:
+ info['link'] = 'UP' if self.attr['link']['up'] else 'DOWN'
+ else:
+ info['link'] = 'N/A'
+
+ if 'fc' in self.attr:
+ info['fc'] = FLOW_CTRL_DICT_REVERSED.get(self.attr['fc']['mode'], 'N/A')
+ else:
+ info['fc'] = 'N/A'
+
+ if 'promiscuous' in self.attr:
+ info['prom'] = "on" if self.attr['promiscuous']['enabled'] else "off"
+ else:
+ info['prom'] = "N/A"
+
+ if 'description' not in info:
+ info['description'] = "N/A"
+
+ if 'is_fc_supported' in info:
+ info['fc_supported'] = 'yes' if info['is_fc_supported'] else 'no'
+ else:
+ info['fc_supported'] = 'N/A'
+
+ if 'is_led_supported' in info:
+ info['led_change_supported'] = 'yes' if info['is_led_supported'] else 'no'
+ else:
+ info['led_change_supported'] = 'N/A'
+
+ if 'is_link_supported' in info:
+ info['link_change_supported'] = 'yes' if info['is_link_supported'] else 'no'
+ else:
+ info['link_change_supported'] = 'N/A'
+
+ if 'is_virtual' in info:
+ info['is_virtual'] = 'yes' if info['is_virtual'] else 'no'
+ else:
+ info['is_virtual'] = 'N/A'
+
+ return info
+
+
+ def get_port_state_name(self):
+ return self.STATES_MAP.get(self.state, "Unknown")
+
+ ################# stats handler ######################
+ def generate_port_stats(self):
+ return self.port_stats.generate_stats()
+
+ def generate_port_status(self):
+
+ info = self.get_info()
+
+ return {"driver": info['driver'],
+ "description": info.get('description', 'N/A')[:18],
+ "HW src mac": info['hw_macaddr'],
+ "SW src mac": info['src_macaddr'],
+ "SW dst mac": info['dst_macaddr'],
+ "PCI Address": info['pci_addr'],
+ "NUMA Node": info['numa'],
+ "--": "",
+ "---": "",
+ "link speed": "{speed} Gb/s".format(speed=info['speed']),
+ "port status": info['status'],
+ "link status": info['link'],
+ "promiscuous" : info['prom'],
+ "flow ctrl" : info['fc'],
+ }
+
+ def clear_stats(self):
+ return self.port_stats.clear_stats()
+
+
+ def get_stats (self):
+ return self.port_stats.get_stats()
+
+
+ def invalidate_stats(self):
+ return self.port_stats.invalidate()
+
+ ################# stream printout ######################
+ def generate_loaded_streams_sum(self):
+ if self.state == self.STATE_DOWN:
+ return {}
+
+ data = {}
+ for id, obj in self.streams.items():
+
+ # lazy build scapy repr.
+ if not 'pkt_type' in obj:
+ obj['pkt_type'] = STLPktBuilder.pkt_layers_desc_from_buffer(obj['pkt'])
+
+ data[id] = OrderedDict([ ('id', id),
+ ('packet_type', obj['pkt_type']),
+ ('L2 len', len(obj['pkt']) + 4),
+ ('mode', obj['mode']),
+ ('rate', obj['rate']),
+ ('next_stream', obj['next_id'] if obj['next_id'] != -1 else 'None')
+ ])
+
+ return {"streams" : OrderedDict(sorted(data.items())) }
+
+
+
+ ################# events handler ######################
+ def async_event_port_job_done (self):
+ # until thread is locked - order is important
+ self.tx_stopped_ts = datetime.now()
+ self.state = self.STATE_STREAMS
+ self.last_factor_type = None
+
+ def async_event_port_attr_changed (self, attr):
+ self.info['speed'] = attr['speed'] // 1000
+ self.attr = attr
+
+ # rest of the events are used for TUI / read only sessions
+ def async_event_port_stopped (self):
+ if not self.is_acquired():
+ self.state = self.STATE_STREAMS
+
+ def async_event_port_paused (self):
+ if not self.is_acquired():
+ self.state = self.STATE_PAUSE
+
+ def async_event_port_started (self):
+ if not self.is_acquired():
+ self.state = self.STATE_TX
+
+ def async_event_port_resumed (self):
+ if not self.is_acquired():
+ self.state = self.STATE_TX
+
+ def async_event_acquired (self, who):
+ self.handler = None
+ self.owner = who
+
+ def async_event_released (self):
+ self.owner = ''
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
new file mode 100644
index 00000000..540bba68
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
@@ -0,0 +1,620 @@
+# -*- coding: utf-8 -*-
+
+"""
+Itay Marom
+Cisco Systems, Inc.
+
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+# simulator can be run as a standalone
+from . import trex_stl_ext
+from .trex_stl_exceptions import *
+from .trex_stl_streams import *
+from .utils import parsing_opts
+from .trex_stl_client import STLClient
+from .utils import pcap
+from trex_stl_lib.trex_stl_packet_builder_scapy import RawPcapReader, RawPcapWriter, hexdump
+
+from random import randint
+from random import choice as rand_choice
+
+from yaml import YAMLError
+
+import re
+import json
+import argparse
+import tempfile
+import subprocess
+import os
+from operator import itemgetter
+
+class BpSimException(Exception):
+ pass
+
+
+# stateless simulation
+class STLSim(object):
+ MASK_ALL = ((1 << 64) - 1)
+
+ def __init__ (self, bp_sim_path, handler = 0, port_id = 0, api_h = "dummy"):
+
+ self.bp_sim_path = os.path.abspath(bp_sim_path)
+ if not os.path.exists(self.bp_sim_path):
+ raise STLError('BP sim path %s does not exist' % self.bp_sim_path)
+
+ # dummies
+ self.handler = handler
+ self.api_h = api_h
+ self.port_id = port_id
+
+
+ def generate_start_cmd (self, mult = "1", force = True, duration = -1):
+ return {"id":1,
+ "jsonrpc": "2.0",
+ "method": "start_traffic",
+ "params": {"handler": self.handler,
+ "api_h" : self.api_h,
+ "force": force,
+ "port_id": self.port_id,
+ "mul": parsing_opts.decode_multiplier(mult),
+ "duration": duration,
+ "core_mask": self.MASK_ALL}
+ }
+
+
+
+ # run command
+ # input_list - a list of streams or YAML files
+# outfile - pcap file to save output, if None it's a dry run
+ # dp_core_count - how many DP cores to use
+ # dp_core_index - simulate only specific dp core without merging
+ # is_debug - debug or release image
+ # pkt_limit - how many packets to simulate
+ # mult - multiplier
+# mode - can be 'none', 'gdb', 'valgrind', 'json', 'yaml', 'pkt' or 'native'
+ def run (self,
+ input_list,
+ outfile = None,
+ dp_core_count = 1,
+ dp_core_index = None,
+ is_debug = True,
+ pkt_limit = 5000,
+ mult = "1",
+ duration = -1,
+ mode = 'none',
+ silent = False,
+ tunables = None):
+
+ if not mode in ['none', 'gdb', 'valgrind', 'json', 'yaml','pkt','native']:
+ raise STLArgumentError('mode', mode)
+
+ # listify
+ input_list = input_list if isinstance(input_list, list) else [input_list]
+
+ # check streams arguments
+ if not all([isinstance(i, (STLStream, str)) for i in input_list]):
+ raise STLArgumentError('input_list', input_list)
+
+ # split into two types
+ input_files = [x for x in input_list if isinstance(x, str)]
+ stream_list = [x for x in input_list if isinstance(x, STLStream)]
+
+ # handle YAMLs
+ if tunables == None:
+ tunables = {}
+
+ for input_file in input_files:
+ try:
+ if not 'direction' in tunables:
+ tunables['direction'] = self.port_id % 2
+
+ profile = STLProfile.load(input_file, **tunables)
+
+ except STLError as e:
+ s = format_text("\nError while loading profile '{0}'\n".format(input_file), 'bold')
+ s += "\n" + e.brief()
+ raise STLError(s)
+
+ stream_list += profile.get_streams()
+
+
+ # load streams
+ cmds_json = []
+
+ id_counter = 1
+
+ lookup = {}
+
+ # allocate IDs
+ for stream in stream_list:
+ if stream.get_id() is not None:
+ stream_id = stream.get_id()
+ else:
+ stream_id = id_counter
+ id_counter += 1
+
+ name = stream.get_name() if stream.get_name() is not None else id(stream)
+ if name in lookup:
+ raise STLError("multiple streams with name: '{0}'".format(name))
+ lookup[name] = stream_id
+
+ # resolve names
+ for stream in stream_list:
+
+ name = stream.get_name() if stream.get_name() is not None else id(stream)
+ stream_id = lookup[name]
+
+ next_id = -1
+ next = stream.get_next()
+ if next:
+ if not next in lookup:
+ raise STLError("stream dependency error - unable to find '{0}'".format(next))
+ next_id = lookup[next]
+
+
+ stream_json = stream.to_json()
+ stream_json['next_stream_id'] = next_id
+
+ cmd = {"id":1,
+ "jsonrpc": "2.0",
+ "method": "add_stream",
+ "params": {"handler": self.handler,
+ "api_h": self.api_h,
+ "port_id": self.port_id,
+ "stream_id": stream_id,
+ "stream": stream_json}
+ }
+
+ cmds_json.append(cmd)
+
+ # generate start command
+ cmds_json.append(self.generate_start_cmd(mult = mult,
+ force = True,
+ duration = duration))
+
+ if mode == 'json':
+ print(json.dumps(cmds_json, indent = 4, separators=(',', ': '), sort_keys = True))
+ return
+ elif mode == 'yaml':
+ print(STLProfile(stream_list).dump_to_yaml())
+ return
+ elif mode == 'pkt':
+ print(STLProfile(stream_list).dump_as_pkt())
+ return
+ elif mode == 'native':
+ print(STLProfile(stream_list).dump_to_code())
+ return
+
+
+ # start simulation
+ self.outfile = outfile
+ self.dp_core_count = dp_core_count
+ self.dp_core_index = dp_core_index
+ self.is_debug = is_debug
+ self.pkt_limit = pkt_limit
+ self.mult = mult
+ self.duration = duration
+ self.mode = mode
+ self.silent = silent
+
+ self.__run(cmds_json)
+
+
+ # internal run
+ def __run (self, cmds_json):
+
+ # write to temp file
+ f = tempfile.NamedTemporaryFile(delete = False)
+
+ msg = json.dumps(cmds_json).encode()
+
+ f.write(msg)
+ f.close()
+
+ # launch bp-sim
+ try:
+ self.execute_bp_sim(f.name)
+ finally:
+ os.unlink(f.name)
+
+
+
+ def execute_bp_sim (self, json_filename):
+ if self.is_debug:
+ exe = os.path.join(self.bp_sim_path, 'bp-sim-64-debug')
+ else:
+ exe = os.path.join(self.bp_sim_path, 'bp-sim-64')
+
+ if not os.path.exists(exe):
+ raise STLError("'{0}' does not exists, please build it before calling the simulation".format(exe))
+
+
+ cmd = [exe,
+ '--pcap',
+ '--sl',
+ '--cores',
+ str(self.dp_core_count),
+ '--limit',
+ str(self.pkt_limit),
+ '-f',
+ json_filename]
+
+ # out or dry
+ if not self.outfile:
+ cmd += ['--dry']
+ cmd += ['-o', '/dev/null']
+ else:
+ cmd += ['-o', self.outfile]
+
+ if self.dp_core_index != None:
+ cmd += ['--core_index', str(self.dp_core_index)]
+
+ if self.mode == 'valgrind':
+ cmd = ['valgrind', '--leak-check=full', '--error-exitcode=1'] + cmd
+
+ elif self.mode == 'gdb':
+ cmd = ['/usr/bin/gdb', '--args'] + cmd
+
+ print("executing command: '{0}'".format(" ".join(cmd)))
+
+ if self.silent:
+ FNULL = open(os.devnull, 'wb')
+ rc = subprocess.call(cmd, stdout=FNULL)
+ else:
+ rc = subprocess.call(cmd)
+
+ if rc != 0:
+ raise STLError('simulation has failed with error code {0}'.format(rc))
+
+ self.merge_results()
+
+
+ def merge_results (self):
+ if not self.outfile:
+ return
+
+ if self.dp_core_count == 1:
+ return
+
+ if self.dp_core_index != None:
+ return
+
+
+ if not self.silent:
+ print("Mering cores output to a single pcap file...\n")
+ inputs = ["{0}-{1}".format(self.outfile, index) for index in range(0, self.dp_core_count)]
+ pcap.merge_cap_files(inputs, self.outfile, delete_src = True)
+
+
+
+def is_valid_file(filename):
+ if not os.path.isfile(filename):
+ raise argparse.ArgumentTypeError("The file '%s' does not exist" % filename)
+
+ return filename
+
+
+def unsigned_int (x):
+ x = int(x)
+ if x < 0:
+ raise argparse.ArgumentTypeError("argument must be >= 0")
+
+ return x
+
+def setParserOptions():
+ parser = argparse.ArgumentParser(prog="stl_sim.py")
+
+ parser.add_argument("-f",
+ dest ="input_file",
+ help = "input file in YAML or Python format",
+ type = is_valid_file,
+ required=True)
+
+ parser.add_argument("-o",
+ dest = "output_file",
+ default = None,
+ help = "output file in ERF format")
+
+
+ parser.add_argument("-c", "--cores",
+ help = "DP core count [default is 1]",
+ dest = "dp_core_count",
+ default = 1,
+ type = int,
+ choices = list(range(1, 9)))
+
+ parser.add_argument("-n", "--core_index",
+ help = "Record only a specific core",
+ dest = "dp_core_index",
+ default = None,
+ type = int)
+
+ parser.add_argument("-i", "--port",
+ help = "Simulate a specific port ID [default is 0]",
+ dest = "port_id",
+ default = 0,
+ type = int)
+
+
+ parser.add_argument("-r", "--release",
+ help = "runs on release image instead of debug [default is False]",
+ action = "store_true",
+ default = False)
+
+
+ parser.add_argument("-s", "--silent",
+ help = "runs on silent mode (no stdout) [default is False]",
+ action = "store_true",
+ default = False)
+
+ parser.add_argument("-l", "--limit",
+ help = "limit test total packet count [default is 5000]",
+ default = 5000,
+ type = unsigned_int)
+
+ parser.add_argument('-m', '--multiplier',
+ help = parsing_opts.match_multiplier_help,
+ dest = 'mult',
+ default = "1",
+ type = parsing_opts.match_multiplier_strict)
+
+ parser.add_argument('-d', '--duration',
+ help = "run duration",
+ dest = 'duration',
+ default = -1,
+ type = float)
+
+
+ parser.add_argument('-t',
+ help = 'sets tunable for a profile',
+ dest = 'tunables',
+ default = None,
+ type = parsing_opts.decode_tunables)
+
+ parser.add_argument('-p', '--path',
+ help = "BP sim path",
+ dest = 'bp_sim_path',
+ default = None,
+ type = str)
+
+
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument("-x", "--valgrind",
+ help = "run under valgrind [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("-g", "--gdb",
+ help = "run under GDB [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--json",
+ help = "generate JSON output only to stdout [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--pkt",
+ help = "Parse the packet and show it as hex",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--yaml",
+ help = "generate YAML from input file [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--native",
+ help = "generate Python code with stateless profile from input file [default is False]",
+ action = "store_true",
+ default = False)
+
+ group.add_argument("--test_multi_core",
+ help = "runs the profile with c=1-8",
+ action = "store_true",
+ default = False)
+
+ return parser
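+# Illustrative invocation (the profile and output names here are hypothetical,
+# not taken from this repo):
+#
+#   ./stl_sim.py -f my_profile.py -o out.erf -c 4 -l 5000 -m 2
+#
+# would simulate 'my_profile.py' on 4 DP cores, cap the run at 5000 packets,
+# apply a x2 multiplier and merge the per-core captures into 'out.erf'.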
+
+
+def validate_args (parser, options):
+
+ if options.dp_core_index:
+ if not options.dp_core_index in range(0, options.dp_core_count):
+ parser.error("DP core index valid range is 0 to {0}".format(options.dp_core_count - 1))
+
+ # zero is OK (no limit), but any other value must be at least the number of DP cores
+ if (options.limit != 0) and options.limit < options.dp_core_count:
+ parser.error("limit cannot be lower than number of DP cores")
+
+
+# a more flexible check
+def compare_caps (cap1, cap2, max_diff_sec = (5 * 1e-6)):
+ pkts1 = list(RawPcapReader(cap1))
+ pkts2 = list(RawPcapReader(cap2))
+
+ if len(pkts1) != len(pkts2):
+ print('{0} contains {1} packets vs. {2} contains {3} packets'.format(cap1, len(pkts1), cap2, len(pkts2)))
+ return False
+
+ # to be less strict, we define equality as: every packet from cap1 exists in cap2
+ # and vice versa
+ # 'exists' means the same packet payload with abs(TS1-TS2) < 5 usec
+ # it's O(n^2), but who cares, right ?
+ for i, pkt1 in enumerate(pkts1):
+ ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
+ found = None
+ for j, pkt2 in enumerate(pkts2):
+ ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
+
+ if abs(ts1-ts2) > max_diff_sec:
+ break
+
+ if pkt1[0] == pkt2[0]:
+ found = j
+ break
+
+
+ if found is None:
+ print(format_text("cannot find packet #{0} from {1} in {2}\n".format(i, cap1, cap2), 'bold'))
+ return False
+ else:
+ del pkts2[found]
+
+ return True
+
+
+def hexdiff (d1, d2):
+ rc = []
+
+ if len(d1) != len(d2):
+ return rc
+
+ for i in range(len(d1)):
+ if d1[i] != d2[i]:
+ rc.append(i)
+ return rc
+
+def prettyhex (h, diff_list):
+ if type(h[0]) == str:
+ h = [ord(x) for x in h]
+
+ for i in range(len(h)):
+
+ if i in diff_list:
+ sys.stdout.write("->'0x%02x'<-" % h[i])
+ else:
+ sys.stdout.write(" '0x%02x' " % h[i])
+ if ((i % 9) == 8):
+ print("")
+
+ print("")
+
+# a more strict comparison 1 <--> 1
+def compare_caps_strict (cap1, cap2, max_diff_sec = (5 * 1e-6)):
+ pkts1 = list(RawPcapReader(cap1))
+ pkts2 = list(RawPcapReader(cap2))
+
+ if len(pkts1) != len(pkts2):
+ print('{0} contains {1} packets vs. {2} contains {3} packets'.format(cap1, len(pkts1), cap2, len(pkts2)))
+ return False
+
+ # a strict check
+ for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1) + 1)):
+ ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
+ ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
+
+ if abs(ts1-ts2) > max_diff_sec: # 5 usec by default
+ print(format_text("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'\n".format(cap1, cap2, i, ts1, ts2), 'bold'))
+ return False
+
+ if pkt1[0] != pkt2[0]:
+ print(format_text("RAW error: cap files '{0}', '{1}' differ in cap #{2}\n".format(cap1, cap2, i), 'bold'))
+
+ diff_list = hexdiff(pkt1[0], pkt2[0])
+
+ print("{0} - packet #{1}:\n".format(cap1, i))
+ prettyhex(pkt1[0], diff_list)
+
+ print("\n{0} - packet #{1}:\n".format(cap2, i))
+ prettyhex(pkt2[0], diff_list)
+
+ print("")
+ return False
+
+ return True
+
+
+def test_multi_core (r, options):
+
+ for core_count in range(1, 9):
+ r.run(input_list = options.input_file,
+ outfile = '{0}.cap'.format(core_count),
+ dp_core_count = core_count,
+ is_debug = (not options.release),
+ pkt_limit = options.limit,
+ mult = options.mult,
+ duration = options.duration,
+ mode = 'none',
+ silent = True,
+ tunables = options.tunables)
+
+ print("")
+
+ for core_count in range(1, 9):
+ print(format_text("comparing {0} cores to 1 core:\n".format(core_count), 'underline'))
+ rc = compare_caps_strict('1.cap', '{0}.cap'.format(core_count))
+ if rc:
+ print("[Passed]\n")
+
+ return
+
+
+def main (args = None):
+ parser = setParserOptions()
+ options = parser.parse_args(args = args)
+
+ validate_args(parser, options)
+
+
+
+ if options.valgrind:
+ mode = 'valgrind'
+ elif options.gdb:
+ mode = 'gdb'
+ elif options.json:
+ mode = 'json'
+ elif options.yaml:
+ mode = 'yaml'
+ elif options.native:
+ mode = 'native'
+ elif options.pkt:
+ mode = 'pkt'
+ elif options.test_multi_core:
+ mode = 'test_multi_core'
+ else:
+ mode = 'none'
+
+ try:
+ r = STLSim(bp_sim_path = options.bp_sim_path, port_id = options.port_id)
+
+ if mode == 'test_multi_core':
+ test_multi_core(r, options)
+ else:
+ r.run(input_list = options.input_file,
+ outfile = options.output_file,
+ dp_core_count = options.dp_core_count,
+ dp_core_index = options.dp_core_index,
+ is_debug = (not options.release),
+ pkt_limit = options.limit,
+ mult = options.mult,
+ duration = options.duration,
+ mode = mode,
+ silent = options.silent,
+ tunables = options.tunables)
+
+ except KeyboardInterrupt as e:
+ print("\n\n*** Caught Ctrl + C... Exiting...\n\n")
+ return (-1)
+
+ except STLError as e:
+ print(e)
+ return (-1)
+
+ return (0)
+
+
+if __name__ == '__main__':
+ main()
+
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
new file mode 100644
index 00000000..9f601484
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
@@ -0,0 +1,1549 @@
+#!/router/bin/python
+
+from .utils import text_tables
+from .utils.text_opts import format_text, format_threshold, format_num
+from .trex_stl_types import StatNotAvailable, is_integer
+from .trex_stl_exceptions import STLError
+
+from collections import namedtuple, OrderedDict, deque
+import sys
+import copy
+import datetime
+import time
+import re
+import math
+import threading
+import pprint
+
+GLOBAL_STATS = 'g'
+PORT_STATS = 'p'
+PORT_GRAPH = 'pg'
+PORT_STATUS = 'ps'
+STREAMS_STATS = 's'
+LATENCY_STATS = 'ls'
+LATENCY_HISTOGRAM = 'lh'
+CPU_STATS = 'c'
+MBUF_STATS = 'm'
+EXTENDED_STATS = 'x'
+EXTENDED_INC_ZERO_STATS = 'xz'
+
+ALL_STATS_OPTS = [GLOBAL_STATS, PORT_STATS, PORT_STATUS, STREAMS_STATS, LATENCY_STATS, PORT_GRAPH, LATENCY_HISTOGRAM, CPU_STATS, MBUF_STATS, EXTENDED_STATS, EXTENDED_INC_ZERO_STATS]
+COMPACT = [GLOBAL_STATS, PORT_STATS]
+GRAPH_PORT_COMPACT = [GLOBAL_STATS, PORT_GRAPH]
+SS_COMPAT = [GLOBAL_STATS, STREAMS_STATS] # stream stats
+LS_COMPAT = [GLOBAL_STATS, LATENCY_STATS] # latency stats
+LH_COMPAT = [GLOBAL_STATS, LATENCY_HISTOGRAM] # latency histogram
+UT_COMPAT = [GLOBAL_STATS, CPU_STATS, MBUF_STATS] # utilization
+
+ExportableStats = namedtuple('ExportableStats', ['raw_data', 'text_table'])
+
+def round_float (f):
+ return float("%.2f" % f) if type(f) is float else f
+
+def try_int(i):
+ try:
+ return int(i)
+ except:
+ return i
+
+# deep merge of dicts: dst = src + dst
+def deep_merge_dicts (dst, src):
+ for k, v in src.items():
+ # if not exists - deep copy it
+ if not k in dst:
+ dst[k] = copy.deepcopy(v)
+ else:
+ if isinstance(v, dict):
+ deep_merge_dicts(dst[k], v)
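+# a small sketch of the merge semantics (values assumed, not from the source):
+# dst = {'a': 1, 'sub': {'x': 1}}; src = {'a': 9, 'b': 2, 'sub': {'y': 2}}
+# deep_merge_dicts(dst, src) leaves existing scalars untouched and recurses
+# into nested dicts, yielding dst == {'a': 1, 'b': 2, 'sub': {'x': 1, 'y': 2}}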
+
+# BPS L1 from pps and BPS L2
+def calc_bps_L1 (bps, pps):
+ if (pps == 0) or (bps == 0):
+ return 0
+
+ factor = bps / (pps * 8.0)
+ return bps * ( 1 + (20 / factor) )
+#
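+# worked example (values chosen for illustration): a 64B L2 frame at 1000 pps
+# gives bps = 64 * 8 * 1000 = 512000 and factor = 64, so
+# calc_bps_L1(512000, 1000) == 512000 * (1 + 20/64) == 672000,
+# i.e. 84B per packet on the wire (64B frame + 8B preamble + 12B IPG)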
+
+def is_intable (value):
+ try:
+ int(value)
+ return True
+ except ValueError:
+ return False
+
+# used to calculate diffs relative to the previous values
+# for example, BW
+def calculate_diff (samples):
+ total = 0.0
+
+ weight_step = 1.0 / sum(range(0, len(samples)))
+ weight = weight_step
+
+ for i in range(0, len(samples) - 1):
+ current = samples[i] if samples[i] > 0 else 1
+ next = samples[i + 1] if samples[i + 1] > 0 else 1
+
+ s = 100 * ((float(next) / current) - 1.0)
+
+ # block change by 100%
+ total += (min(s, 100) * weight)
+ weight += weight_step
+
+ return total
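+# e.g. (illustrative numbers): calculate_diff([100, 100, 110, 110, 110])
+# weights the 4 deltas by 0.1/0.2/0.3/0.4; only the second delta is non-zero
+# (+10%), so the weighted trend is 10 * 0.2 == 2.0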
+
+
+# calculate by absolute values rather than relative ones (useful for values that are already percentages, e.g. CPU usage)
+def calculate_diff_raw (samples):
+ total = 0.0
+
+ weight_step = 1.0 / sum(range(0, len(samples)))
+ weight = weight_step
+
+ for i in range(0, len(samples) - 1):
+ current = samples[i]
+ next = samples[i + 1]
+
+ total += ( (next - current) * weight )
+ weight += weight_step
+
+ return total
+
+get_number_of_bytes_cache = {}
+# get number of bytes: '64b'->64, '9kb'->9000 etc.
+def get_number_of_bytes(val):
+ if val not in get_number_of_bytes_cache:
+ get_number_of_bytes_cache[val] = int(val[:-1].replace('k', '000'))
+ return get_number_of_bytes_cache[val]
+
+# a simple object to keep a watch over a field
+class WatchedField(object):
+
+ def __init__ (self, name, suffix, high_th, low_th, events_handler):
+ self.name = name
+ self.suffix = suffix
+ self.high_th = high_th
+ self.low_th = low_th
+ self.events_handler = events_handler
+
+ self.hot = False
+ self.current = None
+
+ def update (self, value):
+ if value is None:
+ return
+
+ if value > self.high_th and not self.hot:
+ self.events_handler.log_warning("{0} is high: {1}{2}".format(self.name, value, self.suffix))
+ self.hot = True
+
+ if value < self.low_th and self.hot:
+ self.hot = False
+
+ self.current = value
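+ # hysteresis sketch (thresholds here are illustrative): with high_th=85 and
+ # low_th=60, update(90) logs a warning once and latches 'hot'; further
+ # updates above 60 stay silent until a value below 60 clears the latch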
+
+
+
+class CTRexInfoGenerator(object):
+ """
+ This object is responsible of generating stats and information from objects maintained at
+ STLClient and the ports.
+ """
+
+ def __init__(self, global_stats_ref, ports_dict_ref, rx_stats_ref, latency_stats_ref, util_stats_ref, xstats_ref, async_monitor):
+ self._global_stats = global_stats_ref
+ self._ports_dict = ports_dict_ref
+ self._rx_stats_ref = rx_stats_ref
+ self._latency_stats_ref = latency_stats_ref
+ self._util_stats_ref = util_stats_ref
+ self._xstats_ref = xstats_ref
+ self._async_monitor = async_monitor
+
+ def generate_single_statistic(self, port_id_list, statistic_type):
+ if statistic_type == GLOBAL_STATS:
+ return self._generate_global_stats()
+
+ elif statistic_type == PORT_STATS:
+ return self._generate_port_stats(port_id_list)
+
+ elif statistic_type == PORT_GRAPH:
+ return self._generate_port_graph(port_id_list)
+
+ elif statistic_type == PORT_STATUS:
+ return self._generate_port_status(port_id_list)
+
+ elif statistic_type == STREAMS_STATS:
+ return self._generate_streams_stats()
+
+ elif statistic_type == LATENCY_STATS:
+ return self._generate_latency_stats()
+
+ elif statistic_type == LATENCY_HISTOGRAM:
+ return self._generate_latency_histogram()
+
+ elif statistic_type == CPU_STATS:
+ return self._generate_cpu_util_stats()
+
+ elif statistic_type == MBUF_STATS:
+ return self._generate_mbuf_util_stats()
+
+ elif statistic_type == EXTENDED_STATS:
+ return self._generate_xstats(port_id_list, include_zero_lines = False)
+
+ elif statistic_type == EXTENDED_INC_ZERO_STATS:
+ return self._generate_xstats(port_id_list, include_zero_lines = True)
+
+ else:
+ # ignore by returning empty object
+ return {}
+
+ def generate_streams_info(self, port_id_list, stream_id_list):
+ relevant_ports = self.__get_relevant_ports(port_id_list)
+ return_data = OrderedDict()
+
+ for port_obj in relevant_ports:
+ streams_data = self._generate_single_port_streams_info(port_obj, stream_id_list)
+ if not streams_data:
+ continue
+ hdr_key = "Port {port}:".format(port= port_obj.port_id)
+
+ # TODO: test for other ports with same stream structure, and join them
+ return_data[hdr_key] = streams_data
+
+ return return_data
+
+ def _generate_global_stats(self):
+ global_stats = self._global_stats
+
+ stats_data_left = OrderedDict([("connection", "{host}, Port {port}".format(host=global_stats.connection_info.get("server"),
+ port=global_stats.connection_info.get("sync_port"))),
+ ("version", "{ver}".format(ver=global_stats.server_version.get("version", "N/A"))),
+
+ ("cpu_util.", "{0}% @ {2} cores ({3} per port) {1}".format( format_threshold(round_float(global_stats.get("m_cpu_util")), [85, 100], [0, 85]),
+ global_stats.get_trend_gui("m_cpu_util", use_raw = True),
+ global_stats.system_info.get('dp_core_count'),
+ global_stats.system_info.get('dp_core_count_per_port'),
+ )),
+
+ ("rx_cpu_util.", "{0}% {1}".format( format_threshold(round_float(global_stats.get("m_rx_cpu_util")), [85, 100], [0, 85]),
+ global_stats.get_trend_gui("m_rx_cpu_util", use_raw = True))),
+
+ ("async_util.", "{0}% / {1}".format( format_threshold(round_float(self._async_monitor.get_cpu_util()), [85, 100], [0, 85]),
+ format_num(self._async_monitor.get_bps() / 8.0, suffix = "B/sec"))),
+ ])
+
+ stats_data_right = OrderedDict([
+ ("total_tx_L2", "{0} {1}".format( global_stats.get("m_tx_bps", format=True, suffix="b/sec"),
+ global_stats.get_trend_gui("m_tx_bps"))),
+
+ ("total_tx_L1", "{0} {1}".format( global_stats.get("m_tx_bps_L1", format=True, suffix="b/sec"),
+ global_stats.get_trend_gui("m_tx_bps_L1"))),
+
+ ("total_rx", "{0} {1}".format( global_stats.get("m_rx_bps", format=True, suffix="b/sec"),
+ global_stats.get_trend_gui("m_rx_bps"))),
+
+ ("total_pps", "{0} {1}".format( global_stats.get("m_tx_pps", format=True, suffix="pkt/sec"),
+ global_stats.get_trend_gui("m_tx_pps"))),
+
+ ("drop_rate", "{0}".format( format_num(global_stats.get("m_rx_drop_bps"),
+ suffix = 'b/sec',
+ opts = 'green' if (global_stats.get("m_rx_drop_bps")== 0) else 'red'),
+ )),
+
+ ("queue_full", "{0}".format( format_num(global_stats.get_rel("m_total_queue_full"),
+ suffix = 'pkts',
+ compact = False,
+ opts = 'green' if (global_stats.get_rel("m_total_queue_full")== 0) else 'red'))),
+ ])
+
+ # build table representation
+ stats_table = text_tables.TRexTextInfo()
+ stats_table.set_cols_align(["l", "l"])
+ stats_table.set_deco(0)
+ stats_table.set_cols_width([50, 45])
+ max_lines = max(len(stats_data_left), len(stats_data_right))
+ for line_num in range(max_lines):
+ row = []
+ if line_num < len(stats_data_left):
+ key = list(stats_data_left.keys())[line_num]
+ row.append('{:<12} : {}'.format(key, stats_data_left[key]))
+ else:
+ row.append('')
+ if line_num < len(stats_data_right):
+ key = list(stats_data_right.keys())[line_num]
+ row.append('{:<12} : {}'.format(key, stats_data_right[key]))
+ else:
+ row.append('')
+ stats_table.add_row(row)
+
+ return {"global_statistics": ExportableStats(None, stats_table)}
+
+ def _generate_streams_stats (self):
+ flow_stats = self._rx_stats_ref
+ # for TUI - maximum 4
+ pg_ids = list(filter(is_intable, flow_stats.latest_stats.keys()))[:4]
+ stream_count = len(pg_ids)
+
+ sstats_data = OrderedDict([ ('Tx pps', []),
+ ('Tx bps L2', []),
+ ('Tx bps L1', []),
+ ('---', [''] * stream_count),
+ ('Rx pps', []),
+ ('Rx bps', []),
+ ('----', [''] * stream_count),
+ ('opackets', []),
+ ('ipackets', []),
+ ('obytes', []),
+ ('ibytes', []),
+ ('-----', [''] * stream_count),
+ ('tx_pkts', []),
+ ('rx_pkts', []),
+ ('tx_bytes', []),
+ ('rx_bytes', [])
+ ])
+
+
+
+ # maximum 4
+ for pg_id in pg_ids:
+
+ sstats_data['Tx pps'].append(flow_stats.get([pg_id, 'tx_pps_lpf', 'total'], format = True, suffix = "pps"))
+ sstats_data['Tx bps L2'].append(flow_stats.get([pg_id, 'tx_bps_lpf', 'total'], format = True, suffix = "bps"))
+
+ sstats_data['Tx bps L1'].append(flow_stats.get([pg_id, 'tx_bps_L1_lpf', 'total'], format = True, suffix = "bps"))
+
+ sstats_data['Rx pps'].append(flow_stats.get([pg_id, 'rx_pps_lpf', 'total'], format = True, suffix = "pps"))
+ sstats_data['Rx bps'].append(flow_stats.get([pg_id, 'rx_bps_lpf', 'total'], format = True, suffix = "bps"))
+
+ sstats_data['opackets'].append(flow_stats.get_rel([pg_id, 'tx_pkts', 'total']))
+ sstats_data['ipackets'].append(flow_stats.get_rel([pg_id, 'rx_pkts', 'total']))
+ sstats_data['obytes'].append(flow_stats.get_rel([pg_id, 'tx_bytes', 'total']))
+ sstats_data['ibytes'].append(flow_stats.get_rel([pg_id, 'rx_bytes', 'total']))
+ sstats_data['tx_bytes'].append(flow_stats.get_rel([pg_id, 'tx_bytes', 'total'], format = True, suffix = "B"))
+ sstats_data['rx_bytes'].append(flow_stats.get_rel([pg_id, 'rx_bytes', 'total'], format = True, suffix = "B"))
+ sstats_data['tx_pkts'].append(flow_stats.get_rel([pg_id, 'tx_pkts', 'total'], format = True, suffix = "pkts"))
+ sstats_data['rx_pkts'].append(flow_stats.get_rel([pg_id, 'rx_pkts', 'total'], format = True, suffix = "pkts"))
+
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * stream_count)
+ stats_table.set_cols_width([10] + [17] * stream_count)
+ stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
+
+ stats_table.add_rows([[k] + v
+ for k, v in sstats_data.items()],
+ header=False)
+
+ header = ["PG ID"] + [key for key in pg_ids]
+ stats_table.header(header)
+
+ return {"streams_statistics": ExportableStats(sstats_data, stats_table)}
+
+ def _generate_latency_stats(self):
+ lat_stats = self._latency_stats_ref
+ latency_window_size = 14
+
+ # for TUI - maximum 5
+ pg_ids = list(filter(is_intable, lat_stats.latest_stats.keys()))[:5]
+ stream_count = len(pg_ids)
+ lstats_data = OrderedDict([('TX pkts', []),
+ ('RX pkts', []),
+ ('Max latency', []),
+ ('Avg latency', []),
+ ('-- Window --', [''] * stream_count),
+ ('Last (max)', []),
+ ] + [('Last-%s' % i, []) for i in range(1, latency_window_size)] + [
+ ('---', [''] * stream_count),
+ ('Jitter', []),
+ ('----', [''] * stream_count),
+ ('Errors', []),
+ ])
+
+ with lat_stats.lock:
+ history = [x for x in lat_stats.history]
+ flow_stats = self._rx_stats_ref.get_stats()
+ for pg_id in pg_ids:
+ lstats_data['TX pkts'].append(flow_stats[pg_id]['tx_pkts']['total'] if pg_id in flow_stats else '')
+ lstats_data['RX pkts'].append(flow_stats[pg_id]['rx_pkts']['total'] if pg_id in flow_stats else '')
+ lstats_data['Avg latency'].append(try_int(lat_stats.get([pg_id, 'latency', 'average'])))
+ lstats_data['Max latency'].append(try_int(lat_stats.get([pg_id, 'latency', 'total_max'])))
+ lstats_data['Last (max)'].append(try_int(lat_stats.get([pg_id, 'latency', 'last_max'])))
+ for i in range(1, latency_window_size):
+ val = history[-i - 1].get(pg_id, {}).get('latency', {}).get('last_max', '') if len(history) > i else ''
+ lstats_data['Last-%s' % i].append(try_int(val))
+ lstats_data['Jitter'].append(try_int(lat_stats.get([pg_id, 'latency', 'jitter'])))
+ errors = 0
+ seq_too_low = lat_stats.get([pg_id, 'err_cntrs', 'seq_too_low'])
+ if is_integer(seq_too_low):
+ errors += seq_too_low
+ seq_too_high = lat_stats.get([pg_id, 'err_cntrs', 'seq_too_high'])
+ if is_integer(seq_too_high):
+ errors += seq_too_high
+ lstats_data['Errors'].append(format_num(errors,
+ opts = 'green' if errors == 0 else 'red'))
+
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * stream_count)
+ stats_table.set_cols_width([12] + [14] * stream_count)
+ stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
+ stats_table.add_rows([[k] + v
+ for k, v in lstats_data.items()],
+ header=False)
+
+ header = ["PG ID"] + [key for key in pg_ids]
+ stats_table.header(header)
+
+ return {"latency_statistics": ExportableStats(lstats_data, stats_table)}
+
+ def _generate_latency_histogram(self):
+ lat_stats = self._latency_stats_ref.latest_stats
+ max_histogram_size = 17
+
+ # for TUI - maximum 5
+ pg_ids = list(filter(is_intable, lat_stats.keys()))[:5]
+
+ merged_histogram = {}
+ for pg_id in pg_ids:
+ merged_histogram.update(lat_stats[pg_id]['latency']['histogram'])
+ histogram_size = min(max_histogram_size, len(merged_histogram))
+
+ stream_count = len(pg_ids)
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * stream_count)
+ stats_table.set_cols_width([12] + [14] * stream_count)
+ stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
+
+ for i in range(max_histogram_size - histogram_size):
+ if i == 0 and not merged_histogram:
+ stats_table.add_row([' No Data'] + [' '] * stream_count)
+ else:
+ stats_table.add_row([' '] * (stream_count + 1))
+ for key in list(reversed(sorted(merged_histogram.keys())))[:histogram_size]:
+ hist_vals = []
+ for pg_id in pg_ids:
+ hist_vals.append(lat_stats[pg_id]['latency']['histogram'].get(key, ' '))
+ stats_table.add_row([key] + hist_vals)
+
+ stats_table.add_row(['- Counters -'] + [' '] * stream_count)
+ err_cntrs_dict = OrderedDict()
+ for pg_id in pg_ids:
+ for err_cntr in sorted(lat_stats[pg_id]['err_cntrs'].keys()):
+ if err_cntr not in err_cntrs_dict:
+ err_cntrs_dict[err_cntr] = [lat_stats[pg_id]['err_cntrs'][err_cntr]]
+ else:
+ err_cntrs_dict[err_cntr].append(lat_stats[pg_id]['err_cntrs'][err_cntr])
+ for err_cntr, val_list in err_cntrs_dict.items():
+ stats_table.add_row([err_cntr] + val_list)
+ header = ["PG ID"] + [key for key in pg_ids]
+ stats_table.header(header)
+ return {"latency_histogram": ExportableStats(None, stats_table)}
+
+ def _generate_cpu_util_stats(self):
+ util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)
+
+ stats_table = text_tables.TRexTextTable()
+ if util_stats:
+ if 'cpu' not in util_stats:
+ raise Exception("Excepting 'cpu' section in stats %s" % util_stats)
+ cpu_stats = util_stats['cpu']
+ hist_len = len(cpu_stats[0]["history"])
+ avg_len = min(5, hist_len)
+ show_len = min(15, hist_len)
+ stats_table.header(['Thread', 'Avg', 'Latest'] + list(range(-1, 0 - show_len, -1)))
+ stats_table.set_cols_align(['l'] + ['r'] * (show_len + 1))
+ stats_table.set_cols_width([10, 3, 6] + [3] * (show_len - 1))
+ stats_table.set_cols_dtype(['t'] * (show_len + 2))
+
+ for i in range(min(18, len(cpu_stats))):
+ history = cpu_stats[i]["history"]
+ ports = cpu_stats[i]["ports"]
+ avg = int(round(sum(history[:avg_len]) / avg_len))
+
+ # decode active ports for core
+ if ports == [-1, -1]:
+ interfaces = "(IDLE)"
+ elif not -1 in ports:
+ interfaces = "({:},{:})".format(ports[0], ports[1])
+ else:
+ interfaces = "({:})".format(ports[0] if ports[0] != -1 else ports[1])
+
+ thread = "{:2} {:^7}".format(i, interfaces)
+ stats_table.add_row([thread, avg] + history[:show_len])
+ else:
+ stats_table.add_row(['No Data.'])
+ return {'cpu_util(%)': ExportableStats(None, stats_table)}
+
+ def _generate_mbuf_util_stats(self):
+ util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)
+ stats_table = text_tables.TRexTextTable()
+ if util_stats:
+ if 'mbuf_stats' not in util_stats:
+ raise Exception("Excepting 'mbuf_stats' section in stats %s" % util_stats)
+ mbuf_stats = util_stats['mbuf_stats']
+ for mbufs_per_socket in mbuf_stats.values():
+ first_socket_mbufs = mbufs_per_socket
+ break
+ if not self._util_stats_ref.mbuf_types_list:
+ mbuf_keys = list(first_socket_mbufs.keys())
+ mbuf_keys.sort(key = get_number_of_bytes)
+ self._util_stats_ref.mbuf_types_list = mbuf_keys
+ types_len = len(self._util_stats_ref.mbuf_types_list)
+ stats_table.set_cols_align(['l'] + ['r'] * (types_len + 1))
+ stats_table.set_cols_width([10] + [7] * (types_len + 1))
+ stats_table.set_cols_dtype(['t'] * (types_len + 2))
+ stats_table.header([''] + self._util_stats_ref.mbuf_types_list + ['RAM(MB)'])
+ total_list = []
+ sum_totals = 0
+ for mbuf_type in self._util_stats_ref.mbuf_types_list:
+ sum_totals += first_socket_mbufs[mbuf_type][1] * get_number_of_bytes(mbuf_type) + 64
+ total_list.append(first_socket_mbufs[mbuf_type][1])
+ sum_totals *= len(list(mbuf_stats.values()))
+ total_list.append(int(sum_totals/1e6))
+ stats_table.add_row(['Total:'] + total_list)
+ stats_table.add_row(['Used:'] + [''] * (types_len + 1))
+ for socket_name in sorted(list(mbuf_stats.keys())):
+ mbufs = mbuf_stats[socket_name]
+ socket_show_name = socket_name.replace('cpu-', '').replace('-', ' ').capitalize() + ':'
+ sum_used = 0
+ used_list = []
+ percentage_list = []
+ for mbuf_type in self._util_stats_ref.mbuf_types_list:
+ used = mbufs[mbuf_type][1] - mbufs[mbuf_type][0]
+ sum_used += used * get_number_of_bytes(mbuf_type) + 64
+ used_list.append(used)
+ percentage_list.append('%s%%' % int(100 * used / mbufs[mbuf_type][1]))
+ used_list.append(int(sum_used/1e6))
+ stats_table.add_row([socket_show_name] + used_list)
+ stats_table.add_row(['Percent:'] + percentage_list + [''])
+ else:
+ stats_table.add_row(['No Data.'])
+ return {'mbuf_util': ExportableStats(None, stats_table)}
+
+ def _generate_xstats(self, port_id_list, include_zero_lines = False):
+ relevant_ports = [port.port_id for port in self.__get_relevant_ports(port_id_list)]
+ # get the data on relevant ports
+ xstats_data = OrderedDict()
+ for port_id in relevant_ports:
+ for key, val in self._xstats_ref.get_stats(port_id).items():
+ if key not in xstats_data:
+ xstats_data[key] = []
+ xstats_data[key].append(val)
+
+ # put into table
+ stats_table = text_tables.TRexTextTable()
+ stats_table.header(['Name:'] + ['Port %s:' % port_id for port_id in relevant_ports])
+ stats_table.set_cols_align(['l'] + ['r'] * len(relevant_ports))
+ stats_table.set_cols_width([30] + [15] * len(relevant_ports))
+ stats_table.set_cols_dtype(['t'] * (len(relevant_ports) + 1))
+ for key, arr in xstats_data.items():
+ if include_zero_lines or list(filter(None, arr)):
+ key = key[:28]
+ stats_table.add_row([key] + arr)
+ return {'xstats:': ExportableStats(None, stats_table)}
+
+ @staticmethod
+ def _get_rational_block_char(value, range_start, interval):
+ # in Konsole, utf-8 is sometimes printed with artifacts, return ascii for now
+ #return 'X' if value >= range_start + float(interval) / 2 else ' '
+
+ if sys.__stdout__.encoding != 'UTF-8':
+ return 'X' if value >= range_start + float(interval) / 2 else ' '
+
+ value -= range_start
+ ratio = float(value) / interval
+ if ratio <= 0.0625:
+ return u' ' # empty block
+ if ratio <= 0.1875:
+ return u'\u2581' # 1/8
+ if ratio <= 0.3125:
+ return u'\u2582' # 2/8
+ if ratio <= 0.4375:
+ return u'\u2583' # 3/8
+ if ratio <= 0.5625:
+ return u'\u2584' # 4/8
+ if ratio <= 0.6875:
+ return u'\u2585' # 5/8
+ if ratio <= 0.8125:
+ return u'\u2586' # 6/8
+ if ratio <= 0.9375:
+ return u'\u2587' # 7/8
+ return u'\u2588' # full block
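+ # e.g. (illustrative): a sample of 3 in the bucket starting at 0 with
+ # interval 5 gives ratio 0.6, which falls in the (0.5625, 0.6875] bin and
+ # renders as the 5/8 block (u'\u2585'); non-UTF-8 consoles fall back to 'X'/' '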
+
+ def _generate_port_graph(self, port_id_list):
+ relevant_port = self.__get_relevant_ports(port_id_list)[0]
+ hist_len = len(relevant_port.port_stats.history)
+ hist_maxlen = relevant_port.port_stats.history.maxlen
+ util_tx_hist = [0] * (hist_maxlen - hist_len) + [round(relevant_port.port_stats.history[i]['tx_percentage']) for i in range(hist_len)]
+ util_rx_hist = [0] * (hist_maxlen - hist_len) + [round(relevant_port.port_stats.history[i]['rx_percentage']) for i in range(hist_len)]
+
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.header([' Util(%)', 'TX', 'RX'])
+ stats_table.set_cols_align(['c', 'c', 'c'])
+ stats_table.set_cols_width([8, hist_maxlen, hist_maxlen])
+ stats_table.set_cols_dtype(['t', 't', 't'])
+
+ for y in range(95, -1, -5):
+ stats_table.add_row([y, ''.join([self._get_rational_block_char(util_tx, y, 5) for util_tx in util_tx_hist]),
+ ''.join([self._get_rational_block_char(util_rx, y, 5) for util_rx in util_rx_hist])])
+
+ return {"port_graph": ExportableStats({}, stats_table)}
+
+ def _generate_port_stats(self, port_id_list):
+ relevant_ports = self.__get_relevant_ports(port_id_list)
+
+ return_stats_data = {}
+ per_field_stats = OrderedDict([("owner", []),
+ ('link', []),
+ ("state", []),
+ ("speed", []),
+ ("CPU util.", []),
+ ("--", []),
+ ("Tx bps L2", []),
+ ("Tx bps L1", []),
+ ("Tx pps", []),
+ ("Line Util.", []),
+
+ ("---", []),
+ ("Rx bps", []),
+ ("Rx pps", []),
+
+ ("----", []),
+ ("opackets", []),
+ ("ipackets", []),
+ ("obytes", []),
+ ("ibytes", []),
+ ("tx-bytes", []),
+ ("rx-bytes", []),
+ ("tx-pkts", []),
+ ("rx-pkts", []),
+
+ ("-----", []),
+ ("oerrors", []),
+ ("ierrors", []),
+
+ ])
+
+ total_stats = CPortStats(None)
+
+ for port_obj in relevant_ports:
+ # fetch port data
+ port_stats = port_obj.generate_port_stats()
+
+ total_stats += port_obj.port_stats
+
+ # populate to data structures
+ return_stats_data[port_obj.port_id] = port_stats
+ self.__update_per_field_dict(port_stats, per_field_stats)
+
+ total_cols = len(relevant_ports)
+ header = ["port"] + [port.port_id for port in relevant_ports]
+
+ if (total_cols > 1):
+ self.__update_per_field_dict(total_stats.generate_stats(), per_field_stats)
+ header += ['total']
+ total_cols += 1
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["r"] * total_cols)
+ stats_table.set_cols_width([10] + [17] * total_cols)
+ stats_table.set_cols_dtype(['t'] + ['t'] * total_cols)
+
+ stats_table.add_rows([[k] + v
+ for k, v in per_field_stats.items()],
+ header=False)
+
+ stats_table.header(header)
+
+ return {"port_statistics": ExportableStats(return_stats_data, stats_table)}
+
+ def _generate_port_status(self, port_id_list):
+ relevant_ports = self.__get_relevant_ports(port_id_list)
+
+ return_stats_data = {}
+ per_field_status = OrderedDict([("driver", []),
+ ("description", []),
+ ("link status", []),
+ ("link speed", []),
+ ("port status", []),
+ ("promiscuous", []),
+ ("flow ctrl", []),
+ ("--", []),
+ ("HW src mac", []),
+ ("SW src mac", []),
+ ("SW dst mac", []),
+ ("---", []),
+ ("PCI Address", []),
+ ("NUMA Node", []),
+ ]
+ )
+
+ for port_obj in relevant_ports:
+ # fetch port data
+ # port_stats = self._async_stats.get_port_stats(port_obj.port_id)
+ port_status = port_obj.generate_port_status()
+
+ # populate to data structures
+ return_stats_data[port_obj.port_id] = port_status
+
+ self.__update_per_field_dict(port_status, per_field_status)
+
+ stats_table = text_tables.TRexTextTable()
+ stats_table.set_cols_align(["l"] + ["c"]*len(relevant_ports))
+ stats_table.set_cols_width([15] + [20] * len(relevant_ports))
+
+ stats_table.add_rows([[k] + v
+ for k, v in per_field_status.items()],
+ header=False)
+ stats_table.header(["port"] + [port.port_id
+ for port in relevant_ports])
+
+ return {"port_status": ExportableStats(return_stats_data, stats_table)}
+
+ def _generate_single_port_streams_info(self, port_obj, stream_id_list):
+
+ return_streams_data = port_obj.generate_loaded_streams_sum()
+
+ if not return_streams_data.get("streams"):
+ # we got no streams available
+ return None
+
+ # FORMAT VALUES ON DEMAND
+
+ # because we mutate this - deep copy before
+ return_streams_data = copy.deepcopy(return_streams_data)
+
+ p_type_field_len = 0
+
+ for stream_id, stream_id_sum in return_streams_data['streams'].items():
+ stream_id_sum['packet_type'] = self._trim_packet_headers(stream_id_sum['packet_type'], 30)
+ p_type_field_len = max(p_type_field_len, len(stream_id_sum['packet_type']))
+
+ info_table = text_tables.TRexTextTable()
+ info_table.set_cols_align(["c"] + ["l"] + ["r"] + ["c"] + ["r"] + ["c"])
+ info_table.set_cols_width([10] + [p_type_field_len] + [8] + [16] + [15] + [12])
+ info_table.set_cols_dtype(["t"] + ["t"] + ["t"] + ["t"] + ["t"] + ["t"])
+
+ info_table.add_rows([v.values()
+ for k, v in return_streams_data['streams'].items()],
+ header=False)
+ info_table.header(["ID", "packet type", "length", "mode", "rate", "next stream"])
+
+ return ExportableStats(return_streams_data, info_table)
+
+
+ def __get_relevant_ports(self, port_id_list):
+ # fetch owned ports
+ ports = [port_obj
+ for _, port_obj in self._ports_dict.items()
+ if port_obj.port_id in port_id_list]
+
+ # display only the first four ports, by design
+ if len(ports) > 4:
+ #self.logger is not defined
+ #self.logger.log(format_text("[WARNING]: ", 'magenta', 'bold'), format_text("displaying up to 4 ports", 'magenta'))
+ ports = ports[:4]
+ return ports
+
+ def __update_per_field_dict(self, dict_src_data, dict_dest_ref):
+ for key, val in dict_src_data.items():
+ if key in dict_dest_ref:
+ dict_dest_ref[key].append(val)
+
+ @staticmethod
+ def _trim_packet_headers(headers_str, trim_limit):
+ if len(headers_str) < trim_limit:
+ # do nothing
+ return headers_str
+ else:
+ return (headers_str[:trim_limit-3] + "...")
+
+
+
+class CTRexStats(object):
+ """ This is an abstract class to represent a stats object """
+
+ def __init__(self):
+ self.reference_stats = {}
+ self.latest_stats = {}
+ self.last_update_ts = time.time()
+ self.history = deque(maxlen = 47)
+ self.lock = threading.Lock()
+ self.has_baseline = False
+
+ ######## abstract methods ##########
+
+ # get stats for user / API
+ def get_stats (self):
+ raise NotImplementedError()
+
+ # generate format stats (for TUI)
+ def generate_stats(self):
+ raise NotImplementedError()
+
+ # called when a snapshot arrives - add more fields
+ def _update (self, snapshot):
+ raise NotImplementedError()
+
+
+ ######## END abstract methods ##########
+
+ def update(self, snapshot, baseline):
+
+ # no update is valid before baseline
+ if not self.has_baseline and not baseline:
+ return
+
+ # call the underlying method
+ rc = self._update(snapshot)
+ if not rc:
+ return
+
+ # sync one time
+ if not self.has_baseline and baseline:
+ self.reference_stats = copy.deepcopy(self.latest_stats)
+ self.has_baseline = True
+
+ # save history
+ with self.lock:
+ self.history.append(self.latest_stats)
+
+
+ def clear_stats(self):
+ self.reference_stats = copy.deepcopy(self.latest_stats)
+ self.history.clear()
+
+
+ def invalidate (self):
+ self.latest_stats = {}
+
+
+ def _get (self, src, field, default = None):
+ if isinstance(field, list):
+ # deep
+ value = src
+ for level in field:
+ if not level in value:
+ return default
+ value = value[level]
+ else:
+ # flat
+ if not field in src:
+ return default
+ value = src[field]
+
+ return value
+
+ def get(self, field, format=False, suffix="", opts = None):
+ value = self._get(self.latest_stats, field)
+ if value == None:
+ return 'N/A'
+
+ return value if not format else format_num(value, suffix = suffix, opts = opts)
+
+
+ def get_rel(self, field, format=False, suffix=""):
+ ref_value = self._get(self.reference_stats, field)
+ latest_value = self._get(self.latest_stats, field)
+
+ # latest value is an aggregation - must contain the value
+ if latest_value == None:
+ return 'N/A'
+
+ if ref_value == None:
+ ref_value = 0
+
+ value = latest_value - ref_value
+
+ return value if not format else format_num(value, suffix)
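+ # e.g. (illustrative values): with reference_stats['opackets'] == 1000 and
+ # latest_stats['opackets'] == 1500, get_rel('opackets') returns the 500
+ # packets counted since the last clear_stats() / baseline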
+
+
+ # get trend for a field
+ def get_trend (self, field, use_raw = False, precision = 10.0):
+ if field not in self.latest_stats:
+ return 0
+
+ # not enough history - no trend
+ if len(self.history) < 5:
+ return 0
+
+ # absolute value is too low - considered noise
+ if self.latest_stats[field] < precision:
+ return 0
+
+ # must lock, deque is not thread-safe for iteration
+ with self.lock:
+ field_samples = [sample[field] for sample in list(self.history)[-5:]]
+
+ if use_raw:
+ return calculate_diff_raw(field_samples)
+ else:
+ return calculate_diff(field_samples)
+
+
+ def get_trend_gui (self, field, show_value = False, use_raw = False, up_color = 'red', down_color = 'green'):
+ v = self.get_trend(field, use_raw)
+
+ value = abs(v)
+
+ # use arrows if utf-8 is supported
+ if sys.__stdout__.encoding == 'UTF-8':
+ arrow = u'\u25b2' if v > 0 else u'\u25bc'
+ else:
+ arrow = ''
+
+ if sys.version_info < (3,0):
+ arrow = arrow.encode('utf-8')
+
+ color = up_color if v > 0 else down_color
+
+ # a change of less than 1% is not meaningful
+ if value < 1:
+ return ""
+
+ elif value > 5:
+
+ if show_value:
+ return format_text("{0}{0}{0} {1:.2f}%".format(arrow,v), color)
+ else:
+ return format_text("{0}{0}{0}".format(arrow), color)
+
+ elif value > 2:
+
+ if show_value:
+ return format_text("{0}{0} {1:.2f}%".format(arrow,v), color)
+ else:
+ return format_text("{0}{0}".format(arrow), color)
+
+ else:
+ if show_value:
+ return format_text("{0} {1:.2f}%".format(arrow,v), color)
+ else:
+ return format_text("{0}".format(arrow), color)
+
+
+
+class CGlobalStats(CTRexStats):
+
+ def __init__(self, connection_info, server_version, ports_dict_ref, events_handler):
+ super(CGlobalStats, self).__init__()
+
+ self.connection_info = connection_info
+ self.server_version = server_version
+ self._ports_dict = ports_dict_ref
+ self.events_handler = events_handler
+
+ self.watched_cpu_util = WatchedField('CPU util.', '%', 85, 60, events_handler)
+ self.watched_rx_cpu_util = WatchedField('RX core util.', '%', 85, 60, events_handler)
+
+ def get_stats (self):
+ stats = {}
+
+ # absolute
+ stats['cpu_util'] = self.get("m_cpu_util")
+ stats['rx_cpu_util'] = self.get("m_rx_cpu_util")
+ stats['bw_per_core'] = self.get("m_bw_per_core")
+
+ stats['tx_bps'] = self.get("m_tx_bps")
+ stats['tx_pps'] = self.get("m_tx_pps")
+
+ stats['rx_bps'] = self.get("m_rx_bps")
+ stats['rx_pps'] = self.get("m_rx_pps")
+ stats['rx_drop_bps'] = self.get("m_rx_drop_bps")
+
+ # relatives
+ stats['queue_full'] = self.get_rel("m_total_queue_full")
+
+ return stats
+
+
+
+ def _update(self, snapshot):
+ # L1 bps
+ bps = snapshot.get("m_tx_bps")
+ pps = snapshot.get("m_tx_pps")
+
+ snapshot['m_tx_bps_L1'] = calc_bps_L1(bps, pps)
+
+
+ # simple...
+ self.latest_stats = snapshot
+
+ self.watched_cpu_util.update(snapshot.get('m_cpu_util'))
+ self.watched_rx_cpu_util.update(snapshot.get('m_rx_cpu_util'))
+
+ return True
+
+
+class CPortStats(CTRexStats):
+
+ def __init__(self, port_obj):
+ super(CPortStats, self).__init__()
+ self._port_obj = port_obj
+
+ @staticmethod
+ def __merge_dicts (target, src):
+ for k, v in src.items():
+ if k in target:
+ target[k] += v
+ else:
+ target[k] = v
+
+
+ def __add__ (self, x):
+ if not isinstance(x, CPortStats):
+ raise TypeError("cannot add non stats object to stats")
+
+ # main stats
+ if not self.latest_stats:
+ self.latest_stats = {}
+
+ self.__merge_dicts(self.latest_stats, x.latest_stats)
+
+ # reference stats
+ if x.reference_stats:
+ if not self.reference_stats:
+ self.reference_stats = x.reference_stats.copy()
+ else:
+ self.__merge_dicts(self.reference_stats, x.reference_stats)
+
+ # history - should be traversed with a lock
+ with self.lock, x.lock:
+ if not self.history:
+ self.history = copy.deepcopy(x.history)
+ else:
+ for h1, h2 in zip(self.history, x.history):
+ self.__merge_dicts(h1, h2)
+
+ return self
+
+ # for port we need to do something smarter
+ def get_stats (self):
+ stats = {}
+
+ stats['opackets'] = self.get_rel("opackets")
+ stats['ipackets'] = self.get_rel("ipackets")
+ stats['obytes'] = self.get_rel("obytes")
+ stats['ibytes'] = self.get_rel("ibytes")
+ stats['oerrors'] = self.get_rel("oerrors")
+ stats['ierrors'] = self.get_rel("ierrors")
+
+ stats['tx_bps'] = self.get("m_total_tx_bps")
+ stats['tx_pps'] = self.get("m_total_tx_pps")
+ stats['tx_bps_L1'] = self.get("m_total_tx_bps_L1")
+ stats['tx_util'] = self.get("m_tx_util")
+
+ stats['rx_bps'] = self.get("m_total_rx_bps")
+ stats['rx_pps'] = self.get("m_total_rx_pps")
+ stats['rx_bps_L1'] = self.get("m_total_rx_bps_L1")
+ stats['rx_util'] = self.get("m_rx_util")
+
+ return stats
+
+
+
+ def _update(self, snapshot):
+ speed = self._port_obj.get_speed_bps()
+
+ # L1 bps
+ tx_bps = snapshot.get("m_total_tx_bps")
+ tx_pps = snapshot.get("m_total_tx_pps")
+ rx_bps = snapshot.get("m_total_rx_bps")
+ rx_pps = snapshot.get("m_total_rx_pps")
+ ts_diff = 0.5 # TODO: change this to real ts diff from server
+
+ bps_tx_L1 = calc_bps_L1(tx_bps, tx_pps)
+ bps_rx_L1 = calc_bps_L1(rx_bps, rx_pps)
+
+ snapshot['m_total_tx_bps_L1'] = bps_tx_L1
+ if speed:
+ snapshot['m_tx_util'] = (bps_tx_L1 / speed) * 100.0
+ else:
+ snapshot['m_tx_util'] = 0
+
+ snapshot['m_total_rx_bps_L1'] = bps_rx_L1
+ if speed:
+ snapshot['m_rx_util'] = (bps_rx_L1 / speed) * 100.0
+ else:
+ snapshot['m_rx_util'] = 0
+
+ # TX line util not smoothed
+ diff_tx_pkts = snapshot.get('opackets', 0) - self.latest_stats.get('opackets', 0)
+ diff_tx_bytes = snapshot.get('obytes', 0) - self.latest_stats.get('obytes', 0)
+ tx_bps_L1 = calc_bps_L1(8.0 * diff_tx_bytes / ts_diff, float(diff_tx_pkts) / ts_diff)
+ if speed:
+ snapshot['tx_percentage'] = 100.0 * tx_bps_L1 / speed
+ else:
+ snapshot['tx_percentage'] = 0
+
+ # RX line util not smoothed
+ diff_rx_pkts = snapshot.get('ipackets', 0) - self.latest_stats.get('ipackets', 0)
+ diff_rx_bytes = snapshot.get('ibytes', 0) - self.latest_stats.get('ibytes', 0)
+ rx_bps_L1 = calc_bps_L1(8.0 * diff_rx_bytes / ts_diff, float(diff_rx_pkts) / ts_diff)
+ if speed:
+ snapshot['rx_percentage'] = 100.0 * rx_bps_L1 / speed
+ else:
+ snapshot['rx_percentage'] = 0
+
+ # simple...
+ self.latest_stats = snapshot
+
+ return True
+
+
+ def generate_stats(self):
+
+ port_state = self._port_obj.get_port_state_name() if self._port_obj else ""
+ if port_state == "TRANSMITTING":
+ port_state = format_text(port_state, 'green', 'bold')
+ elif port_state == "PAUSE":
+ port_state = format_text(port_state, 'magenta', 'bold')
+ else:
+ port_state = format_text(port_state, 'bold')
+
+ if self._port_obj:
+ if 'link' in self._port_obj.attr:
+ if self._port_obj.attr.get('link', {}).get('up') == False:
+ link_state = format_text('DOWN', 'red', 'bold')
+ else:
+ link_state = 'UP'
+ else:
+ link_state = 'N/A'
+ else:
+ link_state = ''
+
+ # default rate format modifiers
+ rate_format = {'bpsl1': None, 'bps': None, 'pps': None, 'percentage': 'bold'}
+
+ # mark owned ports by color
+ if self._port_obj:
+ owner = self._port_obj.get_owner()
+ rate_format[self._port_obj.last_factor_type] = ('blue', 'bold')
+ if self._port_obj.is_acquired():
+ owner = format_text(owner, 'green')
+
+ else:
+ owner = ''
+
+
+ return {"owner": owner,
+ "state": "{0}".format(port_state),
+ 'link': link_state,
+ "speed": self._port_obj.get_formatted_speed() if self._port_obj else '',
+ "CPU util.": "{0} {1}%".format(self.get_trend_gui("m_cpu_util", use_raw = True),
+ format_threshold(round_float(self.get("m_cpu_util")), [85, 100], [0, 85])) if self._port_obj else '' ,
+ "--": " ",
+ "---": " ",
+ "----": " ",
+ "-----": " ",
+
+ "Tx bps L1": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps_L1", show_value = False),
+ self.get("m_total_tx_bps_L1", format = True, suffix = "bps", opts = rate_format['bpsl1'])),
+
+ "Tx bps L2": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps", show_value = False),
+ self.get("m_total_tx_bps", format = True, suffix = "bps", opts = rate_format['bps'])),
+
+ "Line Util.": "{0} {1}".format(self.get_trend_gui("m_tx_util", show_value = False) if self._port_obj else "",
+ self.get("m_tx_util", format = True, suffix = "%", opts = rate_format['percentage']) if self._port_obj else ""),
+
+ "Rx bps": "{0} {1}".format(self.get_trend_gui("m_total_rx_bps", show_value = False),
+ self.get("m_total_rx_bps", format = True, suffix = "bps")),
+
+ "Tx pps": "{0} {1}".format(self.get_trend_gui("m_total_tx_pps", show_value = False),
+ self.get("m_total_tx_pps", format = True, suffix = "pps", opts = rate_format['pps'])),
+
+ "Rx pps": "{0} {1}".format(self.get_trend_gui("m_total_rx_pps", show_value = False),
+ self.get("m_total_rx_pps", format = True, suffix = "pps")),
+
+ "opackets" : self.get_rel("opackets"),
+ "ipackets" : self.get_rel("ipackets"),
+ "obytes" : self.get_rel("obytes"),
+ "ibytes" : self.get_rel("ibytes"),
+
+ "tx-bytes": self.get_rel("obytes", format = True, suffix = "B"),
+ "rx-bytes": self.get_rel("ibytes", format = True, suffix = "B"),
+ "tx-pkts": self.get_rel("opackets", format = True, suffix = "pkts"),
+ "rx-pkts": self.get_rel("ipackets", format = True, suffix = "pkts"),
+
+ "oerrors" : format_num(self.get_rel("oerrors"),
+ compact = False,
+ opts = 'green' if (self.get_rel("oerrors")== 0) else 'red'),
+
+ "ierrors" : format_num(self.get_rel("ierrors"),
+ compact = False,
+ opts = 'green' if (self.get_rel("ierrors")== 0) else 'red'),
+
+ }
+
+
+class CLatencyStats(CTRexStats):
+ def __init__(self, ports):
+ super(CLatencyStats, self).__init__()
+
+
+ # for API
+ def get_stats (self):
+ return copy.deepcopy(self.latest_stats)
+
+
+ def _update(self, snapshot):
+ if snapshot is None:
+ snapshot = {}
+ output = {}
+
+ output['global'] = {}
+ for field in ['bad_hdr', 'old_flow']:
+ if 'global' in snapshot and field in snapshot['global']:
+ output['global'][field] = snapshot['global'][field]
+ else:
+ output['global'][field] = 0
+
+ # we care only about the current active keys
+ pg_ids = list(filter(is_intable, snapshot.keys()))
+
+ for pg_id in pg_ids:
+ current_pg = snapshot.get(pg_id)
+ int_pg_id = int(pg_id)
+ output[int_pg_id] = {}
+ output[int_pg_id]['err_cntrs'] = current_pg['err_cntrs']
+ output[int_pg_id]['latency'] = {}
+
+ if 'latency' in current_pg:
+ for field in ['jitter', 'average', 'total_max', 'last_max']:
+ if field in current_pg['latency']:
+ output[int_pg_id]['latency'][field] = current_pg['latency'][field]
+ else:
+ output[int_pg_id]['latency'][field] = StatNotAvailable(field)
+
+ if 'histogram' in current_pg['latency']:
+ output[int_pg_id]['latency']['histogram'] = {int(elem): current_pg['latency']['histogram'][elem]
+ for elem in current_pg['latency']['histogram']}
+ min_val = min(output[int_pg_id]['latency']['histogram'].keys())
+ if min_val == 0:
+ min_val = 2
+ output[int_pg_id]['latency']['total_min'] = min_val
+ else:
+ output[int_pg_id]['latency']['total_min'] = StatNotAvailable('total_min')
+ output[int_pg_id]['latency']['histogram'] = {}
+
+ self.latest_stats = output
+ return True
+
+
+# RX stats objects - COMPLEX :-(
+class CRxStats(CTRexStats):
+ def __init__(self, ports):
+ super(CRxStats, self).__init__()
+ self.ports = ports
+
+
+ # calculates a diff between previous snapshot
+ # and current one
+ def calculate_diff_sec (self, current, prev):
+ if not 'ts' in current:
+ raise ValueError("INTERNAL ERROR: RX stats snapshot MUST contain 'ts' field")
+
+ if prev:
+ prev_ts = prev['ts']
+ now_ts = current['ts']
+ diff_sec = (now_ts['value'] - prev_ts['value']) / float(now_ts['freq'])
+ else:
+ diff_sec = 0.0
+
+ return diff_sec
+
+
+ # this is the heart of the complexity
+ def process_single_pg (self, current_pg, prev_pg):
+
+ # start with the previous PG
+ output = copy.deepcopy(prev_pg)
+
+ for field in ['tx_pkts', 'tx_bytes', 'rx_pkts', 'rx_bytes']:
+ # is it the first time ? (nothing in prev)
+ if field not in output:
+ output[field] = {}
+
+ # does the current snapshot have this field ?
+ if field in current_pg:
+ for port, pv in current_pg[field].items():
+ if not is_intable(port):
+ continue
+
+ output[field][port] = pv
+
+ # sum up
+ total = None
+ for port, pv in output[field].items():
+ if not is_intable(port):
+ continue
+ if total is None:
+ total = 0
+ total += pv
+
+ output[field]['total'] = total
+
+
+ return output
+
+
+ def process_snapshot (self, current, prev):
+
+ # final output
+ output = {}
+
+ # copy timestamp field
+ output['ts'] = current['ts']
+
+ # global (not per pg_id) error counters
+ output['global'] = {}
+ for field in ['rx_err', 'tx_err']:
+ output['global'][field] = {}
+ if 'global' in current and field in current['global']:
+ for port in current['global'][field]:
+ output['global'][field][int(port)] = current['global'][field][port]
+
+ # we care only about the current active keys
+ pg_ids = list(filter(is_intable, current.keys()))
+
+ for pg_id in pg_ids:
+
+ current_pg = current.get(pg_id, {})
+
+ # first time - we do not care
+ if current_pg.get('first_time'):
+ # new value - ignore history
+ output[pg_id] = self.process_single_pg(current_pg, {})
+ self.reference_stats[pg_id] = {}
+
+ # 'dry' B/W
+ self.calculate_bw_for_pg(output[pg_id])
+
+ else:
+ # aggregate the two values
+ prev_pg = prev.get(pg_id, {})
+ output[pg_id] = self.process_single_pg(current_pg, prev_pg)
+
+ # calculate B/W
+ diff_sec = self.calculate_diff_sec(current, prev)
+ self.calculate_bw_for_pg(output[pg_id], prev_pg, diff_sec)
+
+
+ # clean up old reference values - they are dead
+ ref_pg_ids = list(filter(is_intable, self.reference_stats.keys()))
+
+ deleted_pg_ids = set(ref_pg_ids).difference(pg_ids)
+ for d_pg_id in deleted_pg_ids:
+ del self.reference_stats[d_pg_id]
+
+ return output
+
+
+
+ def calculate_bw_for_pg (self, pg_current, pg_prev = None, diff_sec = 0.0):
+ # no previous values
+ if (not pg_prev) or not (diff_sec > 0):
+ pg_current['tx_pps'] = {}
+ pg_current['tx_bps'] = {}
+ pg_current['tx_bps_L1'] = {}
+ pg_current['tx_line_util'] = {}
+ pg_current['rx_pps'] = {}
+ pg_current['rx_bps'] = {}
+ pg_current['rx_bps_L1'] = {}
+ pg_current['rx_line_util'] = {}
+
+ pg_current['tx_pps_lpf'] = {}
+ pg_current['tx_bps_lpf'] = {}
+ pg_current['tx_bps_L1_lpf'] = {}
+ pg_current['rx_pps_lpf'] = {}
+ pg_current['rx_bps_lpf'] = {}
+ pg_current['rx_bps_L1_lpf'] = {}
+ return
+
+ # TX
+ for port in pg_current['tx_pkts'].keys():
+
+ prev_tx_pps = pg_prev['tx_pps'].get(port)
+ now_tx_pkts = pg_current['tx_pkts'].get(port)
+ prev_tx_pkts = pg_prev['tx_pkts'].get(port)
+ pg_current['tx_pps'][port], pg_current['tx_pps_lpf'][port] = self.calc_pps(prev_tx_pps, now_tx_pkts, prev_tx_pkts, diff_sec)
+
+ prev_tx_bps = pg_prev['tx_bps'].get(port)
+ now_tx_bytes = pg_current['tx_bytes'].get(port)
+ prev_tx_bytes = pg_prev['tx_bytes'].get(port)
+
+ pg_current['tx_bps'][port], pg_current['tx_bps_lpf'][port] = self.calc_bps(prev_tx_bps, now_tx_bytes, prev_tx_bytes, diff_sec)
+
+ if pg_current['tx_bps'].get(port) != None and pg_current['tx_pps'].get(port) != None:
+ pg_current['tx_bps_L1'][port] = calc_bps_L1(pg_current['tx_bps'][port], pg_current['tx_pps'][port])
+ pg_current['tx_bps_L1_lpf'][port] = calc_bps_L1(pg_current['tx_bps_lpf'][port], pg_current['tx_pps_lpf'][port])
+ else:
+ pg_current['tx_bps_L1'][port] = None
+ pg_current['tx_bps_L1_lpf'][port] = None
+
+
+ # RX
+ for port in pg_current['rx_pkts'].keys():
+
+ prev_rx_pps = pg_prev['rx_pps'].get(port)
+ now_rx_pkts = pg_current['rx_pkts'].get(port)
+ prev_rx_pkts = pg_prev['rx_pkts'].get(port)
+ pg_current['rx_pps'][port], pg_current['rx_pps_lpf'][port] = self.calc_pps(prev_rx_pps, now_rx_pkts, prev_rx_pkts, diff_sec)
+
+ prev_rx_bps = pg_prev['rx_bps'].get(port)
+ now_rx_bytes = pg_current['rx_bytes'].get(port)
+ prev_rx_bytes = pg_prev['rx_bytes'].get(port)
+ pg_current['rx_bps'][port], pg_current['rx_bps_lpf'][port] = self.calc_bps(prev_rx_bps, now_rx_bytes, prev_rx_bytes, diff_sec)
+ if pg_current['rx_bps'].get(port) != None and pg_current['rx_pps'].get(port) != None:
+ pg_current['rx_bps_L1'][port] = calc_bps_L1(pg_current['rx_bps'][port], pg_current['rx_pps'][port])
+ pg_current['rx_bps_L1_lpf'][port] = calc_bps_L1(pg_current['rx_bps_lpf'][port], pg_current['rx_pps_lpf'][port])
+ else:
+ pg_current['rx_bps_L1'][port] = None
+ pg_current['rx_bps_L1_lpf'][port] = None
+
+
+ def calc_pps (self, prev_bw, now, prev, diff_sec):
+ return self.calc_bw(prev_bw, now, prev, diff_sec, False)
+
+
+ def calc_bps (self, prev_bw, now, prev, diff_sec):
+ return self.calc_bw(prev_bw, now, prev, diff_sec, True)
+
+ # returns tuple - first value is real, second is low pass filtered
+ def calc_bw (self, prev_bw, now, prev, diff_sec, is_bps):
+ # B/W is not valid when the values are None
+ if (now is None) or (prev is None):
+ return (None, None)
+
+ # calculate the B/W for current snapshot
+ current_bw = (now - prev) / diff_sec
+ if is_bps:
+ current_bw *= 8
+
+ # previous B/W is None ? ignore it
+ if prev_bw is None:
+ prev_bw = 0
+
+ return (current_bw, 0.5 * prev_bw + 0.5 * current_bw)
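+ # numeric sketch (values assumed): 1000 bytes sent over diff_sec = 0.5
+ # gives current_bw = (2000 - 1000) / 0.5 * 8 == 16000 bps; with a previous
+ # LPF value of 1000 the filtered result is 0.5*1000 + 0.5*16000 == 8500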
+
+
+
+
+ def _update (self, snapshot):
+ #print(snapshot)
+ # generate a new snapshot
+ new_snapshot = self.process_snapshot(snapshot, self.latest_stats)
+
+ #print new_snapshot
+ # advance
+ self.latest_stats = new_snapshot
+
+
+ return True
+
+
+
+ # for API
+ def get_stats (self):
+ stats = {}
+
+ for pg_id, value in self.latest_stats.items():
+ # skip non ints
+ if not is_intable(pg_id):
+ # 'global' stats live at the same level as the pg_ids; we do want them passed on to the user
+ if pg_id == 'global':
+ stats[pg_id] = value
+ continue
+ # bare counters
+ stats[int(pg_id)] = {}
+ for field in ['tx_pkts', 'tx_bytes', 'rx_pkts', 'rx_bytes']:
+ val = self.get_rel([pg_id, field, 'total'])
+ stats[int(pg_id)][field] = {'total': val if val != 'N/A' else StatNotAvailable(field)}
+ for port in value[field].keys():
+ if is_intable(port):
+ val = self.get_rel([pg_id, field, port])
+ stats[int(pg_id)][field][int(port)] = val if val != 'N/A' else StatNotAvailable(field)
+
+ # BW values
+ for field in ['tx_pps', 'tx_bps', 'tx_bps_L1', 'rx_pps', 'rx_bps', 'rx_bps_L1']:
+ val = self.get([pg_id, field, 'total'])
+ stats[int(pg_id)][field] = {'total': val if val != 'N/A' else StatNotAvailable(field)}
+ for port in value[field].keys():
+ if is_intable(port):
+ val = self.get([pg_id, field, port])
+ stats[int(pg_id)][field][int(port)] = val if val != 'N/A' else StatNotAvailable(field)
+
+ return stats
+
+class CUtilStats(CTRexStats):
+
+ def __init__(self, client):
+ super(CUtilStats, self).__init__()
+ self.client = client
+ self.history = deque(maxlen = 1)
+ self.mbuf_types_list = None
+ self.last_update_ts = -999
+
+ def get_stats(self, use_1sec_cache = False):
+ time_now = time.time()
+ if self.last_update_ts + 1 < time_now or not self.history or not use_1sec_cache:
+ if self.client.is_connected():
+ rc = self.client._transmit('get_utilization')
+ if not rc:
+ raise STLError(rc)
+ self.last_update_ts = time_now
+ self.history.append(rc.data())
+ else:
+ self.history.append({})
+
+ return self.history[-1]
+
+class CXStats(CTRexStats):
+
+ def __init__(self, client):
+ super(CXStats, self).__init__()
+ self.client = client
+ self.names = []
+ self.last_update_ts = -999
+
+ def clear_stats(self, port_id = None):
+ if port_id == None:
+ ports = self.client.get_all_ports()
+ elif type(port_id) is list:
+ ports = port_id
+ else:
+ ports = [port_id]
+
+ for port_id in ports:
+ self.reference_stats[port_id] = self.get_stats(port_id, relative = False)
+
+ def get_stats(self, port_id, use_1sec_cache = False, relative = True):
+ time_now = time.time()
+ if self.last_update_ts + 1 < time_now or not self.latest_stats or not use_1sec_cache:
+ if self.client.is_connected():
+ rc = self.client._transmit('get_port_xstats_values', params = {'port_id': port_id})
+ if not rc:
+ raise STLError(rc)
+ self.last_update_ts = time_now
+ values = rc.data().get('xstats_values', [])
+ if len(values) != len(self.names): # need to update names ("keys")
+ rc = self.client._transmit('get_port_xstats_names', params = {'port_id': port_id})
+ if not rc:
+ raise STLError(rc)
+ self.names = rc.data().get('xstats_names', [])
+ if len(values) != len(self.names):
+ raise STLError('Length mismatch between get_port_xstats_names (%s) and get_port_xstats_values (%s)' % (len(self.names), len(values)))
+ self.latest_stats[port_id] = OrderedDict([(key, val) for key, val in zip(self.names, values)])
+
+ stats = OrderedDict()
+ for key, val in self.latest_stats[port_id].items():
+ if relative:
+ stats[key] = self.get_rel([port_id, key])
+ else:
+ stats[key] = self.get([port_id, key])
+ return stats
+
+if __name__ == "__main__":
+ pass
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
new file mode 100644
index 00000000..30fdb2dd
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_std.py
@@ -0,0 +1,78 @@
+from .trex_stl_streams import *
+from .trex_stl_packet_builder_scapy import *
+
+# map ports
+# will destroy all streams/data on the ports
+def stl_map_ports (client, ports = None):
+ # by default use all ports
+ if ports is None:
+ ports = client.get_all_ports()
+
+ stl_send_3_pkts(client, ports)
+
+ tx_pkts = {}
+ pkts = 1
+ base_pkt = STLPktBuilder(pkt = Ether()/IP())
+
+ for port in ports:
+ tx_pkts[pkts] = port
+ stream = STLStream(packet = base_pkt,
+ mode = STLTXSingleBurst(pps = 100000, total_pkts = pkts * 3))
+
+ client.add_streams(stream, [port])
+
+ pkts *= 2
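+ # each port i thus sends a burst of 3 * 2**i packets; after the run, a
+ # receiving port's ipackets/3 is (ideally) a bitmask of the sending ports,
+ # e.g. (illustrative) ~12 packets received -> 12/3 == 4 == 2**2 -> sent by ports[2]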
+
+ # inject
+ client.clear_stats()
+ client.start(ports, mult = "50%")
+ client.wait_on_traffic(ports)
+
+ stats = client.get_stats()
+
+ # cleanup
+ client.reset(ports = ports)
+
+ table = {'map': {}, 'bi' : [], 'unknown': []}
+
+ # actual mapping
+ for port in ports:
+
+ ipackets = int(round(stats[port]["ipackets"] / 3.0)) # majority out of 3 to clean random noises
+ table['map'][port] = None
+
+ for pkts in tx_pkts.keys():
+ if ( (pkts & ipackets) == pkts ):
+ tx_port = tx_pkts[pkts]
+ table['map'][port] = tx_port
+
+ unmapped = list(ports)
+ while len(unmapped) > 0:
+ port_a = unmapped.pop(0)
+ port_b = table['map'][port_a]
+
+ # if unknown - add to the unknown list
+ if port_b == None:
+ table['unknown'].append(port_a)
+ # self-loop, due to bug?
+ elif port_a == port_b:
+ continue
+ # bi-directional ports
+ elif (table['map'][port_b] == port_a):
+ unmapped.remove(port_b)
+ table['bi'].append( (port_a, port_b) )
+
+ return table
+
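+# A minimal usage sketch (assumes 'client' is a connected, acquired STLClient).
+# Each port TXes 3 copies of a unique power-of-two packet count; a peer's RX
+# count, divided by 3, then identifies the sender via bitwise AND with each count.
+#
+#   table = stl_map_ports(client)
+#   print(table['bi'])        # e.g. [(0, 1), (2, 3)] - bi-directional pairs
+#   print(table['unknown'])   # ports with no discovered peer
+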
+# reset ports and send 3 packets from each acquired port
+def stl_send_3_pkts(client, ports = None):
+
+ base_pkt = STLPktBuilder(pkt = Ether()/IP())
+ stream = STLStream(packet = base_pkt,
+ mode = STLTXSingleBurst(pps = 100000, total_pkts = 3))
+
+ client.reset(ports)
+ client.add_streams(stream, ports)
+ client.start(ports, mult = "50%")
+ client.wait_on_traffic(ports)
+ client.reset(ports)
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
new file mode 100755
index 00000000..e63f9125
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
@@ -0,0 +1,1346 @@
+#!/router/bin/python
+
+from .trex_stl_exceptions import *
+from .trex_stl_types import verify_exclusive_arg, validate_type
+from .trex_stl_packet_builder_interface import CTrexPktBuilderInterface
+from .trex_stl_packet_builder_scapy import *
+from collections import OrderedDict, namedtuple
+
+from scapy.utils import ltoa
+from scapy.error import Scapy_Exception
+import random
+import yaml
+import base64
+import string
+import traceback
+import copy
+import imp
+
+
+# base class for TX mode
+class STLTXMode(object):
+    """ Base class defining the TX mode and rate """
+
+ def __init__ (self, pps = None, bps_L1 = None, bps_L2 = None, percentage = None):
+ """
+        Rate can be given in packets per second (pps), L1/L2 bits per second, or percent of the port link speed.
+        Use exactly one unit.
+        For example: pps = 10000 or bps_L1 = 10
+
+ :parameters:
+ pps : float
+ Packets per second
+
+ bps_L1 : float
+ Bits per second L1 (with IPG)
+
+ bps_L2 : float
+                Bits per second L2 (Ethernet frame, including FCS)
+
+ percentage : float
+                Percent of the port link speed (0-100]. Example: 10 means 10% of the port's link speed
+
+ .. code-block:: python
+
+ # STLTXMode Example
+
+ mode = STLTXCont(pps = 10)
+
+ mode = STLTXCont(bps_L1 = 10000000) #10mbps L1
+
+ mode = STLTXCont(bps_L2 = 10000000) #10mbps L2
+
+ mode = STLTXCont(percentage = 10) #10%
+
+ """
+
+ args = [pps, bps_L1, bps_L2, percentage]
+
+ # default
+ if all([x is None for x in args]):
+ pps = 1.0
+ else:
+ verify_exclusive_arg(args)
+
+ self.fields = {'rate': {}}
+
+ if pps is not None:
+ validate_type('pps', pps, [float, int])
+
+ self.fields['rate']['type'] = 'pps'
+ self.fields['rate']['value'] = pps
+
+ elif bps_L1 is not None:
+ validate_type('bps_L1', bps_L1, [float, int])
+
+ self.fields['rate']['type'] = 'bps_L1'
+ self.fields['rate']['value'] = bps_L1
+
+ elif bps_L2 is not None:
+ validate_type('bps_L2', bps_L2, [float, int])
+
+ self.fields['rate']['type'] = 'bps_L2'
+ self.fields['rate']['value'] = bps_L2
+
+ elif percentage is not None:
+ validate_type('percentage', percentage, [float, int])
+ if not (percentage > 0 and percentage <= 100):
+ raise STLArgumentError('percentage', percentage)
+
+ self.fields['rate']['type'] = 'percentage'
+ self.fields['rate']['value'] = percentage
+
+
+
+ def to_json (self):
+ return self.fields
+
+
+# continuous mode
+class STLTXCont(STLTXMode):
+ """ Continuous mode """
+
+ def __init__ (self, **kwargs):
+ """
+ Continuous mode
+
+ see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate
+
+ .. code-block:: python
+
+ # STLTXCont Example
+
+ mode = STLTXCont(pps = 10)
+
+ """
+ super(STLTXCont, self).__init__(**kwargs)
+
+
+ self.fields['type'] = 'continuous'
+
+ @staticmethod
+ def __str__ ():
+ return "Continuous"
+
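+# For reference, a sketch of the serialized form produced by to_json() for a
+# pps-based continuous mode (field names taken from the code above):
+#
+#   STLTXCont(pps = 10).to_json()
+#   -> {'rate': {'type': 'pps', 'value': 10}, 'type': 'continuous'}
+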
+# single burst mode
+class STLTXSingleBurst(STLTXMode):
+ """ Single burst mode """
+
+ def __init__ (self, total_pkts = 1, **kwargs):
+ """
+ Single burst mode
+
+ :parameters:
+ total_pkts : int
+ Number of packets for this burst
+
+ see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate
+
+ .. code-block:: python
+
+ # STLTXSingleBurst Example
+
+ mode = STLTXSingleBurst( pps = 10, total_pkts = 1)
+
+ """
+
+
+ if not isinstance(total_pkts, int):
+ raise STLArgumentError('total_pkts', total_pkts)
+
+ super(STLTXSingleBurst, self).__init__(**kwargs)
+
+ self.fields['type'] = 'single_burst'
+ self.fields['total_pkts'] = total_pkts
+
+ @staticmethod
+ def __str__ ():
+ return "Single Burst"
+
+# multi burst mode
+class STLTXMultiBurst(STLTXMode):
+ """ Multi-burst mode """
+
+ def __init__ (self,
+ pkts_per_burst = 1,
+ ibg = 0.0, # usec not SEC
+ count = 1,
+ **kwargs):
+ """
+ Multi-burst mode
+
+ :parameters:
+
+ pkts_per_burst: int
+ Number of packets per burst
+
+ ibg : float
+                Inter-burst gap in usec (1,000,000.0 usec = 1 sec)
+
+ count : int
+ Number of bursts
+
+ see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate
+
+ .. code-block:: python
+
+ # STLTXMultiBurst Example
+
+            mode = STLTXMultiBurst(pps = 10, pkts_per_burst = 1, count = 10, ibg = 10.0)
+
+ """
+
+
+ if not isinstance(pkts_per_burst, int):
+ raise STLArgumentError('pkts_per_burst', pkts_per_burst)
+
+ if not isinstance(ibg, (int, float)):
+ raise STLArgumentError('ibg', ibg)
+
+ if not isinstance(count, int):
+ raise STLArgumentError('count', count)
+
+ super(STLTXMultiBurst, self).__init__(**kwargs)
+
+ self.fields['type'] = 'multi_burst'
+ self.fields['pkts_per_burst'] = pkts_per_burst
+ self.fields['ibg'] = ibg
+ self.fields['count'] = count
+
+ @staticmethod
+ def __str__ ():
+ return "Multi Burst"
+
+STLStreamDstMAC_CFG_FILE=0
+STLStreamDstMAC_PKT =1
+STLStreamDstMAC_ARP =2
+
+class STLFlowStatsInterface(object):
+ def __init__ (self, pg_id):
+ self.fields = {}
+ self.fields['enabled'] = True
+ self.fields['stream_id'] = pg_id
+
+ def to_json (self):
+ """ Dump as json"""
+ return dict(self.fields)
+
+ @staticmethod
+ def defaults ():
+ return {'enabled' : False}
+
+
+class STLFlowStats(STLFlowStatsInterface):
+ """ Define per stream basic stats
+
+ .. code-block:: python
+
+ # STLFlowStats Example
+
+ flow_stats = STLFlowStats(pg_id = 7)
+
+ """
+
+ def __init__(self, pg_id):
+ super(STLFlowStats, self).__init__(pg_id)
+ self.fields['rule_type'] = 'stats'
+
+
+class STLFlowLatencyStats(STLFlowStatsInterface):
+ """ Define per stream basic stats + latency, jitter, packet reorder/loss
+
+ .. code-block:: python
+
+ # STLFlowLatencyStats Example
+
+ flow_stats = STLFlowLatencyStats(pg_id = 7)
+
+ """
+
+ def __init__(self, pg_id):
+ super(STLFlowLatencyStats, self).__init__(pg_id)
+ self.fields['rule_type'] = 'latency'
+
+
+class STLStream(object):
+    """ A single stream object. Includes the TX mode, packet template with Field Engine program, and RX stats
+
+ .. code-block:: python
+
+ # STLStream Example
+
+
+ base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
+        pad = max(0, size - len(base_pkt)) * 'x' # 'size' is the desired total packet length
+
+        STLStream( isg = 10.0, # start delay in usec
+ name ='S0',
+ packet = STLPktBuilder(pkt = base_pkt/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = 1),
+                   next = 'S1') # point to next stream
+
+
+ """
+
+ def __init__ (self,
+ name = None,
+ packet = None,
+ mode = STLTXCont(pps = 1),
+ enabled = True,
+ self_start = True,
+ isg = 0.0,
+ flow_stats = None,
+ next = None,
+ stream_id = None,
+ action_count = 0,
+ random_seed =0,
+ mac_src_override_by_pkt=None,
+ mac_dst_override_mode=None #see STLStreamDstMAC_xx
+ ):
+ """
+ Stream object
+
+ :parameters:
+
+ name : string
+ Name of the stream. Required if this stream is dependent on another stream, and another stream needs to refer to this stream by name.
+
+ packet : STLPktBuilder see :class:`trex_stl_lib.trex_stl_packet_builder_scapy.STLPktBuilder`
+ Template packet and field engine program. Example: packet = STLPktBuilder(pkt = base_pkt/pad)
+
+ mode : :class:`trex_stl_lib.trex_stl_streams.STLTXCont` or :class:`trex_stl_lib.trex_stl_streams.STLTXSingleBurst` or :class:`trex_stl_lib.trex_stl_streams.STLTXMultiBurst`
+
+ enabled : bool
+ Indicates whether the stream is enabled.
+
+ self_start : bool
+ If False, another stream activates it.
+
+ isg : float
+ Inter-stream gap in usec. Time to wait until the stream sends the first packet.
+
+ flow_stats : :class:`trex_stl_lib.trex_stl_streams.STLFlowStats`
+ Per stream statistic object. See: STLFlowStats
+
+ next : string
+ Name of the stream to activate.
+
+ stream_id :
+ For use by HLTAPI.
+
+ action_count : uint16_t
+ If there is a next stream, number of loops before stopping. Default: 0 (unlimited).
+
+ random_seed: uint16_t
+                If non-zero, seeds this stream's random generator. Useful when a deterministic sequence is needed.
+
+            mac_src_override_by_pkt : bool
+                If True, the src MAC is taken from the template packet instead of the port configuration.
+
+            mac_dst_override_mode : STLStreamDstMAC_xx
+                Defines where the dst MAC is taken from. See the STLStreamDstMAC_xx constants.
+ """
+
+
+ # type checking
+ validate_type('mode', mode, STLTXMode)
+ validate_type('packet', packet, (type(None), CTrexPktBuilderInterface))
+ validate_type('flow_stats', flow_stats, (type(None), STLFlowStatsInterface))
+ validate_type('enabled', enabled, bool)
+ validate_type('self_start', self_start, bool)
+ validate_type('isg', isg, (int, float))
+ validate_type('stream_id', stream_id, (type(None), int))
+ validate_type('random_seed',random_seed,int);
+
+ if (type(mode) == STLTXCont) and (next != None):
+ raise STLError("Continuous stream cannot have a next stream ID")
+
+ # tag for the stream and next - can be anything
+ self.name = name
+ self.next = next
+
+ self.mac_src_override_by_pkt = mac_src_override_by_pkt # save for easy construct code from stream object
+ self.mac_dst_override_mode = mac_dst_override_mode
+ self.id = stream_id
+
+
+ self.fields = {}
+
+        int_mac_src_override_by_pkt = 0
+        int_mac_dst_override_mode = 0
+
+
+        if mac_src_override_by_pkt is None:
+            int_mac_src_override_by_pkt = 0
+            if packet and not packet.is_default_src_mac():
+                int_mac_src_override_by_pkt = 1
+        else:
+            int_mac_src_override_by_pkt = int(mac_src_override_by_pkt)
+
+        if mac_dst_override_mode is None:
+            int_mac_dst_override_mode = 0
+            if packet and not packet.is_default_dst_mac():
+                int_mac_dst_override_mode = STLStreamDstMAC_PKT
+        else:
+            int_mac_dst_override_mode = int(mac_dst_override_mode)
+
+
+ self.is_default_mac = not (int_mac_src_override_by_pkt or int_mac_dst_override_mode)
+
+ self.fields['flags'] = (int_mac_src_override_by_pkt&1) + ((int_mac_dst_override_mode&3)<<1)
+
+ self.fields['action_count'] = action_count
+
+ # basic fields
+ self.fields['enabled'] = enabled
+ self.fields['self_start'] = self_start
+ self.fields['isg'] = isg
+
+ if random_seed !=0 :
+ self.fields['random_seed'] = random_seed # optional
+
+ # mode
+ self.fields['mode'] = mode.to_json()
+ self.mode_desc = str(mode)
+
+
+ # packet
+ self.fields['packet'] = {}
+ self.fields['vm'] = {}
+
+ if not packet:
+ packet = STLPktBuilder(pkt = Ether()/IP())
+
+ self.scapy_pkt_builder = packet
+ # packet builder
+ packet.compile()
+
+ # packet and VM
+ self.fields['packet'] = packet.dump_pkt()
+ self.fields['vm'] = packet.get_vm_data()
+
+ self.pkt = base64.b64decode(self.fields['packet']['binary'])
+
+        # this is heavy; calculate lazily
+ self.packet_desc = None
+
+ if not flow_stats:
+ self.fields['flow_stats'] = STLFlowStats.defaults()
+ else:
+ self.fields['flow_stats'] = flow_stats.to_json()
+
+
+ def __str__ (self):
+ s = "Stream Name: {0}\n".format(self.name)
+ s += "Stream Next: {0}\n".format(self.next)
+ s += "Stream JSON:\n{0}\n".format(json.dumps(self.fields, indent = 4, separators=(',', ': '), sort_keys = True))
+ return s
+
+ def to_json (self):
+ """
+ Return json format
+ """
+ return dict(self.fields)
+
+ def get_id (self):
+ """ Get the stream id after resolution """
+ return self.id
+
+
+ def has_custom_mac_addr (self):
+ """ Return True if src or dst MAC were set as custom """
+ return not self.is_default_mac
+
+ def get_name (self):
+ """ Get the stream name """
+ return self.name
+
+ def get_next (self):
+        """ Get the name of the next stream """
+ return self.next
+
+
+ def has_flow_stats (self):
+ """ Return True if stream was configured with flow stats """
+ return self.fields['flow_stats']['enabled']
+
+ def get_pkt (self):
+ """ Get packet as string """
+ return self.pkt
+
+ def get_pkt_len (self, count_crc = True):
+        """ Get packet length in bytes (count_crc adds 4 bytes of FCS) """
+ pkt_len = len(self.get_pkt())
+ if count_crc:
+ pkt_len += 4
+
+ return pkt_len
+
+
+ def get_pkt_type (self):
+ """ Get packet description. Example: IP:UDP """
+ if self.packet_desc == None:
+ self.packet_desc = STLPktBuilder.pkt_layers_desc_from_buffer(self.get_pkt())
+
+ return self.packet_desc
+
+ def get_mode (self):
+ return self.mode_desc
+
+ @staticmethod
+ def get_rate_from_field (rate_json):
+ """ Get rate from json """
+ t = rate_json['type']
+ v = rate_json['value']
+
+ if t == "pps":
+ return format_num(v, suffix = "pps")
+ elif t == "bps_L1":
+ return format_num(v, suffix = "bps (L1)")
+ elif t == "bps_L2":
+ return format_num(v, suffix = "bps (L2)")
+ elif t == "percentage":
+ return format_num(v, suffix = "%")
+
+ def get_rate (self):
+ return self.get_rate_from_field(self.fields['mode']['rate'])
+
+ def to_pkt_dump (self):
+ """ Print packet description from Scapy """
+ if self.name:
+ print("Stream Name: ",self.name)
+        scapy_b = self.scapy_pkt_builder
+ if scapy_b and isinstance(scapy_b,STLPktBuilder):
+ scapy_b.to_pkt_dump()
+ else:
+ print("Nothing to dump")
+
+
+
+ def to_yaml (self):
+ """ Convert to YAML """
+ y = {}
+
+ if self.name:
+ y['name'] = self.name
+
+ if self.next:
+ y['next'] = self.next
+
+ y['stream'] = copy.deepcopy(self.fields)
+
+ # some shortcuts for YAML
+ rate_type = self.fields['mode']['rate']['type']
+ rate_value = self.fields['mode']['rate']['value']
+
+ y['stream']['mode'][rate_type] = rate_value
+ del y['stream']['mode']['rate']
+
+ return y
+
+    # returns the Python code (text) that rebuilds this stream; in the generated code the stream is assigned to the variable "stream"
+ def to_code (self):
+ """ Convert to Python code as profile """
+ packet = Ether(self.pkt)
+ layer = packet
+ imports_arr = []
+ # remove checksums, add imports if needed
+ while layer:
+ layer_class = layer.__class__.__name__
+ try: # check if class can be instantiated
+ eval('%s()' % layer_class)
+ except NameError: # no such layer
+ found_import = False
+ for module_path, module in sys.modules.items():
+ import_string = 'from %s import %s' % (module_path, layer_class)
+ if import_string in imports_arr:
+ found_import = True
+ break
+ if not module_path.startswith(('scapy.layers', 'scapy.contrib')):
+ continue
+ check_layer = getattr(module, layer_class, None)
+ if not check_layer:
+ continue
+ try:
+ check_layer()
+ imports_arr.append(import_string)
+ found_import = True
+ break
+                    except: # can't be instantiated
+ continue
+ if not found_import:
+ raise STLError('Could not determine import of layer %s' % layer.name)
+ for chksum_name in ('cksum', 'chksum'):
+ if chksum_name in layer.fields:
+ del layer.fields[chksum_name]
+ layer = layer.payload
+ packet.hide_defaults() # remove fields with default values
+ payload = packet.getlayer('Raw')
+ packet_command = packet.command()
+
+ imports = '\n'.join(imports_arr)
+ if payload:
+ payload.remove_payload() # fcs etc.
+ data = payload.fields.get('load', '')
+
+ good_printable = [c for c in string.printable if ord(c) not in range(32)]
+ good_printable.remove("'")
+
+ if type(data) is str:
+ new_data = ''.join([c if c in good_printable else r'\x{0:02x}'.format(ord(c)) for c in data])
+ else:
+ new_data = ''.join([chr(c) if chr(c) in good_printable else r'\x{0:02x}'.format(c) for c in data])
+
+ payload_start = packet_command.find("Raw(load=")
+ if payload_start != -1:
+ packet_command = packet_command[:payload_start-1]
+ layers = packet_command.split('/')
+
+ if payload:
+ if len(new_data) and new_data == new_data[0] * len(new_data):
+ layers.append("Raw(load='%s' * %s)" % (new_data[0], len(new_data)))
+ else:
+ layers.append("Raw(load='%s')" % new_data)
+
+ packet_code = 'packet = (' + (' / \n ').join(layers) + ')'
+ vm_list = []
+ for inst in self.fields['vm']['instructions']:
+ if inst['type'] == 'flow_var':
+ vm_list.append("STLVmFlowVar(name='{name}', size={size}, op='{op}', init_value={init_value}, min_value={min_value}, max_value={max_value}, step={step})".format(**inst))
+ elif inst['type'] == 'write_flow_var':
+ vm_list.append("STLVmWrFlowVar(fv_name='{name}', pkt_offset={pkt_offset}, add_val={add_value}, is_big={is_big_endian})".format(**inst))
+ elif inst['type'] == 'write_mask_flow_var':
+ inst = copy.copy(inst)
+ inst['mask'] = hex(inst['mask'])
+ vm_list.append("STLVmWrMaskFlowVar(fv_name='{name}', pkt_offset={pkt_offset}, pkt_cast_size={pkt_cast_size}, mask={mask}, shift={shift}, add_value={add_value}, is_big={is_big_endian})".format(**inst))
+ elif inst['type'] == 'fix_checksum_ipv4':
+ vm_list.append("STLVmFixIpv4(offset={pkt_offset})".format(**inst))
+ elif inst['type'] == 'trim_pkt_size':
+ vm_list.append("STLVmTrimPktSize(fv_name='{name}')".format(**inst))
+ elif inst['type'] == 'tuple_flow_var':
+ inst = copy.copy(inst)
+ inst['ip_min'] = ltoa(inst['ip_min'])
+ inst['ip_max'] = ltoa(inst['ip_max'])
+ vm_list.append("STLVmTupleGen(name='{name}', ip_min='{ip_min}', ip_max='{ip_max}', port_min={port_min}, port_max={port_max}, limit_flows={limit_flows}, flags={flags})".format(**inst))
+ elif inst['type'] == 'flow_var_rand_limit':
+ vm_list.append("STLVmFlowVarRepetableRandom(name='{name}', size={size}, limit={limit}, seed={seed}, min_value={min_value}, max_value={max_value})".format(**inst))
+
+ vm_code = 'vm = STLScVmRaw([' + ',\n '.join(vm_list) + '], split_by_field = %s)' % STLStream.__add_quotes(self.fields['vm'].get('split_by_var'))
+ stream_params_list = []
+ stream_params_list.append('packet = STLPktBuilder(pkt = packet, vm = vm)')
+ if default_STLStream.name != self.name:
+ stream_params_list.append('name = %s' % STLStream.__add_quotes(self.name))
+ if default_STLStream.fields['enabled'] != self.fields['enabled']:
+ stream_params_list.append('enabled = %s' % self.fields['enabled'])
+ if default_STLStream.fields['self_start'] != self.fields['self_start']:
+ stream_params_list.append('self_start = %s' % self.fields['self_start'])
+ if default_STLStream.fields['isg'] != self.fields['isg']:
+ stream_params_list.append('isg = %s' % self.fields['isg'])
+ if default_STLStream.fields['flow_stats'] != self.fields['flow_stats']:
+ stream_params_list.append('flow_stats = STLFlowStats(%s)' % self.fields['flow_stats']['stream_id'])
+ if default_STLStream.next != self.next:
+ stream_params_list.append('next = %s' % STLStream.__add_quotes(self.next))
+ if default_STLStream.id != self.id:
+ stream_params_list.append('stream_id = %s' % self.id)
+ if default_STLStream.fields['action_count'] != self.fields['action_count']:
+ stream_params_list.append('action_count = %s' % self.fields['action_count'])
+ if 'random_seed' in self.fields:
+ stream_params_list.append('random_seed = %s' % self.fields.get('random_seed', 0))
+ if default_STLStream.mac_src_override_by_pkt != self.mac_src_override_by_pkt:
+ stream_params_list.append('mac_src_override_by_pkt = %s' % self.mac_src_override_by_pkt)
+ if default_STLStream.mac_dst_override_mode != self.mac_dst_override_mode:
+ stream_params_list.append('mac_dst_override_mode = %s' % self.mac_dst_override_mode)
+
+ mode_args = ''
+ for key, value in self.fields['mode'].items():
+ if key not in ('rate', 'type'):
+ mode_args += '%s = %s, ' % (key, value)
+ mode_args += '%s = %s' % (self.fields['mode']['rate']['type'], self.fields['mode']['rate']['value'])
+ if self.mode_desc == STLTXCont.__str__():
+ stream_params_list.append('mode = STLTXCont(%s)' % mode_args)
+ elif self.mode_desc == STLTXSingleBurst().__str__():
+ stream_params_list.append('mode = STLTXSingleBurst(%s)' % mode_args)
+ elif self.mode_desc == STLTXMultiBurst().__str__():
+ stream_params_list.append('mode = STLTXMultiBurst(%s)' % mode_args)
+ else:
+ raise STLError('Could not determine mode: %s' % self.mode_desc)
+
+ stream = "stream = STLStream(" + ',\n '.join(stream_params_list) + ')'
+ return '\n'.join([imports, packet_code, vm_code, stream])
+
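+    # A minimal sketch (assumes 's' is an existing STLStream): print(s.to_code())
+    # emits the imports, a 'packet = (...)' expression, a 'vm = STLScVmRaw(...)'
+    # line and a final 'stream = STLStream(...)' assignment.
+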
+    # add quotes for strings, or leave as-is for other types
+ @staticmethod
+ def __add_quotes(arg):
+ if type(arg) is str:
+ return "'%s'" % arg
+ return arg
+
+ # used to replace non-printable characters with hex
+ @staticmethod
+ def __replchars_to_hex(match):
+ return r'\x{0:02x}'.format(ord(match.group()))
+
+ def dump_to_yaml (self, yaml_file = None):
+ """ Print as yaml """
+ yaml_dump = yaml.dump([self.to_yaml()], default_flow_style = False)
+
+ # write to file if provided
+ if yaml_file:
+ with open(yaml_file, 'w') as f:
+ f.write(yaml_dump)
+
+ return yaml_dump
+
+class YAMLLoader(object):
+
+ def __init__ (self, yaml_file):
+ self.yaml_path = os.path.dirname(yaml_file)
+ self.yaml_file = yaml_file
+
+
+ def __parse_packet (self, packet_dict):
+
+ packet_type = set(packet_dict).intersection(['binary', 'pcap'])
+ if len(packet_type) != 1:
+ raise STLError("Packet section must contain either 'binary' or 'pcap'")
+
+ if 'binary' in packet_type:
+ try:
+ pkt_str = base64.b64decode(packet_dict['binary'])
+ except TypeError:
+ raise STLError("'binary' field is not a valid packet format")
+
+ builder = STLPktBuilder(pkt_buffer = pkt_str)
+
+ elif 'pcap' in packet_type:
+ pcap = os.path.join(self.yaml_path, packet_dict['pcap'])
+
+ if not os.path.exists(pcap):
+ raise STLError("'pcap' - cannot find '{0}'".format(pcap))
+
+ builder = STLPktBuilder(pkt = pcap)
+
+ return builder
+
+
+ def __parse_mode (self, mode_obj):
+ if not mode_obj:
+ return None
+
+ rate_parser = set(mode_obj).intersection(['pps', 'bps_L1', 'bps_L2', 'percentage'])
+ if len(rate_parser) != 1:
+ raise STLError("'rate' must contain exactly one from 'pps', 'bps_L1', 'bps_L2', 'percentage'")
+
+ rate_type = rate_parser.pop()
+ rate = {rate_type : mode_obj[rate_type]}
+
+ mode_type = mode_obj.get('type')
+
+ if mode_type == 'continuous':
+ mode = STLTXCont(**rate)
+
+ elif mode_type == 'single_burst':
+ defaults = STLTXSingleBurst()
+ mode = STLTXSingleBurst(total_pkts = mode_obj.get('total_pkts', defaults.fields['total_pkts']),
+ **rate)
+
+ elif mode_type == 'multi_burst':
+ defaults = STLTXMultiBurst()
+ mode = STLTXMultiBurst(pkts_per_burst = mode_obj.get('pkts_per_burst', defaults.fields['pkts_per_burst']),
+ ibg = mode_obj.get('ibg', defaults.fields['ibg']),
+ count = mode_obj.get('count', defaults.fields['count']),
+ **rate)
+
+ else:
+            raise STLError("mode type must be 'continuous', 'single_burst' or 'multi_burst'")
+
+
+ return mode
+
+
+
+ def __parse_flow_stats (self, flow_stats_obj):
+
+ # no such object
+ if not flow_stats_obj or flow_stats_obj.get('enabled') == False:
+ return None
+
+ pg_id = flow_stats_obj.get('stream_id')
+ if pg_id == None:
+ raise STLError("Enabled RX stats section must contain 'stream_id' field")
+
+ return STLFlowStats(pg_id = pg_id)
+
+
+ def __parse_stream (self, yaml_object):
+ s_obj = yaml_object['stream']
+
+ # parse packet
+ packet = s_obj.get('packet')
+ if not packet:
+ raise STLError("YAML file must contain 'packet' field")
+
+ builder = self.__parse_packet(packet)
+
+
+ # mode
+ mode = self.__parse_mode(s_obj.get('mode'))
+
+ # rx stats
+ flow_stats = self.__parse_flow_stats(s_obj.get('flow_stats'))
+
+
+ defaults = default_STLStream
+ # create the stream
+ stream = STLStream(name = yaml_object.get('name'),
+ packet = builder,
+ mode = mode,
+ flow_stats = flow_stats,
+ enabled = s_obj.get('enabled', defaults.fields['enabled']),
+ self_start = s_obj.get('self_start', defaults.fields['self_start']),
+ isg = s_obj.get('isg', defaults.fields['isg']),
+ next = yaml_object.get('next'),
+ action_count = s_obj.get('action_count', defaults.fields['action_count']),
+ mac_src_override_by_pkt = s_obj.get('mac_src_override_by_pkt', 0),
+                           mac_dst_override_mode = s_obj.get('mac_dst_override_mode', 0)
+ )
+
+ # hack the VM fields for now
+ if 'vm' in s_obj:
+ stream.fields['vm'].update(s_obj['vm'])
+
+ return stream
+
+
+ def parse (self):
+ with open(self.yaml_file, 'r') as f:
+ # read YAML and pass it down to stream object
+ yaml_str = f.read()
+
+ try:
+ objects = yaml.load(yaml_str)
+ except yaml.parser.ParserError as e:
+ raise STLError(str(e))
+
+        streams = [self.__parse_stream(obj) for obj in objects]
+
+ return streams
+
+
+# profile class
+class STLProfile(object):
+ """ Describe a list of streams
+
+ .. code-block:: python
+
+ # STLProfile Example
+
+        profile =  STLProfile( [ STLStream( isg = 10.0, # start delay in usec
+ name ='S0',
+ packet = STLPktBuilder(pkt = base_pkt/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = self.burst_size),
+ next = 'S1'), # point to next stream
+
+                      STLStream( self_start = False, # stream is disabled; enabled through S0
+ name ='S1',
+ packet = STLPktBuilder(pkt = base_pkt1/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = self.burst_size),
+ next = 'S2' ),
+
+                      STLStream( self_start = False, # stream is disabled; enabled through S1
+ name ='S2',
+ packet = STLPktBuilder(pkt = base_pkt2/pad),
+ mode = STLTXSingleBurst( pps = 10, total_pkts = self.burst_size )
+ )
+ ]).get_streams()
+
+
+
+ """
+
+ def __init__ (self, streams = None):
+ """
+
+ :parameters:
+
+ streams : list of :class:`trex_stl_lib.trex_stl_streams.STLStream`
+ a list of stream objects
+
+ """
+
+
+ if streams == None:
+ streams = []
+
+ if not type(streams) == list:
+ streams = [streams]
+
+ if not all([isinstance(stream, STLStream) for stream in streams]):
+ raise STLArgumentError('streams', streams, valid_values = STLStream)
+
+ self.streams = streams
+ self.meta = None
+
+
+ def get_streams (self):
+ """ Get the list of streams"""
+ return self.streams
+
+ def __str__ (self):
+ return '\n'.join([str(stream) for stream in self.streams])
+
+ def is_pauseable (self):
+ return all([x.get_mode() == "Continuous" for x in self.get_streams()])
+
+ def has_custom_mac_addr (self):
+ return any([x.has_custom_mac_addr() for x in self.get_streams()])
+
+ def has_flow_stats (self):
+ return any([x.has_flow_stats() for x in self.get_streams()])
+
+ @staticmethod
+ def load_yaml (yaml_file):
+ """ Load (from YAML file) a profile with a number of streams"""
+
+ # check filename
+ if not os.path.isfile(yaml_file):
+            raise STLError("file '{0}' does not exist".format(yaml_file))
+
+ yaml_loader = YAMLLoader(yaml_file)
+ streams = yaml_loader.parse()
+
+ profile = STLProfile(streams)
+ profile.meta = {'type': 'yaml'}
+
+ return profile
+
+ @staticmethod
+ def get_module_tunables(module):
+ # remove self and variables
+ func = module.register().get_streams
+ argc = func.__code__.co_argcount
+ tunables = func.__code__.co_varnames[1:argc]
+
+ # fetch defaults
+        defaults = func.__defaults__ or () # __defaults__ is None when the function defines no defaults
+ if len(defaults) != (argc - 1):
+ raise STLError("Module should provide default values for all arguments on get_streams()")
+
+ output = {}
+ for t, d in zip(tunables, defaults):
+ output[t] = d
+
+ return output
+
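+    # Sketch: for a profile defining 'def get_streams(self, direction = 0, port_id = 0)'
+    # this returns {'direction': 0, 'port_id': 0}.
+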
+
+ @staticmethod
+ def load_py (python_file, direction = 0, port_id = 0, **kwargs):
+ """ Load from Python profile """
+
+ # check filename
+ if not os.path.isfile(python_file):
+ raise STLError("File '{0}' does not exist".format(python_file))
+
+ basedir = os.path.dirname(python_file)
+ sys.path.insert(0, basedir)
+
+ try:
+ file = os.path.basename(python_file).split('.')[0]
+ module = __import__(file, globals(), locals(), [], 0)
+            imp.reload(module) # reload to pick up changes
+
+ t = STLProfile.get_module_tunables(module)
+ #for arg in kwargs:
+ # if not arg in t:
+ # raise STLError("Profile {0} does not support tunable '{1}' - supported tunables are: '{2}'".format(python_file, arg, t))
+
+ streams = module.register().get_streams(direction = direction,
+ port_id = port_id,
+ **kwargs)
+ profile = STLProfile(streams)
+
+ profile.meta = {'type': 'python',
+ 'tunables': t}
+
+ return profile
+
+ except Exception as e:
+ a, b, tb = sys.exc_info()
+ x =''.join(traceback.format_list(traceback.extract_tb(tb)[1:])) + a.__name__ + ": " + str(b) + "\n"
+
+ summary = "\nPython Traceback follows:\n\n" + x
+ raise STLError(summary)
+
+
+ finally:
+ sys.path.remove(basedir)
+
+
+ # loop_count = 0 means loop forever
+ @staticmethod
+ def load_pcap (pcap_file,
+ ipg_usec = None,
+ speedup = 1.0,
+ loop_count = 1,
+ vm = None,
+ packet_hook = None,
+ split_mode = None):
+ """ Convert a pcap file with a number of packets to a list of connected streams.
+
+ packet1->packet2->packet3 etc
+
+ :parameters:
+
+ pcap_file : string
+ Name of the pcap file
+
+ ipg_usec : float
+ Inter packet gap in usec. If IPG is None, IPG is taken from pcap file
+
+ speedup : float
+                Divide the IPG taken from the pcap file by this factor, so the replay runs "speedup" times faster.
+
+ loop_count : uint16_t
+ Number of loops to repeat the pcap file
+
+ vm : list
+ List of Field engine instructions
+
+ packet_hook : Callable or function
+ will be applied to every packet
+
+            split_mode : str
+                if set, split this PCAP into two profiles based on MACs / IPs (used for dual mode)
+                can be 'MAC' or 'IP'
+
+ :return: STLProfile
+
+ """
+
+ # check filename
+ if not os.path.isfile(pcap_file):
+            raise STLError("file '{0}' does not exist".format(pcap_file))
+
+        # make sure IPG is at least 0.001 usec
+ if ipg_usec is not None and ipg_usec < 0.001:
+ raise STLError("ipg_usec cannot be less than 0.001 usec: '{0}'".format(ipg_usec))
+
+ if loop_count < 0:
+ raise STLError("'loop_count' cannot be negative")
+
+
+ try:
+
+ if split_mode is None:
+ pkts = PCAPReader(pcap_file).read_all()
+ return STLProfile.__pkts_to_streams(pkts,
+ ipg_usec,
+ speedup,
+ loop_count,
+ vm,
+ packet_hook)
+ else:
+ pkts_a, pkts_b = PCAPReader(pcap_file).read_all(split_mode = split_mode)
+
+ profile_a = STLProfile.__pkts_to_streams(pkts_a,
+ ipg_usec,
+ speedup,
+ loop_count,
+ vm,
+ packet_hook,
+ start_delay_usec = 10000)
+
+ profile_b = STLProfile.__pkts_to_streams(pkts_b,
+ ipg_usec,
+ speedup,
+ loop_count,
+ vm,
+ packet_hook,
+ start_delay_usec = 10000)
+
+ return profile_a, profile_b
+
+
+ except Scapy_Exception as e:
+ raise STLError("failed to open PCAP file {0}: '{1}'".format(pcap_file, str(e)))
+
+
+ @staticmethod
+ def __pkts_to_streams (pkts, ipg_usec, speedup, loop_count, vm, packet_hook, start_delay_usec = 0):
+
+ streams = []
+
+        # optional delay (in usec) before the first packet; split profiles pass 10 ms
+ last_ts_usec = -(start_delay_usec)
+
+ if packet_hook:
+ pkts = [(packet_hook(cap), meta) for (cap, meta) in pkts]
+
+
+ for i, (cap, meta) in enumerate(pkts, start = 1):
+ # IPG - if not provided, take from cap
+ if ipg_usec == None:
+ ts_usec = (meta[0] * 1e6 + meta[1]) / float(speedup)
+ else:
+ ts_usec = (ipg_usec * i) / float(speedup)
+
+ # handle last packet
+ if i == len(pkts):
+ next = 1
+ action_count = loop_count
+ else:
+ next = i + 1
+ action_count = 0
+
+ streams.append(STLStream(name = i,
+ packet = STLPktBuilder(pkt_buffer = cap, vm = vm),
+ mode = STLTXSingleBurst(total_pkts = 1, percentage = 100),
+ self_start = True if (i == 1) else False,
+                                     isg = (ts_usec - last_ts_usec), # IPG in usec
+ action_count = action_count,
+ next = next))
+
+ last_ts_usec = ts_usec
+
+
+ profile = STLProfile(streams)
+ profile.meta = {'type': 'pcap'}
+
+ return profile
+
+
+
+ @staticmethod
+ def load (filename, direction = 0, port_id = 0, **kwargs):
+ """ Load a profile by its type. Supported types are:
+ * py
+ * yaml
+              * pcap file that is converted to a profile automatically
+
+        :parameters:
+ filename : string as filename
+ direction : profile's direction (if supported by the profile)
+ port_id : which port ID this profile is being loaded to
+ kwargs : forward those key-value pairs to the profile
+
+ """
+
+ x = os.path.basename(filename).split('.')
+ suffix = x[1] if (len(x) == 2) else None
+
+ if suffix == 'py':
+ profile = STLProfile.load_py(filename, direction, port_id, **kwargs)
+
+ elif suffix == 'yaml':
+ profile = STLProfile.load_yaml(filename)
+
+ elif suffix in ['cap', 'pcap']:
+ profile = STLProfile.load_pcap(filename, speedup = 1, ipg_usec = 1e6)
+
+ else:
+ raise STLError("unknown profile file type: '{0}'".format(suffix))
+
+ profile.meta['stream_count'] = len(profile.get_streams()) if isinstance(profile.get_streams(), list) else 1
+ return profile
+
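+    # A minimal usage sketch (file names are illustrative):
+    #
+    #   profile = STLProfile.load('udp_simple.py', direction = 0, port_id = 0)
+    #   profile = STLProfile.load('sample.pcap')   # replayed with ipg_usec = 1 sec
+    #   print(profile.meta['stream_count'])
+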
+ @staticmethod
+ def get_info (filename):
+ profile = STLProfile.load(filename)
+ return profile.meta
+
+    def dump_as_pkt (self):
+        """ Dump the profile's streams as Scapy packets. Raw packets are converted to Scapy before dumping."""
+        for cnt, stream in enumerate(self.streams):
+            print("=======================")
+            print("Stream %d" % cnt)
+            print("=======================")
+            stream.to_pkt_dump()
+
+ def dump_to_yaml (self, yaml_file = None):
+ """ Convert the profile to yaml """
+ yaml_list = [stream.to_yaml() for stream in self.streams]
+ yaml_str = yaml.dump(yaml_list, default_flow_style = False)
+
+ # write to file if provided
+ if yaml_file:
+ with open(yaml_file, 'w') as f:
+ f.write(yaml_str)
+
+ return yaml_str
+
+ def dump_to_code (self, profile_file = None):
+        """ Convert the profile to a native Python profile. """
+ profile_dump = '''# !!! Auto-generated code !!!
+from trex_stl_lib.api import *
+
+class STLS1(object):
+ def get_streams(self, direction = 0, **kwargs):
+ streams = []
+'''
+ for stream in self.streams:
+ profile_dump += ' '*8 + stream.to_code().replace('\n', '\n' + ' '*8) + '\n'
+ profile_dump += ' '*8 + 'streams.append(stream)\n'
+ profile_dump += '''
+ return streams
+
+def register():
+ return STLS1()
+'''
+ # write to file if provided
+ if profile_file:
+ with open(profile_file, 'w') as f:
+ f.write(profile_dump)
+
+ return profile_dump
+
+
+
+ def __len__ (self):
+ return len(self.streams)
+
+
+class PCAPReader(object):
+ def __init__ (self, pcap_file):
+ self.pcap_file = pcap_file
+
+ def read_all (self, split_mode = None):
+ if split_mode is None:
+ return RawPcapReader(self.pcap_file).read_all()
+
+ # we need to split
+ self.pcap = rdpcap(self.pcap_file)
+ self.graph = Graph()
+
+ self.pkt_groups = [ [], [] ]
+
+ if split_mode == 'MAC':
+ self.generate_mac_groups()
+ elif split_mode == 'IP':
+ self.generate_ip_groups()
+ else:
+ raise STLError('unknown split mode for PCAP')
+
+ return self.pkt_groups
+
+
+ # generate two groups based on MACs
+ def generate_mac_groups (self):
+ for i, pkt in enumerate(self.pcap):
+ if not isinstance(pkt, (Ether, Dot3) ):
+ raise STLError("Packet #{0} has an unknown L2 format: {1}".format(i, type(pkt)))
+ mac_src = pkt.fields['src']
+ mac_dst = pkt.fields['dst']
+ self.graph.add(mac_src, mac_dst)
+
+ # split the graph to two groups
+ mac_groups = self.graph.split()
+
+ for pkt in self.pcap:
+ mac_src = pkt.fields['src']
+ group = 1 if mac_src in mac_groups[1] else 0
+
+ time, raw = pkt.time, bytes(pkt)
+ self.pkt_groups[group].append((raw, (time, 0)))
+
+
+ # generate two groups based on IPs
+ def generate_ip_groups (self):
+        for i, pkt in enumerate(self.pcap):
+ if not isinstance(pkt, (Ether, Dot3) ):
+ raise STLError("Packet #{0} has an unknown L2 format: {1}".format(i, type(pkt)))
+ # skip non IP packets
+ if not isinstance(pkt.payload, IP):
+ continue
+ ip_src = pkt.payload.fields['src']
+ ip_dst = pkt.payload.fields['dst']
+ self.graph.add(ip_src, ip_dst)
+
+ # split the graph to two groups
+ ip_groups = self.graph.split()
+
+ for pkt in self.pcap:
+ # default group - 0
+ group = 0
+
+ # if the packet is IP and IP SRC is in group 1 - move to group 1
+ if isinstance(pkt.payload, IP) and pkt.payload.fields['src'] in ip_groups[1]:
+ group = 1
+
+ time, raw = pkt.time, bytes(pkt)
+ self.pkt_groups[group].append((raw, (time, 0)))
+
+
+
+# a simple graph object - used to split to two groups
+class Graph(object):
+ def __init__ (self):
+ self.db = OrderedDict()
+ self.debug = False
+
+ def log (self, msg):
+ if self.debug:
+ print(msg)
+
+ # add a connection v1 --> v2
+ def add (self, v1, v2):
+ # init value for v1
+ if not v1 in self.db:
+ self.db[v1] = set()
+
+ # init value for v2
+ if not v2 in self.db:
+ self.db[v2] = set()
+
+ # ignore self to self edges
+ if v1 == v2:
+ return
+
+ # undirected - add two ways
+ self.db[v1].add(v2)
+ self.db[v2].add(v1)
+
+
+ # create a 2-color of the graph if possible
+ def split (self):
+ color_a = set()
+ color_b = set()
+
+ # start with all
+ nodes = list(self.db.keys())
+
+ # process one by one
+ while len(nodes) > 0:
+ node = nodes.pop(0)
+
+ friends = self.db[node]
+
+ # node has never been seen - move to color_a
+ if not node in color_a and not node in color_b:
+ self.log("<NEW> {0} --> A".format(node))
+ color_a.add(node)
+
+ # node color
+ node_color, other_color = (color_a, color_b) if node in color_a else (color_b, color_a)
+
+ # check that the coloring is possible
+ bad_friends = friends.intersection(node_color)
+ if bad_friends:
+ raise STLError("ERROR: failed to split PCAP file - {0} and {1} are in the same group".format(node, bad_friends))
+
+ # add all the friends to the other color
+ for friend in friends:
+ self.log("<FRIEND> {0} --> {1}".format(friend, 'A' if other_color is color_a else 'B'))
+ other_color.add(friend)
+
+
+ return color_a, color_b
+
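+# A tiny sketch of the 2-coloring (node names are illustrative):
+#
+#   g = Graph()
+#   g.add('a', 'b')
+#   g.add('c', 'd')
+#   g.split()   # -> ({'a', 'c'}, {'b', 'd'})
+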
+
+default_STLStream = STLStream()
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
new file mode 100644
index 00000000..aa6c4218
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
@@ -0,0 +1,167 @@
+
+from collections import namedtuple, OrderedDict
+from .utils.text_opts import *
+from .trex_stl_exceptions import *
+import types
+
+RpcCmdData = namedtuple('RpcCmdData', ['method', 'params', 'api_class'])
+TupleRC = namedtuple('RCT', ['rc', 'data', 'is_warn'])
+
+class RpcResponseStatus(namedtuple('RpcResponseStatus', ['success', 'id', 'msg'])):
+ __slots__ = ()
+ def __str__(self):
+ return "{id:^3} - {msg} ({stat})".format(id=self.id,
+ msg=self.msg,
+ stat="success" if self.success else "fail")
+
+# simple class to represent complex return value
+class RC():
+
+ def __init__ (self, rc = None, data = None, is_warn = False):
+ self.rc_list = []
+
+ if (rc != None):
+ self.rc_list.append(TupleRC(rc, data, is_warn))
+
+ def __nonzero__ (self):
+ return self.good()
+
+ def __bool__ (self):
+ return self.good()
+
+ def add (self, rc):
+ self.rc_list += rc.rc_list
+
+ def good (self):
+ return all([x.rc for x in self.rc_list])
+
+ def bad (self):
+ return not self.good()
+
+ def warn (self):
+ return any([x.is_warn for x in self.rc_list])
+
+ def data (self):
+ d = [x.data if x.rc else "" for x in self.rc_list]
+ return (d if len(d) != 1 else d[0])
+
+ def err (self):
+ e = [x.data if not x.rc else "" for x in self.rc_list]
+ return (e if len(e) != 1 else e[0])
+
+ def __str__ (self):
+ s = ""
+ for x in self.rc_list:
+ if x.data:
+ s += format_text("\n{0}".format(x.data), 'bold')
+ return s
+
+ def __iter__(self):
+ return self.rc_list.__iter__()
+
+
+ def prn_func (self, msg, newline = True):
+ if newline:
+ print(msg)
+ else:
+ print(msg),
+
+ def annotate (self, log_func = None, desc = None, show_status = True):
+
+ if not log_func:
+ log_func = self.prn_func
+
+ if desc:
+ log_func(format_text('\n{:<60}'.format(desc), 'bold'), newline = False)
+ else:
+ log_func("")
+
+ if self.bad():
+ # print all the errors
+ print("")
+ for x in self.rc_list:
+ if not x.rc:
+ log_func(format_text("\n{0}".format(x.data), 'bold'))
+
+ print("")
+ if show_status:
+ log_func(format_text("[FAILED]\n", 'red', 'bold'))
+
+
+ else:
+ if show_status:
+ log_func(format_text("[SUCCESS]\n", 'green', 'bold'))
+
+
+def RC_OK(data = ""):
+ return RC(True, data)
+
+def RC_ERR (err):
+ return RC(False, err)
+
+def RC_WARN (warn):
+ return RC(True, warn, is_warn = True)
+
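+# A minimal sketch of composing return values (data values are illustrative):
+#
+#   rc = RC_OK({'x': 1})
+#   rc.add(RC_ERR('port 0 is down'))
+#   bool(rc)   # -> False, at least one bad result
+#   rc.err()   # -> ['', 'port 0 is down']
+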
+try:
+ long
+ long_exists = True
+except:
+ long_exists = False
+
+def is_integer(arg):
+ if type(arg) is int:
+ return True
+ if long_exists and type(arg) is long:
+ return True
+ return False
+
+# validate type of arg
+# example1: validate_type('somearg', somearg, [int, long])
+# example2: validate_type('another_arg', another_arg, str)
+def validate_type(arg_name, arg, valid_types):
+ if long_exists:
+ if valid_types is int:
+ valid_types = (int, long)
+ elif type(valid_types) is list and int in valid_types and long not in valid_types:
+ valid_types.append(long)
+ if type(valid_types) is list:
+ valid_types = tuple(valid_types)
+ if (type(valid_types) is type or # single type, not array of types
+ type(valid_types) is tuple or # several valid types as tuple
+        type(valid_types) is getattr(types, 'ClassType', type)): # old style class (Python 2 only; absent on Python 3)
+ if isinstance(arg, valid_types):
+ return
+ raise STLTypeError(arg_name, type(arg), valid_types)
+ else:
+ raise STLError('validate_type: valid_types should be type or list or tuple of types')
+
+# throws STLError if not exactly one argument is present
+def verify_exclusive_arg (args_list):
+ if not (len(list(filter(lambda x: x is not None, args_list))) == 1):
+ raise STLError('exactly one parameter from {0} should be provided'.format(args_list))
+
+def listify (x):
+ if isinstance(x, list):
+ return x
+ else:
+ return [x]
+
+# shows as 'N/A', but raises on comparison, so automation cannot mistake it for a real value
+class StatNotAvailable(str):
+ def __new__(cls, value, *args, **kwargs):
+ cls.stat_name = value
+ return super(StatNotAvailable, cls).__new__(cls, 'N/A')
+
+ def __cmp__(self, *args, **kwargs):
+ raise Exception("Stat '%s' not available at this setup" % self.stat_name)
+
+
+class LRU_cache(OrderedDict):
+ def __init__(self, maxlen = 20, *args, **kwargs):
+ OrderedDict.__init__(self, *args, **kwargs)
+ self.maxlen = maxlen
+
+ def __setitem__(self, *args, **kwargs):
+ OrderedDict.__setitem__(self, *args, **kwargs)
+ if len(self) > self.maxlen:
+ self.popitem(last = False)
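+
+# A minimal sketch: the oldest insertion is evicted once maxlen is exceeded.
+#
+#   cache = LRU_cache(maxlen = 2)
+#   cache['a'] = 1; cache['b'] = 2; cache['c'] = 3
+#   list(cache.keys())   # -> ['b', 'c']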
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
new file mode 100755
index 00000000..fe4fc893
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/GAObjClass.py
@@ -0,0 +1,297 @@
+try: # Python2
+ import Queue
+ from urllib2 import *
+except: # Python3
+ import queue as Queue
+ from urllib.request import *
+ from urllib.error import *
+import threading
+import sys
+from time import sleep
+from pprint import pprint
+"""
+GAObjClass is a class destined to send Google Analytics Information.
+
+cid - unique number per user.
+command - the Event Category field as it appears on the site - type: TEXT
+action - the Event Action field as it appears on the site - type: TEXT
+label - the Event Label field - type: TEXT
+value - the event value metric - type: INTEGER
+
+QUOTAS:
+a single payload - up to 8192 bytes
+batched:
+A maximum of 20 hits can be specified per request.
+The total size of all hit payloads cannot be greater than 16K bytes.
+No single hit payload can be greater than 8K bytes.
+"""
+url_single = 'https://www.google-analytics.com/collect' #sending single event
+url_batched = 'https://www.google-analytics.com/batch' #sending batched events
+url_debug = 'https://www.google-analytics.com/debug/collect' #verifying hit is valid
+url_conn = 'http://172.217.2.196' # testing internet connection to this address (google-analytics server)
+
+#..................................................................class GA_ObjClass................................................................
+class GA_ObjClass:
+ def __init__(self,cid,trackerID,appName,appVer):
+ self.cid = cid
+ self.trackerID = trackerID
+ self.appName = appName
+ self.appVer = appVer
+ self.payload = ''
+ self.payload = GA_ObjClass.generate_payload(self)
+ self.size = sys.getsizeof(self.payload)
+
+ def generate_payload(self):
+ self.payload+='v=1&t=event&tid='+str(self.trackerID)
+ self.payload+='&cid='+str(self.cid)
+ self.payload+='&an='+str(self.appName)
+ self.payload+='&av='+str(self.appVer)
+ return self.payload
+
+
+#..................................................................class GA_EVENT_ObjClass................................................................
+class GA_EVENT_ObjClass(GA_ObjClass):
+ def __init__(self,cid,trackerID,command,action,label,value,appName,appVer):
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
+ self.command = command
+ self.action = action
+ self.label = label
+ self.value = value
+ self.payload = self.generate_payload()
+ self.size = sys.getsizeof(self.payload)
+
+ def generate_payload(self):
+ self.payload+='&ec='+str(self.command)
+ self.payload+='&ea='+str(self.action)
+ self.payload+='&el='+str(self.label)
+ self.payload+='&ev='+str(self.value)
+ return self.payload
+
+#..................................................................class GA_EXCEPTION_ObjClass................................................................
+#ExceptionFatal - BOOLEAN
+class GA_EXCEPTION_ObjClass(GA_ObjClass):
+ def __init__(self,cid,trackerID,ExceptionName,ExceptionFatal,appName,appVer):
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
+ self.ExceptionName = ExceptionName
+ self.ExceptionFatal = ExceptionFatal
+ self.payload = self.generate_payload()
+
+ def generate_payload(self):
+ self.payload+='&exd='+str(self.ExceptionName)
+ self.payload+='&exf='+str(self.ExceptionFatal)
+ return self.payload
+
+
+
+#..................................................................class GA_TESTING_ObjClass................................................................
+class GA_TESTING_ObjClass(GA_ObjClass):
+ def __init__(self,cid,trackerID,TRexMode,TestName,SetupName,appName,ActionNumber,appVer,TestType,Mppspc,GoldenMin,GoldenMax):
+ GA_ObjClass.__init__(self,cid,trackerID,appName,appVer)
+ self.ActionNumber = ActionNumber
+ self.TRexMode = TRexMode
+ self.TestName = TestName
+ self.SetupName = SetupName
+ self.TestType = TestType
+ self.Mppspc = Mppspc
+ self.GoldenMin = GoldenMin
+ self.GoldenMax = GoldenMax
+ self.payload = self.generate_payload()
+ self.size = sys.getsizeof(self.payload)
+
+ def generate_payload(self):
+ self.payload+='&ec=TRexTests'
+ self.payload+='&ea='+str(self.ActionNumber)
+ self.payload+='&cd2='+str(self.TRexMode)
+ self.payload+='&cd1='+str(self.TestName)
+ self.payload+='&cd3='+str(self.SetupName)
+ self.payload+='&cd4='+str(self.TestType)
+ self.payload+='&cm1='+str(self.Mppspc)
+ self.payload+='&cm2='+str(self.GoldenMin)
+ self.payload+='&cm3='+str(self.GoldenMax)
+ return self.payload
+#.....................................................................class ga_Thread.................................................................
+"""
+
+Google analytics thread manager:
+
+will report and empty the queue of Google Analytics items to the GA server every Timeout seconds (parameter given on initialization)
+will perform a connectivity check every Timeout*10 seconds
+
+"""
+
+class ga_Thread (threading.Thread):
+ def __init__(self,threadID,gManager):
+ threading.Thread.__init__(self)
+ self.threadID = threadID
+ self.gManager = gManager
+ def run(self):
+ keepAliveCounter=0
+ #sys.stdout.write('thread started \n')
+ #sys.stdout.flush()
+ while True:
+ if (keepAliveCounter==10):
+ keepAliveCounter=0
+ if (self.gManager.internet_on()==True):
+ self.gManager.connectedToInternet=1
+ else:
+ self.gManager.connectedToInternet=0
+ sleep(self.gManager.Timeout)
+ keepAliveCounter+=1
+ if not self.gManager.GA_q.empty():
+ self.gManager.threadLock.acquire(1)
+# sys.stdout.write('lock acquired: reporting to GA \n')
+# sys.stdout.flush()
+ if (self.gManager.connectedToInternet==1):
+ self.gManager.emptyAndReportQ()
+ self.gManager.threadLock.release()
+# sys.stdout.write('finished \n')
+# sys.stdout.flush()
+#.....................................................................class GAmanager.................................................................
+"""
+
+GoogleID - the tracker property ID, example: UA-75220362-2 (the suffix '2' selects the analytics property profile)
+
+UserID - unique userID, this will differ between users on GA
+
+appName - a string describing the app name
+
+appVer - a string describing the app version
+
+QueueSize - the size of the queue that holds reported items. once the Queue is full:
+            in blocking mode:
+                will block the program until the next submission to the GA server frees space
+            in non-blocking mode:
+                will drop new requests
+
+Timeout - the timeout the queue uses between data transmissions. Timeout should be shorter than the time it takes to generate 20 events. MIN VALUE = 11 seconds
+
+UserPermission - the user must accept data transmission; a 1/0 flag, where UserPermission=1 allows data collection
+
+BlockingMode - set to 1 if every Google Analytics object must be submitted and processed, with no drops allowed;
+               this blocks the program until every item is processed
+
+*** Restriction - Google limits hits to 1 event per second per session; a session lasts 30 min ***
+"""
+
+class GAmanager:
+ def __init__(self,GoogleID,UserID,appName,appVer,QueueSize,Timeout,UserPermission,BlockingMode):
+ self.UserID = UserID
+ self.GoogleID = GoogleID
+ self.QueueSize = QueueSize
+ self.Timeout = Timeout
+ self.appName = appName
+ self.appVer = appVer
+ self.UserPermission = UserPermission
+ self.GA_q = Queue.Queue(QueueSize)
+ self.thread = ga_Thread(UserID,self)
+ self.threadLock = threading.Lock()
+ self.BlockingMode = BlockingMode
+ self.connectedToInternet =0
+ if (self.internet_on()==True):
+# sys.stdout.write('internet connection active \n')
+# sys.stdout.flush()
+ self.connectedToInternet=1
+ else:
+ self.connectedToInternet=0
+
+ def gaAddAction(self,Event,action,label,value):
+ self.gaAddObject(GA_EVENT_ObjClass(self.UserID,self.GoogleID,Event,action,label,value,self.appName,self.appVer))
+
+ def gaAddException(self,ExceptionName,ExceptionFatal):
+ self.gaAddObject(GA_EXCEPTION_ObjClass(self.UserID,self.GoogleID,ExceptionName,ExceptionFatal,self.appName,self.appVer))
+
+ def gaAddObject(self,Object):
+ if (self.BlockingMode==1):
+ while (self.GA_q.full()):
+ sleep(self.Timeout)
+# sys.stdout.write('blocking mode=1 \n queue full - sleeping for timeout \n') # within Timout, the thread will empty part of the queue
+# sys.stdout.flush()
+ lockState = self.threadLock.acquire(self.BlockingMode)
+ if (lockState==1):
+# sys.stdout.write('got lock, adding item \n')
+# sys.stdout.flush()
+ try:
+ self.GA_q.put_nowait(Object)
+# sys.stdout.write('got lock, item added \n')
+# sys.stdout.flush()
+ except Queue.Full:
+# sys.stdout.write('Queue full \n')
+# sys.stdout.flush()
+ pass
+ self.threadLock.release()
+
+ def emptyQueueToList(self,obj_list):
+ items=0
+ while ((not self.GA_q.empty()) and (items<20)):
+ obj_list.append(self.GA_q.get_nowait().payload)
+ items+=1
+# print items
+ return obj_list
+
+ def reportBatched(self,batched):
+ req = Request(url_batched, data=batched.encode('ascii'))
+ urlopen(req)
+# pprint(r.json())
+
+ def emptyAndReportQ(self):
+ obj_list = []
+ obj_list = self.emptyQueueToList(obj_list)
+ if (len(obj_list)==0):
+ return
+ batched = '\n'.join(obj_list)
+# print sys.getsizeof(batched)
+# print batched # - for debug
+ self.reportBatched(batched)
+
+ def printSelf(self):
+ print('remaining in queue:')
+ while not self.GA_q.empty():
+ obj = self.GA_q.get_nowait()
+ print(obj.payload)
+
+ def internet_on(self):
+ try:
+ urlopen(url_conn,timeout=10)
+ return True
+ except URLError as err: pass
+ return False
+
+ def activate(self):
+ if (self.UserPermission==1):
+ self.thread.start()
+
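+# A minimal usage sketch (tracker ID and values are illustrative):
+#
+#   ga = GAmanager(GoogleID = 'UA-75220362-2', UserID = 1, appName = 'TRex',
+#                  appVer = '1.0', QueueSize = 20, Timeout = 11,
+#                  UserPermission = 1, BlockingMode = 0)
+#   ga.gaAddAction(Event = 'stl', action = 'start', label = 'console', value = 1)
+#   ga.emptyAndReportQ()   # or ga.activate() to report from the background thread
+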
+
+#.....................................................................class GAmanager_Regression.................................................................
+"""
+ *-*-*-*-Google Analytics Regression Manager-*-*-*-*
+ attributes:
+GoogleID - the tracker ID that Google uses in order to track the activity of a property. for regression use: 'UA-75220362-4'
+AnalyticsUserID - text value - used by Google to distinguish between 2 users sending data (will not be presented on reports). use only to distinguish between different users
+TRexMode - text - will be presented on analysis. put here TRexMode
+appName - text - will be presented on analysis. put here appName as string describing app name
+appVer - text - will be presented on analysis. put here the appVer
+QueueSize - integer - determines the queue size. the queue will hold pending requests before submission. RECOMMENDED VALUE: 20
+Timeout - integer (seconds) - the timeout in seconds between automated reports when activating reporting thread
+UserPermission - boolean (1/0) - required in order to send packets, should be 1.
+BlockingMode - boolean (1/0) - required when each tracked event is critical and program should halt until the event is reported
+SetupName - text - will be presented on analysis. put here setup name as string.
+"""
+class GAmanager_Regression(GAmanager):
+ def __init__(self, GoogleID, AnalyticsUserID, appName, appVer,
+ QueueSize, Timeout, UserPermission, BlockingMode):
+ GAmanager.__init__(self, GoogleID, AnalyticsUserID, appName, appVer,
+ QueueSize, Timeout, UserPermission, BlockingMode)
+ self.GoogleID = GoogleID
+ self.AnalyticsUserID = AnalyticsUserID
+
+ def gaAddTestQuery(self, TestName, TRexMode, SetupName, ActionNumber, TestType, Mppspc, GoldenMin, GoldenMax):
+ self.gaAddObject(GA_TESTING_ObjClass(self.AnalyticsUserID, self.GoogleID, TRexMode, TestName, SetupName, self.appName, ActionNumber, self.appVer, TestType, Mppspc, GoldenMin, GoldenMax))
+
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/__init__.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/__init__.py
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py
new file mode 100644
index 00000000..72ee8972
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py
@@ -0,0 +1,88 @@
+import os
+import sys
+import string
+import random
+import time
+
+try:
+ import pwd
+except ImportError:
+ import getpass
+ pwd = None
+
+using_python_3 = (sys.version_info.major == 3)
+
+def get_current_user():
+ if pwd:
+ return pwd.getpwuid(os.geteuid()).pw_name
+ else:
+ return getpass.getuser()
+
+
+def user_input():
+ if using_python_3:
+ return input()
+ else:
+ # using python version 2
+ return raw_input()
+
+
+class random_id_gen:
+ """
+    Generator-like class for creating random character ids of a specified length
+
+ :parameters:
+ length : int
+ the desired length of the generated id
+
+ default: 8
+
+ :return:
+ a random id with each next() request.
+ """
+ def __init__(self, length=8):
+ self.id_chars = string.ascii_lowercase + string.digits
+ self.length = length
+
+ def next(self):
+ return ''.join(random.choice(self.id_chars) for _ in range(self.length))
+
+ __next__ = next
+
+
+# try to convert input to a number; return None on failure
+def get_number(input):
+    try:
+        return long(input) # Python 2 only; raises NameError on Python 3 and falls through
+ except:
+ try:
+ return int(input)
+ except:
+ return None
+
+def list_intersect(l1, l2):
+ return list(filter(lambda x: x in l2, l1))
+
+def list_difference (l1, l2):
+ return list(filter(lambda x: x not in l2, l1))
+
+def is_sub_list (l1, l2):
+ return set(l1) <= set(l2)
+
+# a simple passive timer
+class PassiveTimer(object):
+
+ # timeout_sec = None means forever
+ def __init__ (self, timeout_sec):
+ if timeout_sec != None:
+ self.expr_sec = time.time() + timeout_sec
+ else:
+ self.expr_sec = None
+
+ def has_expired (self):
+ # if no timeout was set - return always false
+ if self.expr_sec == None:
+ return False
+
+ return (time.time() > self.expr_sec)
+
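+# A minimal sketch of the helpers above (the generated id is illustrative):
+#
+#   gen = random_id_gen(length = 4)
+#   next(gen)               # e.g. 'x7f2'
+#   timer = PassiveTimer(5) # expires 5 seconds from now
+#   timer.has_expired()     # False until then; always False for PassiveTimer(None)
+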
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/constants.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/constants.py
new file mode 100755
index 00000000..a4942094
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/constants.py
@@ -0,0 +1,26 @@
+from collections import OrderedDict
+
+ON_OFF_DICT = OrderedDict([
+ ('on', True),
+ ('off', False),
+])
+
+UP_DOWN_DICT = OrderedDict([
+ ('up', True),
+ ('down', False),
+])
+
+FLOW_CTRL_DICT = OrderedDict([
+ ('none', 0), # Disable flow control
+ ('tx', 1), # Enable flowctrl on TX side (RX pause frames)
+ ('rx', 2), # Enable flowctrl on RX side (TX pause frames)
+ ('full', 3), # Enable flow control on both sides
+])
+
+
+
+# generate reverse dicts
+
+for var_name in list(vars().keys()):
+ if var_name.endswith('_DICT'):
+ exec('{0}_REVERSED = OrderedDict([(val, key) for key, val in {0}.items()])'.format(var_name))
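+
+# Sketch of the generated twins (derived from the exec above):
+#
+#   FLOW_CTRL_DICT_REVERSED[3]   # -> 'full'
+#   UP_DOWN_DICT_REVERSED[True]  # -> 'up'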
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py
new file mode 100644
index 00000000..714f7807
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/filters.py
@@ -0,0 +1,144 @@
+
+def shallow_copy(x):
+ return type(x)(x)
+
+
+class ToggleFilter(object):
+ """
+    This class provides a "sticky" filter that works by "toggling" items of the original database on and off.
+ """
+ def __init__(self, db_ref, show_by_default=True):
+ """
+ Instantiate a ToggleFilter object
+
+ :parameters:
+ db_ref : iterable
+                an iterable object (e.g. list, set, dict) that serves as the reference db of the instance.
+                Changes in that object will affect the output of the ToggleFilter instance.
+
+ show_by_default: bool
+                decide whether by default all the items are "on", i.e. they will be presented unless
+                explicitly toggled off.
+
+ default value : **True**
+
+ """
+ self._data = db_ref
+ self._toggle_db = set()
+ self._filter_method = filter
+ self.__set_initial_state(show_by_default)
+
+ def reset (self):
+ """
+ Toggles off all the items
+ """
+ self._toggle_db = set()
+
+
+ def toggle_item(self, item_key):
+ """
+ Toggle a single item in/out.
+
+ :parameters:
+ item_key :
+                the key of the item to be toggled in or out.
+ Example: int, str and so on.
+
+ :return:
+ + **True** if item toggled **into** the filtered items
+ + **False** if item toggled **out from** the filtered items
+
+ :raises:
+            + KeyError, if the item key is neither part of the toggled set nor part of the referenced db.
+
+ """
+ if item_key in self._toggle_db:
+ self._toggle_db.remove(item_key)
+ return False
+ elif item_key in self._data:
+ self._toggle_db.add(item_key)
+ return True
+ else:
+ raise KeyError("Provided item key isn't a key of the referenced data structure.")
+
+ def toggle_items(self, *args):
+ """
+        Toggle multiple items in/out with a single call. Each item is toggled the same way as in toggle_item().
+
+ :parameters:
+ args : iterable
+ an iterable object containing all item keys to be toggled in/out
+
+ :return:
+ + **True** if all toggled items were toggled **into** the filtered items
+ + **False** if at least one of the items was toggled **out from** the filtered items
+
+ :raises:
+            + KeyError, if one of the item keys is neither part of the toggled set nor part of the referenced db.
+
+ """
+ # in python 3, 'map' returns an iterator, so wrapping with 'list' call creates same effect for both python 2 and 3
+ return all(list(map(self.toggle_item, args)))
+
+ def filter_items(self):
+ """
+        Filters the referenced database, showing only the items present in the toggle_db set.
+
+ :returns:
+ Filtered data of the original object.
+
+ """
+ return self._filter_method(self.__toggle_filter, self._data)
+
+ # private methods
+
+ def __set_initial_state(self, show_by_default):
+ try:
+ _ = (x for x in self._data)
+ if isinstance(self._data, dict):
+ self._filter_method = ToggleFilter.dict_filter
+ if show_by_default:
+ self._toggle_db = set(self._data.keys())
+ return
+ elif isinstance(self._data, list):
+ self._filter_method = ToggleFilter.list_filter
+ elif isinstance(self._data, set):
+ self._filter_method = ToggleFilter.set_filter
+ elif isinstance(self._data, tuple):
+ self._filter_method = ToggleFilter.tuple_filter
+ if show_by_default:
+                self._toggle_db = set(shallow_copy(self._data)) # assumes items are hashable and unique
+ return
+ except TypeError:
+ raise TypeError("provided data object is not iterable")
+
+ def __toggle_filter(self, x):
+ return (x in self._toggle_db)
+
+ # static utility methods
+
+ @staticmethod
+ def dict_filter(function, iterable):
+ assert isinstance(iterable, dict)
+ return {k: v
+ for k,v in iterable.items()
+ if function(k)}
+
+ @staticmethod
+ def list_filter(function, iterable):
+ # in python 3, filter returns an iterator, so wrapping with list creates same effect for both python 2 and 3
+ return list(filter(function, iterable))
+
+ @staticmethod
+ def set_filter(function, iterable):
+ return {x
+ for x in iterable
+ if function(x)}
+
+ @staticmethod
+ def tuple_filter(function, iterable):
+ return tuple(filter(function, iterable))
+
+
+if __name__ == "__main__":
+ pass
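
A short walk-through of `ToggleFilter` over a hypothetical port table; not part of the patch, the data is made up:

```python
# ToggleFilter in action (hypothetical data).
from trex_stl_lib.utils.filters import ToggleFilter

db = {'port0': 'idle', 'port1': 'tx', 'port2': 'idle'}

f = ToggleFilter(db)                     # show_by_default=True: all items are "on"
assert f.filter_items() == db

assert f.toggle_item('port1') is False   # was on -> toggled out
assert 'port1' not in f.filter_items()

f.reset()                                # everything toggled off
assert f.filter_items() == {}

assert f.toggle_items('port0', 'port2')  # both toggled in -> True
assert sorted(f.filter_items()) == ['port0', 'port2']
```
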
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
new file mode 100755
index 00000000..7eda8635
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py
@@ -0,0 +1,596 @@
+import argparse
+from collections import namedtuple, OrderedDict
+from .common import list_intersect, list_difference
+from .text_opts import format_text
+from ..trex_stl_types import *
+from .constants import ON_OFF_DICT, UP_DOWN_DICT, FLOW_CTRL_DICT
+
+import sys
+import re
+import os
+
+ArgumentPack = namedtuple('ArgumentPack', ['name_or_flags', 'options'])
+ArgumentGroup = namedtuple('ArgumentGroup', ['type', 'args', 'options'])
+
+
+# list of available parsing options
+MULTIPLIER = 1
+MULTIPLIER_STRICT = 2
+PORT_LIST = 3
+ALL_PORTS = 4
+PORT_LIST_WITH_ALL = 5
+FILE_PATH = 6
+FILE_FROM_DB = 7
+SERVER_IP = 8
+STREAM_FROM_PATH_OR_FILE = 9
+DURATION = 10
+FORCE = 11
+DRY_RUN = 12
+XTERM = 13
+TOTAL = 14
+FULL_OUTPUT = 15
+IPG = 16
+SPEEDUP = 17
+COUNT = 18
+PROMISCUOUS = 19
+LINK_STATUS = 20
+LED_STATUS = 21
+TUNABLES = 22
+REMOTE_FILE = 23
+LOCKED = 24
+PIN_CORES = 25
+CORE_MASK = 26
+DUAL = 27
+FLOW_CTRL = 28
+SUPPORTED = 29
+
+GLOBAL_STATS = 50
+PORT_STATS = 51
+PORT_STATUS = 52
+STREAMS_STATS = 53
+STATS_MASK = 54
+CPU_STATS = 55
+MBUF_STATS = 56
+EXTENDED_STATS = 57
+EXTENDED_INC_ZERO_STATS = 58
+
+STREAMS_MASK = 60
+CORE_MASK_GROUP = 61
+
+# ALL_STREAMS = 61
+# STREAM_LIST_WITH_ALL = 62
+
+
+
+# list of ArgumentGroup types
+MUTEX = 1
+
+def check_negative(value):
+ ivalue = int(value)
+ if ivalue < 0:
+ raise argparse.ArgumentTypeError("non positive value provided: '{0}'".format(value))
+ return ivalue
+
+def match_time_unit(val):
+ '''match some val against time shortcut inputs '''
+ match = re.match("^(\d+(\.\d+)?)([m|h]?)$", val)
+ if match:
+ digit = float(match.group(1))
+ unit = match.group(3)
+ if not unit:
+ return digit
+ elif unit == 'm':
+ return digit*60
+ else:
+ return digit*60*60
+ else:
+ raise argparse.ArgumentTypeError("Duration should be passed in the following format: \n"
+ "-d 100 : in sec \n"
+ "-d 10m : in min \n"
+ "-d 1h : in hours")
+
+
+match_multiplier_help = """Multiplier should be passed in the following format:
+ [number][<empty> | bps | kbps | mbps | gbps | pps | kpps | mpps | %% ].
+
+    no suffix will provide an absolute factor, and a percentage
+    will provide a percentage of the line rate. Examples:
+
+ '-m 10',
+ '-m 10kbps',
+ '-m 10kbpsl1',
+ '-m 10mpps',
+ '-m 23%% '
+
+ '-m 23%%' : is 23%% L1 bandwidth
+ '-m 23mbps': is 23mbps in L2 bandwidth (including FCS+4)
+ '-m 23mbpsl1': is 23mbps in L1 bandwidth
+
+ """
+
+
+# decodes multiplier
+# if allow_update - a trailing +/- update operator is allowed
+# divide_count states between how many entities the
+# value should be divided
+def decode_multiplier(val, allow_update = False, divide_count = 1):
+
+ factor_table = {None: 1, 'k': 1e3, 'm': 1e6, 'g': 1e9}
+ pattern = "^(\d+(\.\d+)?)(((k|m|g)?(bpsl1|pps|bps))|%)?"
+
+ # do we allow updates ? +/-
+ if not allow_update:
+ pattern += "$"
+ match = re.match(pattern, val)
+ op = None
+ else:
+ pattern += "([\+\-])?$"
+ match = re.match(pattern, val)
+ if match:
+ op = match.group(7)
+ else:
+ op = None
+
+ result = {}
+
+ if not match:
+ return None
+
+ # value in group 1
+ value = float(match.group(1))
+
+ # decode unit as whole
+ unit = match.group(3)
+
+ # k,m,g
+ factor = match.group(5)
+
+ # type of multiplier
+ m_type = match.group(6)
+
+ # raw type (factor)
+ if not unit:
+ result['type'] = 'raw'
+ result['value'] = value
+
+ # percentage
+ elif unit == '%':
+ result['type'] = 'percentage'
+ result['value'] = value
+
+ elif m_type == 'bps':
+ result['type'] = 'bps'
+ result['value'] = value * factor_table[factor]
+
+ elif m_type == 'pps':
+ result['type'] = 'pps'
+ result['value'] = value * factor_table[factor]
+
+ elif m_type == 'bpsl1':
+ result['type'] = 'bpsl1'
+ result['value'] = value * factor_table[factor]
+
+
+ if op == "+":
+ result['op'] = "add"
+ elif op == "-":
+ result['op'] = "sub"
+ else:
+ result['op'] = "abs"
+
+    # percentage is relative to the line rate - don't divide it between entities
+    if result['type'] != 'percentage':
+        result['value'] = result['value'] / divide_count
+
+ return result
+
+
+
+def match_multiplier(val):
+ '''match some val against multiplier shortcut inputs '''
+ result = decode_multiplier(val, allow_update = True)
+ if not result:
+ raise argparse.ArgumentTypeError(match_multiplier_help)
+
+ return val
+
+
+def match_multiplier_strict(val):
+ '''match some val against multiplier shortcut inputs '''
+ result = decode_multiplier(val, allow_update = False)
+ if not result:
+ raise argparse.ArgumentTypeError(match_multiplier_help)
+
+ return val
+
+def hex_int (val):
+    # a positive hex number; leading zeros are allowed, plain zero is not
+    pattern = r"^0x0*[1-9a-fA-F][0-9a-fA-F]*$"
+
+ if not re.match(pattern, val):
+ raise argparse.ArgumentTypeError("{0} is not a valid positive HEX formatted number".format(val))
+
+ return int(val, 16)
+
+
+def is_valid_file(filename):
+ if not os.path.isfile(filename):
+ raise argparse.ArgumentTypeError("The file '%s' does not exist" % filename)
+
+ return filename
+
+
+
+def decode_tunables (tunable_str):
+ tunables = {}
+
+ # split by comma to tokens
+ tokens = tunable_str.split(',')
+
+ # each token is of form X=Y
+ for token in tokens:
+        m = re.search(r'(\S+)=(.+)', token)
+ if not m:
+ raise argparse.ArgumentTypeError("bad syntax for tunables: {0}".format(token))
+ val = m.group(2) # string
+ if val.startswith(("'", '"')) and val.endswith(("'", '"')) and len(val) > 1: # need to remove the quotes from value
+ val = val[1:-1]
+ elif val.startswith('0x'): # hex
+ val = int(val, 16)
+ else:
+ try:
+ if '.' in val: # float
+ val = float(val)
+ else: # int
+ val = int(val)
+ except:
+ pass
+ tunables[m.group(1)] = val
+
+ return tunables
+
+
+
+OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'],
+ {'help': match_multiplier_help,
+ 'dest': "mult",
+ 'default': "1",
+ 'type': match_multiplier}),
+
+ MULTIPLIER_STRICT: ArgumentPack(['-m', '--multiplier'],
+ {'help': match_multiplier_help,
+ 'dest': "mult",
+ 'default': "1",
+ 'type': match_multiplier_strict}),
+
+ TOTAL: ArgumentPack(['-t', '--total'],
+ {'help': "traffic will be divided between all ports specified",
+ 'dest': "total",
+ 'default': False,
+ 'action': "store_true"}),
+
+ IPG: ArgumentPack(['-i', '--ipg'],
+                                {'help': "IPG value in usec between packets. Default is taken from the pcap",
+ 'dest': "ipg_usec",
+ 'default': None,
+ 'type': float}),
+
+
+ SPEEDUP: ArgumentPack(['-s', '--speedup'],
+                                    {'help': "Factor to accelerate the injection. Effectively means IPG = IPG / SPEEDUP",
+ 'dest': "speedup",
+ 'default': 1.0,
+ 'type': float}),
+
+ COUNT: ArgumentPack(['-n', '--count'],
+ {'help': "How many times to perform action [default is 1, 0 means forever]",
+ 'dest': "count",
+ 'default': 1,
+ 'type': int}),
+
+ PROMISCUOUS: ArgumentPack(['--prom'],
+ {'help': "Set port promiscuous on/off",
+ 'choices': ON_OFF_DICT}),
+
+ LINK_STATUS: ArgumentPack(['--link'],
+ {'help': 'Set link status up/down',
+ 'choices': UP_DOWN_DICT}),
+
+ LED_STATUS: ArgumentPack(['--led'],
+ {'help': 'Set LED status on/off',
+ 'choices': ON_OFF_DICT}),
+
+ FLOW_CTRL: ArgumentPack(['--fc'],
+ {'help': 'Set Flow Control type',
+ 'dest': 'flow_ctrl',
+ 'choices': FLOW_CTRL_DICT}),
+
+ SUPPORTED: ArgumentPack(['--supp'],
+ {'help': 'Show which attributes are supported by current NICs',
+ 'default': None,
+ 'action': 'store_true'}),
+
+ TUNABLES: ArgumentPack(['-t'],
+ {'help': "Sets tunables for a profile. Example: '-t fsize=100,pg_id=7'",
+ 'metavar': 'T1=VAL[,T2=VAL ...]',
+ 'dest': "tunables",
+ 'default': None,
+ 'action': 'merge',
+ 'type': decode_tunables}),
+
+ PORT_LIST: ArgumentPack(['--port', '-p'],
+ {"nargs": '+',
+ 'dest':'ports',
+ 'metavar': 'PORTS',
+ 'action': 'merge',
+ 'type': int,
+ 'help': "A list of ports on which to apply the command",
+ 'default': []}),
+
+ ALL_PORTS: ArgumentPack(['-a'],
+ {"action": "store_true",
+ "dest": "all_ports",
+ 'help': "Set this flag to apply the command on all available ports",
+ 'default': False},),
+
+ DURATION: ArgumentPack(['-d'],
+ {'action': "store",
+ 'metavar': 'TIME',
+ 'dest': 'duration',
+ 'type': match_time_unit,
+ 'default': -1.0,
+ 'help': "Set duration time for job."}),
+
+ FORCE: ArgumentPack(['--force'],
+ {"action": "store_true",
+ 'default': False,
+                                   'help': "Set if you want to stop active ports before applying the command."}),
+
+ REMOTE_FILE: ArgumentPack(['-r', '--remote'],
+ {"action": "store_true",
+ 'default': False,
+                                         'help': "file path will be interpreted by the server (remote file)"}),
+
+ DUAL: ArgumentPack(['--dual'],
+ {"action": "store_true",
+ 'default': False,
+ 'help': "Transmit in a dual mode - requires ownership on the adjacent port"}),
+
+ FILE_PATH: ArgumentPack(['-f'],
+ {'metavar': 'FILE',
+ 'dest': 'file',
+ 'nargs': 1,
+ 'required': True,
+ 'type': is_valid_file,
+ 'help': "File path to load"}),
+
+ FILE_FROM_DB: ArgumentPack(['--db'],
+ {'metavar': 'LOADED_STREAM_PACK',
+ 'help': "A stream pack which already loaded into console cache."}),
+
+ SERVER_IP: ArgumentPack(['--server'],
+ {'metavar': 'SERVER',
+ 'help': "server IP"}),
+
+ DRY_RUN: ArgumentPack(['-n', '--dry'],
+ {'action': 'store_true',
+ 'dest': 'dry',
+ 'default': False,
+ 'help': "Dry run - no traffic will be injected"}),
+
+ XTERM: ArgumentPack(['-x', '--xterm'],
+ {'action': 'store_true',
+ 'dest': 'xterm',
+ 'default': False,
+ 'help': "Starts TUI in xterm window"}),
+
+ LOCKED: ArgumentPack(['-l', '--locked'],
+ {'action': 'store_true',
+ 'dest': 'locked',
+ 'default': False,
+ 'help': "Locks TUI on legend mode"}),
+
+ FULL_OUTPUT: ArgumentPack(['--full'],
+ {'action': 'store_true',
+ 'help': "Prompt full info in a JSON format"}),
+
+ GLOBAL_STATS: ArgumentPack(['-g'],
+ {'action': 'store_true',
+ 'help': "Fetch only global statistics"}),
+
+ PORT_STATS: ArgumentPack(['-p'],
+ {'action': 'store_true',
+ 'help': "Fetch only port statistics"}),
+
+ PORT_STATUS: ArgumentPack(['--ps'],
+ {'action': 'store_true',
+ 'help': "Fetch only port status data"}),
+
+ STREAMS_STATS: ArgumentPack(['-s'],
+ {'action': 'store_true',
+ 'help': "Fetch only streams stats"}),
+
+ CPU_STATS: ArgumentPack(['-c'],
+ {'action': 'store_true',
+ 'help': "Fetch only CPU utilization stats"}),
+
+ MBUF_STATS: ArgumentPack(['-m'],
+ {'action': 'store_true',
+ 'help': "Fetch only MBUF utilization stats"}),
+
+ EXTENDED_STATS: ArgumentPack(['-x'],
+ {'action': 'store_true',
+ 'help': "Fetch xstats of port, excluding lines with zero values"}),
+
+ EXTENDED_INC_ZERO_STATS: ArgumentPack(['--xz'],
+ {'action': 'store_true',
+ 'help': "Fetch xstats of port, including lines with zero values"}),
+
+ STREAMS_MASK: ArgumentPack(['--streams'],
+ {"nargs": '+',
+ 'dest':'streams',
+ 'metavar': 'STREAMS',
+ 'type': int,
+ 'help': "A list of stream IDs to query about. Default: analyze all streams",
+ 'default': []}),
+
+
+ PIN_CORES: ArgumentPack(['--pin'],
+ {'action': 'store_true',
+ 'dest': 'pin_cores',
+ 'default': False,
+                                       'help': "Pin cores to interfaces - cores will be divided between interfaces (performance boost for symmetric profiles)"}),
+
+ CORE_MASK: ArgumentPack(['--core_mask'],
+ {'action': 'store',
+ 'nargs': '+',
+ 'type': hex_int,
+ 'dest': 'core_mask',
+ 'default': None,
+                                       'help': "Core mask - only cores matching the bit mask will be active"}),
+
+ # advanced options
+ PORT_LIST_WITH_ALL: ArgumentGroup(MUTEX, [PORT_LIST,
+ ALL_PORTS],
+ {'required': False}),
+
+ STREAM_FROM_PATH_OR_FILE: ArgumentGroup(MUTEX, [FILE_PATH,
+ FILE_FROM_DB],
+ {'required': True}),
+ STATS_MASK: ArgumentGroup(MUTEX, [GLOBAL_STATS,
+ PORT_STATS,
+ PORT_STATUS,
+ STREAMS_STATS,
+ CPU_STATS,
+ MBUF_STATS,
+ EXTENDED_STATS,
+ EXTENDED_INC_ZERO_STATS,],
+ {}),
+
+
+ CORE_MASK_GROUP: ArgumentGroup(MUTEX, [PIN_CORES,
+ CORE_MASK],
+ {'required': False}),
+
+ }
+
+class _MergeAction(argparse._AppendAction):
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = getattr(namespace, self.dest)
+ if not items:
+ items = values
+ elif type(items) is list and type(values) is list:
+ items.extend(values)
+ elif type(items) is dict and type(values) is dict: # tunables are dict
+ items.update(values)
+ else:
+ raise Exception("Argparser 'merge' option should be used on dict or list.")
+
+ setattr(namespace, self.dest, items)
+
+class CCmdArgParser(argparse.ArgumentParser):
+
+ def __init__(self, stateless_client, *args, **kwargs):
+ super(CCmdArgParser, self).__init__(*args, **kwargs)
+ self.stateless_client = stateless_client
+ self.cmd_name = kwargs.get('prog')
+ self.register('action', 'merge', _MergeAction)
+
+ # hook this to the logger
+ def _print_message(self, message, file=None):
+ self.stateless_client.logger.log(message)
+
+ def error(self, message):
+ self.print_usage()
+ self._print_message(('%s: error: %s\n') % (self.prog, message))
+ raise ValueError(message)
+
+ def has_ports_cfg (self, opts):
+ return hasattr(opts, "all_ports") or hasattr(opts, "ports")
+
+ def parse_args(self, args=None, namespace=None, default_ports=None, verify_acquired=False):
+ try:
+ opts = super(CCmdArgParser, self).parse_args(args, namespace)
+ if opts is None:
+ return RC_ERR("'{0}' - invalid arguments".format(self.cmd_name))
+
+ if not self.has_ports_cfg(opts):
+ return opts
+
+            # if all ports were marked, or no ports were specified - use the defaults
+            if (getattr(opts, "all_ports", None) is True) or (getattr(opts, "ports", None) == []):
+ if default_ports is None:
+ opts.ports = self.stateless_client.get_acquired_ports()
+ else:
+ opts.ports = default_ports
+
+            # validate the configured port IDs
+ invalid_ports = list_difference(opts.ports, self.stateless_client.get_all_ports())
+ if invalid_ports:
+ msg = "{0}: port(s) {1} are not valid port IDs".format(self.cmd_name, invalid_ports)
+ self.stateless_client.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+ # verify acquired ports
+ if verify_acquired:
+ acquired_ports = self.stateless_client.get_acquired_ports()
+
+ diff = list_difference(opts.ports, acquired_ports)
+ if diff:
+ msg = "{0} - port(s) {1} are not acquired".format(self.cmd_name, diff)
+ self.stateless_client.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+                # no acquired ports at all
+ if not acquired_ports:
+ msg = "{0} - no acquired ports".format(self.cmd_name)
+ self.stateless_client.logger.log(format_text(msg, 'bold'))
+ return RC_ERR(msg)
+
+
+ return opts
+
+ except ValueError as e:
+ return RC_ERR("'{0}' - {1}".format(self.cmd_name, str(e)))
+
+ except SystemExit:
+ # recover from system exit scenarios, such as "help", or bad arguments.
+ return RC_ERR("'{0}' - {1}".format(self.cmd_name, "no action"))
+
+
+def get_flags (opt):
+ return OPTIONS_DB[opt].name_or_flags
+
+def gen_parser(stateless_client, op_name, description, *args):
+ parser = CCmdArgParser(stateless_client, prog=op_name, conflict_handler='resolve',
+ description=description)
+ for param in args:
+ try:
+
+ if isinstance(param, int):
+ argument = OPTIONS_DB[param]
+ else:
+ argument = param
+
+ if isinstance(argument, ArgumentGroup):
+ if argument.type == MUTEX:
+ # handle as mutually exclusive group
+ group = parser.add_mutually_exclusive_group(**argument.options)
+ for sub_argument in argument.args:
+ group.add_argument(*OPTIONS_DB[sub_argument].name_or_flags,
+ **OPTIONS_DB[sub_argument].options)
+ else:
+ # ignore invalid objects
+ continue
+ elif isinstance(argument, ArgumentPack):
+ parser.add_argument(*argument.name_or_flags,
+ **argument.options)
+ else:
+ # ignore invalid objects
+ continue
+ except KeyError as e:
+ cause = e.args[0]
+ raise KeyError("The attribute '{0}' is missing as a field of the {1} option.\n".format(cause, param))
+ return parser
+
+
+if __name__ == "__main__":
+ pass
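
The two decoders above are pure functions and can be sanity-checked in isolation; `gen_parser` itself needs a live stateless client for logging, so it is only hinted at in comments. A sketch with illustrative inputs (not part of the patch):

```python
# Sanity checks for the option decoders (illustrative inputs).
from trex_stl_lib.utils.parsing_opts import decode_multiplier, decode_tunables

assert decode_multiplier('10kpps') == {'type': 'pps', 'value': 10000.0, 'op': 'abs'}

# '+' / '-' update operators are only accepted with allow_update
assert decode_multiplier('25%+', allow_update=True) == \
       {'type': 'percentage', 'value': 25.0, 'op': 'add'}

assert decode_tunables('fsize=100,name="imix",ratio=0.5') == \
       {'fsize': 100, 'name': 'imix', 'ratio': 0.5}

# gen_parser wires OPTIONS_DB entries into argparse; it needs a client, e.g.:
#   parser = gen_parser(client, "portattr", "port attributes",
#                       PORT_LIST_WITH_ALL, LINK_STATUS)
#   opts = parser.parse_args(['--link', 'up', '-p', '0'])
```
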
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py
new file mode 100644
index 00000000..ab4f98a7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/pcap.py
@@ -0,0 +1,29 @@
+import os
+from ..trex_stl_packet_builder_scapy import RawPcapReader, RawPcapWriter
+
+
+def __ts_key (a):
+ return float(a[1][0]) + (float(a[1][1]) / 1e6)
+
+def merge_cap_files (pcap_file_list, out_filename, delete_src = False):
+
+ if not all([os.path.exists(f) for f in pcap_file_list]):
+ print("failed to merge cap file list...\nnot all files exist\n")
+ return
+
+ out_pkts = []
+ for src in pcap_file_list:
+ pkts = RawPcapReader(src)
+ out_pkts += pkts
+ if delete_src:
+ os.unlink(src)
+
+ # sort by timestamp
+ out_pkts = sorted(out_pkts, key = __ts_key)
+
+ writer = RawPcapWriter(out_filename, linktype = 1)
+
+ writer._write_header(None)
+ for pkt in out_pkts:
+ writer._write_packet(pkt[0], sec=pkt[1][0], usec=pkt[1][1], caplen=pkt[1][2], wirelen=None)
+
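
A hypothetical invocation of the merger above; the capture file names are placeholders and must exist on disk:

```python
# Merge two per-port captures into one time-ordered pcap (paths are examples).
from trex_stl_lib.utils.pcap import merge_cap_files

merge_cap_files(['port0_rx.pcap', 'port1_rx.pcap'],
                out_filename='merged.pcap',
                delete_src=False)        # keep the source captures
```
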
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py
new file mode 100644
index 00000000..bfb96950
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py
@@ -0,0 +1,195 @@
+import json
+import re
+
+TEXT_CODES = {'bold': {'start': '\x1b[1m',
+ 'end': '\x1b[22m'},
+ 'cyan': {'start': '\x1b[36m',
+ 'end': '\x1b[39m'},
+ 'blue': {'start': '\x1b[34m',
+ 'end': '\x1b[39m'},
+ 'red': {'start': '\x1b[31m',
+ 'end': '\x1b[39m'},
+ 'magenta': {'start': '\x1b[35m',
+ 'end': '\x1b[39m'},
+ 'green': {'start': '\x1b[32m',
+ 'end': '\x1b[39m'},
+ 'yellow': {'start': '\x1b[33m',
+ 'end': '\x1b[39m'},
+ 'underline': {'start': '\x1b[4m',
+ 'end': '\x1b[24m'}}
+
+class TextCodesStripper:
+ keys = [re.escape(v['start']) for k,v in TEXT_CODES.items()]
+ keys += [re.escape(v['end']) for k,v in TEXT_CODES.items()]
+ pattern = re.compile("|".join(keys))
+
+ @staticmethod
+ def strip (s):
+ return re.sub(TextCodesStripper.pattern, '', s)
+
+def format_num (size, suffix = "", compact = True, opts = None):
+ if opts is None:
+ opts = ()
+
+ txt = "NaN"
+
+    if isinstance(size, str):
+ return "N/A"
+
+ u = ''
+
+ if compact:
+ for unit in ['','K','M','G','T','P']:
+ if abs(size) < 1000.0:
+ u = unit
+ break
+ size /= 1000.0
+
+ if isinstance(size, float):
+ txt = "%3.2f" % (size)
+ else:
+ txt = "{:,}".format(size)
+
+ if u or suffix:
+ txt += " {:}{:}".format(u, suffix)
+
+ if isinstance(opts, tuple):
+ return format_text(txt, *opts)
+ else:
+        return format_text(txt, opts)
+
+
+
+def format_time (t_sec):
+ if t_sec < 0:
+ return "infinite"
+
+ if t_sec == 0:
+ return "zero"
+
+ if t_sec < 1:
+ # low numbers
+ for unit in ['ms', 'usec', 'ns']:
+ t_sec *= 1000.0
+ if t_sec >= 1.0:
+ return '{:,.2f} [{:}]'.format(t_sec, unit)
+
+ return "NaN"
+
+ else:
+ # seconds
+ if t_sec < 60.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'sec')
+
+ # minutes
+ t_sec /= 60.0
+ if t_sec < 60.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'minutes')
+
+ # hours
+ t_sec /= 60.0
+ if t_sec < 24.0:
+ return '{:,.2f} [{:}]'.format(t_sec, 'hours')
+
+ # days
+ t_sec /= 24.0
+ return '{:,.2f} [{:}]'.format(t_sec, 'days')
+
+
+def format_percentage (size):
+ return "%0.2f %%" % (size)
+
+def bold(text):
+ return text_attribute(text, 'bold')
+
+
+def cyan(text):
+ return text_attribute(text, 'cyan')
+
+
+def blue(text):
+ return text_attribute(text, 'blue')
+
+
+def red(text):
+ return text_attribute(text, 'red')
+
+
+def magenta(text):
+ return text_attribute(text, 'magenta')
+
+
+def green(text):
+ return text_attribute(text, 'green')
+
+def yellow(text):
+ return text_attribute(text, 'yellow')
+
+def underline(text):
+ return text_attribute(text, 'underline')
+
+
+def text_attribute(text, attribute):
+ return "{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
+ txt=text,
+ stop=TEXT_CODES[attribute]['end'])
+
+
+FUNC_DICT = {'blue': blue,
+ 'bold': bold,
+ 'green': green,
+ 'yellow': yellow,
+ 'cyan': cyan,
+ 'magenta': magenta,
+ 'underline': underline,
+ 'red': red}
+
+
+def format_text(text, *args):
+ return_string = text
+ for i in args:
+ func = FUNC_DICT.get(i)
+ if func:
+ return_string = func(return_string)
+
+ return return_string
+
+
+def format_threshold (value, red_zone, green_zone):
+ try:
+        if red_zone[0] <= value <= red_zone[1]:
+            return format_text("{0}".format(value), 'red')
+
+        if green_zone[0] <= value <= green_zone[1]:
+ return format_text("{0}".format(value), 'green')
+ except TypeError:
+ # if value is not comparable or not a number - skip this
+ pass
+
+ return "{0}".format(value)
+
+# pretty print for JSON
+def pretty_json (json_str, use_colors = True):
+ pretty_str = json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True)
+
+ if not use_colors:
+ return pretty_str
+
+ try:
+ # int numbers
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*[^.])',r'\1{0}'.format(blue(r'\2')), pretty_str)
+ # float
+ pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*\.[0-9]+)',r'\1{0}'.format(magenta(r'\2')), pretty_str)
+    # strings
+ pretty_str = re.sub(r'([ ]*:[ ]+)("[^"]*")',r'\1{0}'.format(red(r'\2')), pretty_str)
+ pretty_str = re.sub(r"('[^']*')", r'{0}\1{1}'.format(TEXT_CODES['magenta']['start'],
+ TEXT_CODES['red']['start']), pretty_str)
+    except Exception:
+ pass
+
+ return pretty_str
+
+
+if __name__ == "__main__":
+ pass
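
A quick demo of the formatting helpers above, with values chosen for illustration (not part of the patch):

```python
# text_opts helpers round-trip (illustrative values).
from trex_stl_lib.utils.text_opts import (format_text, format_num,
                                          format_time, TextCodesStripper)

s = format_text('TX rate', 'cyan', 'underline')   # stack several attributes
assert TextCodesStripper.strip(s) == 'TX rate'    # stripper removes the codes

assert format_num(12500000, suffix='pps') == '12.50 Mpps'
assert format_time(90) == '1.50 [minutes]'
assert format_time(-1) == 'infinite'
```
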
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
new file mode 100644
index 00000000..393ba111
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
@@ -0,0 +1,35 @@
+from __future__ import print_function
+
+import sys
+from texttable import Texttable
+from .text_opts import format_text
+
+
+class TRexTextTable(Texttable):
+
+ def __init__(self):
+ Texttable.__init__(self)
+ # set class attributes so that it'll be more like TRex standard output
+ self.set_chars(['-', '|', '-', '-'])
+ self.set_deco(Texttable.HEADER | Texttable.VLINES)
+
+class TRexTextInfo(Texttable):
+
+ def __init__(self):
+ Texttable.__init__(self)
+ # set class attributes so that it'll be more like TRex standard output
+ self.set_chars(['-', ':', '-', '-'])
+ self.set_deco(Texttable.VLINES)
+
+def generate_trex_stats_table():
+ pass
+
+def print_table_with_header(texttable_obj, header="", untouched_header="", buffer=sys.stdout):
+ header = header.replace("_", " ").title() + untouched_header
+ print(format_text(header, 'cyan', 'underline') + "\n", file=buffer)
+
+ print((texttable_obj.draw() + "\n"), file=buffer)
+
+if __name__ == "__main__":
+ pass
+
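
A minimal table built the TRex way; the row values below are made up for illustration:

```python
# Rendering a small stats-style table (illustrative data).
from trex_stl_lib.utils.text_tables import TRexTextTable, print_table_with_header

table = TRexTextTable()
table.header(['port', 'tx pps', 'rx pps'])
table.add_rows([[0, '10.0 Kpps', '9.8 Kpps'],
                [1, '12.5 Kpps', '12.5 Kpps']],
               header=False)
print_table_with_header(table, header="port_stats")   # title prints as "Port Stats"
```
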
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py
new file mode 100644
index 00000000..397ada16
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py
@@ -0,0 +1,32 @@
+import zlib
+import struct
+
+class ZippedMsg:
+
+ MSG_COMPRESS_THRESHOLD = 256
+ MSG_COMPRESS_HEADER_MAGIC = 0xABE85CEA
+
+ def check_threshold (self, msg):
+ return len(msg) >= self.MSG_COMPRESS_THRESHOLD
+
+ def compress (self, msg):
+ # compress
+ compressed = zlib.compress(msg)
+ new_msg = struct.pack(">II", self.MSG_COMPRESS_HEADER_MAGIC, len(msg)) + compressed
+ return new_msg
+
+
+ def decompress (self, msg):
+ if len(msg) < 8:
+ return None
+
+ t = struct.unpack(">II", msg[:8])
+ if (t[0] != self.MSG_COMPRESS_HEADER_MAGIC):
+ return None
+
+ x = zlib.decompress(msg[8:])
+ if len(x) != t[1]:
+ return None
+
+ return x
+
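
A round-trip sketch for `ZippedMsg`; the payload is arbitrary example data, not part of the patch:

```python
# Compress / decompress round trip (payload is an example).
from trex_stl_lib.utils.zipmsg import ZippedMsg

zipper = ZippedMsg()
msg = b'x' * 512                        # above MSG_COMPRESS_THRESHOLD

if zipper.check_threshold(msg):         # compress only large messages
    wire = zipper.compress(msg)         # 8-byte header (magic + length) + zlib data
    assert zipper.decompress(wire) == msg

assert zipper.decompress(b'malformed') is None   # bad magic -> None
```
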