Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/automation/sshpass.exp | 4
-rwxr-xr-x  scripts/automation/trex_control_plane/client/trex_client.py | 16
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/client/trex_stateless_client.py | 462
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/client_utils/external_packages.py (renamed from scripts/automation/trex_control_plane/client_utils/outer_packages.py) | 1
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py | 238
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/client_utils/packet_builder.py | 524
-rwxr-xr-x  scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py | 2
-rw-r--r--  scripts/automation/trex_control_plane/common/outer_packages.py | 30
-rwxr-xr-x  scripts/automation/trex_control_plane/common/trex_exceptions.py | 34
-rwxr-xr-x  scripts/automation/trex_control_plane/common/trex_stats.py | 60
-rwxr-xr-x  scripts/automation/trex_control_plane/common/trex_status_e.py | 2
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/about_trex.rst | 13
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/api/index.rst | 19
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/api/json_fields.rst | 464
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/authors.rst | 13
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/client_utils.rst | 26
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/index.rst | 24
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/installation.rst | 25
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/json_dictionary.yaml | 6
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/license.rst | 18
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/packet_generator/index.rst | 18
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/packet_generator/packet_builder_code.rst | 12
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst | 29
-rw-r--r--  scripts/automation/trex_control_plane/examples/interactive_stateless.py | 128
-rwxr-xr-x  scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py | 20
-rwxr-xr-x  scripts/automation/trex_control_plane/server/extended_daemon_runner.py | 4
-rwxr-xr-x  scripts/automation/trex_control_plane/server/trex_daemon_server.py | 2
-rwxr-xr-x  scripts/automation/trex_control_plane/server/trex_launch_thread.py | 24
-rwxr-xr-x  scripts/automation/trex_control_plane/server/trex_server.py | 102
-rwxr-xr-x  scripts/automation/trex_control_plane/server/zmq_monitor_thread.py | 4
30 files changed, 1555 insertions, 769 deletions
diff --git a/scripts/automation/sshpass.exp b/scripts/automation/sshpass.exp
index f27210c8..3b5ce560 100755
--- a/scripts/automation/sshpass.exp
+++ b/scripts/automation/sshpass.exp
@@ -12,4 +12,6 @@ match_max 100000
expect "*?assword:*"
send -- "$pass\r"
send -- "\r"
-interact
+expect eof
+wait
+#interact
diff --git a/scripts/automation/trex_control_plane/client/trex_client.py b/scripts/automation/trex_control_plane/client/trex_client.py
index 4c40f142..607251fe 100755
--- a/scripts/automation/trex_control_plane/client/trex_client.py
+++ b/scripts/automation/trex_control_plane/client/trex_client.py
@@ -69,7 +69,7 @@ class CTRexClient(object):
self.result_obj = CTRexResult(max_history_size)
self.decoder = JSONDecoder()
self.trex_server_path = "http://{hostname}:{port}/".format( hostname = trex_host, port = trex_daemon_port )
- self.__verbose_print("Connecting to T-Rex @ {trex_path} ...".format( trex_path = self.trex_server_path ) )
+ self.__verbose_print("Connecting to TRex @ {trex_path} ...".format( trex_path = self.trex_server_path ) )
self.history = jsonrpclib.history.History()
self.server = jsonrpclib.Server(self.trex_server_path, history = self.history)
self.check_server_connectivity()
@@ -128,7 +128,7 @@ class CTRexClient(object):
if d < 30: # specify a test should take at least 30 seconds long.
raise ValueError
except ValueError:
- raise ValueError('d parameter must be integer, specifying how long T-Rex run, and must be larger than 30 secs.')
+ raise ValueError('d parameter must be integer, specifying how long TRex run, and must be larger than 30 secs.')
trex_cmd_options.update( {'f' : f, 'd' : d} )
@@ -147,7 +147,7 @@ class CTRexClient(object):
self.seq = retval # update seq num only on successful submission
return True
else: # TRex has been started by another user
- raise TRexInUseError('T-Rex is already being used by another user or process. Try again once T-Rex is back in IDLE state.')
+ raise TRexInUseError('TRex is already being used by another user or process. Try again once TRex is back in IDLE state.')
def stop_trex (self):
"""
@@ -199,7 +199,7 @@ class CTRexClient(object):
"""
if confirm:
- prompt = "WARNING: This will terminate active T-Rex session indiscriminately.\nAre you sure? "
+ prompt = "WARNING: This will terminate active TRex session indiscriminately.\nAre you sure? "
sys.stdout.write('%s [y/n]\n' % prompt)
while True:
try:
@@ -416,7 +416,7 @@ class CTRexClient(object):
time.sleep(time_between_samples)
except TRexWarning:
# means we're back to Idle state, and didn't meet our condition
- raise UserWarning("T-Rex results condition wasn't met during T-Rex run.")
+ raise UserWarning("TRex results condition wasn't met during TRex run.")
except Exception:
# this could come from provided method 'condition_func'
raise
@@ -627,7 +627,7 @@ class CTRexClient(object):
return method_to_call()
except socket.error as e:
if e.errno == errno.ECONNREFUSED:
- raise SocketError(errno.ECONNREFUSED, "Connection from T-Rex server was refused. Please make sure the server is up.")
+ raise SocketError(errno.ECONNREFUSED, "Connection from TRex server was refused. Please make sure the server is up.")
def check_server_connectivity (self):
"""
@@ -640,7 +640,7 @@ class CTRexClient(object):
raise socket.gaierror(e.errno, "Could not resolve server hostname. Please make sure hostname entered correctly.")
except socket.error as e:
if e.errno == errno.ECONNREFUSED:
- raise socket.error(errno.ECONNREFUSED, "Connection from T-Rex server was refused. Please make sure the server is up.")
+ raise socket.error(errno.ECONNREFUSED, "Connection from TRex server was refused. Please make sure the server is up.")
finally:
self.prompt_verbose_data()
@@ -856,7 +856,7 @@ class CTRexResult(object):
defines a path to desired data.
.. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
- | Use '[i]' to access the i'th indexed obejct of an array.
+ | Use '[i]' to access the i'th indexed object of an array.
tree_path_to_key : regex
apply a regex to filter results out from a multiple results set.
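The tree-path syntax documented above ('.' descends one level, '[i]' indexes into a list) can be illustrated with a small, self-contained sketch. The helper and the sample keys below are hypothetical and are not part of CTRexResult; they only show how such a path string maps onto nested dictionaries and lists.

    import re

    def get_value_by_path(obj, tree_path):
        # walk the nested structure one dot-separated token at a time;
        # a trailing "[i]" on a token selects the i'th element of a list
        for token in tree_path.split('.'):
            m = re.match(r"(.+)\[(\d+)\]$", token)
            if m:
                obj = obj[m.group(1)][int(m.group(2))]
            else:
                obj = obj[token]
        return obj

    sample = {"trex-global": {"data": {"m_tx_bps": [100.0, 200.0]}}}
    assert get_value_by_path(sample, "trex-global.data.m_tx_bps[1]") == 200.0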
diff --git a/scripts/automation/trex_control_plane/client/trex_stateless_client.py b/scripts/automation/trex_control_plane/client/trex_stateless_client.py
index 5513f420..334496d1 100644..100755
--- a/scripts/automation/trex_control_plane/client/trex_stateless_client.py
+++ b/scripts/automation/trex_control_plane/client/trex_stateless_client.py
@@ -6,22 +6,314 @@ try:
except ImportError:
# support import for Python 3
import client.outer_packages
-from client_utils.jsonrpc_client import JsonRpcClient
-
+from client_utils.jsonrpc_client import JsonRpcClient, BatchMessage
+from client_utils.packet_builder import CTRexPktBuilder
+import json
+from common.trex_stats import *
+from collections import namedtuple
class CTRexStatelessClient(object):
"""docstring for CTRexStatelessClient"""
- def __init__(self, server="localhost", port=5050, virtual=False):
+ RpcCmdData = namedtuple('RpcCmdData', ['method', 'params'])
+
+ def __init__(self, username, server="localhost", port=5050, virtual=False):
super(CTRexStatelessClient, self).__init__()
+ self.user = username
self.tx_link = CTRexStatelessClient.CTxLink(server, port, virtual)
+ self._conn_handler = {}
+ self._active_ports = set()
+ self._stats = CTRexStatsManager("port", "stream")
+ self._system_info = None
+
+ # ----- decorator methods ----- #
+ def force_status(owned=True, active_and_owned=False):
+ def wrapper(func):
+ def wrapper_f(self, *args, **kwargs):
+ port_ids = kwargs.get("port_id")
+ if isinstance(port_ids, int):
+ # make sure port_ids is a list
+ port_ids = [port_ids]
+ bad_ids = set()
+ for port_id in port_ids:
+ port_owned = self._conn_handler.get(port_id)
+ if owned and not port_owned:
+ bad_ids.add(port_id)
+ elif active_and_owned: # stronger condition than just owned, hence gets precedence
+ if port_owned and port_id in self._active_ports:
+ continue
+ else:
+ bad_ids.add(port_id)
+ else:
+ continue
+ if bad_ids:
+ # Some port IDs are not at the required state
+ raise RuntimeError("The requested method ('{0}') cannot be invoked since port IDs {1} are not "
+ "at an allowed state".format(func.__name__, list(bad_ids)))
+ else:
+ func(self, *args, **kwargs)
+ return wrapper_f
+ return wrapper
+
+ @property
+ def system_info(self):
+ if not self._system_info:
+ self._system_info = self.get_system_info()
+ return self._system_info
+
+ # ----- user-access methods ----- #
+ def ping(self):
+ return self.transmit("ping")
+
+ def get_supported_cmds(self):
+ return self.transmit("get_supported_cmds")
+
+ def get_version(self):
+ return self.transmit("get_version")
+
+ def get_system_info(self):
+ return self.transmit("get_system_info")
+
+ def get_port_count(self):
+ return self.system_info.get("port_count")
+
+ def acquire(self, port_id, force=False):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ if isinstance(port_id, list) or isinstance(port_id, set):
+ # handle as batch mode
+ port_ids = set(port_id) # convert to set to avoid duplications
+ commands = [self.RpcCmdData("acquire", {"port_id": p_id, "user": self.user, "force": force})
+ for p_id in port_ids]
+ rc, resp_list = self.transmit_batch(commands)
+ if rc:
+ self._process_batch_result(commands, resp_list, self._handle_acquire_response)
+ else:
+ params = {"port_id": port_id,
+ "user": self.user,
+ "force": force}
+ command = self.RpcCmdData("acquire", params)
+ self._handle_acquire_response(command, self.transmit(command.method, command.params))
+ return self._conn_handler.get(port_id)
+
+ @force_status(owned=True)
+ def release(self, port_id=None):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ if isinstance(port_id, list) or isinstance(port_id, set):
+ # handle as batch mode
+ port_ids = set(port_id) # convert to set to avoid duplications
+ commands = [self.RpcCmdData("release", {"handler": self._conn_handler.get(p_id), "port_id": p_id})
+ for p_id in port_ids]
+ rc, resp_list = self.transmit_batch(commands)
+ if rc:
+ self._process_batch_result(commands, resp_list, self._handle_release_response)
+ else:
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
+ command = self.RpcCmdData("release", params)
+ self._handle_release_response(command, self.transmit(command.method, command.params))
+ return
+
+ @force_status(owned=True)
+ def add_stream(self, stream_id, stream_obj, port_id=None):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ assert isinstance(stream_obj, CStream)
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id,
+ "stream_id": stream_id,
+ "stream": stream_obj.dump()}
+ return self.transmit("add_stream", params)
+
+ @force_status(owned=True)
+ def remove_stream(self, stream_id, port_id=None):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id,
+ "stream_id": stream_id}
+ return self.transmit("remove_stream", params)
+
+ @force_status(owned=True, active_and_owned=True)
+ def get_stream_id_list(self, port_id=None):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
+ return self.transmit("get_stream_list", params)
+
+ @force_status(owned=True, active_and_owned=True)
+ def get_stream(self, stream_id, port_id=None):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id,
+ "stream_id": stream_id}
+ return self.transmit("get_stream", params)
+ @force_status(owned=True)
+ def start_traffic(self, port_id=None):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ if isinstance(port_id, list) or isinstance(port_id, set):
+ # handle as batch mode
+ port_ids = set(port_id) # convert to set to avoid duplications
+ commands = [self.RpcCmdData("start_traffic", {"handler": self._conn_handler.get(p_id), "port_id": p_id})
+ for p_id in port_ids]
+ rc, resp_list = self.transmit_batch(commands)
+ if rc:
+ self._process_batch_result(commands, resp_list, self._handle_start_traffic_response)
+ else:
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
+ command = self.RpcCmdData("start_traffic", params)
+ self._handle_start_traffic_response(command, self.transmit(command.method, command.params))
+ return
- def transmit(self, method_name, params = {}):
+ @force_status(owned=False, active_and_owned=True)
+ def stop_traffic(self, port_id=None):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ if isinstance(port_id, list) or isinstance(port_id, set):
+ # handle as batch mode
+ port_ids = set(port_id) # convert to set to avoid duplications
+ commands = [self.RpcCmdData("stop_traffic", {"handler": self._conn_handler.get(p_id), "port_id": p_id})
+ for p_id in port_ids]
+ rc, resp_list = self.transmit_batch(commands)
+ if rc:
+ self._process_batch_result(commands, resp_list, self._handle_stop_traffic_response)
+ else:
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
+ command = self.RpcCmdData("stop_traffic", params)
+ self._handle_stop_traffic_response(command, self.transmit(command.method, command.params))
+ return
+
+ def get_global_stats(self):
+ command = self.RpcCmdData("get_global_stats", {})
+ return self._handle_get_global_stats_response(command, self.transmit(command.method, command.params))
+ # return self.transmit("get_global_stats")
+
+ @force_status(owned=True, active_and_owned=True)
+ def get_port_stats(self, port_id=None):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ if isinstance(port_id, list) or isinstance(port_id, set):
+ # handle as batch mode
+ port_ids = set(port_id) # convert to set to avoid duplications
+ commands = [self.RpcCmdData("get_port_stats", {"handler": self._conn_handler.get(p_id), "port_id": p_id})
+ for p_id in port_ids]
+ rc, resp_list = self.transmit_batch(commands)
+ if rc:
+ self._process_batch_result(commands, resp_list, self._handle_get_port_stats_response)
+ else:
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
+ command = self.RpcCmdData("get_port_stats", params)
+ return self._handle_get_port_stats_response(command, self.transmit(command.method, command.params))
+
+ @force_status(owned=True, active_and_owned=True)
+ def get_stream_stats(self, port_id=None):
+ if not self._is_ports_valid(port_id):
+ raise ValueError("Provided illegal port id input")
+ if isinstance(port_id, list) or isinstance(port_id, set):
+ # handle as batch mode
+ port_ids = set(port_id) # convert to set to avoid duplications
+ commands = [self.RpcCmdData("get_stream_stats", {"handler": self._conn_handler.get(p_id), "port_id": p_id})
+ for p_id in port_ids]
+ rc, resp_list = self.transmit_batch(commands)
+ if rc:
+ self._process_batch_result(commands, resp_list, self._handle_get_stream_stats_response)
+ else:
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
+ command = self.RpcCmdData("get_stream_stats", params)
+ return self._handle_get_stream_stats_response(command, self.transmit(command.method, command.params))
+
+ # ----- internal methods ----- #
+ def transmit(self, method_name, params={}):
return self.tx_link.transmit(method_name, params)
+ def transmit_batch(self, batch_list):
+ return self.tx_link.transmit_batch(batch_list)
+
+ @staticmethod
+ def _object_decoder(obj_type, obj_data):
+ if obj_type == "global":
+ return CGlobalStats(**obj_data)
+ elif obj_type == "port":
+ return CPortStats(**obj_data)
+ elif obj_type == "stream":
+ return CStreamStats(**obj_data)
+ else:
+ # Do not serialize the data into class
+ return obj_data
+
+ @staticmethod
+ def default_success_test(result_obj):
+ if result_obj.success:
+ return True
+ else:
+ return False
+
+ # ----- handler internal methods ----- #
+ def _handle_acquire_response(self, request, response):
+ if response.success:
+ self._conn_handler[request.params.get("port_id")] = response.data
+
+ def _handle_release_response(self, request, response):
+ if response.success:
+ del self._conn_handler[request.params.get("port_id")]
+ def _handle_start_traffic_response(self, request, response):
+ if response.success:
+ self._active_ports.add(request.params.get("port_id"))
+
+ def _handle_stop_traffic_response(self, request, response):
+ if response.success:
+ self._active_ports.remove(request.params.get("port_id"))
+
+ def _handle_get_global_stats_response(self, request, response):
+ if response.success:
+ return CGlobalStats(**response.data)
+ else:
+ return False
+
+ def _handle_get_port_stats_response(self, request, response):
+ if response.success:
+ return CPortStats(**response.data)
+ else:
+ return False
+
+ def _handle_get_stream_stats_response(self, request, response):
+ if response.success:
+ return CStreamStats(**response.data)
+ else:
+ return False
+
+ def _is_ports_valid(self, port_id):
+ if isinstance(port_id, list) or isinstance(port_id, set):
+ # check each item of the sequence
+ return all([self._is_ports_valid(port)
+ for port in port_id])
+ elif (isinstance(port_id, int)) and (port_id > 0) and (port_id <= self.get_port_count()):
+ return True
+ else:
+ return False
+
+ def _process_batch_result(self, req_list, resp_list, handler_func=None, success_test=default_success_test):
+ for i, response in enumerate(resp_list):
+ # testing each result with success test so that a conclusion report could be deployed in future.
+ if success_test(response):
+ # run handler method with its params
+ handler_func(req_list[i], response)
+ else:
+ continue # TODO: mark in this case somehow the bad result
+
+ # ------ private classes ------ #
class CTxLink(object):
"""describes the connectivity of the stateless client method"""
def __init__(self, server="localhost", port=5050, virtual=False):
@@ -33,18 +325,170 @@ class CTRexStatelessClient(object):
if not self.virtual:
self.rpc_link.connect()
- def transmit(self, method_name, params = {}):
+ def transmit(self, method_name, params={}):
if self.virtual:
- print "Transmitting virtually over tcp://{server}:{port}".format(
- server=self.server,
- port=self.port)
- id, msg = self.rpc_link.create_jsonrpc_v2(method_name, params)
+ self._prompt_virtual_tx_msg()
+ _, msg = self.rpc_link.create_jsonrpc_v2(method_name, params)
print msg
return
else:
return self.rpc_link.invoke_rpc_method(method_name, params)
+ def transmit_batch(self, batch_list):
+ if self.virtual:
+ self._prompt_virtual_tx_msg()
+ print [msg
+ for _, msg in [self.rpc_link.create_jsonrpc_v2(command.method, command.params)
+ for command in batch_list]]
+ else:
+ batch = self.rpc_link.create_batch()
+ for command in batch_list:
+ batch.add(command.method, command.params)
+ # invoke the batch
+ return batch.invoke()
+
+ def _prompt_virtual_tx_msg(self):
+ print "Transmitting virtually over tcp://{server}:{port}".format(server=self.server,
+ port=self.port)
+
+
+class CStream(object):
+ """docstring for CStream"""
+ DEFAULTS = {"rx_stats": CRxStats,
+ "mode": CTxMode,
+ "isg": 5.0,
+ "next_stream": -1,
+ "self_start": True,
+ "enabled": True}
+
+ def __init__(self, **kwargs):
+ super(CStream, self).__init__()
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+ # set default values to unset attributes, according to DEFAULTS dict
+ set_keys = set(kwargs.keys())
+ keys_to_set = [x
+ for x in self.DEFAULTS
+ if x not in set_keys]
+ for key in keys_to_set:
+ default = self.DEFAULTS.get(key)
+ if type(default) == type:
+ setattr(self, key, default())
+ else:
+ setattr(self, key, default)
+
+ @property
+ def packet(self):
+ return self._packet
+
+ @packet.setter
+ def packet(self, packet_obj):
+ assert isinstance(packet_obj, CTRexPktBuilder)
+ self._packet = packet_obj
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @enabled.setter
+ def enabled(self, bool_value):
+ self._enabled = bool(bool_value)
+
+ @property
+ def self_start(self):
+ return self._self_start
+
+ @self_start.setter
+ def self_start(self, bool_value):
+ self._self_start = bool(bool_value)
+
+ @property
+ def next_stream(self):
+ return self._next_stream
+
+ @next_stream.setter
+ def next_stream(self, value):
+ self._next_stream = int(value)
+
+ def dump(self):
+ return {"enabled": self.enabled,
+ "self_start": self.self_start,
+ "isg": self.isg,
+ "next_stream": self.next_stream,
+ "packet": self.packet.dump_pkt(),
+ "mode": self.mode.dump(),
+ "vm": self.packet.get_vm_data(),
+ "rx_stats": self.rx_stats.dump()}
+
+class CRxStats(object):
+
+ def __init__(self, enabled=False, seq_enabled=False, latency_enabled=False):
+ self._rx_dict = {"enabled": enabled,
+ "seq_enabled": seq_enabled,
+ "latency_enabled": latency_enabled}
+
+ @property
+ def enabled(self):
+ return self._rx_dict.get("enabled")
+
+ @enabled.setter
+ def enabled(self, bool_value):
+ self._rx_dict['enabled'] = bool(bool_value)
+
+ @property
+ def seq_enabled(self):
+ return self._rx_dict.get("seq_enabled")
+
+ @seq_enabled.setter
+ def seq_enabled(self, bool_value):
+ self._rx_dict['seq_enabled'] = bool(bool_value)
+
+ @property
+ def latency_enabled(self):
+ return self._rx_dict.get("latency_enabled")
+
+ @latency_enabled.setter
+ def latency_enabled(self, bool_value):
+ self._rx_dict['latency_enabled'] = bool(bool_value)
+
+ def dump(self):
+ return {k: v
+ for k, v in self._rx_dict.items()
+ if v
+ }
+
+
+class CTxMode(object):
+ """docstring for CTxMode"""
+ def __init__(self, tx_mode, pps):
+ super(CTxMode, self).__init__()
+ if tx_mode not in ["continuous", "single_burst", "multi_burst"]:
+ raise ValueError("Unknown TX mode ('{0}') has been initialized.".format(tx_mode))
+ self._tx_mode = tx_mode
+ self._fields = {'pps': float(pps)}
+ if tx_mode == "single_burst":
+ self._fields['total_pkts'] = 0
+ elif tx_mode == "multi_burst":
+ self._fields['pkts_per_burst'] = 0
+ self._fields['ibg'] = 0.0
+ self._fields['count'] = 0
+ else:
+ pass
+
+ def set_tx_mode_attr(self, attr, val):
+ if attr in self._fields:
+ self._fields[attr] = type(self._fields.get(attr))(val)
+ else:
+ raise ValueError("The provided attribute ('{0}') is not a legal attribute in selected TX mode ('{1}')".
+ format(attr, self._tx_mode))
+ def dump(self):
+ dump = {"type": self._tx_mode}
+ dump.update({k: v
+ for k, v in self._fields.items()
+ })
+ return dump
if __name__ == "__main__":
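Taken together, the client API introduced in this file is meant to be driven roughly as follows. This is a minimal usage sketch, not part of the patch: it assumes a stateless RPC server is listening on the default port, and the port number, stream parameters and rate are made-up values.

    # hypothetical usage of the API added above
    client = CTRexStatelessClient("editor", server="localhost", port=5050)

    client.acquire(port_id=1, force=True)          # take ownership of port 1

    pkt = CTRexPktBuilder()                        # build the packet to transmit
    # ... add layers / payload to pkt here ...

    stream = CStream(packet=pkt,
                     mode=CTxMode("continuous", pps=1000),
                     rx_stats=CRxStats(enabled=False))

    client.add_stream(stream_id=1, stream_obj=stream, port_id=1)
    client.start_traffic(port_id=1)
    # ... later ...
    client.stop_traffic(port_id=1)
    client.release(port_id=1)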
diff --git a/scripts/automation/trex_control_plane/client_utils/outer_packages.py b/scripts/automation/trex_control_plane/client_utils/external_packages.py
index a6c9a2eb..f8de0323 100644..100755
--- a/scripts/automation/trex_control_plane/client_utils/outer_packages.py
+++ b/scripts/automation/trex_control_plane/client_utils/external_packages.py
@@ -12,7 +12,6 @@ CLIENT_UTILS_MODULES = ['zmq',
'dpkt-1.8.6'
]
-
def import_client_utils_modules():
# must be in a higher priority
sys.path.insert(0, PATH_TO_PYTHON_LIB)
diff --git a/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py b/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py
index 8b091b5e..163c6923 100644..100755
--- a/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py
+++ b/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py
@@ -1,11 +1,14 @@
#!/router/bin/python
-import outer_packages
+import external_packages
import zmq
import json
import general_utils
import re
from time import sleep
+from collections import namedtuple
+
+CmdResponse = namedtuple('CmdResponse', ['success', 'data'])
class bcolors:
BLUE = '\033[94m'
@@ -23,12 +26,12 @@ class BatchMessage(object):
self.rpc_client = rpc_client
self.batch_list = []
- def add (self, method_name, params = {}):
+ def add (self, method_name, params={}):
id, msg = self.rpc_client.create_jsonrpc_v2(method_name, params, encode = False)
self.batch_list.append(msg)
- def invoke (self, block = False):
+ def invoke(self, block = False):
if not self.rpc_client.connected:
return False, "Not connected to server"
@@ -36,9 +39,9 @@ class BatchMessage(object):
rc, resp_list = self.rpc_client.send_raw_msg(msg, block = False)
if len(self.batch_list) == 1:
- return True, [(rc, resp_list)]
+ return CmdResponse(True, [CmdResponse(rc, resp_list)])
else:
- return rc, resp_list
+ return CmdResponse(rc, resp_list)
# JSON RPC v2.0 client
@@ -60,6 +63,7 @@ class JsonRpcClient(object):
return rc
+ # pretty print for JSON
def pretty_json (self, json_str, use_colors = True):
pretty_str = json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True)
@@ -87,6 +91,7 @@ class JsonRpcClient(object):
print "[verbose] " + msg
+ # batch messages
def create_batch (self):
return BatchMessage(self)
@@ -114,6 +119,7 @@ class JsonRpcClient(object):
return self.send_raw_msg(msg, block)
+ # low level send of string message
def send_raw_msg (self, msg, block = False):
self.verbose_msg("Sending Request To Server:\n\n" + self.pretty_json(msg) + "\n")
@@ -124,7 +130,7 @@ class JsonRpcClient(object):
self.socket.send(msg, flags = zmq.NOBLOCK)
except zmq.error.ZMQError as e:
self.disconnect()
- return False, "Failed To Get Send Message"
+ return CmdResponse(False, "Failed To Send Message")
got_response = False
@@ -142,7 +148,7 @@ class JsonRpcClient(object):
if not got_response:
self.disconnect()
- return False, "Failed To Get Server Response"
+ return CmdResponse(False, "Failed To Get Server Response")
self.verbose_msg("Server Response:\n\n" + self.pretty_json(response) + "\n")
@@ -156,19 +162,19 @@ class JsonRpcClient(object):
for single_response in response_json:
rc, msg = self.process_single_response(single_response)
- rc_list.append( (rc, msg) )
+ rc_list.append( CmdResponse(rc, msg) )
- return True, rc_list
+ return CmdResponse(True, rc_list)
else:
rc, msg = self.process_single_response(response_json)
- return rc, msg
+ return CmdResponse(rc, msg)
def process_single_response (self, response_json):
if (response_json.get("jsonrpc") != "2.0"):
- return False, "Malfromed Response ({0})".format(str(response))
+ return False, "Malformed Response ({0})".format(str(response_json))
# error reported by server
if ("error" in response_json):
@@ -179,7 +185,7 @@ class JsonRpcClient(object):
# if no error there should be a result
if ("result" not in response_json):
- return False, "Malfromed Response ({0})".format(str(response))
+ return False, "Malformed Response ({0})".format(str(response_json))
return True, response_json["result"]
@@ -188,7 +194,7 @@ class JsonRpcClient(object):
def set_verbose(self, mode):
self.verbose = mode
- def disconnect (self):
+ def disconnect(self):
if self.connected:
self.socket.close(linger = 0)
self.context.destroy(linger = 0)
@@ -241,209 +247,3 @@ class JsonRpcClient(object):
print "Shutting down RPC client\n"
if hasattr(self, "context"):
self.context.destroy(linger=0)
-
-# MOVE THIS TO DAN'S FILE
-class TrexStatelessClient(JsonRpcClient):
-
- def __init__ (self, server, port, user):
-
- super(TrexStatelessClient, self).__init__(server, port)
-
- self.user = user
- self.port_handlers = {}
-
- self.supported_cmds = []
- self.system_info = None
- self.server_version = None
-
-
- def whoami (self):
- return self.user
-
- def ping_rpc_server(self):
-
- return self.invoke_rpc_method("ping", block = False)
-
- def get_rpc_server_version (self):
- return self.server_version
-
- def get_system_info (self):
- return self.system_info
-
- def get_supported_cmds(self):
- return self.supported_cmds
-
- def get_port_count (self):
- if not self.system_info:
- return 0
-
- return self.system_info["port_count"]
-
- # refresh the client for transient data
- def refresh (self):
-
- # get server versionrc, msg = self.get_supported_cmds()
- rc, msg = self.invoke_rpc_method("get_version")
- if not rc:
- self.disconnect()
- return rc, msg
-
- self.server_version = msg
-
- # get supported commands
- rc, msg = self.invoke_rpc_method("get_supported_cmds")
- if not rc:
- self.disconnect()
- return rc, msg
-
- self.supported_cmds = [str(x) for x in msg if x]
-
- # get system info
- rc, msg = self.invoke_rpc_method("get_system_info")
- if not rc:
- self.disconnect()
- return rc, msg
-
- self.system_info = msg
-
- return True, ""
-
- def connect (self):
- rc, err = super(TrexStatelessClient, self).connect()
- if not rc:
- return rc, err
-
- return self.refresh()
-
-
- # take ownership over ports
- def take_ownership (self, port_id_array, force = False):
- if not self.connected:
- return False, "Not connected to server"
-
- batch = self.create_batch()
-
- for port_id in port_id_array:
- batch.add("acquire", params = {"port_id":port_id, "user":self.user, "force":force})
-
- rc, resp_list = batch.invoke()
- if not rc:
- return rc, resp_list
-
- for i, rc in enumerate(resp_list):
- if rc[0]:
- self.port_handlers[port_id_array[i]] = rc[1]
-
- return True, resp_list
-
-
- def release_ports (self, port_id_array):
- batch = self.create_batch()
-
- for port_id in port_id_array:
-
- # let the server handle un-acquired errors
- if self.port_handlers.get(port_id):
- handler = self.port_handlers[port_id]
- else:
- handler = ""
-
- batch.add("release", params = {"port_id":port_id, "handler":handler})
-
-
- rc, resp_list = batch.invoke()
- if not rc:
- return rc, resp_list
-
- for i, rc in enumerate(resp_list):
- if rc[0]:
- self.port_handlers.pop(port_id_array[i])
-
- return True, resp_list
-
- def get_owned_ports (self):
- return self.port_handlers.keys()
-
- # fetch port stats
- def get_port_stats (self, port_id_array):
- if not self.connected:
- return False, "Not connected to server"
-
- batch = self.create_batch()
-
- # empty list means all
- if port_id_array == []:
- port_id_array = list([x for x in xrange(0, self.system_info["port_count"])])
-
- for port_id in port_id_array:
-
- # let the server handle un-acquired errors
- if self.port_handlers.get(port_id):
- handler = self.port_handlers[port_id]
- else:
- handler = ""
-
- batch.add("get_port_stats", params = {"port_id":port_id, "handler":handler})
-
-
- rc, resp_list = batch.invoke()
-
- return rc, resp_list
-
- # snapshot will take a snapshot of all your owned ports for streams and etc.
- def snapshot(self):
-
-
- if len(self.get_owned_ports()) == 0:
- return {}
-
- snap = {}
-
- batch = self.create_batch()
-
- for port_id in self.get_owned_ports():
-
- batch.add("get_port_stats", params = {"port_id": port_id, "handler": self.port_handlers[port_id]})
- batch.add("get_stream_list", params = {"port_id": port_id, "handler": self.port_handlers[port_id]})
-
- rc, resp_list = batch.invoke()
- if not rc:
- return rc, resp_list
-
- # split the list to 2s
- index = 0
- for port_id in self.get_owned_ports():
- if not resp_list[index] or not resp_list[index + 1]:
- snap[port_id] = None
- continue
-
- # fetch the first two
- stats = resp_list[index][1]
- stream_list = resp_list[index + 1][1]
-
- port = {}
- port['status'] = stats['status']
- port['stream_list'] = []
-
- # get all the streams
- if len(stream_list) > 0:
- batch = self.create_batch()
- for stream_id in stream_list:
- batch.add("get_stream", params = {"port_id": port_id, "stream_id": stream_id, "handler": self.port_handlers[port_id]})
-
- rc, stream_resp_list = batch.invoke()
- if not rc:
- port = {}
-
- port['streams'] = {}
- for i, resp in enumerate(stream_resp_list):
- if resp[0]:
- port['streams'][stream_list[i]] = resp[1]
-
- snap[port_id] = port
-
- # move to next one
- index += 2
-
-
- return snap
\ No newline at end of file
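The CmdResponse namedtuple introduced above keeps the old (rc, msg) tuple-unpacking call sites working while allowing attribute access on new code paths. A short illustration (the values are made up):

    from collections import namedtuple

    CmdResponse = namedtuple('CmdResponse', ['success', 'data'])

    resp = CmdResponse(True, {"port_count": 4})

    rc, msg = resp                  # old-style tuple unpacking still works
    if resp.success:                # new-style attribute access
        port_count = resp.data["port_count"]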
diff --git a/scripts/automation/trex_control_plane/client_utils/packet_builder.py b/scripts/automation/trex_control_plane/client_utils/packet_builder.py
index fc34d931..1c643335 100644..100755
--- a/scripts/automation/trex_control_plane/client_utils/packet_builder.py
+++ b/scripts/automation/trex_control_plane/client_utils/packet_builder.py
@@ -1,7 +1,6 @@
#!/router/bin/python
-
-import outer_packages
+import external_packages
import dpkt
import socket
import binascii
@@ -10,6 +9,8 @@ import random
import string
import struct
import re
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
class CTRexPktBuilder(object):
@@ -30,7 +31,7 @@ class CTRexPktBuilder(object):
self._pkt_by_hdr = {}
self._pkt_top_layer = None
self._max_pkt_size = max_pkt_size
- self.payload_generator = CTRexPktBuilder.CTRexPayloadGen(self._packet, self._max_pkt_size)
+ self.payload_gen = CTRexPktBuilder.CTRexPayloadGen(self._packet, self._max_pkt_size)
self.vm = CTRexPktBuilder.CTRexVM()
def add_pkt_layer(self, layer_name, pkt_layer):
@@ -63,6 +64,38 @@ class CTRexPktBuilder(object):
return
def set_ip_layer_addr(self, layer_name, attr, ip_addr, ip_type="ipv4"):
+ """
+ This method sets the IP address fields of an IP header (source or destination, for both IPv4 and IPv6)
+ using a human readable addressing representation.
+
+ :parameters:
+ layer_name: str
+ a string representing the name of the layer.
+ Example: "l3_ip", etc.
+
+ attr: str
+ a string representation of the sub-field to be set:
+
+ + "src" for source
+ + "dst" for destination
+
+ ip_addr: str
+ a string representation of the IP address to be set.
+ Example: "10.0.0.1" for IPv4, or "5001::DB8:1:3333:1:1" for IPv6
+
+ ip_type : str
+ a string representation of the IP version to be set:
+
+ + "ipv4" for IPv4
+ + "ipv6" for IPv6
+
+ Default: **ipv4**
+
+ :raises:
+ + :exc:`ValueError`, in case the desired layer_name is not an IP layer
+ + :exc:`KeyError`, in case the desired layer_name does not exist.
+
+ """
try:
layer = self._pkt_by_hdr[layer_name.lower()]
if not (isinstance(layer, dpkt.ip.IP) or isinstance(layer, dpkt.ip6.IP6)):
@@ -74,9 +107,55 @@ class CTRexPktBuilder(object):
raise KeyError("Specified layer '{0}' doesn't exist on packet.".format(layer_name))
def set_ipv6_layer_addr(self, layer_name, attr, ip_addr):
+ """
+ This method sets the IPv6 address fields of an IP header (source or destination)
+
+ :parameters:
+ layer_name: str
+ a string representing the name of the layer.
+ Example: "l3_ip", etc.
+
+ attr: str
+ a string representation of the sub-field to be set:
+
+ + "src" for source
+ + "dst" for destination
+
+ ip_addr: str
+ a string representation of the IP address to be set.
+ Example: "5001::DB8:1:3333:1:1"
+
+ :raises:
+ + :exc:`ValueError`, in case the desired layer_name is not an IPv6 layer
+ + :exc:`KeyError`, in case the desired layer_name does not exist.
+
+ """
self.set_ip_layer_addr(layer_name, attr, ip_addr, ip_type="ipv6")
def set_eth_layer_addr(self, layer_name, attr, mac_addr):
+ """
+ This method sets the ethernet address fields of an Ethernet header (source or destination)
+ using a human readable addressing representation.
+
+ :parameters:
+ layer_name: str
+ a string representing the name of the layer.
+ Example: "l2", etc.
+
+ attr: str
+ a string representation of the sub-field to be set:
+ + "src" for source
+ + "dst" for destination
+
+ mac_addr: str
+ a string representation of the MAC address to be set.
+ Example: "00:de:34:ef:2e:f4".
+
+ :raises:
+ + :exc:`ValueError`, in case the desired layer_name is not an Ethernet layer
+ + :exc:`KeyError`, in case the desired layer_name does not exist.
+
+ """
try:
layer = self._pkt_by_hdr[layer_name.lower()]
if not isinstance(layer, dpkt.ethernet.Ethernet):
@@ -135,6 +214,30 @@ class CTRexPktBuilder(object):
raise KeyError("Specified layer '{0}' doesn't exist on packet.".format(layer_name))
def set_layer_bit_attr(self, layer_name, attr, val):
+ """
+ This method enables the user to set the value of a field smaller than 1 byte in size.
+ This method should not be used to set the value of full-sized fields (>= 1 byte).
+ Use :func:`packet_builder.CTRexPktBuilder.set_layer_attr` instead.
+
+ :parameters:
+ layer_name: str
+ a string representing the name of the layer.
+ Example: "l2", "l4_tcp", etc.
+
+ attr : str
+ a string representing the attribute to be set on desired layer
+
+ val : int
+ value of attribute.
+ This value will be set "on top" of the existing value using a bitwise "OR" operation.
+
+ .. tip:: It is very useful to use dpkt constants to define the values of these fields.
+
+ :raises:
+ + :exc:`KeyError`, in case of missing layer (the desired layer isn't part of packet)
+ + :exc:`ValueError`, in case an invalid attribute is given for the specified layer.
+
+ """
return self.set_layer_attr(layer_name, attr, val, True)
def set_pkt_payload(self, payload):
@@ -238,17 +341,88 @@ class CTRexPktBuilder(object):
return copy.copy(layer) if layer else None
# VM access methods
- def set_vm_ip_range(self, ip_start, ip_end, ip_type="ipv4"):
- pass
-
- def set_vm_range_type(self, ip_type):
- pass
+ def set_vm_ip_range(self, ip_layer_name, ip_field,
+ ip_init, ip_start, ip_end, add_value,
+ operation, is_big_endian=False, val_size=4,
+ ip_type="ipv4", add_checksum_inst=True):
+ if ip_field not in ["src", "dst"]:
+ raise ValueError("set_vm_ip_range only available for source ('src') or destination ('dst') ip addresses")
+ # set differences between IPv4 and IPv6
+ if ip_type == "ipv4":
+ ip_class = dpkt.ip.IP
+ ip_addr_size = val_size if val_size <= 4 else 4
+ elif ip_type == "ipv6":
+ ip_class = dpkt.ip6.IP6
+ ip_addr_size = val_size if val_size <= 8 else 4
+ else:
+ raise CTRexPktBuilder.IPAddressError()
- def set_vm_core_mask(self, ip_type):
- pass
+ self._verify_layer_prop(ip_layer_name, ip_class)
+ trim_size = ip_addr_size*2
+ init_val = int(binascii.hexlify(CTRexPktBuilder._decode_ip_addr(ip_init, ip_type))[-trim_size:], 16)
+ start_val = int(binascii.hexlify(CTRexPktBuilder._decode_ip_addr(ip_start, ip_type))[-trim_size:], 16)
+ end_val = int(binascii.hexlify(CTRexPktBuilder._decode_ip_addr(ip_end, ip_type))[-trim_size:], 16)
+ # All validations are done, start adding VM instructions
+ flow_var_name = "{layer}__{field}".format(layer=ip_layer_name, field=ip_field)
+ hdr_offset, field_abs_offset = self._calc_offset(ip_layer_name, ip_field, ip_addr_size)
+ self.vm.add_flow_man_inst(flow_var_name, size=ip_addr_size, operation=operation,
+ init_value=init_val,
+ min_value=start_val,
+ max_value=end_val)
+ self.vm.add_write_flow_inst(flow_var_name, field_abs_offset)
+ self.vm.set_vm_off_inst_field(flow_var_name, "add_value", add_value)
+ self.vm.set_vm_off_inst_field(flow_var_name, "is_big_endian", is_big_endian)
+ if ip_type == "ipv4" and add_checksum_inst:
+ self.vm.add_fix_checksum_inst(self._pkt_by_hdr.get(ip_layer_name), hdr_offset)
+
+ def set_vm_eth_range(self, eth_layer_name, eth_field,
+ mac_init, mac_start, mac_end, add_value,
+ operation, val_size=4, is_big_endian=False):
+ if eth_field not in ["src", "dst"]:
+ raise ValueError("set_vm_eth_range only available for source ('src') or destination ('dst') eth addresses")
+ self._verify_layer_prop(eth_layer_name, dpkt.ethernet.Ethernet)
+ eth_addr_size = val_size if val_size <= 4 else 4
+ trim_size = eth_addr_size*2
+ init_val = int(binascii.hexlify(CTRexPktBuilder._decode_mac_addr(mac_init))[-trim_size:], 16)
+ start_val = int(binascii.hexlify(CTRexPktBuilder._decode_mac_addr(mac_start))[-trim_size:], 16)
+ end_val = int(binascii.hexlify(CTRexPktBuilder._decode_mac_addr(mac_end))[-trim_size:], 16)
+ # All validations are done, start adding VM instructions
+ flow_var_name = "{layer}__{field}".format(layer=eth_layer_name, field=eth_field)
+ hdr_offset, field_abs_offset = self._calc_offset(eth_layer_name, eth_field, eth_addr_size)
+ self.vm.add_flow_man_inst(flow_var_name, size=8, operation=operation,
+ init_value=init_val,
+ min_value=start_val,
+ max_value=end_val)
+ self.vm.add_write_flow_inst(flow_var_name, field_abs_offset)
+ self.vm.set_vm_off_inst_field(flow_var_name, "add_value", add_value)
+ self.vm.set_vm_off_inst_field(flow_var_name, "is_big_endian", is_big_endian)
+
+ def set_vm_custom_range(self, layer_name, hdr_field,
+ init_val, start_val, end_val, add_val, val_size,
+ operation, is_big_endian=False, range_name="",
+ add_checksum_inst=True):
+ # verify input validity for init/start/end values
+ for val in [init_val, start_val, end_val]:
+ if not isinstance(val, int):
+ raise ValueError("init/start/end values are expected integers, but received type '{0}'".
+ format(type(val)))
+ self._verify_layer_prop(layer_name=layer_name, field_name=hdr_field)
+ if not range_name:
+ range_name = "{layer}__{field}".format(layer=layer_name, field=hdr_field)
+ trim_size = val_size*2
+ hdr_offset, field_abs_offset = self._calc_offset(layer_name, hdr_field, val_size)
+ self.vm.add_flow_man_inst(range_name, size=val_size, operation=operation,
+ init_value=init_val,
+ min_value=start_val,
+ max_value=end_val)
+ self.vm.add_write_flow_inst(range_name, field_abs_offset)
+ self.vm.set_vm_off_inst_field(range_name, "add_value", add_val)
+ self.vm.set_vm_off_inst_field(range_name, "is_big_endian", is_big_endian)
+ if isinstance(self._pkt_by_hdr.get(layer_name), dpkt.ip.IP) and add_checksum_inst:
+ self.vm.add_fix_checksum_inst(self._pkt_by_hdr.get(layer_name), hdr_offset)
def get_vm_data(self):
- pass
+ return self.vm.dump()
def dump_pkt(self):
"""
@@ -302,9 +476,7 @@ class CTRexPktBuilder(object):
except IOError:
raise IOError(2, "The provided path could not be accessed")
-
- # ----- useful shortcut methods ----- #
- def gen_dns_packet(self):
+ def export_pkt(self, file_path, link_pcap=False, pcap_name=None, pcap_ts=None):
pass
# ----- internal methods ----- #
@@ -342,6 +514,41 @@ class CTRexPktBuilder(object):
if self._pkt_by_hdr[layer] is layer_obj:
return layer
+ def _calc_offset(self, layer_name, hdr_field, hdr_field_size):
+ pkt_header = self._pkt_by_hdr.get(layer_name)
+ hdr_offset = len(self._packet) - len(pkt_header)
+ inner_hdr_offsets = []
+ for field in pkt_header.__hdr__:
+ if field[0] == hdr_field:
+ field_size = struct.calcsize(field[1])
+ if field_size == hdr_field_size:
+ break
+ elif field_size < hdr_field_size:
+ raise CTRexPktBuilder.PacketLayerError(layer_name,
+ "The specified field '{0}' size is smaller than given range"
+ " size ('{1}')".format(hdr_field, hdr_field_size))
+ else:
+ inner_hdr_offsets.append(field_size - hdr_field_size)
+ break
+ else:
+ inner_hdr_offsets.append(struct.calcsize(field[1]))
+ return hdr_offset, hdr_offset + sum(inner_hdr_offsets)
+
+ def _verify_layer_prop(self, layer_name, layer_type=None, field_name=None):
+ if layer_name not in self._pkt_by_hdr:
+ raise CTRexPktBuilder.PacketLayerError(layer_name)
+ pkt_layer = self._pkt_by_hdr.get(layer_name)
+ if layer_type:
+ # check for layer type
+ if not isinstance(pkt_layer, layer_type):
+ raise CTRexPktBuilder.PacketLayerTypeError(layer_name, type(pkt_layer), layer_type)
+ if field_name and not hasattr(pkt_layer, field_name):
+ # check if field exists on certain header
+ raise CTRexPktBuilder.PacketLayerError(layer_name, "The specified field '{0}' does not exist on the "
+ "given packet layer ('{1}')".format(field_name,
+ layer_name))
+ return
+
@staticmethod
def _decode_mac_addr(mac_addr):
"""
@@ -449,6 +656,8 @@ class CTRexPktBuilder(object):
This class defines the TRex VM which represents how TRex will regenerate packets.
The packets will be regenerated based on the built packet containing this class.
"""
+ InstStore = namedtuple('InstStore', ['type', 'inst'])
+
def __init__(self):
"""
Instantiate a CTRexVM object
@@ -458,8 +667,10 @@ class CTRexPktBuilder(object):
"""
super(CTRexPktBuilder.CTRexVM, self).__init__()
self.vm_variables = {}
+ self._inst_by_offset = {} # this data structure holds only offset-related instructions, ordered in tuples
+ self._off_inst_by_name = {}
- def set_vm_var_field(self, var_name, field_name, val):
+ def set_vm_var_field(self, var_name, field_name, val, offset_inst=False):
"""
Set VM variable field. Only existing variables are allowed to be changed.
@@ -477,9 +688,15 @@ class CTRexPktBuilder(object):
+ :exc:`CTRexPktBuilder.VMVarValueError`, in case val isn't one of allowed options of field_name.
"""
- return self.vm_variables[var_name].set_field(field_name, val)
+ if offset_inst:
+ return self._off_inst_by_name[var_name].inst.set_field(field_name, val)
+ else:
+ return self.vm_variables[var_name].set_field(field_name, val)
+
+ def set_vm_off_inst_field(self, var_name, field_name, val):
+ return self.set_vm_var_field(var_name, field_name, val, True)
- def add_flow_man_simple(self, name, **kwargs):
+ def add_flow_man_inst(self, name, **kwargs):
"""
Adds a new flow manipulation object to the VM instance.
@@ -488,7 +705,7 @@ class CTRexPktBuilder(object):
name of the manipulation, must be distinct.
Example: 'source_ip_change'
- **kwargs : dict
+ **kwargs** : dict
optional, set flow_man fields on initialization (key = field_name, val = field_val).
Must be used with legit fields, see :func:`CTRexPktBuilder.CTRexVM.CTRexVMVariable.set_field`.
@@ -500,14 +717,40 @@ class CTRexPktBuilder(object):
+ Exceptions from :func:`CTRexPktBuilder.CTRexVM.CTRexVMVariable.set_field` method.
Will be raised when VM variables are misconfigured.
"""
- if name not in self.vm_variables.keys():
- self.vm_variables[name] = self.CTRexVMVariable(name)
- # try configuring VM var attributes
+ if name not in self.vm_variables:
+ self.vm_variables[name] = self.CTRexVMFlowVariable(name)
+ # try configuring VM instruction attributes
for (field, value) in kwargs.items():
self.vm_variables[name].set_field(field, value)
else:
raise CTRexPktBuilder.VMVarNameExistsError(name)
+ def add_fix_checksum_inst(self, linked_ipv4_obj, offset_to_obj=14, name=None):
+ # check if specified linked_ipv4_obj is indeed an ipv4 object
+ if not (isinstance(linked_ipv4_obj, dpkt.ip.IP)):
+ raise ValueError("The provided layer object is not of IPv4.")
+ if not name:
+ name = "checksum_{off}".format(off=offset_to_obj) # name will override previous checksum inst, OK
+ new_checksum_inst = self.CTRexVMChecksumInst(name, offset_to_obj)
+ # store the checksum inst in the end of the IP header (20 Bytes long)
+ inst = self.InstStore('checksum', new_checksum_inst)
+ self._inst_by_offset[offset_to_obj + 20] = inst
+ self._off_inst_by_name[name] = inst
+
+ def add_write_flow_inst(self, name, pkt_offset, **kwargs):
+ if name not in self.vm_variables:
+ raise KeyError("Trying to add a write_flow_var instruction to a non-existent VM flow variable ('{0}')".
+ format(name))
+ else:
+ new_write_inst = self.CTRexVMWrtFlowVarInst(name, pkt_offset)
+ # try configuring VM instruction attributes
+ for (field, value) in kwargs.items():
+ new_write_inst.set_field(field, value)
+ # add the instruction to the data structure
+ inst = self.InstStore('write', new_write_inst)
+ self._inst_by_offset[pkt_offset] = inst
+ self._off_inst_by_name[name] = inst
+
def load_flow_man(self, flow_obj):
"""
Loads an outer VM variable (instruction) into current VM.
@@ -521,7 +764,7 @@ class CTRexPktBuilder(object):
list holds variables data of VM
"""
- assert isinstance(flow_obj, CTRexPktBuilder.CTRexVM.CTRexVMVariable)
+ assert isinstance(flow_obj, CTRexPktBuilder.CTRexVM.CTRexVMFlowVariable)
if flow_obj.name not in self.vm_variables.keys():
self.vm_variables[flow_obj.name] = flow_obj
else:
@@ -529,7 +772,7 @@ class CTRexPktBuilder(object):
def dump(self):
"""
- dumps a VM variables (instructions) into an list data structure.
+ dumps a VM variables (instructions) into a list data structure.
:parameters:
None
@@ -538,14 +781,42 @@ class CTRexPktBuilder(object):
list holds variables data of VM
"""
- return [var.dump()
- for key, var in self.vm_variables.items()]
+ # at first, dump all CTRexVMFlowVariable instructions
+ ret_val = [var.dump()
+ for key, var in self.vm_variables.items()]
+ # then, dump all the CTRexVMWrtFlowVarInst and CTRexVMChecksumInst instructions
+ ret_val += [self._inst_by_offset.get(key).inst.dump()
+ for key in sorted(self._inst_by_offset)]
+ return ret_val
+
+ class CVMAbstractInstruction(object):
+ __metaclass__ = ABCMeta
+
+ def __init__(self, name):
+ """
+ Instantiate a CVMAbstractInstruction object
+
+ :parameters:
+ name : str
+ a string representing the name of the VM variable.
+ """
+ super(CTRexPktBuilder.CTRexVM.CVMAbstractInstruction, self).__init__()
+ self.name = name
+
+ def set_field(self, field_name, val):
+ if not hasattr(self, field_name):
+ raise CTRexPktBuilder.VMFieldNameError(field_name)
+ setattr(self, field_name, val)
+
+ @abstractmethod
+ def dump(self):
+ pass
- class CTRexVMVariable(object):
+ class CTRexVMFlowVariable(CVMAbstractInstruction):
"""
This class defines a single VM variable to be used as part of CTRexVar object.
"""
- VALID_SIZE = [1, 2, 4, 8]
+ VALID_SIZE = [1, 2, 4, 8] # size in Bytes
VALID_OPERATION = ["inc", "dec", "random"]
def __init__(self, name):
@@ -556,12 +827,12 @@ class CTRexPktBuilder(object):
name : str
a string representing the name of the VM variable.
"""
- super(CTRexPktBuilder.CTRexVM.CTRexVMVariable, self).__init__()
- self.name = name
+ super(CTRexPktBuilder.CTRexVM.CTRexVMFlowVariable, self).__init__(name)
+ # self.name = name
self.size = 4
self.big_endian = True
self.operation = "inc"
- self.split_by_core = False
+ # self.split_by_core = False
self.init_value = 1
self.min_value = self.init_value
self.max_value = self.init_value
@@ -586,32 +857,25 @@ class CTRexPktBuilder(object):
"""
if not hasattr(self, field_name):
- raise CTRexPktBuilder.VMVarNameError(field_name)
+ raise CTRexPktBuilder.VMFieldNameError(field_name)
elif field_name == "size":
if type(val) != int:
- raise CTRexPktBuilder.VMVarFieldTypeError("size", int)
+ raise CTRexPktBuilder.VMFieldTypeError("size", int)
elif val not in self.VALID_SIZE:
- raise CTRexPktBuilder.VMVarValueError("size", self.VALID_SIZE)
- elif field_name == "init_value":
+ raise CTRexPktBuilder.VMFieldValueError("size", self.VALID_SIZE)
+ elif field_name in ["init_value", "min_value", "max_value"]:
if type(val) != int:
- raise CTRexPktBuilder.VMVarFieldTypeError("init_value", int)
+ raise CTRexPktBuilder.VMFieldTypeError(field_name, int)
elif field_name == "operation":
if type(val) != str:
- raise CTRexPktBuilder.VMVarFieldTypeError("operation", str)
+ raise CTRexPktBuilder.VMFieldTypeError("operation", str)
elif val not in self.VALID_OPERATION:
- raise CTRexPktBuilder.VMVarValueError("operation", self.VALID_OPERATION)
- elif field_name == "split_by_core":
- val = bool(val)
+ raise CTRexPktBuilder.VMFieldValueError("operation", self.VALID_OPERATION)
+ # elif field_name == "split_by_core":
+ # val = bool(val)
# update field value on success
setattr(self, field_name, val)
- def is_valid(self):
- if self.size not in self.VALID_SIZE:
- return False
- if self.type not in self.VALID_OPERATION:
- return False
- return True
-
def dump(self):
"""
dumps a variable fields in a dictionary data structure.
@@ -623,15 +887,116 @@ class CTRexPktBuilder(object):
dictionary holds variable data of VM variable
"""
- return {"ins_name": "flow_man_simple", # VM variable dump always refers to manipulate instruction.
- "flow_variable_name": self.name,
- "object_size": self.size,
- # "big_endian": self.big_endian,
- "Operation": self.operation,
- "split_by_core": self.split_by_core,
- "init_value": self.init_value,
- "min_value": self.min_value,
- "max_value": self.max_value}
+ return {"ins_name": "flow_var", # VM variable dump always refers to manipulate instruction.
+ "name": self.name,
+ "size": self.size,
+ "op": self.operation,
+ # "split_by_core": self.split_by_core,
+ "init_value": str(self.init_value),
+ "min_value": str(self.min_value),
+ "max_value": str(self.max_value)}
+
+ class CTRexVMChecksumInst(CVMAbstractInstruction):
+
+ def __init__(self, name, offset):
+ """
+ Instantiate a CTRexVMChecksumInst object
+
+ :parameters:
+ name : str
+ a string representing the name of the VM variable.
+ """
+ super(CTRexPktBuilder.CTRexVM.CTRexVMChecksumInst, self).__init__(name)
+ self.pkt_offset = offset
+
+ def dump(self):
+ return {"type": "fix_checksum_ipv4",
+ "pkt_offset": int(self.pkt_offset)}
+
+ class CTRexVMWrtFlowVarInst(CVMAbstractInstruction):
+
+ def __init__(self, name, pkt_offset):
+ """
+ Instantiate a CTRexVMWrtFlowVarInst object
+
+ :parameters:
+ name : str
+ a string representing the name of the VM variable.
+ """
+ super(CTRexPktBuilder.CTRexVM.CTRexVMWrtFlowVarInst, self).__init__(name)
+ self.pkt_offset = int(pkt_offset)
+ self.add_value = 0
+ self.is_big_endian = False
+
+ def set_field(self, field_name, val):
+ if not hasattr(self, field_name):
+ raise CTRexPktBuilder.VMFieldNameError(field_name)
+ elif field_name == 'pkt_offset':
+ raise ValueError("pkt_offset value cannot be changed")
+ cur_attr_type = type(getattr(self, field_name))
+ if cur_attr_type == type(val):
+ setattr(self, field_name, val)
+ else:
+ raise CTRexPktBuilder.VMFieldTypeError(field_name, cur_attr_type)
+
+ def dump(self):
+ return {"type": "write_flow_var",
+ "name": self.name,
+ "pkt_offset": self.pkt_offset,
+ "add_value": int(self.add_value),
+ "is_big_endian": bool(self.is_big_endian)
+ }
+
class CPacketBuildException(Exception):
"""
@@ -672,7 +1037,28 @@ class CTRexPktBuilder(object):
def __init__(self, message=''):
self._default_message = 'Illegal MAC address has been provided.'
self.message = message or self._default_message
- super(CTRexPktBuilder.MACAddressError, self).__init__(-11, self.message)
+ super(CTRexPktBuilder.MACAddressError, self).__init__(-12, self.message)
+
+ class PacketLayerError(CPacketBuildException):
+ """
+ This exception is used to indicate an error caused by an operation performed on a non-existent layer of the packet.
+ """
+ def __init__(self, name, message=''):
+ self._default_message = "The given packet layer name ({0}) does not exist.".format(name)
+ self.message = message or self._default_message
+ super(CTRexPktBuilder.PacketLayerError, self).__init__(-13, self.message)
+
+ class PacketLayerTypeError(CPacketBuildException):
+ """
+ This exception is used to indicate an error caused by an operation performed on a packet layer of an unexpected type.
+ """
+ def __init__(self, name, layer_type, ok_type, message=''):
+ self._default_message = "The type of packet layer {layer_name} is of type {layer_type}, " \
+ "and not of the expected {allowed_type}.".format(layer_name=name,
+ layer_type=layer_type,
+ allowed_type=ok_type.__name__)
+ self.message = message or self._default_message
+ super(CTRexPktBuilder.PacketLayerTypeError, self).__init__(-13, self.message)
class VMVarNameExistsError(CPacketBuildException):
"""
@@ -683,37 +1069,37 @@ class CTRexPktBuilder(object):
self.message = message or self._default_message
super(CTRexPktBuilder.VMVarNameExistsError, self).__init__(-21, self.message)
- class VMVarNameError(CPacketBuildException):
+ class VMFieldNameError(CPacketBuildException):
"""
This exception is used to indicate that an undefined VM var field name has been accessed.
"""
def __init__(self, name, message=''):
self._default_message = "The given VM field name ({0}) is not defined and isn't legal.".format(name)
self.message = message or self._default_message
- super(CTRexPktBuilder.VMVarNameError, self).__init__(-22, self.message)
+ super(CTRexPktBuilder.VMFieldNameError, self).__init__(-22, self.message)
- class VMVarFieldTypeError(CPacketBuildException):
+ class VMFieldTypeError(CPacketBuildException):
"""
This exception is used to indicate that a value of an illegal type has been given to a VM variable field.
"""
def __init__(self, name, ok_type, message=''):
- self._default_message = 'The desired value of field {field_name} is of type {field_type}, \
- and not of the allowed {allowed_type}.'.format(field_name=name,
- field_type=type(name).__name__,
- allowed_type=ok_type.__name__)
+ self._default_message = "The desired value of field {field_name} is of type {field_type}, " \
+ "and not of the allowed {allowed_type}.".format(field_name=name,
+ field_type=type(name).__name__,
+ allowed_type=ok_type.__name__)
self.message = message or self._default_message
- super(CTRexPktBuilder.VMVarFieldTypeError, self).__init__(-31, self.message)
+ super(CTRexPktBuilder.VMFieldTypeError, self).__init__(-31, self.message)
- class VMVarValueError(CPacketBuildException):
+ class VMFieldValueError(CPacketBuildException):
"""
This exception is used to indicate that an illegal value has been assigned to a VM variable field.
"""
def __init__(self, name, ok_opts, message=''):
- self._default_message = 'The desired value of field {field_name} is illegal.\n \
- The only allowed options are: {allowed_opts}.'.format(field_name=name,
- allowed_opts=ok_opts)
+ self._default_message = "The desired value of field {field_name} is illegal.\n" \
+ "The only allowed options are: {allowed_opts}.".format(field_name=name,
+ allowed_opts=ok_opts)
self.message = message or self._default_message
- super(CTRexPktBuilder.VMVarValueError, self).__init__(-32, self.message)
+ super(CTRexPktBuilder.VMFieldValueError, self).__init__(-32, self.message)
if __name__ == "__main__":
diff --git a/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py b/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
index 351b7b9c..c26fef29 100755
--- a/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
+++ b/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
@@ -114,7 +114,7 @@ class CTRexYaml(object):
:parameters:
None
- :reaturn:
+ :return:
None
"""
diff --git a/scripts/automation/trex_control_plane/common/outer_packages.py b/scripts/automation/trex_control_plane/common/outer_packages.py
deleted file mode 100644
index 4d0afd1a..00000000
--- a/scripts/automation/trex_control_plane/common/outer_packages.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/router/bin/python
-
-import sys
-import site
-import os
-
-CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
-ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
-PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs'))
-
-COMMON_UTILS_MODULES = ['enum34-1.0.4'
- ]
-
-
-def import_common_utils_modules():
- # must be in a higher priority
- sys.path.insert(0, PATH_TO_PYTHON_LIB)
- sys.path.append(ROOT_PATH)
- import_module_list(COMMON_UTILS_MODULES)
-
-
-def import_module_list(modules_list):
- assert(isinstance(modules_list, list))
- for p in modules_list:
- full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
- fix_path = os.path.normcase(full_path)
- site.addsitedir(full_path)
-
-import_common_utils_modules()
-
diff --git a/scripts/automation/trex_control_plane/common/trex_exceptions.py b/scripts/automation/trex_control_plane/common/trex_exceptions.py
index 1353fd00..0de38411 100755
--- a/scripts/automation/trex_control_plane/common/trex_exceptions.py
+++ b/scripts/automation/trex_control_plane/common/trex_exceptions.py
@@ -27,7 +27,7 @@ class RPCError(Exception):
class TRexException(RPCError):
"""
- This is the most general T-Rex exception.
+ This is the most general TRex exception.
All exceptions inheriting from this class have an error code and a default message which describes the most common use case of the error.
@@ -35,55 +35,55 @@ class TRexException(RPCError):
"""
code = -10
- _default_message = 'T-Rex encountered an unexpected error. please contact T-Rex dev team.'
+ _default_message = 'TRex encountered an unexpected error. Please contact the TRex dev team.'
# api_name = 'TRex'
class TRexError(TRexException):
"""
- This is the most general T-Rex exception.
+ This is the most general TRex exception.
This exception isn't used by default and is raised only when an error unrelated to ProtocolError occurs and can't be resolved to any of the derived exceptions.
"""
code = -11
- _default_message = 'T-Rex run failed due to wrong input parameters, or due to reachability issues.'
+ _default_message = 'TRex run failed due to wrong input parameters, or due to reachability issues.'
class TRexWarning(TRexException):
- """ Indicates a warning from T-Rex server. When this exception raises it normally used to indicate required data isn't ready yet """
+ """ Indicates a warning from TRex server. When this exception raises it normally used to indicate required data isn't ready yet """
code = -12
- _default_message = 'T-Rex is starting (data is not available yet).'
+ _default_message = 'TRex is starting (data is not available yet).'
class TRexRequestDenied(TRexException):
""" Indicates the desired reques was denied by the server """
code = -33
- _default_message = 'T-Rex desired request denied because the requested resource is already taken. Try again once T-Rex is back in IDLE state.'
+ _default_message = 'TRex desired request denied because the requested resource is already taken. Try again once TRex is back in IDLE state.'
class TRexInUseError(TRexException):
"""
- Indicates that T-Rex is currently in use
+ Indicates that TRex is currently in use
"""
code = -13
- _default_message = 'T-Rex is already being used by another user or process. Try again once T-Rex is back in IDLE state.'
+ _default_message = 'TRex is already being used by another user or process. Try again once TRex is back in IDLE state.'
class TRexRunFailedError(TRexException):
- """ Indicates that T-Rex has failed due to some reason. This Exception is used when T-Rex process itself terminates due to unknown reason """
+ """ Indicates that TRex has failed due to some reason. This Exception is used when TRex process itself terminates due to unknown reason """
code = -14
_default_message = ''
class TRexIncompleteRunError(TRexException):
"""
- Indicates that T-Rex has failed due to some reason.
- This Exception is used when T-Rex process itself terminated with error fault or it has been terminated by an external intervention in the OS.
+ Indicates that TRex has failed for some reason.
+ This exception is used when the TRex process itself terminated with an error fault or was terminated by external intervention in the OS.
"""
code = -15
- _default_message = 'T-Rex run was terminated unexpectedly by outer process or by the hosting OS'
+ _default_message = 'TRex run was terminated unexpectedly by outer process or by the hosting OS'
EXCEPTIONS = [TRexException, TRexError, TRexWarning, TRexInUseError, TRexRequestDenied, TRexRunFailedError, TRexIncompleteRunError]
class CExceptionHandler(object):
"""
- CExceptionHandler is responsible for generating T-Rex API related exceptions in client side.
+ CExceptionHandler is responsible for generating TRex API related exceptions on the client side.
"""
def __init__(self, exceptions):
"""
@@ -92,7 +92,7 @@ class CExceptionHandler(object):
:parameters:
exceptions : list
- a list of all T-Rex acceptable exception objects.
+ a list of all TRex acceptable exception objects.
default list:
- :exc:`trex_exceptions.TRexException`
@@ -113,7 +113,7 @@ class CExceptionHandler(object):
"""
Generates an exception based on a general ProtocolError exception object `err`.
- When T-Rex is reserved, no other user can start new T-Rex runs.
+ When TRex is reserved, no other user can start new TRex runs.
:parameters:
@@ -122,7 +122,7 @@ class CExceptionHandler(object):
a ProtocolError exception raised by :class:`trex_client.CTRexClient` class
:return:
- A T-Rex exception from the exception list defined in class creation.
+ A TRex exception from the exception list defined in class creation.
If such exception wasn't found, returns a TRexException exception
diff --git a/scripts/automation/trex_control_plane/common/trex_stats.py b/scripts/automation/trex_control_plane/common/trex_stats.py
new file mode 100755
index 00000000..b7e768c1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/trex_stats.py
@@ -0,0 +1,60 @@
+#!/router/bin/python
+import copy
+
+
+class CTRexStatsManager(object):
+
+ def __init__(self, *args):
+ for stat_type in args:
+ # register stat handler for each stats type
+ setattr(self, stat_type, CTRexStatsManager.CSingleStatsHandler())
+
+ def __getitem__(self, item):
+ stats_obj = getattr(self, item)
+ if stats_obj:
+ return stats_obj.get_stats()
+ else:
+ return None
+
+ class CSingleStatsHandler(object):
+
+ def __init__(self):
+ self._stats = {}
+
+ def update(self, obj_id, stats_obj):
+ assert isinstance(stats_obj, CTRexStats)
+ self._stats[obj_id] = stats_obj
+
+ def get_stats(self, obj_id=None):
+ if obj_id:
+ return copy.copy(self._stats.pop(obj_id))
+ else:
+ return copy.copy(self._stats)
+
+
+class CTRexStats(object):
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+class CGlobalStats(CTRexStats):
+ def __init__(self, **kwargs):
+ super(CGlobalStats, self).__init__(**kwargs)
+ pass
+
+
+class CPortStats(CTRexStats):
+ def __init__(self, **kwargs):
+ super(CPortStats, self).__init__(**kwargs)
+ pass
+
+
+class CStreamStats(CTRexStats):
+ def __init__(self, **kwargs):
+ super(CStreamStats, self).__init__(**kwargs)
+ pass
+
+
+if __name__ == "__main__":
+ pass
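+ # Illustrative usage sketch (the stat type names and sample values below are
+ # arbitrary, not part of the TRex API): register two stat types, push a single
+ # CPortStats sample, then read the collected stats back.
+ stats_mgr = CTRexStatsManager("ports", "streams")
+ stats_mgr.ports.update(0, CPortStats(tx_pps=1000.0, rx_pps=998.0))
+ print(stats_mgr["ports"])  # prints a copy of the registered stats dict, e.g. {0: <CPortStats object>}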
diff --git a/scripts/automation/trex_control_plane/common/trex_status_e.py b/scripts/automation/trex_control_plane/common/trex_status_e.py
index a14901a1..fbfe92af 100755
--- a/scripts/automation/trex_control_plane/common/trex_status_e.py
+++ b/scripts/automation/trex_control_plane/common/trex_status_e.py
@@ -4,5 +4,5 @@ import outer_packages # import this to overcome doc building import error by sp
from enum import Enum
-# define the states in which a T-Rex can hold during its lifetime
+# define the states TRex can be in during its lifetime
TRexStatus = Enum('TRexStatus', 'Idle Starting Running')
diff --git a/scripts/automation/trex_control_plane/doc/about_trex.rst b/scripts/automation/trex_control_plane/doc/about_trex.rst
index 08858048..669e2b28 100755
--- a/scripts/automation/trex_control_plane/doc/about_trex.rst
+++ b/scripts/automation/trex_control_plane/doc/about_trex.rst
@@ -7,10 +7,13 @@ Full project's official site
To learn all about TRex project, visit TRex `official site <http://trex-tgn.cisco.com>`_
-Even more
----------
+GitHub Repository
+-----------------
-.. toctree::
- :maxdepth: 2
+`TRex GitHub repository <https://github.com/cisco-system-traffic-generator>`_
+
+TRex Google Group
+-----------------
+
+Check out the project's `google group <https://groups.google.com/forum/#!forum/trex-tgn>`_ to contact the TRex dev team and follow other user stories.
- authors \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/api/index.rst b/scripts/automation/trex_control_plane/doc/api/index.rst
deleted file mode 100755
index 7c558a94..00000000
--- a/scripts/automation/trex_control_plane/doc/api/index.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-
-API Reference
-=============
-The TRex API reference section is currently a work in progress.
-
-**TRex Modules**
-
-.. toctree::
- :maxdepth: 4
-
- client_code
- exceptions
-
-**TRex JSON Template**
-
-.. toctree::
- :maxdepth: 4
-
- json_fields
diff --git a/scripts/automation/trex_control_plane/doc/api/json_fields.rst b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
index 193aa01b..c921fec4 100755
--- a/scripts/automation/trex_control_plane/doc/api/json_fields.rst
+++ b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
@@ -1,233 +1,233 @@
-
-TRex JSON Template
-==================
-
-Whenever TRex is publishing live data, it uses JSON notation to describe the data-object.
-
-Each client may parse it diffrently, however this page will describe the values meaning when published by TRex server.
-
-
-Main Fields
------------
-
-Each TRex server-published JSON object contains data divided to main fields under which the actual data lays.
-
-These main fields are:
-
-+-----------------------------+----------------------------------------------------+---------------------------+
-| Main field | Contains | Comments |
-+=============================+====================================================+===========================+
-| :ref:`trex-global-field` | Must-have data on TRex run, | |
-| | mainly regarding Tx/Rx and packet drops | |
-+-----------------------------+----------------------------------------------------+---------------------------+
-| :ref:`tx-gen-field` | Data indicate the quality of the transmit process. | |
-| | In case histogram is zero it means that all packets| |
-| | were injected in the right time. | |
-+-----------------------------+----------------------------------------------------+---------------------------+
-| :ref:`trex-latecny-field` | Latency reports, containing latency data on | - Generated when latency |
-| | generated data and on response traffic | test is enabled (``l`` |
-| | | param) |
-| | | - *typo* on field key: |
-+-----------------------------+----------------------------------------------------+ will be fixed on next |
-| :ref:`trex-latecny-v2-field`| Extended latency information | release |
-+-----------------------------+----------------------------------------------------+---------------------------+
-
-
-Each of these fields contains keys for field general data (such as its name) and its actual data, which is always stored under the **"data"** key.
-
-For example, in order to access some trex-global data, the access path would look like::
-
- AllData -> trex-global -> data -> desired_info
-
-
-
-
-Detailed explanation
---------------------
-
-.. _trex-global-field:
-
-trex-global field
-~~~~~~~~~~~~~~~~~
-
-
-+--------------------------------+-------+-----------------------------------------------------------+
-| Sub-key | Type | Meaning |
-+================================+=======+===========================================================+
-| m_cpu_util | float | CPU utilization (0-100) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_platform_factor | float | multiplier factor |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_bps | float | total tx bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_rx_bps | float | total rx bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_pps | float | total tx packet per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_cps | float | total tx connection per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_expected_cps | float | expected tx connection per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_expected_pps | float | expected tx packet per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_expected_bps | float | expected tx bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_rx_drop_bps | float | drop rate in bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_active_flows | float | active trex flows |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_open_flows | float | open trex flows from startup (monotonically incrementing) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_tx_pkts | int | total tx in packets |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_rx_pkts | int | total rx in packets |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_tx_bytes | int | total tx in bytes |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_rx_bytes | int | total rx in bytes |
-+--------------------------------+-------+-----------------------------------------------------------+
-| opackets-# | int | output packets (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| obytes-# | int | output bytes (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| ipackets-# | int | input packet (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| ibytes-# | int | input bytes (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| ierrors-# | int | input errors (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| oerrors-# | int | input errors (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_tx_bps-# | float | total transmitted data in bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| unknown | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_learn_error [#f1]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_active [#f2]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_no_fid [#f2]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_time_out [#f2]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_open [#f2]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-
-
-.. _tx-gen-field:
-
-tx-gen field
-~~~~~~~~~~~~~~
-
-+-------------------+-------+-----------------------------------------------------------+
-| Sub-key | Type | Meaning |
-+===================+=======+===========================================================+
-| realtime-hist | dict | histogram of transmission. See extended information about |
-| | | histogram object under :ref:`histogram-object-fields`. |
-| | | The attribute analyzed is time packet has been sent |
-| | | before/after it was intended to be |
-+-------------------+-------+-----------------------------------------------------------+
-| unknown | int | |
-+-------------------+-------+-----------------------------------------------------------+
-
-.. _trex-latecny-field:
-
-trex-latecny field
-~~~~~~~~~~~~~~~~~~
-
-+---------+-------+---------------------------------------------------------+
-| Sub-key | Type | Meaning |
-+=========+=======+=========================================================+
-| avg-# | float | average latency in usec (per interface) |
-+---------+-------+---------------------------------------------------------+
-| max-# | float | max latency in usec from the test start (per interface) |
-+---------+-------+---------------------------------------------------------+
-| c-max-# | float | max in the last 1 sec window (per interface) |
-+---------+-------+---------------------------------------------------------+
-| error-# | float | errors in latency packets (per interface) |
-+---------+-------+---------------------------------------------------------+
-| unknown | int | |
-+---------+-------+---------------------------------------------------------+
-
-.. _trex-latecny-v2-field:
-
-trex-latecny-v2 field
-~~~~~~~~~~~~~~~~~~~~~
-
-+--------------------------------------+-------+--------------------------------------+
-| Sub-key | Type | Meaning |
-+======================================+=======+======================================+
-| cpu_util | float | rx thread cpu % (this is not trex DP |
-| | | threads cpu%%) |
-+--------------------------------------+-------+--------------------------------------+
-| port-# | | Containing per interface |
-| | dict | information. See extended |
-| | | information under ``port-# -> |
-| | | key_name -> sub_key`` |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->hist | dict | histogram of latency. See extended |
-| | | information about histogram object |
-| | | under :ref:`histogram-object-fields`.|
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats | | Containing per interface |
-| | dict | information. See extended |
-| | | information under ``port-# -> |
-| | | key_name -> sub_key`` |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_tx_pkt_ok | int | total of try sent packets |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_pkt_ok | int | total of packets sent from hardware |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_no_magic | int | rx error with no magic |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_no_id | int | rx errors with no id |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_seq_error | int | error in seq number |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_length_error | int | |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_rx_check | int | packets tested in rx |
-+--------------------------------------+-------+--------------------------------------+
-| unknown | int | |
-+--------------------------------------+-------+--------------------------------------+
-
-
-
-.. _histogram-object-fields:
-
-Histogram object fields
-~~~~~~~~~~~~~~~~~~~~~~~
-
-The histogram object is being used in number of place throughout the JSON object.
-The following section describes its fields in detail.
-
-
-+-----------+-------+-----------------------------------------------------------------------------------+
-| Sub-key | Type | Meaning |
-+===========+=======+===================================================================================+
-| min_usec | int | min attribute value in usec. pkt with latency less than this value is not counted |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| max_usec | int | max attribute value in usec |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| high_cnt | int | how many packets on which its attribute > min_usec |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| cnt | int | total packets from test startup |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| s_avg | float | average value from test startup |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| histogram | | histogram of relevant object by the following keys: |
-| | array | - key: value in usec |
-| | | - val: number of packets |
-+-----------+-------+-----------------------------------------------------------------------------------+
-
-
-Access Examples
----------------
-
-
-
-.. rubric:: Footnotes
-
-.. [#f1] Available only in NAT and NAT learning operation (``learn`` and ``learn-verify`` flags)
-
+
+TRex JSON Template
+==================
+
+Whenever TRex is publishing live data, it uses JSON notation to describe the data-object.
+
+Each client may parse it differently; this page describes the meaning of the values as published by the TRex server.
+
+
+Main Fields
+-----------
+
+Each TRex server-published JSON object contains data divided into main fields, under which the actual data lies.
+
+These main fields are:
+
++-----------------------------+----------------------------------------------------+---------------------------+
+| Main field | Contains | Comments |
++=============================+====================================================+===========================+
+| :ref:`trex-global-field` | Must-have data on TRex run, | |
+| | mainly regarding Tx/Rx and packet drops | |
++-----------------------------+----------------------------------------------------+---------------------------+
+| :ref:`tx-gen-field` | Data indicate the quality of the transmit process. | |
+| | In case histogram is zero it means that all packets| |
+| | were injected in the right time. | |
++-----------------------------+----------------------------------------------------+---------------------------+
+| :ref:`trex-latecny-field` | Latency reports, containing latency data on | - Generated when latency |
+| | generated data and on response traffic | test is enabled (``l`` |
+| | | param) |
+| | | - *typo* on field key: |
++-----------------------------+----------------------------------------------------+ will be fixed on next |
+| :ref:`trex-latecny-v2-field`| Extended latency information | release |
++-----------------------------+----------------------------------------------------+---------------------------+
+
+
+Each of these fields contains keys for field general data (such as its name) and its actual data, which is always stored under the **"data"** key.
+
+For example, in order to access some trex-global data, the access path would look like::
+
+ AllData -> trex-global -> data -> desired_info
+
+
+
+
+Detailed explanation
+--------------------
+
+.. _trex-global-field:
+
+trex-global field
+~~~~~~~~~~~~~~~~~
+
+
++--------------------------------+-------+-----------------------------------------------------------+
+| Sub-key | Type | Meaning |
++================================+=======+===========================================================+
+| m_cpu_util | float | CPU utilization (0-100) |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_platform_factor | float | multiplier factor |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_bps | float | total tx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_rx_bps | float | total rx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_pps | float | total tx packet per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_cps | float | total tx connection per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_cps | float | expected tx connection per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_pps | float | expected tx packet per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_bps | float | expected tx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_rx_drop_bps | float | drop rate in bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_active_flows | float | active trex flows |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_open_flows | float | open trex flows from startup (monotonically incrementing) |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_pkts | int | total tx in packets |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_rx_pkts | int | total rx in packets |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_bytes | int | total tx in bytes |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_rx_bytes | int | total rx in bytes |
++--------------------------------+-------+-----------------------------------------------------------+
+| opackets-# | int | output packets (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| obytes-# | int | output bytes (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ipackets-# | int | input packet (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ibytes-# | int | input bytes (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ierrors-# | int | input errors (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| oerrors-# | int | output errors (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_bps-# | float | total transmitted data in bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| unknown | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_learn_error [#f1]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_active [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_no_fid [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_time_out [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_open [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+
+
+.. _tx-gen-field:
+
+tx-gen field
+~~~~~~~~~~~~
+
++-------------------+-------+-----------------------------------------------------------+
+| Sub-key | Type | Meaning |
++===================+=======+===========================================================+
+| realtime-hist | dict | histogram of transmission. See extended information about |
+| | | histogram object under :ref:`histogram-object-fields`. |
+| | | The attribute analyzed is time packet has been sent |
+| | | before/after it was intended to be |
++-------------------+-------+-----------------------------------------------------------+
+| unknown | int | |
++-------------------+-------+-----------------------------------------------------------+
+
+.. _trex-latecny-field:
+
+trex-latecny field
+~~~~~~~~~~~~~~~~~~
+
++---------+-------+---------------------------------------------------------+
+| Sub-key | Type | Meaning |
++=========+=======+=========================================================+
+| avg-# | float | average latency in usec (per interface) |
++---------+-------+---------------------------------------------------------+
+| max-# | float | max latency in usec from the test start (per interface) |
++---------+-------+---------------------------------------------------------+
+| c-max-# | float | max in the last 1 sec window (per interface) |
++---------+-------+---------------------------------------------------------+
+| error-# | float | errors in latency packets (per interface) |
++---------+-------+---------------------------------------------------------+
+| unknown | int | |
++---------+-------+---------------------------------------------------------+
+
+.. _trex-latecny-v2-field:
+
+trex-latecny-v2 field
+~~~~~~~~~~~~~~~~~~~~~
+
++--------------------------------------+-------+--------------------------------------+
+| Sub-key | Type | Meaning |
++======================================+=======+======================================+
+| cpu_util | float | rx thread cpu % (this is not trex DP |
+| | | threads cpu%%) |
++--------------------------------------+-------+--------------------------------------+
+| port-# | | Containing per interface |
+| | dict | information. See extended |
+| | | information under ``port-# -> |
+| | | key_name -> sub_key`` |
++--------------------------------------+-------+--------------------------------------+
+| port-#->hist | dict | histogram of latency. See extended |
+| | | information about histogram object |
+| | | under :ref:`histogram-object-fields`.|
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats | | Containing per interface |
+| | dict | information. See extended |
+| | | information under ``port-# -> |
+| | | key_name -> sub_key`` |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_tx_pkt_ok | int | total of try sent packets |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_pkt_ok | int | total of packets sent from hardware |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_no_magic | int | rx error with no magic |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_no_id | int | rx errors with no id |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_seq_error | int | error in seq number |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_length_error | int | |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_rx_check | int | packets tested in rx |
++--------------------------------------+-------+--------------------------------------+
+| unknown | int | |
++--------------------------------------+-------+--------------------------------------+
+
+
+
+.. _histogram-object-fields:
+
+Histogram object fields
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The histogram object is used in a number of places throughout the JSON object.
+The following section describes its fields in detail.
+
+
++-----------+-------+-----------------------------------------------------------------------------------+
+| Sub-key | Type | Meaning |
++===========+=======+===================================================================================+
+| min_usec | int | min attribute value in usec. pkt with latency less than this value is not counted |
++-----------+-------+-----------------------------------------------------------------------------------+
+| max_usec | int | max attribute value in usec |
++-----------+-------+-----------------------------------------------------------------------------------+
+| high_cnt | int | how many packets on which its attribute > min_usec |
++-----------+-------+-----------------------------------------------------------------------------------+
+| cnt | int | total packets from test startup |
++-----------+-------+-----------------------------------------------------------------------------------+
+| s_avg | float | average value from test startup |
++-----------+-------+-----------------------------------------------------------------------------------+
+| histogram | | histogram of relevant object by the following keys: |
+| | array | - key: value in usec |
+| | | - val: number of packets |
++-----------+-------+-----------------------------------------------------------------------------------+
+
+
+Access Examples
+---------------
+
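+A minimal, illustrative access sketch in Python (``raw_json`` is assumed to be the JSON
+string published by the server; the interface index ``0`` and the specific keys are
+examples taken from the tables above)::
+
+    import json
+
+    all_data = json.loads(raw_json)
+    tx_bps    = all_data["trex-global"]["data"]["m_tx_bps"]
+    opackets0 = all_data["trex-global"]["data"]["opackets-0"]
+    # average latency on interface 0 (note the documented typo in the field key)
+    avg_lat0  = all_data["trex-latecny"]["data"]["avg-0"]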
+
+
+.. rubric:: Footnotes
+
+.. [#f1] Available only in NAT and NAT learning operation (``learn`` and ``learn-verify`` flags)
+
.. [#f2] Available only in NAT operation (``learn`` flag) \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/authors.rst b/scripts/automation/trex_control_plane/doc/authors.rst
deleted file mode 100755
index 08ee5db5..00000000
--- a/scripts/automation/trex_control_plane/doc/authors.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-=======
-Authors
-=======
-
-TRex is developed in Cisco Systems Inc. as the next generation traffic generator.
-
-TRex core-team developers are:
-
- - Hanoch Haim
- - Dave Johnson
- - Wenxian Li
- - Dan Klein
- - Itay Marom \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/client_utils.rst b/scripts/automation/trex_control_plane/doc/client_utils.rst
index 5f133eee..122ae310 100755
--- a/scripts/automation/trex_control_plane/doc/client_utils.rst
+++ b/scripts/automation/trex_control_plane/doc/client_utils.rst
@@ -1,14 +1,14 @@
-
-Client Utilities
-================
-
-TRex YAML generator
--------------------
-
-.. automodule:: trex_yaml_gen
- :members:
-
-General Utilities
------------------
-.. automodule:: general_utils
+
+Client Utilities
+================
+
+TRex YAML generator
+-------------------
+
+.. automodule:: trex_yaml_gen
+ :members:
+
+General Utilities
+-----------------
+.. automodule:: general_utils
:members: \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/index.rst b/scripts/automation/trex_control_plane/doc/index.rst
index dcaf9505..10803cb0 100755
--- a/scripts/automation/trex_control_plane/doc/index.rst
+++ b/scripts/automation/trex_control_plane/doc/index.rst
@@ -4,15 +4,15 @@
contain the root `toctree` directive.
Welcome to TRex Control Plane's documentation!
-===============================================
+==============================================
-TRex is a **realistic traffic generator** that enables you to do get learn more about your under developement devices.
+TRex is a **realistic traffic generator** that enables you to learn more about your under-development devices.
This site covers the Python API of TRex control plane, and explains how to utilize it to your needs.
However, since the entire API is JSON-RPC [#f1]_ based, you may want to check out other implementations that could suit you.
-To understand the entirely how the API works and how to set up the server side, check out the `API documentation <http://csi-wiki-01:8080/display/bpsim/Documentation>`_ undee the documentation section of TRex website.
+To understand fully how the API works and how to set up the server side, check out the `trex-core Wiki <https://github.com/cisco-system-traffic-generator/trex-core/wiki>`_ under the documentation section of the TRex website.
**Use the table of contents below or the menu to your left to navigate through the site**
@@ -24,8 +24,6 @@ Getting Started
:maxdepth: 2
installation
- client_utils
- usage_examples
API Reference
=============
@@ -34,14 +32,26 @@ API Reference
api/index
+Client Utilities
+================
+.. toctree::
+ :maxdepth: 2
+
+ client_utils
+
+Usage Examples
+==============
+.. toctree::
+ :maxdepth: 2
+
+ usage_examples
+
About TRex
==========
.. toctree::
:maxdepth: 2
All about TRex <about_trex>
- license
-
Indices and tables
diff --git a/scripts/automation/trex_control_plane/doc/installation.rst b/scripts/automation/trex_control_plane/doc/installation.rst
deleted file mode 100755
index 29239d97..00000000
--- a/scripts/automation/trex_control_plane/doc/installation.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-============
-Installation
-============
-
-Prerequisites
--------------
-The TRex control plane is based on client-server model that interacts using JSON-RPC.
-
-In order to use the client-side API documented a TRex server daemon must be up and listening on the same host and port that the client tries to connect with.
-
-Compatibility
--------------
-Both client and server side were developed for Linux platform.
-The client-side module is also compatible with windows python.
-
-The client side can be used with both Python 2 and Python 3 versions.
-However, the server side was desined to and best fits with Python 2.7.6 and on (all 2.x series, assuming > 2.6.9).
-
-
-Installation manual
--------------------
-
-TRex Control Plane is a cross-platform, cross-operating system APi to control and run TRex.
-
-The full, most updated manual (which refers to all programming languages) can be found under the `Automation API documentation <http://csi-wiki-01:8080/display/bpsim/Documentation>`_ . \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/json_dictionary.yaml b/scripts/automation/trex_control_plane/doc/json_dictionary.yaml
index 853ded65..89535b56 100755
--- a/scripts/automation/trex_control_plane/doc/json_dictionary.yaml
+++ b/scripts/automation/trex_control_plane/doc/json_dictionary.yaml
@@ -1,6 +1,6 @@
-################################################################
-#### T-Rex JSON Dictionary definitions ####
-################################################################
+###############################################################
+#### TRex JSON Dictionary definitions ####
+###############################################################
trex-global :
diff --git a/scripts/automation/trex_control_plane/doc/license.rst b/scripts/automation/trex_control_plane/doc/license.rst
deleted file mode 100755
index b83dd4b3..00000000
--- a/scripts/automation/trex_control_plane/doc/license.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-=======
-License
-=======
-
-
-Copyright 2015 Cisco Systems Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/index.rst b/scripts/automation/trex_control_plane/doc/packet_generator/index.rst
new file mode 100755
index 00000000..ed1d460d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/index.rst
@@ -0,0 +1,18 @@
+
+TRex Packet Builder
+-------------------
+The TRex Packet Generator is a module designed to generate a single packet and set its ranging options, to be transmitted later using TRex.
+
+The packet generator module makes extensive use of the `dpkt <https://github.com/kbandla/dpkt>`_ python module to create packet headers.
+
+.. toctree::
+ :maxdepth: 4
+
+ packet_builder_code
+
+.. toctree::
+ :maxdepth: 0
+ :titlesonly:
+
+ examples
+ stream_export \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/packet_builder_code.rst b/scripts/automation/trex_control_plane/doc/packet_generator/packet_builder_code.rst
new file mode 100755
index 00000000..3a6e8d5f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/packet_builder_code.rst
@@ -0,0 +1,12 @@
+
+CTRexPktBuilder class
+---------------------
+
+.. autoclass:: packet_builder.CTRexPktBuilder
+ :members:
+ :member-order: bysource
+
+Packet Builder Exceptions
+-------------------------
+
+For exceptions documentation see here: :exc:`Packet Builder Exceptions <packet_builder.CTRexPktBuilder.CPacketBuildException>` \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst b/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst
new file mode 100755
index 00000000..eb639f7c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst
@@ -0,0 +1,29 @@
+
+Stream Export YAML syntax
+=========================
+
+In order to provide a fluent work-flow that makes the best use of TRex users' time, an export-import mini language has been created.
+
+This enables a work-flow that supports saving and sharing built packets and their scenarios, so that other tools
+(such as TRex Console) could use them.
+
+The TRex Packet Builder module supports (using the ___ method) exporting a built stream according to the format described below.
+
+Guidelines
+----------
+
+1. The YAML file can either contain a byte representation of the packet or refer to a .pcap file that contains it.
+2. The YAML file is kept as similar as possible to the `add_stream method <http://trex-tgn.cisco.com/trex/doc/trex_rpc_server_spec.html#_add_stream>`_ of the TRex RPC server spec, which defines the raw interaction with the TRex server.
+3. Only packet binary data and VM instructions are saved. Any meta-data the packet builder module used while creating the packet is stripped out.
+
+Export Format
+-------------
+
+.. literalinclude:: export_format.yaml
+ :lines: 4-
+ :linenos:
+
+Example
+-------
+
+The following file snapshots represent each of the options (binary/pcap) for the very same HTTP GET request packet.
diff --git a/scripts/automation/trex_control_plane/examples/interactive_stateless.py b/scripts/automation/trex_control_plane/examples/interactive_stateless.py
new file mode 100644
index 00000000..7c25b4ef
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/interactive_stateless.py
@@ -0,0 +1,128 @@
+#!/router/bin/python
+
+import trex_root_path
+from client.trex_stateless_client import *
+from common.trex_exceptions import *
+import cmd
+from termstyle import termstyle
+# import termstyle
+import os
+from argparse import ArgumentParser
+import socket
+import errno
+import ast
+import json
+
+
+class InteractiveStatelessTRex(cmd.Cmd):
+
+ intro = termstyle.green("\nInteractive shell to play with Cisco's TRex stateless API.\
+ \nType help to view available pre-defined scenarios\n(c) All rights reserved.\n")
+ prompt = '> '
+
+ def __init__(self, trex_host, trex_port, virtual, verbose):
+ cmd.Cmd.__init__(self)
+
+ self.verbose = verbose
+ self.virtual = virtual
+ self.trex = CTRexStatelessClient(trex_host, trex_port, self.virtual)
+ self.DEFAULT_RUN_PARAMS = dict(m=1.5,
+ nc=True,
+ p=True,
+ d=100,
+ f='avl/sfr_delay_10_1g.yaml',
+ l=1000)
+ self.run_params = dict(self.DEFAULT_RUN_PARAMS)
+
+ def do_transmit(self, line):
+ """Transmits a request over using a given link to server.\
+ \nuse: transmit [method_name] [method_params]"""
+ if line == "":
+ print "\nUsage: [method name] [param dict as string]\n"
+ print "Example: rpc test_add {'x': 12, 'y': 17}\n"
+ return
+
+ args = line.split(' ', 1) # args will have max length of 2
+ method_name = args[0]
+ params = None
+ bad_parse = False
+
+ try:
+ params = ast.literal_eval(args[1])
+ if not isinstance(params, dict):
+ bad_parse = True
+ except ValueError as e1:
+ bad_parse = True
+ except SyntaxError as e2:
+ bad_parse = True
+
+ if bad_parse:
+ print "\nValue should be a valid dict: '{0}'".format(args[1])
+ print "\nUsage: [method name] [param dict as string]\n"
+ print "Example: rpc test_add {'x': 12, 'y': 17}\n"
+ return
+
+ response = self.trex.transmit(method_name, params)
+ if not self.virtual:
+ # expect response
+ rc, msg = response
+ if rc:
+ print "\nServer Response:\n\n" + json.dumps(msg) + "\n"
+ else:
+ print "\n*** " + msg + "\n"
+
+
+
+
+
+ def do_push_files(self, filepaths):
+ """Pushes a custom file to be stored locally on T-Rex server.\
+ \nPush multiple files by specifying their path separated by ' ' (space)."""
+ try:
+ filepaths = filepaths.split(' ')
+ print termstyle.green("*** Starting pushing files ({trex_files}) to T-Rex. ***".format(
+ trex_files=', '.join(filepaths))
+ )
+ ret_val = self.trex.push_files(filepaths)
+ if ret_val:
+ print termstyle.green("*** End of T-Rex push_files method (success) ***")
+ else:
+ print termstyle.magenta("*** End of T-Rex push_files method (failed) ***")
+
+ except IOError as inst:
+ print termstyle.magenta(inst)
+
+if __name__ == "__main__":
+ parser = ArgumentParser(description=termstyle.cyan('Run TRex client stateless API demos and scenarios.'),
+ usage="client_interactive_example [options]")
+
+ parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0 \t (C) Cisco Systems Inc.\n')
+
+ parser.add_argument("-t", "--trex-host", required = True, dest="trex_host",
+ action="store", help="Specify the hostname or ip to connect with T-Rex server.",
+ metavar="HOST" )
+ parser.add_argument("-p", "--trex-port", type=int, default = 5050, metavar="PORT", dest="trex_port",
+ help="Select port on which the T-Rex server listens. Default port is 5050.", action="store")
+ # parser.add_argument("-m", "--maxhist", type=int, default = 100, metavar="SIZE", dest="hist_size",
+ # help="Specify maximum history size saved at client side. Default size is 100.", action="store")
+ parser.add_argument("--virtual", dest="virtual",
+ action="store_true",
+ help="Switch ON virtual option at TRex client. Default is: OFF.",
+ default=False)
+ parser.add_argument("--verbose", dest="verbose",
+ action="store_true",
+ help="Switch ON verbose option at TRex client. Default is: OFF.",
+ default=False)
+ args = parser.parse_args()
+
+ try:
+ InteractiveStatelessTRex(**vars(args)).cmdloop()
+
+ except KeyboardInterrupt:
+ print termstyle.cyan('Bye Bye!')
+ exit(-1)
+ except socket.error, e:
+ if e.errno == errno.ECONNREFUSED:
+ raise socket.error(errno.ECONNREFUSED,
+ "Connection from T-Rex server was terminated. \
+ Please make sure the server is up.")
diff --git a/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
index 7e7f6139..acaa95d3 100755
--- a/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
+++ b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
@@ -2,7 +2,7 @@
######################################################################################
### ###
-### T-Rex end-to-end demo script, written by T-Rex dev-team ###
+### TRex end-to-end demo script, written by TRex dev-team ###
### THIS SCRIPT ASSUMES PyYaml and Scapy INSTALLED ON PYTHON'S RUNNING MACHINE ###
### (for any question please contact trex-dev team @ trex-dev@cisco.com) ###
### ###
@@ -33,13 +33,13 @@ def pkts_to_pcap (pcap_filename, packets):
def main (args):
- # instantiate T-Rex client
+ # instantiate TRex client
trex = CTRexClient('trex-dan', verbose = args.verbose)
if args.steps:
print "\nNext step: .pcap generation."
raw_input("Press Enter to continue...")
- # generate T-Rex traffic.
+ # generate TRex traffic.
pkts = generate_dns_packets('21.0.0.2', '22.0.0.12') # In this case - DNS traffic (request-response)
print "\ngenerated traffic:"
print "=================="
@@ -50,7 +50,7 @@ def main (args):
print "\nNext step: .yaml generation."
raw_input("Press Enter to continue...")
# Generate .yaml file that uses the generated .pcap file
- trex_files_path = trex.get_trex_files_path() # fetch the path in which packets are saved on T-Rex server
+ trex_files_path = trex.get_trex_files_path() # fetch the path in which packets are saved on TRex server
yaml_obj = CTRexYaml(trex_files_path) # instantiate CTRexYaml obj
# set .yaml file parameters according to need and use
@@ -65,12 +65,12 @@ def main (args):
yaml_obj.dump()
if args.steps:
- print "\nNext step: run T-Rex with provided files."
+ print "\nNext step: run TRex with provided files."
raw_input("Press Enter to continue...")
# push all relevant files to server
trex.push_files( yaml_obj.get_file_list() )
- print "\nStarting T-Rex..."
+ print "\nStarting TRex..."
trex.start_trex(c = 2,
m = 1.5,
nc = True,
@@ -80,8 +80,8 @@ def main (args):
l = 1000)
if args.verbose:
- print "T-Rex state changed to 'Running'."
- print "Sampling T-Rex in 0.2 samples/sec (single sample every 5 secs)"
+ print "TRex state changed to 'Running'."
+ print "Sampling TRex in 0.2 samples/sec (single sample every 5 secs)"
last_res = dict()
while trex.is_running(dump_out = last_res):
@@ -92,14 +92,14 @@ def main (args):
if __name__ == "__main__":
- parser = ArgumentParser(description = 'Run T-Rex client API end-to-end example.',
+ parser = ArgumentParser(description = 'Run TRex client API end-to-end example.',
usage = """pkt_generation_for_trex [options]""" )
parser.add_argument("-s", "--step-by-step", dest="steps",
action="store_false", help="Switch OFF step-by-step script overview. Default is: ON.",
default = True )
parser.add_argument("--verbose", dest="verbose",
- action="store_true", help="Switch ON verbose option at T-Rex client. Default is: OFF.",
+ action="store_true", help="Switch ON verbose option at TRex client. Default is: OFF.",
default = False )
args = parser.parse_args()
main(args) \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/server/extended_daemon_runner.py b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
index 1813ed48..734fa22e 100755
--- a/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
+++ b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
@@ -97,9 +97,9 @@ class ExtendedDaemonRunner(runner.DaemonRunner):
@staticmethod
def _show(self):
if self.pidfile.is_locked():
- print termstyle.red("T-Rex server daemon is running")
+ print termstyle.red("TRex server daemon is running")
else:
- print termstyle.red("T-Rex server daemon is NOT running")
+ print termstyle.red("TRex server daemon is NOT running")
def do_action(self):
self.__prevent_duplicate_runs()
diff --git a/scripts/automation/trex_control_plane/server/trex_daemon_server.py b/scripts/automation/trex_control_plane/server/trex_daemon_server.py
index 5032423a..ec07cb8a 100755
--- a/scripts/automation/trex_control_plane/server/trex_daemon_server.py
+++ b/scripts/automation/trex_control_plane/server/trex_daemon_server.py
@@ -54,7 +54,7 @@ def main ():
logger.addHandler(handler)
except EnvironmentError, e:
if e.errno == errno.EACCES: # catching permission denied error
- print "Launching user must have sudo privileges in order to run T-Rex daemon.\nTerminating daemon process."
+ print "Launching user must have sudo privileges in order to run TRex daemon.\nTerminating daemon process."
exit(-1)
try:
diff --git a/scripts/automation/trex_control_plane/server/trex_launch_thread.py b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
index b4be60a9..59c382ea 100755
--- a/scripts/automation/trex_control_plane/server/trex_launch_thread.py
+++ b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
@@ -33,44 +33,44 @@ class AsynchronousTRexSession(threading.Thread):
with open(os.devnull, 'w') as DEVNULL:
self.time_stamps['start'] = self.time_stamps['run_time'] = time.time()
self.session = subprocess.Popen("exec "+self.cmd, cwd = self.launch_path, shell=True, stdin = DEVNULL, stderr = subprocess.PIPE, preexec_fn=os.setsid)
- logger.info("T-Rex session initialized successfully, Parent process pid is {pid}.".format( pid = self.session.pid ))
+ logger.info("TRex session initialized successfully, Parent process pid is {pid}.".format( pid = self.session.pid ))
while self.session.poll() is None: # subprocess is NOT finished
time.sleep(0.5)
if self.stoprequest.is_set():
- logger.debug("Abort request received by handling thread. Terminating T-Rex session." )
+ logger.debug("Abort request received by handling thread. Terminating TRex session." )
os.killpg(self.session.pid, signal.SIGUSR1)
self.trexObj.set_status(TRexStatus.Idle)
- self.trexObj.set_verbose_status("T-Rex is Idle")
+ self.trexObj.set_verbose_status("TRex is Idle")
break
self.time_stamps['run_time'] = time.time() - self.time_stamps['start']
try:
if self.time_stamps['run_time'] < 5:
- logger.error("T-Rex run failed due to wrong input parameters, or due to reachability issues.")
- self.trexObj.set_verbose_status("T-Rex run failed due to wrong input parameters, or due to reachability issues.\n\nT-Rex command: {cmd}\n\nRun output:\n{output}".format(
+ logger.error("TRex run failed due to wrong input parameters, or due to readability issues.")
+ self.trexObj.set_verbose_status("TRex run failed due to wrong input parameters, or due to readability issues.\n\nTRex command: {cmd}\n\nRun output:\n{output}".format(
cmd = self.cmd, output = self.load_trex_output(self.export_path)))
self.trexObj.errcode = -11
elif (self.session.returncode is not None and self.session.returncode < 0) or ( (self.time_stamps['run_time'] < self.duration) and (not self.stoprequest.is_set()) ):
if (self.session.returncode is not None and self.session.returncode < 0):
- logger.debug("Failed T-Rex run due to session return code ({ret_code})".format( ret_code = self.session.returncode ) )
+ logger.debug("Failed TRex run due to session return code ({ret_code})".format( ret_code = self.session.returncode ) )
elif ( (self.time_stamps['run_time'] < self.duration) and not self.stoprequest.is_set()):
- logger.debug("Failed T-Rex run due to running time ({runtime}) combined with no-stopping request.".format( runtime = self.time_stamps['run_time'] ) )
+ logger.debug("Failed TRex run due to running time ({runtime}) combined with no-stopping request.".format( runtime = self.time_stamps['run_time'] ) )
- logger.warning("T-Rex run was terminated unexpectedly by outer process or by the hosting OS")
- self.trexObj.set_verbose_status("T-Rex run was terminated unexpectedly by outer process or by the hosting OS.\n\nRun output:\n{output}".format(
+ logger.warning("TRex run was terminated unexpectedly by outer process or by the hosting OS")
+ self.trexObj.set_verbose_status("TRex run was terminated unexpectedly by outer process or by the hosting OS.\n\nRun output:\n{output}".format(
output = self.load_trex_output(self.export_path)))
self.trexObj.errcode = -15
else:
- logger.info("T-Rex run session finished.")
- self.trexObj.set_verbose_status('T-Rex finished.')
+ logger.info("TRex run session finished.")
+ self.trexObj.set_verbose_status('TRex finished.')
self.trexObj.errcode = None
finally:
self.trexObj.set_status(TRexStatus.Idle)
logger.info("TRex running state changed to 'Idle'.")
self.trexObj.expect_trex.clear()
- logger.debug("Finished handling a single run of T-Rex.")
+ logger.debug("Finished handling a single run of TRex.")
self.trexObj.zmq_dump = None
def join (self, timeout = None):
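The launch thread above relies on a launch-in-its-own-process-group / kill-the-group pattern; a minimal stand-alone sketch of just that mechanism is shown below (the sleep command stands in for the real t-rex-64 invocation).

    import os, signal, subprocess, time

    with open(os.devnull, 'w') as DEVNULL:
        # 'exec' makes the child pid the generator itself; os.setsid starts a new
        # session, so the whole process group can be signalled at once.
        session = subprocess.Popen("exec sleep 600", shell=True, stdin=DEVNULL,
                                   stderr=subprocess.PIPE, preexec_fn=os.setsid)
        time.sleep(1)
        os.killpg(session.pid, signal.SIGUSR1)   # abort request: signal the entire group
        session.wait()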
diff --git a/scripts/automation/trex_control_plane/server/trex_server.py b/scripts/automation/trex_control_plane/server/trex_server.py
index 35b2669a..1e5098fb 100755
--- a/scripts/automation/trex_control_plane/server/trex_server.py
+++ b/scripts/automation/trex_control_plane/server/trex_server.py
@@ -34,7 +34,7 @@ CCustomLogger.setup_custom_logger('TRexServer')
logger = logging.getLogger('TRexServer')
class CTRexServer(object):
- """This class defines the server side of the RESTfull interaction with T-Rex"""
+ """This class defines the server side of the RESTfull interaction with TRex"""
DEFAULT_TREX_PATH = '/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.55/' #'/auto/proj-pcube-b/apps/PL-b/tools/nightly/trex_latest'
TREX_START_CMD = './t-rex-64'
DEFAULT_FILE_PATH = '/tmp/trex_files/'
@@ -53,7 +53,7 @@ class CTRexServer(object):
the port number on which trex's zmq module will interact with daemon server
default value: 4500
- Instantiate a T-Rex client object, and connecting it to listening daemon-server
+ Instantiate a TRex client object, and connect it to the listening daemon-server
"""
self.TREX_PATH = os.path.abspath(os.path.dirname(trex_path+'/'))
self.trex_files_path = os.path.abspath(os.path.dirname(trex_files_path+'/'))
@@ -94,17 +94,17 @@ class CTRexServer(object):
"""This method fires up the daemon server based on initialized parameters of the class"""
# initialize the server instance with given reasources
try:
- print "Firing up T-Rex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port )
- logger.info("Firing up T-Rex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
+ print "Firing up TRex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port )
+ logger.info("Firing up TRex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
logger.info("current working dir is: {0}".format(self.TREX_PATH) )
logger.info("current files dir is : {0}".format(self.trex_files_path) )
logger.debug("Starting TRex server. Registering methods to process.")
self.server = SimpleJSONRPCServer( (self.trex_host, self.trex_daemon_port) )
except socket.error as e:
if e.errno == errno.EADDRINUSE:
- logger.error("T-Rex server requested address already in use. Aborting server launching.")
- print "T-Rex server requested address already in use. Aborting server launching."
- raise socket.error(errno.EADDRINUSE, "T-Rex daemon requested address already in use. Server launch aborted. Please make sure no other process is using the desired server properties.")
+ logger.error("TRex server requested address already in use. Aborting server launching.")
+ print "TRex server requested address already in use. Aborting server launching."
+ raise socket.error(errno.EADDRINUSE, "TRex daemon requested address already in use. Server launch aborted. Please make sure no other process is using the desired server properties.")
# set further functionality and peripherals to server instance
try:
@@ -136,7 +136,7 @@ class CTRexServer(object):
def stop_handler (self, signum, frame):
logger.info("Daemon STOP request detected.")
if self.is_running():
- # in case T-Rex process is currently running, stop it before terminating server process
+ # in case TRex process is currently running, stop it before terminating server process
self.stop_trex(self.trex.get_seq())
sys.exit(0)
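The daemon bring-up in the hunks above is a plain jsonrpclib server bind with an explicit EADDRINUSE translation; a minimal sketch under the assumption that jsonrpclib is installed (host, port and the registered method name are illustrative only):

    import errno, socket
    from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer

    try:
        server = SimpleJSONRPCServer(('0.0.0.0', 8090))
    except socket.error as e:
        if e.errno == errno.EADDRINUSE:
            raise socket.error(errno.EADDRINUSE, "Port 8090 already in use, pick another port.")
        raise
    server.register_function(lambda: True, 'connectivity_check')   # illustrative method name
    # server.serve_forever()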
@@ -163,25 +163,25 @@ class CTRexServer(object):
def reserve_trex (self, user):
if user == "":
- logger.info("T-Rex reservation cannot apply to empty string user. Request denied.")
- return Fault(-33, "T-Rex reservation cannot apply to empty string user. Request denied.")
+ logger.info("TRex reservation cannot apply to empty string user. Request denied.")
+ return Fault(-33, "TRex reservation cannot apply to empty string user. Request denied.")
with self.start_lock:
logger.info("Processing reserve_trex() command.")
if self.is_reserved():
if user == self.__reservation['user']:
# return True is the same user is asking and already has the resrvation
- logger.info("the same user is asking and already has the resrvation. Re-reserving T-Rex.")
+ logger.info("the same user is asking and already has the resrvation. Re-reserving TRex.")
return True
- logger.info("T-Rex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
- return Fault(-33, "T-Rex is already reserved to another user ({res_user}). Please make sure T-Rex is free before reserving it.".format(
+ logger.info("TRex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
+ return Fault(-33, "TRex is already reserved to another user ({res_user}). Please make sure TRex is free before reserving it.".format(
res_user = self.__reservation['user']) ) # raise at client TRexInUseError
elif self.trex.get_status() != TRexStatus.Idle:
- logger.info("T-Rex is currently running, cannot reserve T-Rex unless in Idle state.")
- return Fault(-13, 'T-Rex is currently running, cannot reserve T-Rex unless in Idle state. Please try again when T-Rex run finished.') # raise at client TRexInUseError
+ logger.info("TRex is currently running, cannot reserve TRex unless in Idle state.")
+ return Fault(-13, 'TRex is currently running, cannot reserve TRex unless in Idle state. Please try again when the TRex run has finished.') # raise at client TRexInUseError
else:
- logger.info("T-Rex is now reserved for user ({res_user}).".format( res_user = user ))
+ logger.info("TRex is now reserved for user ({res_user}).".format( res_user = user ))
self.__reservation = {'user' : user, 'since' : time.ctime()}
logger.debug("Reservation details: "+ str(self.__reservation))
return True
@@ -191,15 +191,15 @@ class CTRexServer(object):
logger.info("Processing cancel_reservation() command.")
if self.is_reserved():
if self.__reservation['user'] == user:
- logger.info("T-Rex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
+ logger.info("TRex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
self.__reservation = None
return True
else:
- logger.warning("T-Rex is reserved to different user than the provided one. Reservation wasn't canceled.")
+ logger.warning("TRex is reserved to different user than the provided one. Reservation wasn't canceled.")
return Fault(-33, "Cancel reservation request is available to the user that holds the reservation. Request denied") # raise at client TRexRequestDenied
else:
- logger.info("T-Rex is not reserved to anyone. No need to cancel anything")
+ logger.info("TRex is not reserved to anyone. No need to cancel anything")
assert(self.__reservation is None)
return False
@@ -208,21 +208,21 @@ class CTRexServer(object):
with self.start_lock:
logger.info("Processing start_trex() command.")
if self.is_reserved():
- # check if this is not the user to which T-Rex is reserved
+ # check if this is not the user to which TRex is reserved
if self.__reservation['user'] != user:
- logger.info("T-Rex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
- return Fault(-33, "T-Rex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user'])) # raise at client TRexRequestDenied
+ logger.info("TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
+ return Fault(-33, "TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user'])) # raise at client TRexRequestDenied
elif self.trex.get_status() != TRexStatus.Idle:
- logger.info("T-Rex is already taken, cannot create another run until done.")
+ logger.info("TRex is already taken, cannot create another run until done.")
return Fault(-13, '') # raise at client TRexInUseError
try:
server_cmd_data = self.generate_run_cmd(**trex_cmd_options)
self.zmq_monitor.first_dump = True
self.trex.start_trex(self.TREX_PATH, server_cmd_data)
- logger.info("T-Rex session has been successfully initiated.")
+ logger.info("TRex session has been successfully initiated.")
if block_to_success:
- # delay server response until T-Rex is at 'Running' state.
+ # delay server response until TRex is at 'Running' state.
start_time = time.time()
trex_state = None
while (time.time() - start_time) < timeout :
@@ -232,20 +232,20 @@ class CTRexServer(object):
else:
time.sleep(0.5)
- # check for T-Rex run started normally
+ # check for TRex run started normally
if trex_state == TRexStatus.Starting: # reached timeout
- logger.warning("TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.")
- return Fault(-12, 'TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+ logger.warning("TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.")
+ return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
elif trex_state == TRexStatus.Idle:
return Fault(-11, self.trex.get_verbose_status()) # raise at client TRexError
- # reach here only if T-Rex is at 'Running' state
+ # reach here only if TRex is at 'Running' state
self.trex.gen_seq()
return self.trex.get_seq() # return unique seq number to client
except TypeError as e:
- logger.error("T-Rex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
- raise TypeError('T-Rex -f (traffic generation .yaml file) and -c (num of cores) must be specified.')
+ logger.error("TRex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
+ raise TypeError('TRex -f (traffic generation .yaml file) and -c (num of cores) must be specified.')
def stop_trex(self, seq):
@@ -262,11 +262,11 @@ class CTRexServer(object):
return False
def force_trex_kill (self):
- logger.info("Processing force_trex_kill() command. --> Killing T-Rex session indiscriminately.")
+ logger.info("Processing force_trex_kill() command. --> Killing TRex session indiscriminately.")
return self.trex.stop_trex()
def wait_until_kickoff_finish (self, timeout = 40):
- # block until T-Rex exits Starting state
+ # block until TRex exits Starting state
logger.info("Processing wait_until_kickoff_finish() command.")
trex_state = None
start_time = time.time()
@@ -274,7 +274,7 @@ class CTRexServer(object):
trex_state = self.trex.get_status()
if trex_state != TRexStatus.Starting:
return
- return Fault(-12, 'TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+ return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
def get_running_info (self):
logger.info("Processing get_running_info() command.")
@@ -283,7 +283,7 @@ class CTRexServer(object):
def generate_run_cmd (self, f, d, iom = 0, export_path="/tmp/trex.txt", **kwargs):
""" generate_run_cmd(self, trex_cmd_options, export_path) -> str
- Generates a custom running command for the kick-off of the T-Rex traffic generator.
+ Generates a custom running command for the kick-off of the TRex traffic generator.
Returns a tuple of command (string) and export path (string) to be issued on the trex server
Parameters
@@ -325,14 +325,14 @@ class CTRexServer(object):
def __check_trex_path_validity(self):
# check for executable existance
if not os.path.exists(self.TREX_PATH+'/t-rex-64'):
- print "The provided T-Rex path do not contain an executable T-Rex file.\nPlease check the path and retry."
- logger.error("The provided T-Rex path do not contain an executable T-Rex file")
+ print "The provided TRex path do not contain an executable TRex file.\nPlease check the path and retry."
+ logger.error("The provided TRex path do not contain an executable TRex file")
exit(-1)
# check for executable permissions
st = os.stat(self.TREX_PATH+'/t-rex-64')
if not bool(st.st_mode & (stat.S_IXUSR ) ):
- print "The provided T-Rex path do not contain an T-Rex file with execution privileges.\nPlease check the files permissions and retry."
- logger.error("The provided T-Rex path do not contain an T-Rex file with execution privileges")
+ print "The provided TRex path do not contain an TRex file with execution privileges.\nPlease check the files permissions and retry."
+ logger.error("The provided TRex path do not contain an TRex file with execution privileges")
exit(-1)
else:
return
@@ -357,7 +357,7 @@ class CTRexServer(object):
class CTRex(object):
def __init__(self):
self.status = TRexStatus.Idle
- self.verbose_status = 'T-Rex is Idle'
+ self.verbose_status = 'TRex is Idle'
self.errcode = None
self.session = None
self.zmq_monitor = None
@@ -388,34 +388,34 @@ class CTRex(object):
if self.status == TRexStatus.Running:
return self.encoder.encode(self.zmq_dump)
else:
- logger.info("T-Rex isn't running. Running information isn't available.")
+ logger.info("TRex isn't running. Running information isn't available.")
if self.status == TRexStatus.Idle:
if self.errcode is not None: # some error occured
- logger.info("T-Rex is in Idle state, with errors. returning fault")
+ logger.info("TRex is in Idle state, with errors. returning fault")
return Fault(self.errcode, self.verbose_status) # raise at client relevant exception, depending on the reason the error occured
else:
- logger.info("T-Rex is in Idle state, no errors. returning {}")
+ logger.info("TRex is in Idle state, no errors. returning {}")
return u'{}'
- return Fault(-12, self.verbose_status) # raise at client TRexWarning, indicating T-Rex is back to Idle state or still in Starting state
+ return Fault(-12, self.verbose_status) # raise at client TRexWarning, indicating TRex is back to Idle state or still in Starting state
def stop_trex(self):
if self.status == TRexStatus.Idle:
# t-rex isn't running, nothing to abort
- logger.info("T-Rex isn't running. No need to stop anything.")
- if self.errcode is not None: # some error occured, notify client despite T-Rex already stopped
+ logger.info("TRex isn't running. No need to stop anything.")
+ if self.errcode is not None: # some error occurred, notify client despite TRex already stopped
return Fault(self.errcode, self.verbose_status) # raise at client relevant exception, depending on the reason the error occured
return False
else:
# handle stopping t-rex's run
self.session.join()
- logger.info("T-Rex session has been successfully aborted.")
+ logger.info("TRex session has been successfully aborted.")
return True
def start_trex(self, trex_launch_path, trex_cmd):
self.set_status(TRexStatus.Starting)
logger.info("TRex running state changed to 'Starting'.")
- self.set_verbose_status('T-Rex is starting (data is not available yet)')
+ self.set_verbose_status('TRex is starting (data is not available yet)')
self.errcode = None
self.session = AsynchronousTRexSession(self, trex_launch_path, trex_cmd)
@@ -430,7 +430,7 @@ def generate_trex_parser ():
default_path = os.path.abspath(os.path.join(outer_packages.CURRENT_PATH, os.pardir, os.pardir, os.pardir))
default_files_path = os.path.abspath(CTRexServer.DEFAULT_FILE_PATH)
- parser = ArgumentParser(description = 'Run server application for T-Rex traffic generator',
+ parser = ArgumentParser(description = 'Run server application for TRex traffic generator',
formatter_class = RawTextHelpFormatter,
usage = """
trex_daemon_server [options]
@@ -440,10 +440,10 @@ trex_daemon_server [options]
parser.add_argument("-p", "--daemon-port", type=int, default = 8090, metavar="PORT", dest="daemon_port",
help="Select port on which the daemon runs.\nDefault port is 8090.", action="store")
parser.add_argument("-z", "--zmq-port", dest="zmq_port", type=int,
- action="store", help="Select port on which the ZMQ module listens to T-Rex.\nDefault port is 4500.", metavar="PORT",
+ action="store", help="Select port on which the ZMQ module listens to TRex.\nDefault port is 4500.", metavar="PORT",
default = 4500)
parser.add_argument("-t", "--trex-path", dest="trex_path",
- action="store", help="Specify the compiled T-Rex directory from which T-Rex would run.\nDefault path is: {def_path}.".format( def_path = default_path ),
+ action="store", help="Specify the compiled TRex directory from which TRex would run.\nDefault path is: {def_path}.".format( def_path = default_path ),
metavar="PATH", default = default_path )
parser.add_argument("-f", "--files-path", dest="files_path",
action="store", help="Specify a path to directory on which pushed files will be saved at.\nDefault path is: {def_path}.".format( def_path = default_files_path ),
diff --git a/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
index 7a278af8..db9bf7da 100755
--- a/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
+++ b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
@@ -22,7 +22,7 @@ class ZmqMonitorSession(threading.Thread):
self.zmq_port = zmq_port
self.zmq_publisher = "tcp://localhost:{port}".format(port=self.zmq_port)
self.trexObj = trexObj
- self.expect_trex = self.trexObj.expect_trex # used to signal if T-Rex is expected to run and if data should be considered
+ self.expect_trex = self.trexObj.expect_trex # used to signal if TRex is expected to run and if data should be considered
self.decoder = JSONDecoder()
logger.info("ZMQ monitor initialization finished")
@@ -69,7 +69,7 @@ class ZmqMonitorSession(threading.Thread):
# change TRexStatus from starting to Running once the first ZMQ dump is obtained and parsed successfully
self.first_dump = False
self.trexObj.set_status(TRexStatus.Running)
- self.trexObj.set_verbose_status("T-Rex is Running")
+ self.trexObj.set_verbose_status("TRex is Running")
logger.info("First ZMQ dump received and successfully parsed. TRex running state changed to 'Running'.")