author     Dan Klein <danklei@cisco.com>    2015-10-06 02:40:42 +0300
committer  Dan Klein <danklei@cisco.com>    2015-10-06 02:42:15 +0300
commit     54fb5cd69e0166073acac1eec08bd29341dbd6be (patch)
tree       d49cee17f20d7013771a3570890a07c4c4859faf
parent     25c528e867b13d8ddaee19f208ddedd8a2e505ca (diff)
parent     ddad1117a1bdc616eb1a5fc4e4e5ef2b8dcf6938 (diff)
Merge branch 'master' into dan_stateless
-rw-r--r--   .gitignore | 1
-rwxr-xr-x   VERSION | 2
-rwxr-xr-x   linux/ws_main.py | 2
-rwxr-xr-x   linux_dpdk/ws_main.py | 5
-rwxr-xr-x [-rw-r--r--]   scripts/automation/trex_control_plane/client/trex_stateless_client.py | 54
-rwxr-xr-x   scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py | 302
-rwxr-xr-x   scripts/automation/trex_control_plane/client_utils/packet_builder.py | 10
-rwxr-xr-x   scripts/automation/trex_control_plane/common/trex_status_e.py | 2
-rw-r--r--   scripts/automation/trex_control_plane/console/trex_console.py | 126
-rw-r--r--   scripts/automation/trex_control_plane/console/trex_status.py | 403
-rwxr-xr-x   scripts/automation/trex_control_plane/doc/api/index.rst | 5
-rwxr-xr-x   scripts/automation/trex_control_plane/doc/api/json_fields.rst | 14
-rwxr-xr-x   scripts/automation/trex_control_plane/doc/packet_generator/examples.rst | 230
-rwxr-xr-x   scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst | 6
-rwxr-xr-x   scripts/automation/trex_control_plane/examples/client_interactive_example.py | 100
-rwxr-xr-x   scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py | 20
-rwxr-xr-x   scripts/automation/trex_control_plane/server/extended_daemon_runner.py | 4
-rwxr-xr-x   scripts/automation/trex_control_plane/server/trex_daemon_server.py | 2
-rwxr-xr-x   scripts/automation/trex_control_plane/server/trex_launch_thread.py | 24
-rwxr-xr-x   scripts/automation/trex_control_plane/server/trex_server.py | 102
-rwxr-xr-x   scripts/automation/trex_control_plane/server/zmq_monitor_thread.py | 4
-rwxr-xr-x   scripts/avl/sfr_branch_profile_delay_10.yaml | 2
-rwxr-xr-x   scripts/avl/sfr_delay_10.yaml | 2
-rwxr-xr-x   scripts/avl/sfr_delay_10_1g.yaml | 2
-rw-r--r--   scripts/cap2/per_template_gen1.yaml | 40
-rw-r--r--   scripts/cap2/per_template_gen2.yaml | 41
-rw-r--r--   scripts/cap2/per_template_gen3.yaml | 41
-rw-r--r--   scripts/cap2/per_template_gen4.yaml | 41
-rwxr-xr-x   scripts/cap2/per_template_gen5.yaml | 51
-rwxr-xr-x   scripts/trex-console | 2
-rwxr-xr-x   src/bp_gtest.cpp | 42
-rwxr-xr-x   src/bp_sim.cpp | 199
-rwxr-xr-x   src/bp_sim.h | 42
-rw-r--r--   src/gtest/rpc_test.cpp | 2
-rwxr-xr-x   src/gtest/tuple_gen_test.cpp | 407
-rwxr-xr-x   src/main_dpdk.cpp | 9
-rw-r--r--   src/rpc-server/commands/trex_rpc_cmd_general.cpp | 51
-rw-r--r--   src/rpc-server/commands/trex_rpc_cmds.h | 7
-rw-r--r--   src/rpc-server/trex_rpc_cmds_table.cpp | 1
-rw-r--r--   src/stateless/trex_stateless.cpp | 23
-rw-r--r--   src/stateless/trex_stateless_api.h | 31
-rwxr-xr-x   src/tuple_gen.cpp | 521
-rwxr-xr-x   src/tuple_gen.h | 718
43 files changed, 2456 insertions, 1237 deletions
diff --git a/.gitignore b/.gitignore
index 73ce5afe..c9576dc0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,6 +14,7 @@ scripts/bp-sim-*
scripts/doc/*
scripts/mock-*
scripts/automation/trex_control_plane/doc/_build/*
+
*.pyc
diff --git a/VERSION b/VERSION
index fe509dde..6781e9e3 100755
--- a/VERSION
+++ b/VERSION
@@ -1,4 +1,4 @@
-v1.76
+v1.77
diff --git a/linux/ws_main.py b/linux/ws_main.py
index a1f207ae..8ad3e5ba 100755
--- a/linux/ws_main.py
+++ b/linux/ws_main.py
@@ -383,7 +383,7 @@ def build_prog (bld, build_obj):
bld.program(features='cxx cxxprogram',
includes =includes_path,
- cxxflags =build_obj.get_flags(),
+ cxxflags =(build_obj.get_flags()+['-std=gnu++11',]),
linkflags = build_obj.get_link_flags(),
source = build_obj.get_src(),
use = build_obj.get_use_libs(),
diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py
index fc9fc587..6aad508a 100755
--- a/linux_dpdk/ws_main.py
+++ b/linux_dpdk/ws_main.py
@@ -616,7 +616,7 @@ def build_prog (bld, build_obj):
bld.program(features='cxx cxxprogram',
includes =includes_path,
- cxxflags =build_obj.get_cxx_flags(),
+ cxxflags =(build_obj.get_cxx_flags()+['-std=gnu++11',]),
linkflags = build_obj.get_link_flags() ,
lib=['pthread','dl'],
use =[build_obj.get_dpdk_target(),'zmq'],
@@ -747,7 +747,8 @@ files_list=[
'dpdk_nic_bind.py',
'dpdk_setup_ports.py',
'doc_process.py',
- 'trex_daemon_server'
+ 'trex_daemon_server',
+ 'trex-console'
];
files_dir=['cap2','avl','cfg','ko','automation', 'external_libs', 'python-lib']
diff --git a/scripts/automation/trex_control_plane/client/trex_stateless_client.py b/scripts/automation/trex_control_plane/client/trex_stateless_client.py
index b7580531..b25d5cd5 100644..100755
--- a/scripts/automation/trex_control_plane/client/trex_stateless_client.py
+++ b/scripts/automation/trex_control_plane/client/trex_stateless_client.py
@@ -19,7 +19,7 @@ class CTRexStatelessClient(object):
self._conn_handler = {}
def owned(func):
- def wrapper(self, *args, **kwargs ) :
+ def wrapper(self, *args, **kwargs):
if self._conn_handler.get(kwargs.get("port_id")):
return func(self, *args, **kwargs)
else:
@@ -37,38 +37,66 @@ class CTRexStatelessClient(object):
@owned
def release(self, port_id=None):
self._conn_handler.pop(port_id)
- params = {"handler":self._conn_handler.get(port_id),
+ params = {"handler": self._conn_handler.get(port_id),
"port_id": port_id}
return self.transmit("release", params)
@owned
def add_stream(self, stream_id, stream_obj, port_id=None):
assert isinstance(stream_obj, CStream)
- params = {"handler":self._conn_handler.get(port_id),
- "port_id":port_id,
- "stream_id":stream_id,
- "stream":stream_obj.dump()}
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id,
+ "stream_id": stream_id,
+ "stream": stream_obj.dump()}
return self.transmit("add_stream", params)
@owned
def remove_stream(self, stream_id, port_id=None):
- params = {"handler":self._conn_handler.get(port_id),
- "port_id":port_id,
- "stream_id":stream_id}
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id,
+ "stream_id": stream_id}
return self.transmit("remove_stream", params)
@owned
def get_stream_list(self, port_id=None):
- params = {"handler":self._conn_handler.get(port_id),
- "port_id":port_id}
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
return self.transmit("get_stream_list", params)
@owned
def get_stream(self, stream_id, port_id=None):
- params = {"handler":self._conn_handler.get(port_id),
- "port_id":port_id}
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id,
+ "stream_id": stream_id}
return self.transmit("get_stream_list", params)
+ @owned
+ def start_traffic(self, port_id=None):
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
+ return self.transmit("start_traffic", params)
+
+ @owned
+ def stop_traffic(self, port_id=None):
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
+ return self.transmit("stop_traffic", params)
+
+ def get_global_stats(self):
+ return self.transmit("get_global_stats")
+
+ @owned
+ def stop_traffic(self, port_id=None):
+ params = {"handler": self._conn_handler.get(port_id),
+ "port_id": port_id}
+ return self.transmit("stop_traffic", params)
+
+
+
+
+
+
+
def transmit(self, method_name, params={}):
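The "owned" decorator added above gates every port-specific RPC on the client holding a handler for that port. A minimal standalone sketch of the pattern, for illustration only (the rejection message and the FakeClient class are assumptions, not taken from the diff):

    # Sketch of the @owned guard: forward the call only if a handler exists
    # for the requested port_id. FakeClient and the error tuple are illustrative.
    def owned(func):
        def wrapper(self, *args, **kwargs):
            if self._conn_handler.get(kwargs.get("port_id")):
                return func(self, *args, **kwargs)
            else:
                return False, "port {0} is not owned by this client".format(kwargs.get("port_id"))
        return wrapper

    class FakeClient(object):
        def __init__(self):
            self._conn_handler = {1: "AB12CD34"}   # pretend port 1 was acquired

        @owned
        def stop_traffic(self, port_id=None):
            return True, {"handler": self._conn_handler.get(port_id), "port_id": port_id}

    print FakeClient().stop_traffic(port_id=1)   # (True, {...})  - owned port
    print FakeClient().stop_traffic(port_id=2)   # (False, '...') - not owned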
diff --git a/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py b/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py
index ebeec77e..51bb3a14 100755
--- a/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py
+++ b/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py
@@ -17,13 +17,37 @@ class bcolors:
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
+# sub class to describe a batch
+class BatchMessage(object):
+ def __init__ (self, rpc_client):
+ self.rpc_client = rpc_client
+ self.batch_list = []
+ def add (self, method_name, params = {}):
+
+ id, msg = self.rpc_client.create_jsonrpc_v2(method_name, params, encode = False)
+ self.batch_list.append(msg)
+
+ def invoke (self, block = False):
+ if not self.rpc_client.connected:
+ return False, "Not connected to server"
+
+ msg = json.dumps(self.batch_list)
+
+ rc, resp_list = self.rpc_client.send_raw_msg(msg, block = False)
+ if len(self.batch_list) == 1:
+ return True, [(rc, resp_list)]
+ else:
+ return rc, resp_list
+
+
+# JSON RPC v2.0 client
class JsonRpcClient(object):
def __init__ (self, default_server, default_port):
self.verbose = False
self.connected = False
-
+
# default values
self.port = default_port
self.server = default_server
@@ -63,7 +87,10 @@ class JsonRpcClient(object):
print "[verbose] " + msg
- def create_jsonrpc_v2 (self, method_name, params = {}):
+ def create_batch (self):
+ return BatchMessage(self)
+
+ def create_jsonrpc_v2 (self, method_name, params = {}, encode = True):
msg = {}
msg["jsonrpc"] = "2.0"
msg["method"] = method_name
@@ -72,21 +99,22 @@ class JsonRpcClient(object):
msg["id"] = self.id_gen.next()
- return id, json.dumps(msg)
-
- def invoke_rpc_method (self, method_name, params = {}, block = False):
- rc, msg = self._invoke_rpc_method(method_name, params, block)
- if not rc:
- self.disconnect()
+ if encode:
+ return id, json.dumps(msg)
+ else:
+ return id, msg
- return rc, msg
- def _invoke_rpc_method (self, method_name, params = {}, block = False):
+ def invoke_rpc_method (self, method_name, params = {}, block = False):
if not self.connected:
return False, "Not connected to server"
id, msg = self.create_jsonrpc_v2(method_name, params)
+ return self.send_raw_msg(msg, block)
+
+
+ def send_raw_msg (self, msg, block = False):
self.verbose_msg("Sending Request To Server:\n\n" + self.pretty_json(msg) + "\n")
if block:
@@ -95,6 +123,7 @@ class JsonRpcClient(object):
try:
self.socket.send(msg, flags = zmq.NOBLOCK)
except zmq.error.ZMQError as e:
+ self.disconnect()
return False, "Failed To Get Send Message"
got_response = False
@@ -112,22 +141,41 @@ class JsonRpcClient(object):
sleep(0.2)
if not got_response:
+ self.disconnect()
return False, "Failed To Get Server Response"
self.verbose_msg("Server Response:\n\n" + self.pretty_json(response) + "\n")
# decode
+
+ # batch ?
response_json = json.loads(response)
+ if isinstance(response_json, list):
+ rc_list = []
+
+ for single_response in response_json:
+ rc, msg = self.process_single_response(single_response)
+ rc_list.append( (rc, msg) )
+
+ return True, rc_list
+
+ else:
+ rc, msg = self.process_single_response(response_json)
+ return rc, msg
+
+
+ def process_single_response (self, response_json):
+
if (response_json.get("jsonrpc") != "2.0"):
return False, "Malfromed Response ({0})".format(str(response))
- if (response_json.get("id") != id):
- return False, "Server Replied With Bad ID ({0})".format(str(response))
-
# error reported by server
if ("error" in response_json):
- return True, response_json["error"]["message"]
+ if "specific_err" in response_json["error"]:
+ return False, response_json["error"]["specific_err"]
+ else:
+ return False, response_json["error"]["message"]
# if no error there should be a result
if ("result" not in response_json):
@@ -136,17 +184,7 @@ class JsonRpcClient(object):
return True, response_json["result"]
- def ping_rpc_server(self):
-
- return self.invoke_rpc_method("ping", block = False)
-
- def get_rpc_server_status (self):
- return self.invoke_rpc_method("get_status")
-
- def query_rpc_server(self):
- return self.invoke_rpc_method("get_supported_cmds")
-
-
+
def set_verbose(self, mode):
self.verbose = mode
@@ -182,12 +220,6 @@ class JsonRpcClient(object):
self.connected = True
- # ping the server
- rc, err = self.ping_rpc_server()
- if not rc:
- self.disconnect()
- return rc, err
-
return True, ""
@@ -205,11 +237,213 @@ class JsonRpcClient(object):
def is_connected(self):
return self.connected
-
def __del__(self):
print "Shutting down RPC client\n"
if hasattr(self, "context"):
self.context.destroy(linger=0)
-if __name__ == "__main__":
- pass
+# MOVE THIS TO DAN'S FILE
+class TrexStatelessClient(JsonRpcClient):
+
+ def __init__ (self, server, port, user):
+
+ super(TrexStatelessClient, self).__init__(server, port)
+
+ self.user = user
+ self.port_handlers = {}
+
+ self.supported_cmds = []
+ self.system_info = None
+ self.server_version = None
+
+
+ def whoami (self):
+ return self.user
+
+ def ping_rpc_server(self):
+
+ return self.invoke_rpc_method("ping", block = False)
+
+ def get_rpc_server_version (self):
+ return self.server_version
+
+ def get_system_info (self):
+ return self.system_info
+
+ def get_supported_cmds(self):
+ return self.supported_cmds
+
+ def get_port_count (self):
+ if not self.system_info:
+ return 0
+
+ return self.system_info["port_count"]
+
+ # refresh the client for transient data
+ def refresh (self):
+
+ # get server version
+ rc, msg = self.invoke_rpc_method("get_version")
+ if not rc:
+ self.disconnect()
+ return rc, msg
+
+ self.server_version = msg
+
+ # get supported commands
+ rc, msg = self.invoke_rpc_method("get_supported_cmds")
+ if not rc:
+ self.disconnect()
+ return rc, msg
+
+ self.supported_cmds = [str(x) for x in msg if x]
+
+ # get system info
+ rc, msg = self.invoke_rpc_method("get_system_info")
+ if not rc:
+ self.disconnect()
+ return rc, msg
+
+ self.system_info = msg
+
+ return True, ""
+
+ def connect (self):
+ rc, err = super(TrexStatelessClient, self).connect()
+ if not rc:
+ return rc, err
+
+ return self.refresh()
+
+
+ # take ownership over ports
+ def take_ownership (self, port_id_array, force = False):
+ if not self.connected:
+ return False, "Not connected to server"
+
+ batch = self.create_batch()
+
+ for port_id in port_id_array:
+ batch.add("acquire", params = {"port_id":port_id, "user":self.user, "force":force})
+
+ rc, resp_list = batch.invoke()
+ if not rc:
+ return rc, resp_list
+
+ for i, rc in enumerate(resp_list):
+ if rc[0]:
+ self.port_handlers[port_id_array[i]] = rc[1]
+
+ return True, resp_list
+
+
+ def release_ports (self, port_id_array):
+ batch = self.create_batch()
+
+ for port_id in port_id_array:
+
+ # let the server handle un-acquired errors
+ if self.port_handlers.get(port_id):
+ handler = self.port_handlers[port_id]
+ else:
+ handler = ""
+
+ batch.add("release", params = {"port_id":port_id, "handler":handler})
+
+
+ rc, resp_list = batch.invoke()
+ if not rc:
+ return rc, resp_list
+
+ for i, rc in enumerate(resp_list):
+ if rc[0]:
+ self.port_handlers.pop(port_id_array[i])
+
+ return True, resp_list
+
+ def get_owned_ports (self):
+ return self.port_handlers.keys()
+
+ # fetch port stats
+ def get_port_stats (self, port_id_array):
+ if not self.connected:
+ return False, "Not connected to server"
+
+ batch = self.create_batch()
+
+ # empty list means all
+ if port_id_array == []:
+ port_id_array = list([x for x in xrange(0, self.system_info["port_count"])])
+
+ for port_id in port_id_array:
+
+ # let the server handle un-acquired errors
+ if self.port_handlers.get(port_id):
+ handler = self.port_handlers[port_id]
+ else:
+ handler = ""
+
+ batch.add("get_port_stats", params = {"port_id":port_id, "handler":handler})
+
+
+ rc, resp_list = batch.invoke()
+
+ return rc, resp_list
+
+ # snapshot will take a snapshot of all your owned ports for streams and etc.
+ def snapshot(self):
+
+
+ if len(self.get_owned_ports()) == 0:
+ return {}
+
+ snap = {}
+
+ batch = self.create_batch()
+
+ for port_id in self.get_owned_ports():
+
+ batch.add("get_port_stats", params = {"port_id": port_id, "handler": self.port_handlers[port_id]})
+ batch.add("get_stream_list", params = {"port_id": port_id, "handler": self.port_handlers[port_id]})
+
+ rc, resp_list = batch.invoke()
+ if not rc:
+ return rc, resp_list
+
+ # split the list to 2s
+ index = 0
+ for port_id in self.get_owned_ports():
+ if not resp_list[index] or not resp_list[index + 1]:
+ snap[port_id] = None
+ continue
+
+ # fetch the first two
+ stats = resp_list[index][1]
+ stream_list = resp_list[index + 1][1]
+
+ port = {}
+ port['status'] = stats['status']
+ port['stream_list'] = []
+
+ # get all the streams
+ if len(stream_list) > 0:
+ batch = self.create_batch()
+ for stream_id in stream_list:
+ batch.add("get_stream", params = {"port_id": port_id, "stream_id": stream_id, "handler": self.port_handlers[port_id]})
+
+ rc, stream_resp_list = batch.invoke()
+ if not rc:
+ port = {}
+
+ port['streams'] = {}
+ for i, resp in enumerate(stream_resp_list):
+ if resp[0]:
+ port['streams'][stream_list[i]] = resp[1]
+
+ snap[port_id] = port
+
+ # move to next one
+ index += 2
+
+
+ return snap
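The BatchMessage class introduced above wraps JSON-RPC 2.0 batching: each add() builds an un-encoded request object and invoke() ships the whole list as one JSON array via send_raw_msg(). A rough sketch of the resulting payload (the id values and user name are made-up examples, not from a real session):

    # What a two-call batch roughly looks like on the wire: a JSON array of
    # standard JSON-RPC 2.0 request objects. All values below are illustrative.
    import json

    batch_list = [
        {"jsonrpc": "2.0", "method": "acquire",
         "params": {"port_id": 0, "user": "user_12345", "force": False}, "id": 1},
        {"jsonrpc": "2.0", "method": "acquire",
         "params": {"port_id": 1, "user": "user_12345", "force": False}, "id": 2},
    ]

    msg = json.dumps(batch_list)   # the single string handed to send_raw_msg()
    print msg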
diff --git a/scripts/automation/trex_control_plane/client_utils/packet_builder.py b/scripts/automation/trex_control_plane/client_utils/packet_builder.py
index 0505d7f1..1c643335 100755
--- a/scripts/automation/trex_control_plane/client_utils/packet_builder.py
+++ b/scripts/automation/trex_control_plane/client_utils/packet_builder.py
@@ -75,6 +75,7 @@ class CTRexPktBuilder(object):
attr: str
a string representation of the sub-field to be set:
+
+ "src" for source
+ "dst" for destination
@@ -84,6 +85,7 @@ class CTRexPktBuilder(object):
ip_type : str
a string representation of the IP version to be set:
+
+ "ipv4" for IPv4
+ "ipv6" for IPv6
@@ -115,6 +117,7 @@ class CTRexPktBuilder(object):
attr: str
a string representation of the sub-field to be set:
+
+ "src" for source
+ "dst" for destination
@@ -227,6 +230,7 @@ class CTRexPktBuilder(object):
val : int
value of attribute.
This value will be set "ontop" of the existing value using bitwise "OR" operation.
+
.. tip:: It is very useful to use dpkt constants to define the values of these fields.
:raises:
@@ -408,9 +412,9 @@ class CTRexPktBuilder(object):
trim_size = val_size*2
hdr_offset, field_abs_offset = self._calc_offset(layer_name, hdr_field, val_size)
self.vm.add_flow_man_inst(range_name, size=val_size, operation=operation,
- init_value=str(init_val),
- min_value=str(start_val),
- max_value=str(end_val))
+ init_value=init_val,
+ min_value=start_val,
+ max_value=end_val)
self.vm.add_write_flow_inst(range_name, field_abs_offset)
self.vm.set_vm_off_inst_field(range_name, "add_value", add_val)
self.vm.set_vm_off_inst_field(range_name, "is_big_endian", is_big_endian)
diff --git a/scripts/automation/trex_control_plane/common/trex_status_e.py b/scripts/automation/trex_control_plane/common/trex_status_e.py
index 3ad85014..fbfe92af 100755
--- a/scripts/automation/trex_control_plane/common/trex_status_e.py
+++ b/scripts/automation/trex_control_plane/common/trex_status_e.py
@@ -1,6 +1,6 @@
#!/router/bin/python
-# import outer_packages
+import outer_packages # import this to overcome doc building import error by sphinx
from enum import Enum
diff --git a/scripts/automation/trex_control_plane/console/trex_console.py b/scripts/automation/trex_control_plane/console/trex_console.py
index 6514a51c..3aeab901 100644
--- a/scripts/automation/trex_control_plane/console/trex_console.py
+++ b/scripts/automation/trex_control_plane/console/trex_console.py
@@ -4,9 +4,13 @@ import cmd
import json
import ast
import argparse
+import random
+import string
+
import sys
import trex_root_path
-from client_utils.jsonrpc_client import JsonRpcClient
+
+from client_utils.jsonrpc_client import TrexStatelessClient
import trex_status
class TrexConsole(cmd.Cmd):
@@ -34,7 +38,7 @@ class TrexConsole(cmd.Cmd):
# set verbose on / off
def do_verbose (self, line):
- '''shows or set verbose mode\n'''
+ '''Shows or set verbose mode\n'''
if line == "":
print "\nverbose is " + ("on\n" if self.verbose else "off\n")
@@ -78,6 +82,98 @@ class TrexConsole(cmd.Cmd):
print "\n*** " + msg + "\n"
return
+ def do_force_acquire (self, line):
+ '''Acquires ports by force\n'''
+
+ self.do_acquire(line, True)
+
+ def parse_ports_from_line (self, line):
+ port_list = set()
+
+ if line:
+ for port_id in line.split(' '):
+ if (not port_id.isdigit()) or (int(port_id) < 0) or (int(port_id) >= self.rpc_client.get_port_count()):
+ print "Please provide a list of ports seperated by spaces between 0 and {0}".format(self.rpc_client.get_port_count() - 1)
+ return None
+
+ port_list.add(int(port_id))
+
+ port_list = list(port_list)
+
+ else:
+ port_list = [i for i in xrange(0, self.rpc_client.get_port_count())]
+
+ return port_list
+
+ def do_acquire (self, line, force = False):
+ '''Acquire ports\n'''
+
+ port_list = self.parse_ports_from_line(line)
+ if not port_list:
+ return
+
+ print "\nTrying to acquire ports: " + (" ".join(str(x) for x in port_list)) + "\n"
+
+ rc, resp_list = self.rpc_client.take_ownership(port_list, force)
+
+ if not rc:
+ print "\n*** " + resp_list + "\n"
+ return
+
+ for i, rc in enumerate(resp_list):
+ if rc[0]:
+ print "Port {0} - Acquired".format(port_list[i])
+ else:
+ print "Port {0} - ".format(port_list[i]) + rc[1]
+
+ print "\n"
+
+ def do_release (self, line):
+ '''Release ports\n'''
+
+ if line:
+ port_list = self.parse_ports_from_line(line)
+ else:
+ port_list = self.rpc_client.get_owned_ports()
+
+ if not port_list:
+ return
+
+ rc, resp_list = self.rpc_client.release_ports(port_list)
+
+
+ print "\n"
+
+ for i, rc in enumerate(resp_list):
+ if rc[0]:
+ print "Port {0} - Released".format(port_list[i])
+ else:
+ print "Port {0} - Failed to release port, probably not owned by you or port is under traffic"
+
+ print "\n"
+
+ def do_get_port_stats (self, line):
+ '''Get ports stats\n'''
+
+ port_list = self.parse_ports_from_line(line)
+ if not port_list:
+ return
+
+ rc, resp_list = self.rpc_client.get_port_stats(port_list)
+
+ if not rc:
+ print "\n*** " + resp_list + "\n"
+ return
+
+ for i, rc in enumerate(resp_list):
+ if rc[0]:
+ print "\nPort {0} stats:\n{1}\n".format(port_list[i], self.rpc_client.pretty_json(json.dumps(rc[1])))
+ else:
+ print "\nPort {0} - ".format(i) + rc[1] + "\n"
+
+ print "\n"
+
+
def do_connect (self, line):
'''Connects to the server\n'''
@@ -97,10 +193,7 @@ class TrexConsole(cmd.Cmd):
print "\n*** " + msg + "\n"
return
- rc, msg = self.rpc_client.query_rpc_server()
-
- if rc:
- self.supported_rpc = [str(x) for x in msg if x]
+ self.supported_rpc = self.rpc_client.get_supported_cmds()
def do_rpc (self, line):
'''Launches a RPC on the server\n'''
@@ -135,7 +228,7 @@ class TrexConsole(cmd.Cmd):
rc, msg = self.rpc_client.invoke_rpc_method(method, params)
if rc:
- print "\nServer Response:\n\n" + json.dumps(msg) + "\n"
+ print "\nServer Response:\n\n" + self.rpc_client.pretty_json(json.dumps(msg)) + "\n"
else:
print "\n*** " + msg + "\n"
#print "Please try 'reconnect' to reconnect to server"
@@ -151,7 +244,7 @@ class TrexConsole(cmd.Cmd):
trex_status.show_trex_status(self.rpc_client)
def do_quit(self, line):
- '''exit the client\n'''
+ '''Exit the client\n'''
return True
def do_disconnect (self, line):
@@ -166,6 +259,10 @@ class TrexConsole(cmd.Cmd):
else:
print msg + "\n"
+ def do_whoami (self, line):
+ '''Prints console user name\n'''
+ print "\n" + self.rpc_client.whoami() + "\n"
+
def postcmd(self, stop, line):
if self.rpc_client.is_connected():
self.prompt = "TRex > "
@@ -216,6 +313,13 @@ class TrexConsole(cmd.Cmd):
print "{:<30} {:<30}".format(cmd + " - ", help)
+ # do
+ #def do_snapshot (self, line):
+
+ #for key, value in self.rpc_client.snapshot()[1]['streams'].iteritems():
+ #print str(key) + " " + str(value)
+
+
# aliasing
do_exit = do_EOF = do_q = do_quit
@@ -230,6 +334,10 @@ def setParserOptions ():
default = 5050,
type = int)
+ parser.add_argument("-u", "--user", help = "User Name [default is random generated]\n",
+ default = 'user_' + ''.join(random.choice(string.digits) for _ in range(5)),
+ type = str)
+
return parser
def main ():
@@ -237,7 +345,7 @@ def main ():
options = parser.parse_args(sys.argv[1:])
# RPC client
- rpc_client = JsonRpcClient(options.server, options.port)
+ rpc_client = TrexStatelessClient(options.server, options.port, options.user)
# console
try:
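The console's parse_ports_from_line() above accepts a space-separated list of port ids, validates each against the server's port count, deduplicates through a set, and falls back to "all ports" on empty input. A standalone sketch of that behaviour (port_count is hard-coded here; the real console asks the RPC client):

    # Standalone version of the console's port-list parsing; port_count=4 is an
    # assumption, the real code calls self.rpc_client.get_port_count().
    def parse_ports_from_line(line, port_count=4):
        if not line:
            return [i for i in xrange(0, port_count)]
        port_list = set()
        for port_id in line.split(' '):
            if (not port_id.isdigit()) or (int(port_id) < 0) or (int(port_id) >= port_count):
                print "Please provide a list of ports separated by spaces between 0 and {0}".format(port_count - 1)
                return None
            port_list.add(int(port_id))
        return list(port_list)

    print parse_ports_from_line("0 2 2")   # duplicates collapse via the set -> [0, 2]
    print parse_ports_from_line("")        # empty line means every port -> [0, 1, 2, 3]
    print parse_ports_from_line("9")       # out of range -> error message, None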
diff --git a/scripts/automation/trex_control_plane/console/trex_status.py b/scripts/automation/trex_control_plane/console/trex_status.py
index 54853ea3..b881f9f5 100644
--- a/scripts/automation/trex_control_plane/console/trex_status.py
+++ b/scripts/automation/trex_control_plane/console/trex_status.py
@@ -11,13 +11,21 @@ import datetime
g_curses_active = False
-#
+# simple percentage show
def percentage (a, total):
x = int ((float(a) / total) * 100)
return str(x) + "%"
+# simple float to human readable
+def float_to_human_readable (size, suffix = "bps"):
+ for unit in ['','K','M','G']:
+ if abs(size) < 1024.0:
+ return "%3.1f %s%s" % (size, unit, suffix)
+ size /= 1024.0
+ return "NaN"
+
# panel object
-class TrexStatusPanel():
+class TrexStatusPanel(object):
def __init__ (self, h, l, y, x, headline):
self.h = h
self.l = l
@@ -44,12 +52,245 @@ class TrexStatusPanel():
def getwin (self):
return self.win
-def float_to_human_readable (size, suffix = "bps"):
- for unit in ['','K','M','G']:
- if abs(size) < 1024.0:
- return "%3.1f %s%s" % (size, unit, suffix)
- size /= 1024.0
- return "NaN"
+
+# total stats (ports + global)
+class Stats():
+ def __init__ (self, rpc_client, port_list, interval = 100):
+
+ self.rpc_client = rpc_client
+
+ self.port_list = port_list
+ self.port_stats = {}
+
+ self.interval = interval
+ self.delay_count = 0
+
+ def get_port_stats (self, port_id):
+ if self.port_stats.get(port_id):
+ return self.port_stats[port_id]
+ else:
+ return None
+
+ def query_sync (self):
+ self.delay_count += 1
+ if self.delay_count < self.interval:
+ return
+
+ self.delay_count = 0
+
+ # query global stats
+
+ # query port stats
+
+ rc, resp_list = self.rpc_client.get_port_stats(self.port_list)
+ if not rc:
+ return
+
+ for i, rc in enumerate(resp_list):
+ if rc[0]:
+ self.port_stats[self.port_list[i]] = rc[1]
+
+
+# various kinds of panels
+
+# Server Info Panel
+class ServerInfoPanel(TrexStatusPanel):
+ def __init__ (self, h, l, y, x, status_obj):
+
+ super(ServerInfoPanel, self).__init__(h, l, y ,x ,"Server Info:")
+
+ self.status_obj = status_obj
+
+ def draw (self):
+
+ if self.status_obj.server_version == None:
+ return
+
+ self.clear()
+
+ connection_details = self.status_obj.rpc_client.get_connection_details()
+
+ self.getwin().addstr(3, 2, "{:<30} {:30}".format("Server:",self.status_obj.server_sys_info["hostname"] + ":" + str(connection_details['port'])))
+ self.getwin().addstr(4, 2, "{:<30} {:30}".format("Version:", self.status_obj.server_version["version"]))
+ self.getwin().addstr(5, 2, "{:<30} {:30}".format("Build:",
+ self.status_obj.server_version["build_date"] + " @ " +
+ self.status_obj.server_version["build_time"] + " by " +
+ self.status_obj.server_version["built_by"]))
+
+ self.getwin().addstr(6, 2, "{:<30} {:30}".format("Server Uptime:", self.status_obj.server_sys_info["uptime"]))
+ self.getwin().addstr(7, 2, "{:<30} {:<3} / {:<30}".format("DP Cores:", str(self.status_obj.server_sys_info["dp_core_count"]) +
+ " cores", self.status_obj.server_sys_info["core_type"]))
+
+ self.getwin().addstr(9, 2, "{:<30} {:<30}".format("Ports Count:", self.status_obj.server_sys_info["port_count"]))
+
+ ports_owned = " ".join(str(x) for x in self.status_obj.rpc_client.get_owned_ports())
+
+ if not ports_owned:
+ ports_owned = "None"
+
+ self.getwin().addstr(10, 2, "{:<30} {:<30}".format("Ports Owned:", ports_owned))
+
+# general info panel
+class GeneralInfoPanel(TrexStatusPanel):
+ def __init__ (self, h, l, y, x, status_obj):
+
+ super(GeneralInfoPanel, self).__init__(h, l, y ,x ,"General Info:")
+
+ self.status_obj = status_obj
+
+ def draw (self):
+ pass
+
+# all ports stats
+class PortsStatsPanel(TrexStatusPanel):
+ def __init__ (self, h, l, y, x, status_obj):
+
+ super(PortsStatsPanel, self).__init__(h, l, y ,x ,"Trex Ports:")
+
+ self.status_obj = status_obj
+
+ def draw (self):
+
+ self.clear()
+
+ owned_ports = self.status_obj.rpc_client.get_owned_ports()
+ if not owned_ports:
+ self.getwin().addstr(3, 2, "No Owned Ports - Please Acquire One Or More Ports")
+ return
+
+ # table header
+ self.getwin().addstr(3, 2, "{:^15} {:^15} {:^15} {:^15} {:^15} {:^15} {:^15}".format(
+ "Port ID", "Tx [pps]", "Tx [bps]", "Tx [bytes]", "Rx [pps]", "Rx [bps]", "Rx [bytes]"))
+
+ # port loop
+ self.status_obj.stats.query_sync()
+
+ for i, port_index in enumerate(owned_ports):
+
+ port_stats = self.status_obj.stats.get_port_stats(port_index)
+
+ if port_stats:
+ self.getwin().addstr(5 + (i * 4), 2, "{:^15} {:^15,} {:^15,} {:^15,} {:^15,} {:^15,} {:^15,}".format(
+ "{0} ({1})".format(str(port_index), self.status_obj.server_sys_info["ports"][port_index]["speed"]),
+ port_stats["tx_pps"],
+ port_stats["tx_bps"],
+ port_stats["total_tx_bytes"],
+ port_stats["rx_pps"],
+ port_stats["rx_bps"],
+ port_stats["total_rx_bytes"]))
+
+ else:
+ self.getwin().addstr(5 + (i * 4), 2, "{:^15} {:^15} {:^15} {:^15} {:^15} {:^15} {:^15}".format(
+ "{0} ({1})".format(str(port_index), self.status_obj.server_sys_info["ports"][port_index]["speed"]),
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A"))
+
+# control panel
+class ControlPanel(TrexStatusPanel):
+ def __init__ (self, h, l, y, x, status_obj):
+
+ super(ControlPanel, self).__init__(h, l, y, x, "")
+
+ self.status_obj = status_obj
+
+ def draw (self):
+ self.clear()
+
+ self.getwin().addstr(1, 2, "'g' - general, '0-{0}' - specific port, 'f' - freeze, 'c' - clear stats, 'p' - ping server, 'q' - quit"
+ .format(self.status_obj.rpc_client.get_port_count() - 1))
+
+ index = 3
+
+ cut = len(self.status_obj.log) - 4
+ if cut < 0:
+ cut = 0
+
+ for l in self.status_obj.log[cut:]:
+ self.getwin().addstr(index, 2, l)
+ index += 1
+
+# specific ports panels
+class SinglePortPanel(TrexStatusPanel):
+ def __init__ (self, h, l, y, x, status_obj, port_id):
+
+ super(SinglePortPanel, self).__init__(h, l, y, x, "Port {0}".format(port_id))
+
+ self.status_obj = status_obj
+ self.port_id = port_id
+
+ def draw (self):
+ y = 3
+
+ self.clear()
+
+ if not self.port_id in self.status_obj.rpc_client.get_owned_ports():
+ self.getwin().addstr(y, 2, "Port {0} is not owned by you, please acquire the port for more info".format(self.port_id))
+ return
+
+ # streams
+ self.getwin().addstr(y, 2, "Streams:", curses.A_UNDERLINE)
+ y += 2
+
+ # stream table header
+ self.getwin().addstr(y, 2, "{:^15} {:^15} {:^15} {:^15} {:^15} {:^15} {:^15}".format(
+ "Stream ID", "Enabled", "Type", "Self Start", "ISG", "Next Stream", "VM"))
+ y += 2
+
+ # streams
+ if 'streams' in self.status_obj.snapshot[self.port_id]:
+ for stream_id, stream in self.status_obj.snapshot[self.port_id]['streams'].iteritems():
+ self.getwin().addstr(y, 2, "{:^15} {:^15} {:^15} {:^15} {:^15} {:^15} {:^15}".format(
+ stream_id,
+ ("True" if stream['stream']['enabled'] else "False"),
+ stream['stream']['mode']['type'],
+ ("True" if stream['stream']['self_start'] else "False"),
+ stream['stream']['isg'],
+ (stream['stream']['next_stream_id'] if stream['stream']['next_stream_id'] != -1 else "None"),
+ ("{0} instr.".format(len(stream['stream']['vm'])) if stream['stream']['vm'] else "None")))
+
+ y += 1
+
+ # new section - traffic
+ y += 2
+
+ self.getwin().addstr(y, 2, "Traffic:", curses.A_UNDERLINE)
+ y += 2
+
+ self.status_obj.stats.query_sync()
+ port_stats = self.status_obj.stats.get_port_stats(self.port_id)
+
+
+ # table header
+ self.getwin().addstr(y, 2, "{:^15} {:^15} {:^15} {:^15} {:^15} {:^15} {:^15}".format(
+ "Port ID", "Tx [pps]", "Tx [bps]", "Tx [bytes]", "Rx [pps]", "Rx [bps]", "Rx [bytes]"))
+
+ y += 2
+
+ if port_stats:
+ self.getwin().addstr(y, 2, "{:^15} {:^15,} {:^15,} {:^15,} {:^15,} {:^15,} {:^15,}".format(
+ "{0} ({1})".format(str(self.port_id), self.status_obj.server_sys_info["ports"][self.port_id]["speed"]),
+ port_stats["tx_pps"],
+ port_stats["tx_bps"],
+ port_stats["total_tx_bytes"],
+ port_stats["rx_pps"],
+ port_stats["rx_bps"],
+ port_stats["total_rx_bytes"]))
+
+ else:
+ self.getwin().addstr(y, 2, "{:^15} {:^15} {:^15} {:^15} {:^15} {:^15} {:^15}".format(
+ "{0} ({1})".format(str(self.port_id), self.status_obj.server_sys_info["ports"][self.port_id]["speed"]),
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A"))
+
+ y += 2
# status object
class TrexStatus():
@@ -58,58 +299,78 @@ class TrexStatus():
self.log = []
self.rpc_client = rpc_client
+ self.snapshot = self.rpc_client.snapshot()
+
+ # fetch server info
self.get_server_info()
- def get_server_info (self):
- rc, msg = self.rpc_client.get_rpc_server_status()
+ # create stats objects
+ self.stats = Stats(rpc_client, self.rpc_client.get_owned_ports())
- if rc:
- self.server_status = msg
- else:
- self.server_status = None
+ # register actions
+ self.actions = {}
+ self.actions[ord('q')] = self.action_quit
+ self.actions[ord('p')] = self.action_ping
+ self.actions[ord('f')] = self.action_freeze
- def add_log_event (self, msg):
- self.log.append("[{0}] {1}".format(str(datetime.datetime.now().time()), msg))
+ self.actions[ord('g')] = self.action_show_ports_stats
- def add_panel (self, h, l, y, x, headline):
- win = curses.newwin(h, l, y, x)
- win.erase()
- win.box()
+ for port_id in xrange(0, self.rpc_client.get_port_count()):
+ self.actions[ord('0') + port_id] = self.action_show_port_generator(port_id)
- win.addstr(1, 2, headline)
- win.refresh()
+
+ # all ports stats
+ def action_show_ports_stats (self):
+ self.add_log_event("Switching to all ports view")
+ self.stats_panel = self.ports_stats_panel
+
+ return True
- panel.new_panel(win)
- panel1 = panel.new_panel(win)
- panel1.top()
+ # function generator for different ports requests
+ def action_show_port_generator (self, port_id):
+ def action_show_port():
+ self.add_log_event("Switching panel to port {0}".format(port_id))
+ self.stats_panel = self.ports_panels[port_id]
- return win, panel1
+ return True
- # static info panel
- def update_info (self):
- if self.server_status == None:
- return
+ return action_show_port
- self.info_panel.clear()
+ def action_freeze (self):
+ self.update_active = not self.update_active
+ self.add_log_event("Update continued" if self.update_active else "Update stopped")
- connection_details = self.rpc_client.get_connection_details()
+ return True
- self.info_panel.getwin().addstr(3, 2, "{:<30} {:30}".format("Server:", connection_details['server'] + ":" + str(connection_details['port'])))
- self.info_panel.getwin().addstr(4, 2, "{:<30} {:30}".format("Version:", self.server_status["general"]["version"]))
- self.info_panel.getwin().addstr(5, 2, "{:<30} {:30}".format("Build:",
- self.server_status["general"]["build_date"] + " @ " + self.server_status["general"]["build_time"] + " by " + self.server_status["general"]["version_user"]))
+ def action_quit(self):
+ return False
- self.info_panel.getwin().addstr(6, 2, "{:<30} {:30}".format("Server Uptime:", self.server_status["general"]["uptime"]))
+ def action_ping (self):
+ self.add_log_event("Pinging RPC server")
+
+ rc, msg = self.rpc_client.ping_rpc_server()
+ if rc:
+ self.add_log_event("Server replied: '{0}'".format(msg))
+ else:
+ self.add_log_event("Failed to get reply")
+
+ return True
+
+ def get_server_info (self):
+
+ self.server_version = self.rpc_client.get_rpc_server_version()
+ self.server_sys_info = self.rpc_client.get_system_info()
- # general stats
- def update_general (self, gen_stats):
- pass
+
+ def add_log_event (self, msg):
+ self.log.append("[{0}] {1}".format(str(datetime.datetime.now().time()), msg))
# control panel
def update_control (self):
self.control_panel.clear()
- self.control_panel.getwin().addstr(1, 2, "'f' - freeze, 'c' - clear stats, 'p' - ping server, 'q' - quit")
+ self.control_panel.getwin().addstr(1, 2, "'g' - general, '0-{0}' - specific port, 'f' - freeze, 'c' - clear stats, 'p' - ping server, 'q' - quit"
+ .format(self.rpc_client.get_port_count() - 1))
index = 3
@@ -125,42 +386,37 @@ class TrexStatus():
self.max_y = self.stdscr.getmaxyx()[0]
self.max_x = self.stdscr.getmaxyx()[1]
- # create cls panel
- self.main_panel = TrexStatusPanel(int(self.max_y * 0.8), self.max_x / 2, 0,0, "Trex Ports:")
+ self.server_info_panel = ServerInfoPanel(int(self.max_y * 0.3), self.max_x / 2, int(self.max_y * 0.5), self.max_x /2, self)
+ self.general_info_panel = GeneralInfoPanel(int(self.max_y * 0.5), self.max_x / 2, 0, self.max_x /2, self)
+ self.control_panel = ControlPanel(int(self.max_y * 0.2), self.max_x , int(self.max_y * 0.8), 0, self)
- self.general_panel = TrexStatusPanel(int(self.max_y * 0.6), self.max_x / 2, 0, self.max_x /2, "General Statistics:")
+ # those can be switched on the same place
+ self.ports_stats_panel = PortsStatsPanel(int(self.max_y * 0.8), self.max_x / 2, 0, 0, self)
- self.info_panel = TrexStatusPanel(int(self.max_y * 0.2), self.max_x / 2, int(self.max_y * 0.6), self.max_x /2, "Server Info:")
+ self.ports_panels = {}
+ for i in xrange(0, self.rpc_client.get_port_count()):
+ self.ports_panels[i] = SinglePortPanel(int(self.max_y * 0.8), self.max_x / 2, 0, 0, self, i)
- self.control_panel = TrexStatusPanel(int(self.max_y * 0.2), self.max_x , int(self.max_y * 0.8), 0, "")
+ # at start time we point to the main one
+ self.stats_panel = self.ports_stats_panel
+ self.stats_panel.panel.top()
panel.update_panels(); self.stdscr.refresh()
+ return
+
def wait_for_key_input (self):
ch = self.stdscr.getch()
- if (ch != curses.ERR):
- # stop/start status
- if (ch == ord('f')):
- self.update_active = not self.update_active
- self.add_log_event("Update continued" if self.update_active else "Update stopped")
-
- elif (ch == ord('p')):
- self.add_log_event("Pinging RPC server")
- rc, msg = self.rpc_client.ping_rpc_server()
- if rc:
- self.add_log_event("Server replied: '{0}'".format(msg))
- else:
- self.add_log_event("Failed to get reply")
-
- # c - clear stats
- elif (ch == ord('c')):
- self.add_log_event("Statistics cleared")
-
- elif (ch == ord('q')):
- return False
- else:
- self.add_log_event("Unknown key pressed {0}".format("'" + chr(ch) + "'" if chr(ch).isalpha() else ""))
+ # no key , continue
+ if ch == curses.ERR:
+ return True
+
+ # check for registered function
+ if ch in self.actions:
+ return self.actions[ch]()
+ else:
+ self.add_log_event("Unknown key pressed, please see legend")
return True
@@ -185,12 +441,17 @@ class TrexStatus():
if not rc:
break
- self.update_control()
- self.update_info()
+ self.server_info_panel.draw()
+ self.general_info_panel.draw()
+ self.control_panel.draw()
+
+ # can be different kinds of panels
+ self.stats_panel.panel.top()
+ self.stats_panel.draw()
panel.update_panels();
self.stdscr.refresh()
- sleep(0.1)
+ sleep(0.01)
def show_trex_status_internal (stdscr, rpc_client):
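trex_status.py above replaces the old if/elif key handling with a dispatch table: each key code maps to a callable that returns True to keep refreshing or False to quit, and the per-port entries are closures produced by action_show_port_generator(). A minimal sketch of that pattern (the two-port setup and print statements are assumptions for illustration):

    # Minimal keypress-dispatch sketch: ord(key) -> callable returning
    # True (keep running) or False (quit). Per-port handlers are closures.
    def make_show_port_action(port_id):
        def action_show_port():
            print "switching panel to port {0}".format(port_id)
            return True
        return action_show_port

    actions = {ord('q'): lambda: False}
    for port_id in xrange(0, 2):                 # assume a 2-port setup
        actions[ord('0') + port_id] = make_show_port_action(port_id)

    for ch in (ord('1'), ord('q')):
        keep_running = actions[ch]() if ch in actions else True
        print keep_running                       # True, then False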
diff --git a/scripts/automation/trex_control_plane/doc/api/index.rst b/scripts/automation/trex_control_plane/doc/api/index.rst
index 8233a634..cfdc6917 100755
--- a/scripts/automation/trex_control_plane/doc/api/index.rst
+++ b/scripts/automation/trex_control_plane/doc/api/index.rst
@@ -1,9 +1,8 @@
API Reference
=============
-The T-Rex API reference section is currently a work in progress.
-**T-Rex Modules**
+**TRex Modules**
.. toctree::
:maxdepth: 4
@@ -11,7 +10,7 @@ The T-Rex API reference section is currently a work in progress.
client_code
exceptions
-**T-Rex JSON Template**
+**TRex JSON Template**
.. toctree::
:maxdepth: 4
diff --git a/scripts/automation/trex_control_plane/doc/api/json_fields.rst b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
index b1a2af7c..9e32d23e 100755
--- a/scripts/automation/trex_control_plane/doc/api/json_fields.rst
+++ b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
@@ -1,23 +1,23 @@
-T-Rex JSON Template
-===================
+TRex JSON Template
+==================
-Whenever T-Rex is publishing live data, it uses JSON notation to describe the data-object.
+Whenever TRex is publishing live data, it uses JSON notation to describe the data-object.
-Each client may parse it diffrently, however this page will describe the values meaning when published by T-Rex server.
+Each client may parse it differently, however this page will describe the values meaning when published by TRex server.
Main Fields
-----------
-Each T-Rex server-published JSON object contains data divided to main fields under which the actual data lays.
+Each TRex server-published JSON object contains data divided to main fields under which the actual data lays.
These main fields are:
+-----------------------------+----------------------------------------------------+---------------------------+
| Main field | Contains | Comments |
+=============================+====================================================+===========================+
-| :ref:`trex-global-field` | Must-have data on T-Rex run, | |
+| :ref:`trex-global-field` | Must-have data on TRex run, | |
| | mainly regarding Tx/Rx and packet drops | |
+-----------------------------+----------------------------------------------------+---------------------------+
| :ref:`tx-gen-field` | Data indicate the quality of the transmit process. | |
@@ -117,7 +117,7 @@ trex-global field
.. _tx-gen-field:
tx-gen field
-~~~~~~~~~~~~~~
+~~~~~~~~~~~~
+-------------------+-------+-----------------------------------------------------------+
| Sub-key | Type | Meaning |
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/examples.rst b/scripts/automation/trex_control_plane/doc/packet_generator/examples.rst
index f903feac..bff1ef7f 100755
--- a/scripts/automation/trex_control_plane/doc/packet_generator/examples.rst
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/examples.rst
@@ -2,232 +2,4 @@
Packet Builder Usage Examples
=============================
-Whenever TRex is publishing live data, it uses JSON notation to describe the data-object.
-
-Each client may parse it diffrently, however this page will describe the values meaning when published by TRex server.
-
-
-Main Fields
------------
-
-Each TRex server-published JSON object contains data divided to main fields under which the actual data lays.
-
-These main fields are:
-
-+-----------------------------+----------------------------------------------------+---------------------------+
-| Main field | Contains | Comments |
-+=============================+====================================================+===========================+
-| :ref:`trex-global-field` | Must-have data on TRex run, | |
-| | mainly regarding Tx/Rx and packet drops | |
-+-----------------------------+----------------------------------------------------+---------------------------+
-| :ref:`tx-gen-field` | Data indicate the quality of the transmit process. | |
-| | In case histogram is zero it means that all packets| |
-| | were injected in the right time. | |
-+-----------------------------+----------------------------------------------------+---------------------------+
-| :ref:`trex-latecny-field` | Latency reports, containing latency data on | - Generated when latency |
-| | generated data and on response traffic | test is enabled (``l`` |
-| | | param) |
-| | | - *typo* on field key: |
-+-----------------------------+----------------------------------------------------+ will be fixed on next |
-| :ref:`trex-latecny-v2-field`| Extended latency information | release |
-+-----------------------------+----------------------------------------------------+---------------------------+
-
-
-Each of these fields contains keys for field general data (such as its name) and its actual data, which is always stored under the **"data"** key.
-
-For example, in order to access some trex-global data, the access path would look like::
-
- AllData -> trex-global -> data -> desired_info
-
-
-
-
-Detailed explanation
---------------------
-
-.. _trex-global-field:
-
-trex-global field
-~~~~~~~~~~~~~~~~~
-
-
-+--------------------------------+-------+-----------------------------------------------------------+
-| Sub-key | Type | Meaning |
-+================================+=======+===========================================================+
-| m_cpu_util | float | CPU utilization (0-100) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_platform_factor | float | multiplier factor |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_bps | float | total tx bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_rx_bps | float | total rx bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_pps | float | total tx packet per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_cps | float | total tx connection per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_expected_cps | float | expected tx connection per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_expected_pps | float | expected tx packet per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_tx_expected_bps | float | expected tx bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_rx_drop_bps | float | drop rate in bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_active_flows | float | active trex flows |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_open_flows | float | open trex flows from startup (monotonically incrementing) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_tx_pkts | int | total tx in packets |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_rx_pkts | int | total rx in packets |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_tx_bytes | int | total tx in bytes |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_rx_bytes | int | total rx in bytes |
-+--------------------------------+-------+-----------------------------------------------------------+
-| opackets-# | int | output packets (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| obytes-# | int | output bytes (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| ipackets-# | int | input packet (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| ibytes-# | int | input bytes (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| ierrors-# | int | input errors (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| oerrors-# | int | input errors (per interface) |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_tx_bps-# | float | total transmitted data in bit per second |
-+--------------------------------+-------+-----------------------------------------------------------+
-| unknown | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_learn_error [#f1]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_active [#f2]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_no_fid [#f2]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_time_out [#f2]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-| m_total_nat_open [#f2]_ | int | |
-+--------------------------------+-------+-----------------------------------------------------------+
-
-
-.. _tx-gen-field:
-
-tx-gen field
-~~~~~~~~~~~~~~
-
-+-------------------+-------+-----------------------------------------------------------+
-| Sub-key | Type | Meaning |
-+===================+=======+===========================================================+
-| realtime-hist | dict | histogram of transmission. See extended information about |
-| | | histogram object under :ref:`histogram-object-fields`. |
-| | | The attribute analyzed is time packet has been sent |
-| | | before/after it was intended to be |
-+-------------------+-------+-----------------------------------------------------------+
-| unknown | int | |
-+-------------------+-------+-----------------------------------------------------------+
-
-.. _trex-latecny-field:
-
-trex-latecny field
-~~~~~~~~~~~~~~~~~~
-
-+---------+-------+---------------------------------------------------------+
-| Sub-key | Type | Meaning |
-+=========+=======+=========================================================+
-| avg-# | float | average latency in usec (per interface) |
-+---------+-------+---------------------------------------------------------+
-| max-# | float | max latency in usec from the test start (per interface) |
-+---------+-------+---------------------------------------------------------+
-| c-max-# | float | max in the last 1 sec window (per interface) |
-+---------+-------+---------------------------------------------------------+
-| error-# | float | errors in latency packets (per interface) |
-+---------+-------+---------------------------------------------------------+
-| unknown | int | |
-+---------+-------+---------------------------------------------------------+
-
-.. _trex-latecny-v2-field:
-
-trex-latecny-v2 field
-~~~~~~~~~~~~~~~~~~~~~
-
-+--------------------------------------+-------+--------------------------------------+
-| Sub-key | Type | Meaning |
-+======================================+=======+======================================+
-| cpu_util | float | rx thread cpu % (this is not trex DP |
-| | | threads cpu%%) |
-+--------------------------------------+-------+--------------------------------------+
-| port-# | | Containing per interface |
-| | dict | information. See extended |
-| | | information under ``port-# -> |
-| | | key_name -> sub_key`` |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->hist | dict | histogram of latency. See extended |
-| | | information about histogram object |
-| | | under :ref:`histogram-object-fields`.|
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats | | Containing per interface |
-| | dict | information. See extended |
-| | | information under ``port-# -> |
-| | | key_name -> sub_key`` |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_tx_pkt_ok | int | total of try sent packets |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_pkt_ok | int | total of packets sent from hardware |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_no_magic | int | rx error with no magic |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_no_id | int | rx errors with no id |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_seq_error | int | error in seq number |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_length_error | int | |
-+--------------------------------------+-------+--------------------------------------+
-| port-#->stats->m_rx_check | int | packets tested in rx |
-+--------------------------------------+-------+--------------------------------------+
-| unknown | int | |
-+--------------------------------------+-------+--------------------------------------+
-
-
-
-.. _histogram-object-fields:
-
-Histogram object fields
-~~~~~~~~~~~~~~~~~~~~~~~
-
-The histogram object is being used in number of place throughout the JSON object.
-The following section describes its fields in detail.
-
-
-+-----------+-------+-----------------------------------------------------------------------------------+
-| Sub-key | Type | Meaning |
-+===========+=======+===================================================================================+
-| min_usec | int | min attribute value in usec. pkt with latency less than this value is not counted |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| max_usec | int | max attribute value in usec |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| high_cnt | int | how many packets on which its attribute > min_usec |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| cnt | int | total packets from test startup |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| s_avg | float | average value from test startup |
-+-----------+-------+-----------------------------------------------------------------------------------+
-| histogram | | histogram of relevant object by the following keys: |
-| | array | - key: value in usec |
-| | | - val: number of packets |
-+-----------+-------+-----------------------------------------------------------------------------------+
-
-
-Access Examples
----------------
-
-
-
-.. rubric:: Footnotes
-
-.. [#f1] Available only in NAT and NAT learning operation (``learn`` and ``learn-verify`` flags)
-
-.. [#f2] Available only in NAT operation (``learn`` flag)
\ No newline at end of file
+Here I'll add usage examples, very similar to those I added to RPC document
\ No newline at end of file
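The examples page above is emptied down to a placeholder, but the deleted text documented the access path AllData -> trex-global -> data -> desired_info for TRex's published JSON. A small sketch of that access path (the sample object is fabricated only to show the shape, it is not real TRex output):

    # Walking the documented path: top-level field -> "data" -> desired key.
    # The JSON below is an invented minimal sample, not captured TRex output.
    import json

    published = json.loads('{"trex-global": {"name": "trex-global", '
                           '"data": {"m_cpu_util": 13.2, "m_tx_bps": 950000.0}}}')

    cpu_util = published["trex-global"]["data"]["m_cpu_util"]
    print cpu_util   # 13.2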
diff --git a/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst b/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst
index 21efbee9..eb639f7c 100755
--- a/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst
+++ b/scripts/automation/trex_control_plane/doc/packet_generator/stream_export.rst
@@ -12,9 +12,9 @@ The TRex Packet Builder module supports (using ___ method) the export of built s
Guidelines
----------
-1. The YAML file can either contain Byte representation of the packet of refer to a .pcap file that conatains it.
+1. The YAML file can either contain a byte representation of the packet or refer to a .pcap file that contains it.
2. The YAML file is similar as much as possible to the `add_stream method <http://trex-tgn.cisco.com/trex/doc/trex_rpc_server_spec.html#_add_stream>`_ of TRex RPC server spec, which defines the raw interaction with TRex server.
-3. Only packet binary data and VM instructinos are to be saved. Any meta-data packet builder module used while creating the packet will be stripped out.
+3. Only packet binary data and VM instructions are to be saved. Any meta-data packet builder module used while creating the packet will be stripped out.
Export Format
-------------
@@ -26,4 +26,4 @@ Export Format
Example
-------
-The following files snapshot represents each of the options.
+The following files snapshot represents each of the options (Binary/pcap) for the very same HTTP GET request packet.
diff --git a/scripts/automation/trex_control_plane/examples/client_interactive_example.py b/scripts/automation/trex_control_plane/examples/client_interactive_example.py
index 05028463..9ee28898 100755
--- a/scripts/automation/trex_control_plane/examples/client_interactive_example.py
+++ b/scripts/automation/trex_control_plane/examples/client_interactive_example.py
@@ -15,7 +15,7 @@ import errno
class InteractiveTRexClient(cmd.Cmd):
- intro = termstyle.green("\nInteractive shell to play with Cisco's T-Rex API.\nType help to view available pre-defined scenarios\n(c) All rights reserved.\n")
+ intro = termstyle.green("\nInteractive shell to play with Cisco's TRex API.\nType help to view available pre-defined scenarios\n(c) All rights reserved.\n")
prompt = '> '
def __init__(self, trex_host, max_history_size = 100, trex_port = 8090, verbose_mode = False ):
@@ -33,45 +33,45 @@ class InteractiveTRexClient(cmd.Cmd):
def do_push_files (self, filepaths):
- """Pushes a custom file to be stored locally on T-Rex server.\nPush multiple files by spefiying their path separated by ' ' (space)."""
+ """Pushes a custom file to be stored locally on TRex server.\nPush multiple files by spefiying their path separated by ' ' (space)."""
try:
filepaths = filepaths.split(' ')
- print termstyle.green("*** Starting pushing files ({trex_files}) to T-Rex. ***".format (trex_files = ', '.join(filepaths)) )
+ print termstyle.green("*** Starting pushing files ({trex_files}) to TRex. ***".format (trex_files = ', '.join(filepaths)) )
ret_val = self.trex.push_files(filepaths)
if ret_val:
- print termstyle.green("*** End of T-Rex push_files method (success) ***")
+ print termstyle.green("*** End of TRex push_files method (success) ***")
else:
- print termstyle.magenta("*** End of T-Rex push_files method (failed) ***")
+ print termstyle.magenta("*** End of TRex push_files method (failed) ***")
except IOError as inst:
print termstyle.magenta(inst)
def do_show_default_run_params(self,line):
- """Outputs the default T-Rex running parameters"""
+ """Outputs the default TRex running parameters"""
pprint(self.DEFAULT_RUN_PARAMS)
- print termstyle.green("*** End of default T-Rex running parameters ***")
+ print termstyle.green("*** End of default TRex running parameters ***")
def do_show_run_params(self,line):
- """Outputs the currently configured T-Rex running parameters"""
+ """Outputs the currently configured TRex running parameters"""
pprint(self.run_params)
- print termstyle.green("*** End of T-Rex running parameters ***")
+ print termstyle.green("*** End of TRex running parameters ***")
def do_update_run_params(self, json_str):
- """Updates provided parameters on T-Rex running configuration. Provide using JSON string"""
+ """Updates provided parameters on TRex running configuration. Provide using JSON string"""
if json_str:
try:
upd_params = self.decoder.decode(json_str)
self.run_params.update(upd_params)
- print termstyle.green("*** End of T-Rex parameters update ***")
+ print termstyle.green("*** End of TRex parameters update ***")
except ValueError as inst:
print termstyle.magenta("Provided illegal JSON string. Please try again.\n[", inst,"]")
else:
print termstyle.magenta("JSON configuration string is missing. Please try again.")
def do_show_status (self, line):
- """Prompts T-Rex current status"""
+ """Prompts TRex current status"""
print self.trex.get_running_status()
- print termstyle.green("*** End of T-Rex status prompt ***")
+ print termstyle.green("*** End of TRex status prompt ***")
def do_show_trex_files_path (self, line):
"""Prompts the local path in which files are stored when pushed to t-rex server from client"""
@@ -79,43 +79,43 @@ class InteractiveTRexClient(cmd.Cmd):
print termstyle.green("*** End of trex_files_path prompt ***")
def do_show_reservation_status (self, line):
- """Prompts if T-Rex is currently reserved or not"""
+ """Prompts if TRex is currently reserved or not"""
if self.trex.is_reserved():
- print "T-Rex is reserved"
+ print "TRex is reserved"
else:
- print "T-Rex is NOT reserved"
+ print "TRex is NOT reserved"
print termstyle.green("*** End of reservation status prompt ***")
def do_reserve_trex (self, user):
- """Reserves the usage of T-Rex to a certain user"""
+ """Reserves the usage of TRex to a certain user"""
try:
if not user:
ret = self.trex.reserve_trex()
else:
ret = self.trex.reserve_trex(user.split(' ')[0])
- print termstyle.green("*** T-Rex reserved successfully ***")
+ print termstyle.green("*** TRex reserved successfully ***")
except TRexException as inst:
print termstyle.red(inst)
def do_cancel_reservation (self, user):
- """Cancels a current reservation of T-Rex to a certain user"""
+ """Cancels a current reservation of TRex to a certain user"""
try:
if not user:
ret = self.trex.cancel_reservation()
else:
ret = self.trex.cancel_reservation(user.split(' ')[0])
- print termstyle.green("*** T-Rex reservation canceled successfully ***")
+ print termstyle.green("*** TRex reservation canceled successfully ***")
except TRexException as inst:
print termstyle.red(inst)
def do_restore_run_default (self, line):
- """Restores original T-Rex running configuration"""
+ """Restores original TRex running configuration"""
self.run_params = dict(self.DEFAULT_RUN_PARAMS)
print termstyle.green("*** End of restoring default run parameters ***")
def do_run_until_finish (self, sample_rate):
- """Starts T-Rex and sample server until run is done."""
- print termstyle.green("*** Starting T-Rex run_until_finish scenario ***")
+ """Starts TRex and sample server until run is done."""
+ print termstyle.green("*** Starting TRex run_until_finish scenario ***")
if not sample_rate: # use default sample rate if not passed
sample_rate = 5
@@ -123,15 +123,15 @@ class InteractiveTRexClient(cmd.Cmd):
sample_rate = int(sample_rate)
ret = self.trex.start_trex(**self.run_params)
self.trex.sample_to_run_finish(sample_rate)
- print termstyle.green("*** End of T-Rex run ***")
+ print termstyle.green("*** End of TRex run ***")
except ValueError as inst:
print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
except TRexException as inst:
print termstyle.red(inst)
def do_run_and_poll (self, sample_rate):
- """Starts T-Rex and sample server manually until run is done."""
- print termstyle.green("*** Starting T-Rex run and manually poll scenario ***")
+ """Starts TRex and sample server manually until run is done."""
+ print termstyle.green("*** Starting TRex run and manually poll scenario ***")
if not sample_rate: # use default sample rate if not passed
sample_rate = 5
try:
@@ -145,7 +145,7 @@ class InteractiveTRexClient(cmd.Cmd):
# do WHATEVER here
time.sleep(sample_rate)
- print termstyle.green("*** End of T-Rex run ***")
+ print termstyle.green("*** End of TRex run ***")
except ValueError as inst:
print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
except TRexException as inst:
@@ -153,8 +153,8 @@ class InteractiveTRexClient(cmd.Cmd):
def do_run_until_condition (self, sample_rate):
- """Starts T-Rex and sample server until condition is satisfied."""
- print termstyle.green("*** Starting T-Rex run until condition is satisfied scenario ***")
+ """Starts TRex and sample server until condition is satisfied."""
+ print termstyle.green("*** Starting TRex run until condition is satisfied scenario ***")
def condition (result_obj):
return result_obj.get_current_tx_rate()['m_tx_pps'] > 200000
@@ -166,55 +166,55 @@ class InteractiveTRexClient(cmd.Cmd):
ret = self.trex.start_trex(**self.run_params)
ret_val = self.trex.sample_until_condition(condition, sample_rate)
print ret_val
- print termstyle.green("*** End of T-Rex run ***")
+ print termstyle.green("*** End of TRex run ***")
except ValueError as inst:
print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
except TRexException as inst:
print termstyle.red(inst)
def do_start_and_return (self, line):
- """Start T-Rex run and once in 'Running' mode, return to cmd prompt"""
- print termstyle.green("*** Starting T-Rex run, wait until in 'Running' state ***")
+ """Start TRex run and once in 'Running' mode, return to cmd prompt"""
+ print termstyle.green("*** Starting TRex run, wait until in 'Running' state ***")
try:
ret = self.trex.start_trex(**self.run_params)
- print termstyle.green("*** End of scenario (T-Rex is probably still running!) ***")
+ print termstyle.green("*** End of scenario (TRex is probably still running!) ***")
except TRexException as inst:
print termstyle.red(inst)
def do_poll_once (self, line):
- """Performs a single poll of T-Rex current data dump (if T-Rex is running) and prompts and short version of latest result_obj"""
- print termstyle.green("*** Trying T-Rex single poll ***")
+ """Performs a single poll of TRex current data dump (if TRex is running) and prompts and short version of latest result_obj"""
+ print termstyle.green("*** Trying TRex single poll ***")
try:
last_res = dict()
if self.trex.is_running(dump_out = last_res):
obj = self.trex.get_result_obj()
print obj
else:
- print termstyle.magenta("T-Rex isn't currently running.")
- print termstyle.green("*** End of scenario (T-Rex is posssibly still running!) ***")
+ print termstyle.magenta("TRex isn't currently running.")
+ print termstyle.green("*** End of scenario (TRex is posssibly still running!) ***")
except TRexException as inst:
print termstyle.red(inst)
def do_stop_trex (self, line):
- """Try to stop T-Rex run (if T-Rex is currently running)"""
- print termstyle.green("*** Starting T-Rex termination ***")
+ """Try to stop TRex run (if TRex is currently running)"""
+ print termstyle.green("*** Starting TRex termination ***")
try:
ret = self.trex.stop_trex()
- print termstyle.green("*** End of scenario (T-Rex is not running now) ***")
+ print termstyle.green("*** End of scenario (TRex is not running now) ***")
except TRexException as inst:
print termstyle.red(inst)
def do_kill_indiscriminately (self, line):
- """Force killing of running T-Rex process (if exists) on the server."""
- print termstyle.green("*** Starting T-Rex termination ***")
+ """Force killing of running TRex process (if exists) on the server."""
+ print termstyle.green("*** Starting TRex termination ***")
ret = self.trex.force_kill()
if ret:
- print termstyle.green("*** End of scenario (T-Rex is not running now) ***")
+ print termstyle.green("*** End of scenario (TRex is not running now) ***")
elif ret is None:
- print termstyle.magenta("*** End of scenario (T-Rex termination aborted) ***")
+ print termstyle.magenta("*** End of scenario (TRex termination aborted) ***")
else:
- print termstyle.red("*** End of scenario (T-Rex termination failed) ***")
+ print termstyle.red("*** End of scenario (TRex termination failed) ***")
def do_exit(self, arg):
"""Quits the application"""
@@ -223,20 +223,20 @@ class InteractiveTRexClient(cmd.Cmd):
if __name__ == "__main__":
- parser = ArgumentParser(description = termstyle.cyan('Run T-Rex client API demos and scenarios.'),
+ parser = ArgumentParser(description = termstyle.cyan('Run TRex client API demos and scenarios.'),
usage = """client_interactive_example [options]""" )
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0 \t (C) Cisco Systems Inc.\n')
parser.add_argument("-t", "--trex-host", required = True, dest="trex_host",
- action="store", help="Specify the hostname or ip to connect with T-Rex server.",
+ action="store", help="Specify the hostname or ip to connect with TRex server.",
metavar="HOST" )
parser.add_argument("-p", "--trex-port", type=int, default = 8090, metavar="PORT", dest="trex_port",
- help="Select port on which the T-Rex server listens. Default port is 8090.", action="store")
+ help="Select port on which the TRex server listens. Default port is 8090.", action="store")
parser.add_argument("-m", "--maxhist", type=int, default = 100, metavar="SIZE", dest="hist_size",
help="Specify maximum history size saved at client side. Default size is 100.", action="store")
parser.add_argument("--verbose", dest="verbose",
- action="store_true", help="Switch ON verbose option at T-Rex client. Default is: OFF.",
+ action="store_true", help="Switch ON verbose option at TRex client. Default is: OFF.",
default = False )
args = parser.parse_args()
@@ -248,7 +248,7 @@ if __name__ == "__main__":
exit(-1)
except socket.error, e:
if e.errno == errno.ECONNREFUSED:
- raise socket.error(errno.ECONNREFUSED, "Connection from T-Rex server was terminated. Please make sure the server is up.")
+ raise socket.error(errno.ECONNREFUSED, "Connection from TRex server was terminated. Please make sure the server is up.")
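For reference, the renamed client calls exercised by this interactive shell can also be driven from a plain script. A minimal, hypothetical sketch follows; the import path, host name and run parameters below are placeholders and not part of this patch:

    # Hypothetical, non-interactive use of the same CTRexClient calls shown above.
    # The import path and host are assumptions -- adjust them to the real example layout.
    from trex_client import CTRexClient

    trex = CTRexClient('trex-host-example', trex_port = 8090, verbose = False)
    run_params = dict(f = 'avl/sfr_delay_10_1g.yaml', d = 60, c = 2, m = 1.5, nc = True)

    trex.start_trex(**run_params)        # same call used by do_run_until_finish()
    trex.sample_to_run_finish(5)         # sample TRex every 5 seconds until the run ends
    print trex.get_result_obj()          # short version of the latest result object
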
diff --git a/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
index 7e7f6139..acaa95d3 100755
--- a/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
+++ b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
@@ -2,7 +2,7 @@
######################################################################################
### ###
-### T-Rex end-to-end demo script, written by T-Rex dev-team ###
+### TRex end-to-end demo script, written by TRex dev-team ###
### THIS SCRIPT ASSUMES PyYaml and Scapy INSTALLED ON PYTHON'S RUNNING MACHINE ###
### (for any question please contact trex-dev team @ trex-dev@cisco.com) ###
### ###
@@ -33,13 +33,13 @@ def pkts_to_pcap (pcap_filename, packets):
def main (args):
- # instantiate T-Rex client
+ # instantiate TRex client
trex = CTRexClient('trex-dan', verbose = args.verbose)
if args.steps:
print "\nNext step: .pcap generation."
raw_input("Press Enter to continue...")
- # generate T-Rex traffic.
+ # generate TRex traffic.
pkts = generate_dns_packets('21.0.0.2', '22.0.0.12') # In this case - DNS traffic (request-response)
print "\ngenerated traffic:"
print "=================="
@@ -50,7 +50,7 @@ def main (args):
print "\nNext step: .yaml generation."
raw_input("Press Enter to continue...")
# Generate .yaml file that uses the generated .pcap file
- trex_files_path = trex.get_trex_files_path() # fetch the path in which packets are saved on T-Rex server
+ trex_files_path = trex.get_trex_files_path() # fetch the path in which packets are saved on TRex server
yaml_obj = CTRexYaml(trex_files_path) # instantiate CTRexYaml obj
# set .yaml file parameters according to need and use
@@ -65,12 +65,12 @@ def main (args):
yaml_obj.dump()
if args.steps:
- print "\nNext step: run T-Rex with provided files."
+ print "\nNext step: run TRex with provided files."
raw_input("Press Enter to continue...")
# push all relevant files to server
trex.push_files( yaml_obj.get_file_list() )
- print "\nStarting T-Rex..."
+ print "\nStarting TRex..."
trex.start_trex(c = 2,
m = 1.5,
nc = True,
@@ -80,8 +80,8 @@ def main (args):
l = 1000)
if args.verbose:
- print "T-Rex state changed to 'Running'."
- print "Sampling T-Rex in 0.2 samples/sec (single sample every 5 secs)"
+ print "TRex state changed to 'Running'."
+ print "Sampling TRex in 0.2 samples/sec (single sample every 5 secs)"
last_res = dict()
while trex.is_running(dump_out = last_res):
@@ -92,14 +92,14 @@ def main (args):
if __name__ == "__main__":
- parser = ArgumentParser(description = 'Run T-Rex client API end-to-end example.',
+ parser = ArgumentParser(description = 'Run TRex client API end-to-end example.',
usage = """pkt_generation_for_trex [options]""" )
parser.add_argument("-s", "--step-by-step", dest="steps",
action="store_false", help="Switch OFF step-by-step script overview. Default is: ON.",
default = True )
parser.add_argument("--verbose", dest="verbose",
- action="store_true", help="Switch ON verbose option at T-Rex client. Default is: OFF.",
+ action="store_true", help="Switch ON verbose option at TRex client. Default is: OFF.",
default = False )
args = parser.parse_args()
main(args) \ No newline at end of file
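
The sampling loop that the hunk above truncates is, in spirit, something like the sketch below; the loop body and the 5-second interval are assumptions inferred from the surrounding comments, not the file's actual code:

    # Hypothetical continuation of the sampling loop; 'trex' is the CTRexClient
    # instance created in main(). The real script's loop body may differ.
    import time

    last_res = dict()
    while trex.is_running(dump_out = last_res):   # True while the TRex run is still active
        print "TRex is running; latest dump keys:", last_res.keys()
        time.sleep(5)                             # one sample every 5 seconds, as noted above
    print "TRex run finished."
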
diff --git a/scripts/automation/trex_control_plane/server/extended_daemon_runner.py b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
index 1813ed48..734fa22e 100755
--- a/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
+++ b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
@@ -97,9 +97,9 @@ class ExtendedDaemonRunner(runner.DaemonRunner):
@staticmethod
def _show(self):
if self.pidfile.is_locked():
- print termstyle.red("T-Rex server daemon is running")
+ print termstyle.red("TRex server daemon is running")
else:
- print termstyle.red("T-Rex server daemon is NOT running")
+ print termstyle.red("TRex server daemon is NOT running")
def do_action(self):
self.__prevent_duplicate_runs()
diff --git a/scripts/automation/trex_control_plane/server/trex_daemon_server.py b/scripts/automation/trex_control_plane/server/trex_daemon_server.py
index 5032423a..ec07cb8a 100755
--- a/scripts/automation/trex_control_plane/server/trex_daemon_server.py
+++ b/scripts/automation/trex_control_plane/server/trex_daemon_server.py
@@ -54,7 +54,7 @@ def main ():
logger.addHandler(handler)
except EnvironmentError, e:
if e.errno == errno.EACCES: # catching permission denied error
- print "Launching user must have sudo privileges in order to run T-Rex daemon.\nTerminating daemon process."
+ print "Launching user must have sudo privileges in order to run TRex daemon.\nTerminating daemon process."
exit(-1)
try:
diff --git a/scripts/automation/trex_control_plane/server/trex_launch_thread.py b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
index b4be60a9..59c382ea 100755
--- a/scripts/automation/trex_control_plane/server/trex_launch_thread.py
+++ b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
@@ -33,44 +33,44 @@ class AsynchronousTRexSession(threading.Thread):
with open(os.devnull, 'w') as DEVNULL:
self.time_stamps['start'] = self.time_stamps['run_time'] = time.time()
self.session = subprocess.Popen("exec "+self.cmd, cwd = self.launch_path, shell=True, stdin = DEVNULL, stderr = subprocess.PIPE, preexec_fn=os.setsid)
- logger.info("T-Rex session initialized successfully, Parent process pid is {pid}.".format( pid = self.session.pid ))
+ logger.info("TRex session initialized successfully, Parent process pid is {pid}.".format( pid = self.session.pid ))
while self.session.poll() is None: # subprocess is NOT finished
time.sleep(0.5)
if self.stoprequest.is_set():
- logger.debug("Abort request received by handling thread. Terminating T-Rex session." )
+ logger.debug("Abort request received by handling thread. Terminating TRex session." )
os.killpg(self.session.pid, signal.SIGUSR1)
self.trexObj.set_status(TRexStatus.Idle)
- self.trexObj.set_verbose_status("T-Rex is Idle")
+ self.trexObj.set_verbose_status("TRex is Idle")
break
self.time_stamps['run_time'] = time.time() - self.time_stamps['start']
try:
if self.time_stamps['run_time'] < 5:
- logger.error("T-Rex run failed due to wrong input parameters, or due to reachability issues.")
- self.trexObj.set_verbose_status("T-Rex run failed due to wrong input parameters, or due to reachability issues.\n\nT-Rex command: {cmd}\n\nRun output:\n{output}".format(
+ logger.error("TRex run failed due to wrong input parameters, or due to readability issues.")
+ self.trexObj.set_verbose_status("TRex run failed due to wrong input parameters, or due to readability issues.\n\nTRex command: {cmd}\n\nRun output:\n{output}".format(
cmd = self.cmd, output = self.load_trex_output(self.export_path)))
self.trexObj.errcode = -11
elif (self.session.returncode is not None and self.session.returncode < 0) or ( (self.time_stamps['run_time'] < self.duration) and (not self.stoprequest.is_set()) ):
if (self.session.returncode is not None and self.session.returncode < 0):
- logger.debug("Failed T-Rex run due to session return code ({ret_code})".format( ret_code = self.session.returncode ) )
+ logger.debug("Failed TRex run due to session return code ({ret_code})".format( ret_code = self.session.returncode ) )
elif ( (self.time_stamps['run_time'] < self.duration) and not self.stoprequest.is_set()):
- logger.debug("Failed T-Rex run due to running time ({runtime}) combined with no-stopping request.".format( runtime = self.time_stamps['run_time'] ) )
+ logger.debug("Failed TRex run due to running time ({runtime}) combined with no-stopping request.".format( runtime = self.time_stamps['run_time'] ) )
- logger.warning("T-Rex run was terminated unexpectedly by outer process or by the hosting OS")
- self.trexObj.set_verbose_status("T-Rex run was terminated unexpectedly by outer process or by the hosting OS.\n\nRun output:\n{output}".format(
+ logger.warning("TRex run was terminated unexpectedly by outer process or by the hosting OS")
+ self.trexObj.set_verbose_status("TRex run was terminated unexpectedly by outer process or by the hosting OS.\n\nRun output:\n{output}".format(
output = self.load_trex_output(self.export_path)))
self.trexObj.errcode = -15
else:
- logger.info("T-Rex run session finished.")
- self.trexObj.set_verbose_status('T-Rex finished.')
+ logger.info("TRex run session finished.")
+ self.trexObj.set_verbose_status('TRex finished.')
self.trexObj.errcode = None
finally:
self.trexObj.set_status(TRexStatus.Idle)
logger.info("TRex running state changed to 'Idle'.")
self.trexObj.expect_trex.clear()
- logger.debug("Finished handling a single run of T-Rex.")
+ logger.debug("Finished handling a single run of TRex.")
self.trexObj.zmq_dump = None
def join (self, timeout = None):
diff --git a/scripts/automation/trex_control_plane/server/trex_server.py b/scripts/automation/trex_control_plane/server/trex_server.py
index 35b2669a..1e5098fb 100755
--- a/scripts/automation/trex_control_plane/server/trex_server.py
+++ b/scripts/automation/trex_control_plane/server/trex_server.py
@@ -34,7 +34,7 @@ CCustomLogger.setup_custom_logger('TRexServer')
logger = logging.getLogger('TRexServer')
class CTRexServer(object):
- """This class defines the server side of the RESTfull interaction with T-Rex"""
+ """This class defines the server side of the RESTfull interaction with TRex"""
DEFAULT_TREX_PATH = '/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.55/' #'/auto/proj-pcube-b/apps/PL-b/tools/nightly/trex_latest'
TREX_START_CMD = './t-rex-64'
DEFAULT_FILE_PATH = '/tmp/trex_files/'
@@ -53,7 +53,7 @@ class CTRexServer(object):
the port number on which trex's zmq module will interact with daemon server
default value: 4500
- Instantiate a T-Rex client object, and connecting it to listening daemon-server
+ Instantiate a TRex client object and connect it to the listening daemon-server
"""
self.TREX_PATH = os.path.abspath(os.path.dirname(trex_path+'/'))
self.trex_files_path = os.path.abspath(os.path.dirname(trex_files_path+'/'))
@@ -94,17 +94,17 @@ class CTRexServer(object):
"""This method fires up the daemon server based on initialized parameters of the class"""
# initialize the server instance with given reasources
try:
- print "Firing up T-Rex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port )
- logger.info("Firing up T-Rex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
+ print "Firing up TRex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port )
+ logger.info("Firing up TRex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
logger.info("current working dir is: {0}".format(self.TREX_PATH) )
logger.info("current files dir is : {0}".format(self.trex_files_path) )
logger.debug("Starting TRex server. Registering methods to process.")
self.server = SimpleJSONRPCServer( (self.trex_host, self.trex_daemon_port) )
except socket.error as e:
if e.errno == errno.EADDRINUSE:
- logger.error("T-Rex server requested address already in use. Aborting server launching.")
- print "T-Rex server requested address already in use. Aborting server launching."
- raise socket.error(errno.EADDRINUSE, "T-Rex daemon requested address already in use. Server launch aborted. Please make sure no other process is using the desired server properties.")
+ logger.error("TRex server requested address already in use. Aborting server launching.")
+ print "TRex server requested address already in use. Aborting server launching."
+ raise socket.error(errno.EADDRINUSE, "TRex daemon requested address already in use. Server launch aborted. Please make sure no other process is using the desired server properties.")
# set further functionality and peripherals to server instance
try:
@@ -136,7 +136,7 @@ class CTRexServer(object):
def stop_handler (self, signum, frame):
logger.info("Daemon STOP request detected.")
if self.is_running():
- # in case T-Rex process is currently running, stop it before terminating server process
+ # in case TRex process is currently running, stop it before terminating server process
self.stop_trex(self.trex.get_seq())
sys.exit(0)
@@ -163,25 +163,25 @@ class CTRexServer(object):
def reserve_trex (self, user):
if user == "":
- logger.info("T-Rex reservation cannot apply to empty string user. Request denied.")
- return Fault(-33, "T-Rex reservation cannot apply to empty string user. Request denied.")
+ logger.info("TRex reservation cannot apply to empty string user. Request denied.")
+ return Fault(-33, "TRex reservation cannot apply to empty string user. Request denied.")
with self.start_lock:
logger.info("Processing reserve_trex() command.")
if self.is_reserved():
if user == self.__reservation['user']:
# return True is the same user is asking and already has the resrvation
- logger.info("the same user is asking and already has the resrvation. Re-reserving T-Rex.")
+ logger.info("the same user is asking and already has the resrvation. Re-reserving TRex.")
return True
- logger.info("T-Rex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
- return Fault(-33, "T-Rex is already reserved to another user ({res_user}). Please make sure T-Rex is free before reserving it.".format(
+ logger.info("TRex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
+ return Fault(-33, "TRex is already reserved to another user ({res_user}). Please make sure TRex is free before reserving it.".format(
res_user = self.__reservation['user']) ) # raise at client TRexInUseError
elif self.trex.get_status() != TRexStatus.Idle:
- logger.info("T-Rex is currently running, cannot reserve T-Rex unless in Idle state.")
- return Fault(-13, 'T-Rex is currently running, cannot reserve T-Rex unless in Idle state. Please try again when T-Rex run finished.') # raise at client TRexInUseError
+ logger.info("TRex is currently running, cannot reserve TRex unless in Idle state.")
+ return Fault(-13, 'TRex is currently running, cannot reserve TRex unless in Idle state. Please try again when TRex run finished.') # raise at client TRexInUseError
else:
- logger.info("T-Rex is now reserved for user ({res_user}).".format( res_user = user ))
+ logger.info("TRex is now reserved for user ({res_user}).".format( res_user = user ))
self.__reservation = {'user' : user, 'since' : time.ctime()}
logger.debug("Reservation details: "+ str(self.__reservation))
return True
@@ -191,15 +191,15 @@ class CTRexServer(object):
logger.info("Processing cancel_reservation() command.")
if self.is_reserved():
if self.__reservation['user'] == user:
- logger.info("T-Rex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
+ logger.info("TRex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
self.__reservation = None
return True
else:
- logger.warning("T-Rex is reserved to different user than the provided one. Reservation wasn't canceled.")
+ logger.warning("TRex is reserved to different user than the provided one. Reservation wasn't canceled.")
return Fault(-33, "Cancel reservation request is available to the user that holds the reservation. Request denied") # raise at client TRexRequestDenied
else:
- logger.info("T-Rex is not reserved to anyone. No need to cancel anything")
+ logger.info("TRex is not reserved to anyone. No need to cancel anything")
assert(self.__reservation is None)
return False
@@ -208,21 +208,21 @@ class CTRexServer(object):
with self.start_lock:
logger.info("Processing start_trex() command.")
if self.is_reserved():
- # check if this is not the user to which T-Rex is reserved
+ # check if this is not the user to which TRex is reserved
if self.__reservation['user'] != user:
- logger.info("T-Rex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
- return Fault(-33, "T-Rex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user'])) # raise at client TRexRequestDenied
+ logger.info("TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
+ return Fault(-33, "TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user'])) # raise at client TRexRequestDenied
elif self.trex.get_status() != TRexStatus.Idle:
- logger.info("T-Rex is already taken, cannot create another run until done.")
+ logger.info("TRex is already taken, cannot create another run until done.")
return Fault(-13, '') # raise at client TRexInUseError
try:
server_cmd_data = self.generate_run_cmd(**trex_cmd_options)
self.zmq_monitor.first_dump = True
self.trex.start_trex(self.TREX_PATH, server_cmd_data)
- logger.info("T-Rex session has been successfully initiated.")
+ logger.info("TRex session has been successfully initiated.")
if block_to_success:
- # delay server response until T-Rex is at 'Running' state.
+ # delay server response until TRex is at 'Running' state.
start_time = time.time()
trex_state = None
while (time.time() - start_time) < timeout :
@@ -232,20 +232,20 @@ class CTRexServer(object):
else:
time.sleep(0.5)
- # check for T-Rex run started normally
+ # check for TRex run started normally
if trex_state == TRexStatus.Starting: # reached timeout
- logger.warning("TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.")
- return Fault(-12, 'TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+ logger.warning("TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.")
+ return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
elif trex_state == TRexStatus.Idle:
return Fault(-11, self.trex.get_verbose_status()) # raise at client TRexError
- # reach here only if T-Rex is at 'Running' state
+ # reach here only if TRex is at 'Running' state
self.trex.gen_seq()
return self.trex.get_seq() # return unique seq number to client
except TypeError as e:
- logger.error("T-Rex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
- raise TypeError('T-Rex -f (traffic generation .yaml file) and -c (num of cores) must be specified.')
+ logger.error("TRex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
+ raise TypeError('TRex -f (traffic generation .yaml file) and -c (num of cores) must be specified.')
def stop_trex(self, seq):
@@ -262,11 +262,11 @@ class CTRexServer(object):
return False
def force_trex_kill (self):
- logger.info("Processing force_trex_kill() command. --> Killing T-Rex session indiscriminately.")
+ logger.info("Processing force_trex_kill() command. --> Killing TRex session indiscriminately.")
return self.trex.stop_trex()
def wait_until_kickoff_finish (self, timeout = 40):
- # block until T-Rex exits Starting state
+ # block until TRex exits Starting state
logger.info("Processing wait_until_kickoff_finish() command.")
trex_state = None
start_time = time.time()
@@ -274,7 +274,7 @@ class CTRexServer(object):
trex_state = self.trex.get_status()
if trex_state != TRexStatus.Starting:
return
- return Fault(-12, 'TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+ return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
def get_running_info (self):
logger.info("Processing get_running_info() command.")
@@ -283,7 +283,7 @@ class CTRexServer(object):
def generate_run_cmd (self, f, d, iom = 0, export_path="/tmp/trex.txt", **kwargs):
""" generate_run_cmd(self, trex_cmd_options, export_path) -> str
- Generates a custom running command for the kick-off of the T-Rex traffic generator.
+ Generates a custom running command for the kick-off of the TRex traffic generator.
Returns a tuple of command (string) and export path (string) to be issued on the trex server
Parameters
@@ -325,14 +325,14 @@ class CTRexServer(object):
def __check_trex_path_validity(self):
# check for executable existance
if not os.path.exists(self.TREX_PATH+'/t-rex-64'):
- print "The provided T-Rex path do not contain an executable T-Rex file.\nPlease check the path and retry."
- logger.error("The provided T-Rex path do not contain an executable T-Rex file")
+ print "The provided TRex path do not contain an executable TRex file.\nPlease check the path and retry."
+ logger.error("The provided TRex path do not contain an executable TRex file")
exit(-1)
# check for executable permissions
st = os.stat(self.TREX_PATH+'/t-rex-64')
if not bool(st.st_mode & (stat.S_IXUSR ) ):
- print "The provided T-Rex path do not contain an T-Rex file with execution privileges.\nPlease check the files permissions and retry."
- logger.error("The provided T-Rex path do not contain an T-Rex file with execution privileges")
+ print "The provided TRex path do not contain an TRex file with execution privileges.\nPlease check the files permissions and retry."
+ logger.error("The provided TRex path do not contain an TRex file with execution privileges")
exit(-1)
else:
return
@@ -357,7 +357,7 @@ class CTRexServer(object):
class CTRex(object):
def __init__(self):
self.status = TRexStatus.Idle
- self.verbose_status = 'T-Rex is Idle'
+ self.verbose_status = 'TRex is Idle'
self.errcode = None
self.session = None
self.zmq_monitor = None
@@ -388,34 +388,34 @@ class CTRex(object):
if self.status == TRexStatus.Running:
return self.encoder.encode(self.zmq_dump)
else:
- logger.info("T-Rex isn't running. Running information isn't available.")
+ logger.info("TRex isn't running. Running information isn't available.")
if self.status == TRexStatus.Idle:
if self.errcode is not None: # some error occured
- logger.info("T-Rex is in Idle state, with errors. returning fault")
+ logger.info("TRex is in Idle state, with errors. returning fault")
return Fault(self.errcode, self.verbose_status) # raise at client relevant exception, depending on the reason the error occured
else:
- logger.info("T-Rex is in Idle state, no errors. returning {}")
+ logger.info("TRex is in Idle state, no errors. returning {}")
return u'{}'
- return Fault(-12, self.verbose_status) # raise at client TRexWarning, indicating T-Rex is back to Idle state or still in Starting state
+ return Fault(-12, self.verbose_status) # raise at client TRexWarning, indicating TRex is back to Idle state or still in Starting state
def stop_trex(self):
if self.status == TRexStatus.Idle:
# t-rex isn't running, nothing to abort
- logger.info("T-Rex isn't running. No need to stop anything.")
- if self.errcode is not None: # some error occured, notify client despite T-Rex already stopped
+ logger.info("TRex isn't running. No need to stop anything.")
+ if self.errcode is not None: # some error occurred, notify client despite TRex already stopped
return Fault(self.errcode, self.verbose_status) # raise at client relevant exception, depending on the reason the error occured
return False
else:
# handle stopping t-rex's run
self.session.join()
- logger.info("T-Rex session has been successfully aborted.")
+ logger.info("TRex session has been successfully aborted.")
return True
def start_trex(self, trex_launch_path, trex_cmd):
self.set_status(TRexStatus.Starting)
logger.info("TRex running state changed to 'Starting'.")
- self.set_verbose_status('T-Rex is starting (data is not available yet)')
+ self.set_verbose_status('TRex is starting (data is not available yet)')
self.errcode = None
self.session = AsynchronousTRexSession(self, trex_launch_path, trex_cmd)
@@ -430,7 +430,7 @@ def generate_trex_parser ():
default_path = os.path.abspath(os.path.join(outer_packages.CURRENT_PATH, os.pardir, os.pardir, os.pardir))
default_files_path = os.path.abspath(CTRexServer.DEFAULT_FILE_PATH)
- parser = ArgumentParser(description = 'Run server application for T-Rex traffic generator',
+ parser = ArgumentParser(description = 'Run server application for TRex traffic generator',
formatter_class = RawTextHelpFormatter,
usage = """
trex_daemon_server [options]
@@ -440,10 +440,10 @@ trex_daemon_server [options]
parser.add_argument("-p", "--daemon-port", type=int, default = 8090, metavar="PORT", dest="daemon_port",
help="Select port on which the daemon runs.\nDefault port is 8090.", action="store")
parser.add_argument("-z", "--zmq-port", dest="zmq_port", type=int,
- action="store", help="Select port on which the ZMQ module listens to T-Rex.\nDefault port is 4500.", metavar="PORT",
+ action="store", help="Select port on which the ZMQ module listens to TRex.\nDefault port is 4500.", metavar="PORT",
default = 4500)
parser.add_argument("-t", "--trex-path", dest="trex_path",
- action="store", help="Specify the compiled T-Rex directory from which T-Rex would run.\nDefault path is: {def_path}.".format( def_path = default_path ),
+ action="store", help="Specify the compiled TRex directory from which TRex would run.\nDefault path is: {def_path}.".format( def_path = default_path ),
metavar="PATH", default = default_path )
parser.add_argument("-f", "--files-path", dest="files_path",
action="store", help="Specify a path to directory on which pushed files will be saved at.\nDefault path is: {def_path}.".format( def_path = default_files_path ),
diff --git a/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
index 7a278af8..db9bf7da 100755
--- a/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
+++ b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
@@ -22,7 +22,7 @@ class ZmqMonitorSession(threading.Thread):
self.zmq_port = zmq_port
self.zmq_publisher = "tcp://localhost:{port}".format(port=self.zmq_port)
self.trexObj = trexObj
- self.expect_trex = self.trexObj.expect_trex # used to signal if T-Rex is expected to run and if data should be considered
+ self.expect_trex = self.trexObj.expect_trex # used to signal if TRex is expected to run and if data should be considered
self.decoder = JSONDecoder()
logger.info("ZMQ monitor initialization finished")
@@ -69,7 +69,7 @@ class ZmqMonitorSession(threading.Thread):
# change TRexStatus from starting to Running once the first ZMQ dump is obtained and parsed successfully
self.first_dump = False
self.trexObj.set_status(TRexStatus.Running)
- self.trexObj.set_verbose_status("T-Rex is Running")
+ self.trexObj.set_verbose_status("TRex is Running")
logger.info("First ZMQ dump received and successfully parsed. TRex running state changed to 'Running'.")
diff --git a/scripts/avl/sfr_branch_profile_delay_10.yaml b/scripts/avl/sfr_branch_profile_delay_10.yaml
index 71e69212..04671b3e 100755
--- a/scripts/avl/sfr_branch_profile_delay_10.yaml
+++ b/scripts/avl/sfr_branch_profile_delay_10.yaml
@@ -4,7 +4,7 @@
clients_start : "16.0.0.1"
clients_end : "16.0.1.255"
servers_start : "48.0.0.1"
- servers_end : "48.0.62.255"
+ servers_end : "48.0.63.224"
clients_per_gb : 201
min_clients : 101
dual_port_mask : "1.0.0.0"
diff --git a/scripts/avl/sfr_delay_10.yaml b/scripts/avl/sfr_delay_10.yaml
index 1a3f82c3..2bb70fe4 100755
--- a/scripts/avl/sfr_delay_10.yaml
+++ b/scripts/avl/sfr_delay_10.yaml
@@ -4,7 +4,7 @@
clients_start : "16.0.0.1"
clients_end : "16.0.1.255"
servers_start : "48.0.0.1"
- servers_end : "48.0.20.255"
+ servers_end : "48.0.21.245"
clients_per_gb : 201
min_clients : 101
dual_port_mask : "1.0.0.0"
diff --git a/scripts/avl/sfr_delay_10_1g.yaml b/scripts/avl/sfr_delay_10_1g.yaml
index 925531fd..065fe855 100755
--- a/scripts/avl/sfr_delay_10_1g.yaml
+++ b/scripts/avl/sfr_delay_10_1g.yaml
@@ -4,7 +4,7 @@
clients_start : "16.0.0.1"
clients_end : "16.0.1.255"
servers_start : "48.0.0.1"
- servers_end : "48.0.20.255"
+ servers_end : "48.0.21.245"
clients_per_gb : 201
min_clients : 101
dual_port_mask : "1.0.0.0"
diff --git a/scripts/cap2/per_template_gen1.yaml b/scripts/cap2/per_template_gen1.yaml
new file mode 100644
index 00000000..41332518
--- /dev/null
+++ b/scripts/cap2/per_template_gen1.yaml
@@ -0,0 +1,40 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ generator_clients :
+ - name : "c1"
+ distribution : "seq"
+ ip_start : "26.0.0.1"
+ ip_end : "26.0.1.255"
+ - name : "c2"
+ distribution : "seq"
+ ip_start : "36.0.0.1"
+ ip_end : "36.0.1.255"
+ generator_servers :
+ - name : "s1"
+ distribution : "seq"
+ ip_start : "28.0.0.1"
+ ip_end : "28.0.1.255"
+ track_ports : false
+ - name : "s2"
+ distribution : "seq"
+ ip_start : "38.0.0.1"
+ ip_end : "38.0.1.255"
+ track_ports : false
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/http_get.pcap
+ cps : 1.0
+ ipg : 100
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/cap2/per_template_gen2.yaml b/scripts/cap2/per_template_gen2.yaml
new file mode 100644
index 00000000..3349087c
--- /dev/null
+++ b/scripts/cap2/per_template_gen2.yaml
@@ -0,0 +1,41 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "20.0.0.1"
+ clients_end : "20.0.0.255"
+ servers_start : "90.0.0.1"
+ servers_end : "90.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ generator_clients :
+ - name : "c1"
+ distribution : "seq"
+ ip_start : "16.0.0.1"
+ ip_end : "16.0.1.255"
+ - name : "c2"
+ distribution : "seq"
+ ip_start : "36.0.0.1"
+ ip_end : "36.0.1.255"
+ generator_servers :
+ - name : "s1"
+ distribution : "seq"
+ ip_start : "48.0.0.1"
+ ip_end : "48.0.1.255"
+ track_ports : false
+ - name : "s2"
+ distribution : "seq"
+ ip_start : "38.0.0.1"
+ ip_end : "38.0.1.255"
+ track_ports : false
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/http_get.pcap
+ client_pool: "c1"
+ server_pool: "s1"
+ cps : 1.0
+ ipg : 100
+ rtt : 10000
+ w : 1
diff --git a/scripts/cap2/per_template_gen3.yaml b/scripts/cap2/per_template_gen3.yaml
new file mode 100644
index 00000000..2bf428d0
--- /dev/null
+++ b/scripts/cap2/per_template_gen3.yaml
@@ -0,0 +1,41 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "20.0.0.1"
+ clients_end : "20.0.0.255"
+ servers_start : "90.0.0.1"
+ servers_end : "90.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ generator_clients :
+ - name : "c1"
+ distribution : "seq"
+ ip_start : "26.0.0.1"
+ ip_end : "26.0.1.255"
+ - name : "c2"
+ distribution : "seq"
+ ip_start : "36.0.0.1"
+ ip_end : "36.0.1.255"
+ generator_servers :
+ - name : "s1"
+ distribution : "seq"
+ ip_start : "28.0.0.1"
+ ip_end : "28.0.1.255"
+ track_ports : false
+ - name : "s2"
+ distribution : "seq"
+ ip_start : "38.0.0.1"
+ ip_end : "38.0.1.255"
+ track_ports : false
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/http_get.pcap
+ client_pool: "c2"
+ server_pool: "s2"
+ cps : 1.0
+ ipg : 100
+ rtt : 10000
+ w : 1
diff --git a/scripts/cap2/per_template_gen4.yaml b/scripts/cap2/per_template_gen4.yaml
new file mode 100644
index 00000000..8a8a61b9
--- /dev/null
+++ b/scripts/cap2/per_template_gen4.yaml
@@ -0,0 +1,41 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "20.0.0.1"
+ clients_end : "20.0.0.255"
+ servers_start : "90.0.0.1"
+ servers_end : "90.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ generator_clients :
+ - name : "c1"
+ distribution : "seq"
+ ip_start : "26.0.0.1"
+ ip_end : "26.0.1.255"
+ - name : "c2"
+ distribution : "seq"
+ ip_start : "36.0.0.1"
+ ip_end : "36.0.1.255"
+ generator_servers :
+ - name : "s1"
+ distribution : "seq"
+ ip_start : "28.0.0.1"
+ ip_end : "28.0.1.255"
+ track_ports : false
+ - name : "s2"
+ distribution : "seq"
+ ip_start : "38.0.0.1"
+ ip_end : "38.0.1.255"
+ track_ports : false
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/http_get.pcap
+ client_pool: "c2"
+ server_pool: "s1"
+ cps : 1.0
+ ipg : 100
+ rtt : 10000
+ w : 1
diff --git a/scripts/cap2/per_template_gen5.yaml b/scripts/cap2/per_template_gen5.yaml
new file mode 100755
index 00000000..e29a2bfc
--- /dev/null
+++ b/scripts/cap2/per_template_gen5.yaml
@@ -0,0 +1,51 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "20.0.0.1"
+ clients_end : "20.0.0.255"
+ servers_start : "90.0.0.1"
+ servers_end : "90.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ generator_clients :
+ - name : "c1"
+ distribution : "seq"
+ ip_start : "26.0.0.1"
+ ip_end : "26.0.1.255"
+ - name : "c2"
+ distribution : "seq"
+ ip_start : "36.0.0.1"
+ ip_end : "36.0.1.254"
+ generator_servers :
+ - name : "s1"
+ distribution : "seq"
+ ip_start : "28.0.0.1"
+ ip_end : "28.0.1.255"
+ track_ports : false
+ - name : "s2"
+ distribution : "seq"
+ ip_start : "38.0.0.1"
+ ip_end : "38.0.3.255"
+ track_ports : false
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/http_get.pcap
+ client_pool: "c2"
+ server_pool: "s1"
+ cps : 1.0
+ ipg : 100
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_full.pcap
+ client_pool: "c1"
+ server_pool: "s2"
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ plugin_id : 1
+
diff --git a/scripts/trex-console b/scripts/trex-console
index 6eab77dd..22a47eb8 100755
--- a/scripts/trex-console
+++ b/scripts/trex-console
@@ -1,2 +1,2 @@
#!/bin/bash
-../scripts/automation/trex_control_plane/console/trex_console.py $@
+automation/trex_control_plane/console/trex_console.py $@
diff --git a/src/bp_gtest.cpp b/src/bp_gtest.cpp
index 78efbecb..a529d637 100755
--- a/src/bp_gtest.cpp
+++ b/src/bp_gtest.cpp
@@ -223,10 +223,10 @@ public:
if ( m_req_ports ){
int i;
- fl.m_threads_info[0]->m_smart_gen.FreePort(tuple.getClient(),tuple.getClientPort());
+ fl.m_threads_info[0]->m_smart_gen.FreePort(0, tuple.getClientId(),tuple.getClientPort());
for (i=0 ; i<m_req_ports;i++) {
- fl.m_threads_info[0]->m_smart_gen.FreePort(tuple.getClient(),ports[i]);
+ fl.m_threads_info[0]->m_smart_gen.FreePort(0,tuple.getClientId(),ports[i]);
}
delete []ports;
}
@@ -527,6 +527,44 @@ TEST_F(basic, sfr4) {
EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
}
+TEST_F(basic, per_template_gen1) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(0);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/per_template_gen1.yaml";
+ po->out_file ="exp/sfr_4";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+TEST_F(basic, per_template_gen2) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(0);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/per_template_gen2.yaml";
+ po->out_file ="exp/sfr_4";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+/*
+TEST_F(basic, sfr5) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(0);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sfr5.yaml";
+ po->out_file ="exp/sfr_5";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+*/
+
TEST_F(basic, ipv6_convert) {
diff --git a/src/bp_sim.cpp b/src/bp_sim.cpp
index 8a8bc5f9..7cbeb09d 100755
--- a/src/bp_sim.cpp
+++ b/src/bp_sim.cpp
@@ -2372,6 +2372,18 @@ void operator >> (const YAML::Node& node, CVlanYamlInfo & fi) {
void operator >> (const YAML::Node& node, CFlowYamlInfo & fi) {
node["name"] >> fi.m_name;
+
+ try {
+ node["client_pool"] >> fi.m_client_pool_name;
+ } catch ( const std::exception& e ) {
+ fi.m_client_pool_name = "default";
+ }
+ try {
+ node["server_pool"] >> fi.m_server_pool_name;
+ } catch ( const std::exception& e ) {
+ fi.m_server_pool_name = "default";
+ }
+
node["cps"] >> fi.m_k_cps;
fi.m_k_cps = fi.m_k_cps/1000.0;
double t;
@@ -2470,7 +2482,7 @@ void operator >> (const YAML::Node& node, CFlowsYamlInfo & flows_info) {
node["generator"] >> flows_info.m_tuple_gen;
flows_info.m_tuple_gen_was_set =true;
} catch ( const std::exception& e ) {
- flows_info.m_tuple_gen_was_set =false;
+ flows_info.m_tuple_gen_was_set =false;
}
@@ -2587,6 +2599,10 @@ void operator >> (const YAML::Node& node, CFlowsYamlInfo & flows_info) {
for(unsigned i=0;i<cap_info.size();i++) {
CFlowYamlInfo fi;
cap_info[i] >> fi;
+ fi.m_client_pool_idx =
+ flows_info.m_tuple_gen.get_client_pool_id(fi.m_client_pool_name);
+ fi.m_server_pool_idx =
+ flows_info.m_tuple_gen.get_server_pool_id(fi.m_server_pool_name);
flows_info.m_vec.push_back(fi);
}
}
@@ -2599,7 +2615,6 @@ void CVlanYamlInfo::Dump(FILE *fd){
void CFlowsYamlInfo::Dump(FILE *fd){
fprintf(fd," duration : %f sec \n",m_duration_sec);
- m_tuple_gen.Dump(fd);
fprintf(fd,"\n");
if (CGlobalInfo::is_ipv6_enable()) {
@@ -2703,6 +2718,38 @@ bool CFlowsYamlInfo::verify_correctness(uint32_t num_threads) {
if ( !m_tuple_gen.is_valid(num_threads,is_any_plugin_configured()) ){
return (false);
}
+ /* patch defect trex-54 */
+ if ( is_any_plugin_configured() ){
+ /* Plugin is configured. In that case, due to a limitation (defect trex-54),
+ the number of servers should be bigger than the number of clients */
+
+ int i;
+ for (i=0; i<(int)m_vec.size(); i++) {
+ CFlowYamlInfo * lp=&m_vec[i];
+ if ( lp->m_plugin_id ){
+ uint8_t c_idx = lp->m_client_pool_idx;
+ uint8_t s_idx = lp->m_server_pool_idx;
+ uint32_t total_clients = m_tuple_gen.m_client_pool[c_idx].getTotalIps();
+ uint32_t total_servers = m_tuple_gen.m_server_pool[s_idx].getTotalIps();
+ if ( total_servers < total_clients ){
+ printf(" Plugin is configured. in that case due to a limitation ( defect trex-54 ) \n");
+ printf(" the number of servers should be bigger than number of clients \n");
+ printf(" client_pool_name : %s \n", lp->m_client_pool_name.c_str());
+ printf(" server_pool_name : %s \n", lp->m_server_pool_name.c_str());
+ return (false);
+ }
+ uint32_t mul = total_servers / total_clients;
+ uint32_t new_server_num = mul * total_clients;
+ if ( new_server_num != total_servers ) {
+ printf(" Plugin is configured. in that case due to a limitation ( defect trex-54 ) \n");
+ printf(" the number of servers should be exact multiplication of the number of clients \n");
+ printf(" client_pool_name : %s clients %d \n", lp->m_client_pool_name.c_str(),total_clients);
+ printf(" server_pool_name : %s servers %d should be %d \n", lp->m_server_pool_name.c_str(),total_servers,new_server_num);
+ return (false);
+ }
+ }
+ }
+ }
return(true);
}
@@ -2815,18 +2862,20 @@ bool CFlowGeneratorRecPerThread::Create(CTupleGeneratorSmart * global_gen,
CFlowsYamlInfo * yaml_flow_info,
CCapFileFlowInfo * flow_info,
uint16_t _id,
- uint32_t thread_id ){
+ uint32_t thread_id){
BP_ASSERT(info);
m_thread_id =thread_id ;
- tuple_gen.Create(global_gen);
- CTupleGenYamlInfo * lpt=&yaml_flow_info->m_tuple_gen;
+ tuple_gen.Create(global_gen, info->m_client_pool_idx,
+ info->m_server_pool_idx);
+ CTupleGenYamlInfo * lpt;
+ lpt = &yaml_flow_info->m_tuple_gen;
tuple_gen.SetSingleServer(info->m_one_app_server,
info->m_server_addr,
getDualPortId(thread_id),
- lpt->m_dual_interface_mask
+ lpt->m_client_pool[info->m_client_pool_idx].getDualMask()
);
tuple_gen.SetW(info->m_w);
@@ -3128,25 +3177,39 @@ bool CFlowGenListPerThread::Create(uint32_t thread_id,
/* split the clients to threads */
CTupleGenYamlInfo * tuple_gen = &m_flow_list->m_yaml_info.m_tuple_gen;
+ m_smart_gen.Create(0,m_thread_id,m_flow_list->is_mac_info_configured);
+
/* split the clients to threads using the mask */
- CClientPortion portion;
- split_clients(m_thread_id,
- m_max_threads,
- getDualPortId(),
- *tuple_gen,
+ CIpPortion portion;
+ for (int i=0;i<tuple_gen->m_client_pool.size();i++) {
+ split_ips(m_thread_id, m_max_threads, getDualPortId(),
+ tuple_gen->m_client_pool[i],
portion);
- init_from_global(portion);
- m_smart_gen.Create(0,m_thread_id,
- cdSEQ_DIST,
- portion.m_client_start,
- portion.m_client_end,
- portion.m_server_start,
- portion.m_server_end,
- get_longest_flow(),
- get_total_kcps()*1000,
- m_flow_list);
+ m_smart_gen.add_client_pool(tuple_gen->m_client_pool[i].m_dist,
+ portion.m_ip_start,
+ portion.m_ip_end,
+ get_longest_flow(i,true),
+ get_total_kcps(i,true)*1000,
+ m_flow_list,
+ tuple_gen->m_client_pool[i].m_tcp_aging_sec,
+ tuple_gen->m_client_pool[i].m_udp_aging_sec
+ );
+ }
+ for (int i=0;i<tuple_gen->m_server_pool.size();i++) {
+ split_ips(m_thread_id, m_max_threads, getDualPortId(),
+ tuple_gen->m_server_pool[i],
+ portion);
+ m_smart_gen.add_server_pool(tuple_gen->m_server_pool[i].m_dist,
+ portion.m_ip_start,
+ portion.m_ip_end,
+ get_longest_flow(i,false),
+ get_total_kcps(i,false)*1000,
+ tuple_gen->m_server_pool[i].m_is_bundling);
+ }
+
+ init_from_global(portion);
CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
@@ -3163,7 +3226,8 @@ FORCE_NO_INLINE void CFlowGenListPerThread::handler_defer_job(CGenNode *p){
CGenNodeDeferPort * defer=(CGenNodeDeferPort *)p;
int i;
for (i=0; i<defer->m_cnt; i++) {
- m_smart_gen.FreePort(defer->m_clients[i],defer->m_ports[i]);
+ m_smart_gen.FreePort(defer->m_pool_idx[i],
+ defer->m_clients[i],defer->m_ports[i]);
}
}
@@ -3183,32 +3247,34 @@ FORCE_NO_INLINE void CFlowGenListPerThread::handler_defer_job_flush(void){
void CFlowGenListPerThread::defer_client_port_free(bool is_tcp,
- uint32_t c_ip,
- uint16_t port){
- /* free is not required in this case */
- if (!m_smart_gen.IsFreePortRequired() ){
+ uint32_t c_idx,
+ uint16_t port,
+ uint8_t c_pool_idx,
+ CTupleGeneratorSmart * gen){
+ /* free is not required in this case */
+ if (!gen->IsFreePortRequired(c_pool_idx) ){
return;
}
CGenNodeDeferPort * defer;
if (is_tcp) {
- if (CGlobalInfo::m_options.m_tcp_aging==0) {
- m_smart_gen.FreePort(c_ip,port);
+ if (gen->get_tcp_aging(c_pool_idx)==0) {
+ gen->FreePort(c_pool_idx,c_idx,port);
return;
}
defer=get_tcp_defer();
}else{
- if (CGlobalInfo::m_options.m_udp_aging==0) {
- m_smart_gen.FreePort(c_ip,port);
+ if (gen->get_udp_aging(c_pool_idx)==0) {
+ gen->FreePort(c_pool_idx, c_idx,port);
return;
}
defer=get_udp_defer();
}
- if ( defer->add_client(c_ip,port) ){
+ if ( defer->add_client(c_pool_idx, c_idx,port) ){
if (is_tcp) {
- m_node_gen.schedule_node((CGenNode *)defer,CGlobalInfo::m_options.m_tcp_aging);
+ m_node_gen.schedule_node((CGenNode *)defer,gen->get_tcp_aging(c_pool_idx));
m_tcp_dpc=0;
}else{
- m_node_gen.schedule_node((CGenNode *)defer,CGlobalInfo::m_options.m_udp_aging);
+ m_node_gen.schedule_node((CGenNode *)defer,gen->get_udp_aging(c_pool_idx));
m_udp_dpc=0;
}
}
@@ -3216,13 +3282,15 @@ void CFlowGenListPerThread::defer_client_port_free(bool is_tcp,
void CFlowGenListPerThread::defer_client_port_free(CGenNode *p){
- defer_client_port_free(p->m_pkt_info->m_pkt_indication.m_desc.IsTcp(),p->m_src_ip,p->m_src_port);
+ defer_client_port_free(p->m_pkt_info->m_pkt_indication.m_desc.IsTcp(),
+ p->m_src_idx,p->m_src_port,p->m_template_info->m_client_pool_idx,
+ p->m_tuple_gen);
}
/* copy all info from global and div by num of threads */
-void CFlowGenListPerThread::init_from_global(CClientPortion& portion){
+void CFlowGenListPerThread::init_from_global(CIpPortion& portion){
/* copy generator , it is the same */
m_yaml_info =m_flow_list->m_yaml_info;
@@ -3245,7 +3313,10 @@ void CFlowGenListPerThread::init_from_global(CClientPortion& portion){
yaml_info->m_one_app_server = lp->m_info->m_one_app_server;
yaml_info->m_server_addr = lp->m_info->m_server_addr;
yaml_info->m_dpPkt =lp->m_info->m_dpPkt;
-
+ yaml_info->m_server_pool_idx=lp->m_info->m_server_pool_idx;
+ yaml_info->m_client_pool_idx=lp->m_info->m_client_pool_idx;
+ yaml_info->m_server_pool_name=lp->m_info->m_server_pool_name;
+ yaml_info->m_client_pool_name=lp->m_info->m_client_pool_name;
/* fix this */
assert(m_max_threads>0);
if ( m_max_threads == 1 ) {
@@ -3267,8 +3338,7 @@ void CFlowGenListPerThread::init_from_global(CClientPortion& portion){
yaml_info->m_restart_time = ( yaml_info->m_limit_was_set ) ?
(yaml_info->m_limit / (yaml_info->m_k_cps * 1000.0)) : 0;
-
- lp_thread->Create( &m_smart_gen,
+ lp_thread->Create(&m_smart_gen,
yaml_info,
lp->m_flows_info,
&lp->m_flow_info,
@@ -3303,6 +3373,12 @@ void CFlowGenListPerThread::Clean(){
int i;
for (i=0; i<(int)m_cap_gen.size(); i++) {
CFlowGeneratorRecPerThread * lp=m_cap_gen[i];
+ if (lp->m_tuple_gen_was_set) {
+ CTupleGeneratorSmart *gen;
+ gen = lp->tuple_gen.get_gen();
+ gen->Delete();
+ delete gen;
+ }
lp->Delete();
delete lp;
}
@@ -3559,6 +3635,27 @@ uint32_t CFlowGenListPerThread::getDualPortId(){
return ( ::getDualPortId(m_thread_id) );
}
+double CFlowGenListPerThread::get_longest_flow(uint8_t pool_idx, bool is_client){
+ int i;
+ double longest_flow = 0.0;
+ for (i=0;i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRecPerThread * lp=m_cap_gen[i];
+ if (is_client &&
+ lp->m_info->m_client_pool_idx != pool_idx)
+ continue;
+ if (!is_client &&
+ lp->m_info->m_server_pool_idx != pool_idx)
+ continue;
+ double tmp_len;
+ tmp_len = lp->m_flow_info->get_cap_file_length_sec();
+ if (longest_flow < tmp_len ) {
+ longest_flow = tmp_len;
+ }
+ }
+ return longest_flow;
+}
+
+
double CFlowGenListPerThread::get_longest_flow(){
int i;
double longest_flow = 0.0;
@@ -3573,6 +3670,22 @@ double CFlowGenListPerThread::get_longest_flow(){
return longest_flow;
}
+double CFlowGenListPerThread::get_total_kcps(uint8_t pool_idx, bool is_client){
+ int i;
+ double total=0.0;
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRecPerThread * lp=m_cap_gen[i];
+ if (is_client &&
+ lp->m_info->m_client_pool_idx != pool_idx)
+ continue;
+ if (!is_client &&
+ lp->m_info->m_server_pool_idx != pool_idx)
+ continue;
+ total +=lp->m_info->m_k_cps;
+ }
+ return (total);
+}
+
double CFlowGenListPerThread::get_total_kcps(){
int i;
double total=0.0;
@@ -3881,9 +3994,6 @@ int CFlowGenList::load_from_yaml(std::string file_name,
CGlobalInfo::m_options.m_vlan_port[0] = m_yaml_info.m_vlan_info.m_vlan_per_port[0];
CGlobalInfo::m_options.m_vlan_port[1] = m_yaml_info.m_vlan_info.m_vlan_per_port[1];
CGlobalInfo::m_options.preview.set_mac_ip_overide_enable(m_yaml_info.m_mac_replace_by_ip);
- CGlobalInfo::m_options.m_tcp_aging = m_yaml_info.m_tuple_gen.m_tcp_aging_sec;
- CGlobalInfo::m_options.m_udp_aging = m_yaml_info.m_tuple_gen.m_udp_aging_sec;
-
if ( m_yaml_info.m_mac_base.size() != 6 ){
printf(" mac addr is not valid \n");
@@ -5431,8 +5541,11 @@ void CPluginCallbackSimple::on_node_last(uint8_t plugin_id,CGenNode * node){
/* free the ports */
CFlowGenListPerThread * flow_gen=(CFlowGenListPerThread *) lpP->m_gen;
bool is_tcp=node->m_pkt_info->m_pkt_indication.m_desc.IsTcp();
- flow_gen->defer_client_port_free(is_tcp,node->m_src_ip,lpP->rtp_client_0);
- flow_gen->defer_client_port_free(is_tcp,node->m_src_ip,lpP->rtp_client_1);
+ flow_gen->defer_client_port_free(is_tcp,node->m_src_idx,lpP->rtp_client_0,
+ node->m_template_info->m_client_pool_idx,node->m_tuple_gen);
+ flow_gen->defer_client_port_free(is_tcp,node->m_src_idx,lpP->rtp_client_1,
+ node->m_template_info->m_client_pool_idx, node->m_tuple_gen);
+
assert(lpP);
delete lpP;
node->m_plugin_info=0;
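
The bp_sim.cpp hunks above move port release from the single global client range onto the tuple generator attached to each node: the free path is now keyed by client index and pool index, aging comes from the pool instead of CGlobalInfo::m_options, and the per-thread aggregates gain pool-filtered overloads. A minimal sketch of how the new overloads could be combined, assuming a CFlowGenListPerThread reference and contiguous client pool indices (the helper name and the pool-count argument are illustrative, not part of the patch):

/* hypothetical helper: recover the pool-agnostic total from the new per-pool overload */
static double total_client_kcps(CFlowGenListPerThread &thread, uint8_t num_client_pools) {
    double total = 0.0;
    for (uint8_t pool = 0; pool < num_client_pools; pool++) {
        total += thread.get_total_kcps(pool, true /* is_client */);
    }
    return total;   /* expected to match thread.get_total_kcps() */
}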
diff --git a/src/bp_sim.h b/src/bp_sim.h
index 7d659d1c..29b9a724 100755
--- a/src/bp_sim.h
+++ b/src/bp_sim.h
@@ -716,8 +716,6 @@ public:
uint16_t m_vlan_port[2]; /* vlan value */
uint16_t        m_src_ipv6[6];  /* Most significant 96 bits */
uint16_t        m_dst_ipv6[6];  /* Most significant 96 bits */
- uint16_t m_tcp_aging;
- uint16_t m_udp_aging;
uint32_t m_latency_rate; /* pkt/sec for each thread/port zero disable */
uint32_t m_latency_mask;
@@ -1249,10 +1247,14 @@ struct CFlowYamlInfo {
CFlowYamlInfo(){
m_dpPkt=0;
m_server_addr=0;
+ m_client_pool_idx = 0;
+ m_server_pool_idx = 0;
m_cap_mode=false;
}
std::string m_name;
+ std::string m_client_pool_name;
+ std::string m_server_pool_name;
double m_k_cps; //k CPS
double m_restart_time; /* restart time of this template */
dsec_t m_ipg_sec; // ipg in sec
@@ -1262,6 +1264,8 @@ struct CFlowYamlInfo {
uint32_t m_limit;
uint32_t m_flowcnt;
uint8_t m_plugin_id; /* 0 - default , 1 - RTSP160 , 2- RTSP250 */
+ uint8_t m_client_pool_idx;
+ uint8_t m_server_pool_idx;
bool m_one_app_server;
uint32_t m_server_addr;
bool m_one_app_server_was_set;
@@ -1372,14 +1376,17 @@ public:
//private:
+ CTupleGeneratorSmart *m_tuple_gen;
// cache line 1 - 64bytes waste of space !
uint32_t m_nat_external_ipv4; /* client */
uint32_t m_nat_external_ipv4_server;
uint16_t m_nat_external_port;
- uint16_t m_nat_pad;
+ uint16_t m_nat_pad[3];
mac_addr_align_t m_src_mac;
- uint32_t m_end_of_cache_line[11];
+ uint32_t m_src_idx;
+ uint32_t m_dest_idx;
+ uint32_t m_end_of_cache_line[6];
public:
bool operator <(const CGenNode * rsh ) const {
@@ -1585,7 +1592,7 @@ public:
#if __x86_64__
/* size of 64 bytes */
- #define DEFER_CLIENTS_NUM (18)
+ #define DEFER_CLIENTS_NUM (16)
#else
#define DEFER_CLIENTS_NUM (16)
#endif
@@ -1601,6 +1608,7 @@ struct CGenNodeDeferPort {
uint32_t m_clients[DEFER_CLIENTS_NUM];
uint16_t m_ports[DEFER_CLIENTS_NUM];
+ uint8_t m_pool_idx[DEFER_CLIENTS_NUM];
public:
void init(void){
m_type=CGenNode::FLOW_DEFER_PORT_RELEASE;
@@ -1608,10 +1616,11 @@ public:
}
/* return true if object is full */
- bool add_client(uint32_t client,
+ bool add_client(uint8_t pool_idx, uint32_t client,
uint16_t port){
m_clients[m_cnt]=client;
m_ports[m_cnt]=port;
+ m_pool_idx[m_cnt] = pool_idx;
m_cnt++;
if ( m_cnt == DEFER_CLIENTS_NUM ) {
return (true);
@@ -3155,6 +3164,7 @@ public:
CPolicer m_policer;
uint16_t m_id ;
uint32_t m_thread_id;
+ bool m_tuple_gen_was_set;
} __rte_cache_aligned;
@@ -3287,8 +3297,10 @@ public:
uint32_t getDualPortId();
public :
double get_total_kcps();
+ double get_total_kcps(uint8_t pool_idx, bool is_client);
double get_delta_flow_is_sec();
double get_longest_flow();
+ double get_longest_flow(uint8_t pool_idx, bool is_client);
void inc_current_template(void);
int generate_flows_roundrobin(bool *done);
int reschedule_flow(CGenNode *node);
@@ -3320,9 +3332,10 @@ private:
void terminate_nat_flows(CGenNode *node);
- void init_from_global(CClientPortion &);
+ void init_from_global(CIpPortion &);
void defer_client_port_free(CGenNode *p);
- void defer_client_port_free(bool is_tcp,uint32_t c_ip,uint16_t port);
+ void defer_client_port_free(bool is_tcp,uint32_t c_ip,uint16_t port,
+ uint8_t pool_idx, CTupleGeneratorSmart*gen);
FORCE_NO_INLINE void handler_defer_job(CGenNode *p);
@@ -3417,12 +3430,6 @@ inline void CFlowGenListPerThread::free_last_flow_node(CGenNode *p){
free_node( p);
}
-
-typedef struct mac_mapping_ {
- mac_addr_align_t mac;
- uint32_t ip;
-} mac_mapping_t;
-
class CFlowGenList {
public:
@@ -3449,10 +3456,6 @@ public:
double get_total_pps();
double get_total_tx_bps();
uint32_t get_total_repeat_flows();
- bool is_ip_legal(uint32_t ip) {
- return (ip >= m_yaml_info.m_tuple_gen.m_clients_ip_start &&
- ip <= m_yaml_info.m_tuple_gen.m_clients_ip_end );
- }
double get_delta_flow_is_sec();
public:
std::vector<CFlowGeneratorRec *> m_cap_gen; /* global info */
@@ -3493,8 +3496,11 @@ inline void CCapFileFlowInfo::generate_flow(CTupleTemplateGeneratorSmart * tup
node->m_flow_info = this;
node->m_flags=0;
node->m_template_info =template_info;
+ node->m_tuple_gen = tuple_gen->get_gen();
node->m_src_ip= tuple.getClient();
node->m_dest_ip = tuple.getServer();
+ node->m_src_idx = tuple.getClientId();
+ node->m_dest_idx = tuple.getServerId();
node->m_src_port = tuple.getClientPort();
memcpy(&node->m_src_mac,
tuple.getClientMac(),
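
In bp_sim.h the generator node now records the chosen client/server indices and the generator itself (m_src_idx, m_dest_idx, m_tuple_gen), and CGenNodeDeferPort keeps a pool index per deferred entry (DEFER_CLIENTS_NUM drops from 18 to 16 on x86_64 to make room). A sketch of how a flush over the deferred entries could look with the extra array; the real logic lives in handler_defer_job_flush(), which is not part of the hunks shown, so treat this as an assumption about shape only:

/* sketch: release every deferred (pool, client index, port) triple back to the generator */
static void flush_defer_entries(CGenNodeDeferPort *defer, CTupleGeneratorSmart *gen) {
    for (uint32_t i = 0; i < defer->m_cnt; i++) {
        gen->FreePort(defer->m_pool_idx[i], defer->m_clients[i], defer->m_ports[i]);
    }
    defer->init();   /* reset the node for reuse */
}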
diff --git a/src/gtest/rpc_test.cpp b/src/gtest/rpc_test.cpp
index 168ee936..38d34320 100644
--- a/src/gtest/rpc_test.cpp
+++ b/src/gtest/rpc_test.cpp
@@ -353,7 +353,7 @@ TEST_F(RpcTest, get_version) {
EXPECT_TRUE(response["result"] != Json::nullValue);
EXPECT_TRUE(response["result"]["built_by"] == "MOCK");
- EXPECT_TRUE(response["result"]["version"] == "v0.0");
+ EXPECT_TRUE(response["result"]["version"] == "v1.75");
}
/* get system info */
diff --git a/src/gtest/tuple_gen_test.cpp b/src/gtest/tuple_gen_test.cpp
index 6419ced1..8791b67d 100755
--- a/src/gtest/tuple_gen_test.cpp
+++ b/src/gtest/tuple_gen_test.cpp
@@ -153,12 +153,12 @@ TEST(CClientInfoLTest, get_new_free_port) {
-/* UIT of CTupleGeneratorSmart */
-TEST(tuple_gen,GenerateTuple) {
- CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x10000f01, 0x30000001, 0x40000001,
- MAX_PORT, MAX_PORT);
+/* UIT of CClientPool, using CClientInfoL */
+TEST(tuple_gen,clientPoolL) {
+ CClientPool gen;
+ gen.Create(cdSEQ_DIST,
+ 0x10000001, 0x10000f01, 64000,1,NULL,false,
+ 0,0);
CTupleBase result;
uint32_t result_src;
uint32_t result_dest;
@@ -166,13 +166,11 @@ TEST(tuple_gen,GenerateTuple) {
for(int i=0;i<10;i++) {
gen.GenerateTuple(result);
- printf(" C:%x S:%x P:%d \n",result.getClient(),result.getServer(),result.getClientPort());
+ printf(" C:%x P:%d \n",result.getClient(),result.getClientPort());
result_src = result.getClient();
- result_dest = result.getServer();
result_port = result.getClientPort();
EXPECT_EQ(result_src, (uint32_t)(0x10000001+i));
- EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i)) ) );
EXPECT_EQ(result_port, 1024);
}
@@ -180,164 +178,229 @@ TEST(tuple_gen,GenerateTuple) {
// EXPECT_EQ((size_t)0, gen.m_clients.size());
}
-TEST(tuple_gen,GenerateTuple2) {
- CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- MAX_PORT, MAX_PORT);
+/* UIT of CClientPool, using CClientInfo */
+TEST(tuple_gen,clientPool) {
+ CClientPool gen;
+ gen.Create(cdSEQ_DIST,
+ 0x10000001, 0x10000021, 64000,1000,NULL,false,
+ 0,0);
CTupleBase result;
uint32_t result_src;
uint32_t result_dest;
uint16_t result_port;
- for(int i=0;i<200;i++) {
+ for(int i=0;i<10;i++) {
gen.GenerateTuple(result);
- // gen.Dump(stdout);
- // fprintf(stdout, "i:%d\n",i);
+ printf(" C:%x P:%d \n",result.getClient(),result.getClientPort());
+
result_src = result.getClient();
- result_dest = result.getServer();
result_port = result.getClientPort();
- EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
- EXPECT_EQ(result_dest, (uint32_t)((0x30000001+i) ) );
- EXPECT_EQ(result_port, 1024+i/15);
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i));
+ EXPECT_EQ(result_port, 1024);
}
gen.Delete();
// EXPECT_EQ((size_t)0, gen.m_clients.size());
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- MAX_PORT,MAX_PORT);
- for(int i=0;i<200;i++) {
+}
+
+/* UIT of CServerPool */
+TEST(tuple_gen,serverPool) {
+ CServerPool gen;
+ gen.Create(cdSEQ_DIST,
+ 0x30000001, 0x30000ff1, 64000,10);
+ CTupleBase result;
+ uint32_t result_dest;
+
+ for(int i=0;i<10;i++) {
gen.GenerateTuple(result);
- // gen.Dump(stdout);
- // fprintf(stdout, "i:%d\n",i);
- result_src = result.getClient();
+ printf(" S:%x \n",result.getServer());
+
result_dest = result.getServer();
- result_port = result.getClientPort();
- EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i)) ) );
- EXPECT_EQ(result_port, 1024+i/15);
}
+ gen.Delete();
+ gen.Create(cdSEQ_DIST,
+ 0x30000001, 0x30000003, 64000,1000);
-}
+ for(int i=0;i<10;i++) {
+ gen.GenerateTuple(result);
+ printf(" S:%x \n",result.getServer());
-TEST(tuple_gen,GenerateTupleMac) {
- CFlowGenList fl;
- fl.Create();
- fl.load_from_mac_file("avl/mac_uit.yaml");
- fl.m_yaml_info.m_tuple_gen.m_clients_ip_start = 0x10000001;
- fl.m_yaml_info.m_tuple_gen.m_clients_ip_end = 0x1000000f;
+ result_dest = result.getServer();
+ EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i%3)) ) );
+ }
+ gen.Delete();
+ // EXPECT_EQ((size_t)0, gen.m_clients.size());
+}
- CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- MAX_PORT, MAX_PORT, &fl);
+TEST(tuple_gen,servePoolSim) {
+ CServerPoolSimple gen;
+ gen.Create(cdSEQ_DIST,
+ 0x30000001, 0x40000001, 64000,10);
CTupleBase result;
- uint32_t result_src;
uint32_t result_dest;
- uint16_t result_port;
for(int i=0;i<10;i++) {
gen.GenerateTuple(result);
- printf(" C:%x S:%x P:%d \n",result.getClient(),result.getServer(),result.getClientPort());
+ printf(" S:%x \n",result.getServer());
- result_src = result.getClient();
result_dest = result.getServer();
- result_port = result.getClientPort();
- EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%2));
EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i)) ) );
- EXPECT_EQ(result_port, 1024+i/2);
}
gen.Delete();
-// EXPECT_EQ((size_t)0, gen.m_clients.size());
+
+ gen.Create(cdSEQ_DIST,
+ 0x30000001, 0x30000003, 64000,1000);
+
+ for(int i=0;i<10;i++) {
+ gen.GenerateTuple(result);
+ printf(" S:%x \n",result.getServer());
+
+ result_dest = result.getServer();
+ EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i%3)) ) );
+ }
+
+ gen.Delete();
+ // EXPECT_EQ((size_t)0, gen.m_clients.size());
}
-TEST(tuple_gen,GenerateTupleEx) {
- CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- MAX_PORT, MAX_PORT);
+
+TEST(tuple_gen,GenerateTuple2) {
+ CClientPool c_gen;
+ CClientPool c_gen_2;
+ c_gen.Create(cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 64000,4,NULL,false,
+ 0,0);
+ CServerPool s_gen;
+ CServerPool s_gen_2;
+ s_gen.Create(cdSEQ_DIST,
+ 0x30000001, 0x30000ff1, 64000,10);
CTupleBase result;
+
uint32_t result_src;
uint32_t result_dest;
uint16_t result_port;
- uint16_t ex_port[2];
- for(int i=0;i<20;i++) {
- gen.GenerateTupleEx(result,2,ex_port);
- fprintf(stdout, "i:%d\n",i);
+ for(int i=0;i<200;i++) {
+ c_gen.GenerateTuple(result);
+ s_gen.GenerateTuple(result);
+ // gen.Dump(stdout);
+ // fprintf(stdout, "i:%d\n",i);
result_src = result.getClient();
result_dest = result.getServer();
result_port = result.getClientPort();
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
+ EXPECT_EQ(result_dest, (uint32_t)((0x30000001+i) ) );
+ EXPECT_EQ(result_port, 1024+i/15);
+ }
+ s_gen.Delete();
+ c_gen.Delete();
+// EXPECT_EQ((size_t)0, gen.m_clients.size());
+ c_gen.Create(cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 64000,400,NULL,false,
+ 0,0);
+ s_gen.Create(cdSEQ_DIST,
+ 0x30000001, 0x30000001, 64000,10);
+ for(int i=0;i<200;i++) {
+ s_gen.GenerateTuple(result);
+ c_gen.GenerateTuple(result);
+ // gen.Dump(stdout);
+ // fprintf(stdout, "i:%d\n",i);
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
- EXPECT_EQ(result_dest, (uint32_t)(((0x30000001+i)) ));
+ EXPECT_EQ(result_dest, (uint32_t) (((0x30000001)) ) );
+ EXPECT_EQ(result_port, 1024+i/15);
+ }
+
+ s_gen.Delete();
+ c_gen.Delete();
+
+
+}
+
+TEST(tuple_gen,GenerateTupleMac) {
+ CFlowGenList fl;
+ fl.Create();
+ fl.load_from_mac_file("avl/mac_uit.yaml");
- EXPECT_EQ(result_port, 1024+(i/15)*3);
- EXPECT_EQ(ex_port[0], 1025+(i/15)*3);
- EXPECT_EQ(ex_port[1], 1026+(i/15)*3);
+ CClientPool gen;
+ gen.Create(cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 64000,2, &fl,true,0,0);
+ CTupleBase result;
+ uint32_t result_src;
+ uint16_t result_port;
+ mac_addr_align_t* result_mac;
+ for(int i=0;i<10;i++) {
+ gen.GenerateTuple(result);
+ printf(" C:%x P:%d \n",result.getClient(),result.getClientPort());
+
+ result_src = result.getClient();
+ result_port = result.getClientPort();
+ result_mac = result.getClientMac();
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%2));
+ EXPECT_EQ(result_port, 1024+i/2);
+ if (i%2==0)
+ EXPECT_EQ(result_mac->mac[3], 5);
+ else
+ EXPECT_EQ(result_mac->mac[3], 1);
}
gen.Delete();
+// EXPECT_EQ((size_t)0, gen.m_clients.size());
}
-TEST(tuple_gen,split1) {
- CClientPortion portion;
- CTupleGenYamlInfo fi;
- fi.m_clients_ip_start =0x10000000;
- fi.m_clients_ip_end =0x100000ff;
+TEST(tuple_gen,split1) {
+ CIpPortion portion;
- fi.m_servers_ip_start =0x20000000;
- fi.m_servers_ip_end =0x200000ff;
+ CTupleGenPoolYaml fi;
+ fi.m_ip_start =0x10000000;
+ fi.m_ip_end =0x100000ff;
fi.m_dual_interface_mask =0x01000000;
- split_clients(0,
+ split_ips(0,
1,
0,
fi,
portion);
- EXPECT_EQ(portion.m_client_start, (uint32_t)(0x10000000));
- EXPECT_EQ(portion.m_client_end, (uint32_t)(0x100000ff ));
- EXPECT_EQ(portion.m_server_start , (uint32_t)(0x20000000));
- EXPECT_EQ(portion.m_server_end , (uint32_t)(0x200000ff));
- printf(" %x %x %x %x \n",portion.m_client_start,portion.m_client_end,portion.m_server_start,portion.m_server_end);
+ EXPECT_EQ(portion.m_ip_start, (uint32_t)(0x10000000));
+ EXPECT_EQ(portion.m_ip_end, (uint32_t)(0x100000ff ));
+ printf(" %x %x \n",portion.m_ip_start,portion.m_ip_end);
- split_clients(2,
+ split_ips(2,
4,
1,
fi,
portion);
- EXPECT_EQ(portion.m_client_start, (uint32_t)(0x11000080));
- EXPECT_EQ(portion.m_client_end, (uint32_t)(0x110000bf ));
- EXPECT_EQ(portion.m_server_start , (uint32_t)(0x21000080));
- EXPECT_EQ(portion.m_server_end , (uint32_t)(0x210000bf));
- printf(" %x %x %x %x \n",portion.m_client_start,portion.m_client_end,portion.m_server_start,portion.m_server_end);
+ EXPECT_EQ(portion.m_ip_start, (uint32_t)(0x11000080));
+ EXPECT_EQ(portion.m_ip_end, (uint32_t)(0x110000bf ));
+ printf(" %x %x \n",portion.m_ip_start,portion.m_ip_end);
}
TEST(tuple_gen,split2) {
- CClientPortion portion;
+ CIpPortion portion;
- CTupleGenYamlInfo fi;
- fi.m_clients_ip_start =0x10000000;
- fi.m_clients_ip_end =0x100001ff;
+ CTupleGenPoolYaml fi;
- fi.m_servers_ip_start =0x20000000;
- fi.m_servers_ip_end =0x200001ff;
+ fi.m_ip_start =0x20000000;
+ fi.m_ip_end =0x200001ff;
fi.m_dual_interface_mask =0x01000000;
int i;
for (i=0; i<8; i++) {
- split_clients(i,
+ split_ips(i,
8,
(i&1),
fi,
@@ -345,31 +408,23 @@ TEST(tuple_gen,split2) {
if ( (i&1) ) {
- EXPECT_EQ(portion.m_client_start, (uint32_t)(0x11000000)+(0x40*i));
- EXPECT_EQ(portion.m_client_end, (uint32_t)(0x11000000 +(0x40*i+0x40-1)));
- EXPECT_EQ(portion.m_server_start , (uint32_t)(0x21000000)+ (0x40*i) );
- EXPECT_EQ(portion.m_server_end , (uint32_t)(0x21000000)+(0x40*i+0x40-1) );
+ EXPECT_EQ(portion.m_ip_start , (uint32_t)(0x21000000)+ (0x40*i) );
+ EXPECT_EQ(portion.m_ip_end , (uint32_t)(0x21000000)+(0x40*i+0x40-1) );
}else{
- EXPECT_EQ(portion.m_client_start, (uint32_t)(0x10000000)+ (0x40*i) );
- EXPECT_EQ(portion.m_client_end, (uint32_t)(0x10000000 + (0x40*i+0x40-1) ) );
- EXPECT_EQ(portion.m_server_start , (uint32_t)(0x20000000) + (0x40*i) );
- EXPECT_EQ(portion.m_server_end , (uint32_t)(0x20000000) + (0x40*i+0x40-1) );
+ EXPECT_EQ(portion.m_ip_start , (uint32_t)(0x20000000) + (0x40*i) );
+ EXPECT_EQ(portion.m_ip_end , (uint32_t)(0x20000000) + (0x40*i+0x40-1) );
}
- printf(" %x %x %x %x \n",portion.m_client_start,portion.m_client_end,portion.m_server_start,portion.m_server_end);
+ printf(" %x %x \n",portion.m_ip_start,portion.m_ip_end);
}
}
-
-
-
-
TEST(tuple_gen,template1) {
CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- MAX_PORT, MAX_PORT);
+ gen.Create(1, 1);
+ gen.add_client_pool(cdSEQ_DIST,0x10000001,0x1000000f,64000,4,NULL,0,0);
+ gen.add_server_pool(cdSEQ_DIST,0x30000001,0x40000001,64000,4,false);
CTupleTemplateGeneratorSmart template_1;
- template_1.Create(&gen);
+ template_1.Create(&gen,0,0);
template_1.SetSingleServer(true,0x12121212,0,0);
CTupleBase result;
@@ -391,11 +446,11 @@ TEST(tuple_gen,template1) {
TEST(tuple_gen,template2) {
CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- MAX_PORT, MAX_PORT);
+ gen.Create(1, 1);
+ gen.add_client_pool(cdSEQ_DIST,0x10000001,0x1000000f,64000,4,NULL,0,0);
+ gen.add_server_pool(cdSEQ_DIST,0x30000001,0x40000001,64000,4,false);
CTupleTemplateGeneratorSmart template_1;
- template_1.Create(&gen);
+ template_1.Create(&gen,0,0);
template_1.SetW(10);
CTupleBase result;
@@ -420,11 +475,11 @@ TEST(tuple_gen,template2) {
TEST(tuple_gen,no_free) {
CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x10000001, 0x30000001, 0x300000ff,
- MAX_PORT, MAX_PORT);
+ gen.Create(1, 1);
+ gen.add_client_pool(cdSEQ_DIST,0x10000001,0x10000001,64000,4,NULL,0,0);
+ gen.add_server_pool(cdSEQ_DIST,0x30000001,0x400000ff,64000,4,false);
CTupleTemplateGeneratorSmart template_1;
- template_1.Create(&gen);
+ template_1.Create(&gen,0,0);
CTupleBase result;
@@ -445,11 +500,11 @@ TEST(tuple_gen,no_free) {
TEST(tuple_gen,try_to_free) {
CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x10000001, 0x30000001, 0x300000ff,
- MAX_PORT, MAX_PORT);
+ gen.Create(1, 1);
+ gen.add_client_pool(cdSEQ_DIST,0x10000001,0x10000001,64000,4,NULL,0,0);
+ gen.add_server_pool(cdSEQ_DIST,0x30000001,0x400000ff,64000,4,false);
CTupleTemplateGeneratorSmart template_1;
- template_1.Create(&gen);
+ template_1.Create(&gen,0,0);
CTupleBase result;
@@ -460,7 +515,7 @@ TEST(tuple_gen,try_to_free) {
uint32_t result_src = result.getClient();
uint32_t result_dest = result.getServer();
uint16_t result_port = result.getClientPort();
- gen.FreePort(result_src,result_port);
+ gen.FreePort(0,result.getClientId(),result_port);
}
    // ports are freed every iteration, so no allocation error is expected
EXPECT_FALSE((gen.getErrorAllocationCounter()>0)?true:false);
@@ -474,16 +529,18 @@ TEST(tuple_gen,try_to_free) {
/* tuple generator using CClientInfoL*/
TEST(tuple_gen_2,GenerateTuple) {
CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x10000f01, 0x30000001, 0x40000001,
- 0,0);
+ gen.Create(1, 1);
+ gen.add_client_pool(cdSEQ_DIST,0x10000001,0x10000f01,64000,4,NULL,0,0);
+ gen.add_server_pool(cdSEQ_DIST,0x30000001,0x40000001,64000,4,false);
+ CTupleTemplateGeneratorSmart template_1;
+ template_1.Create(&gen,0,0);
CTupleBase result;
uint32_t result_src;
uint32_t result_dest;
uint16_t result_port;
for(int i=0;i<10;i++) {
- gen.GenerateTuple(result);
+ template_1.GenerateTuple(result);
printf(" C:%x S:%x P:%d \n",result.getClient(),result.getServer(),result.getClientPort());
result_src = result.getClient();
@@ -500,16 +557,18 @@ TEST(tuple_gen_2,GenerateTuple) {
TEST(tuple_gen_2,GenerateTuple2) {
CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- 0,0);
+ gen.Create(1, 1);
+ gen.add_client_pool(cdSEQ_DIST,0x10000001,0x1000000f,64000,4,NULL,0,0);
+ gen.add_server_pool(cdSEQ_DIST,0x30000001,0x40000001,64000,4,false);
+ CTupleTemplateGeneratorSmart template_1;
+ template_1.Create(&gen,0,0);
CTupleBase result;
uint32_t result_src;
uint32_t result_dest;
uint16_t result_port;
for(int i=0;i<200;i++) {
- gen.GenerateTuple(result);
+ template_1.GenerateTuple(result);
// gen.Dump(stdout);
// fprintf(stdout, "i:%d\n",i);
result_src = result.getClient();
@@ -522,11 +581,12 @@ TEST(tuple_gen_2,GenerateTuple2) {
gen.Delete();
// EXPECT_EQ((size_t)0, gen.m_clients.size());
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- 0,0);
+ gen.Create(1, 1);
+ gen.add_client_pool(cdSEQ_DIST,0x10000001,0x1000000f,64000,4,NULL,0,0);
+ gen.add_server_pool(cdSEQ_DIST,0x30000001,0x40000001,64000,4,false);
+ template_1.Create(&gen,0,0);
for(int i=0;i<200;i++) {
- gen.GenerateTuple(result);
+ template_1.GenerateTuple(result);
// gen.Dump(stdout);
// fprintf(stdout, "i:%d\n",i);
result_src = result.getClient();
@@ -542,43 +602,13 @@ TEST(tuple_gen_2,GenerateTuple2) {
}
-
-TEST(tuple_gen_2,GenerateTupleEx) {
- CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- 0,0);
- CTupleBase result;
- uint32_t result_src;
- uint32_t result_dest;
- uint16_t result_port;
- uint16_t ex_port[2];
- for(int i=0;i<20;i++) {
-
- gen.GenerateTupleEx(result,2,ex_port);
- fprintf(stdout, "i:%d\n",i);
- result_src = result.getClient();
- result_dest = result.getServer();
- result_port = result.getClientPort();
-
- EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
- EXPECT_EQ(result_dest, (uint32_t)(((0x30000001+i)) ));
-
- EXPECT_EQ(result_port, 1024+(i/15)*3);
- EXPECT_EQ(ex_port[0], 1025+(i/15)*3);
- EXPECT_EQ(ex_port[1], 1026+(i/15)*3);
- }
-
- gen.Delete();
-}
-
TEST(tuple_gen_2,template1) {
CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- 0,0);
+ gen.Create(1, 1);
+ gen.add_client_pool(cdSEQ_DIST,0x10000001,0x1000000f,64000,4,NULL,0,0);
+ gen.add_server_pool(cdSEQ_DIST,0x30000001,0x40000001,64000,4,false);
CTupleTemplateGeneratorSmart template_1;
- template_1.Create(&gen);
+ template_1.Create(&gen,0,0);
template_1.SetSingleServer(true,0x12121212,0,0);
CTupleBase result;
@@ -601,11 +631,11 @@ TEST(tuple_gen_2,template1) {
TEST(tuple_gen_2,template2) {
CTupleGeneratorSmart gen;
- gen.Create(1, 1,cdSEQ_DIST,
- 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
- 0,0);
+ gen.Create(1, 1);
+ gen.add_client_pool(cdSEQ_DIST,0x10000001,0x1000000f,64000,4,NULL,0,0);
+ gen.add_server_pool(cdSEQ_DIST,0x30000001,0x40000001,64000,4,false);
CTupleTemplateGeneratorSmart template_1;
- template_1.Create(&gen);
+ template_1.Create(&gen,0,0);
template_1.SetW(10);
CTupleBase result;
@@ -646,45 +676,40 @@ TEST(tuple_gen_yaml,yam_reader1) {
std::cout << e.what() << "\n";
exit(-1);
}
- fi.Dump(stdout);
}
TEST(tuple_gen_yaml,yam_is_valid) {
CTupleGenYamlInfo fi;
+ CTupleGenPoolYaml c_pool;
+ CTupleGenPoolYaml s_pool;
+ fi.m_client_pool.push_back(c_pool);
+ fi.m_server_pool.push_back(s_pool);
+
+ fi.m_client_pool[0].m_ip_start = 0x10000001;
+ fi.m_client_pool[0].m_ip_end = 0x100000ff;
- fi.m_clients_ip_start = 0x10000001;
- fi.m_clients_ip_end = 0x100000ff;
-
- fi.m_servers_ip_start = 0x10000001;
- fi.m_servers_ip_end = 0x100001ff;
+ fi.m_server_pool[0].m_ip_start = 0x10000001;
+ fi.m_server_pool[0].m_ip_end = 0x100001ff;
EXPECT_EQ(fi.is_valid(8,true)?1:0, 1);
- EXPECT_EQ(fi.m_servers_ip_start, 0x10000001);
- EXPECT_EQ(fi.m_servers_ip_end, 0x100001fe);
- printf(" start:%x end:%x \n",fi.m_servers_ip_start,fi.m_servers_ip_end);
- fi.m_clients_ip_start = 0x10000001;
- fi.m_clients_ip_end = 0x100000ff;
+ fi.m_client_pool[0].m_ip_start = 0x10000001;
+ fi.m_client_pool[0].m_ip_end = 0x100000ff;
- fi.m_servers_ip_start = 0x10000001;
- fi.m_servers_ip_end = 0x10000009;
+ fi.m_server_pool[0].m_ip_start = 0x10000001;
+ fi.m_server_pool[0].m_ip_end = 0x10000007;
EXPECT_EQ(fi.is_valid(8,true)?1:0, 0);
- fi.m_clients_ip_start = 0x10000001;
- fi.m_clients_ip_end = 0x100000ff;
+ fi.m_client_pool[0].m_ip_start = 0x10000001;
+ fi.m_client_pool[0].m_ip_end = 0x100000ff;
- fi.m_servers_ip_start = 0x10000001;
- fi.m_servers_ip_end = 0x100003ff;
+ fi.m_server_pool[0].m_ip_start = 0x10000001;
+ fi.m_server_pool[0].m_ip_end = 0x100003ff;
EXPECT_EQ(fi.is_valid(8,true)?1:0, 1);
- EXPECT_EQ(fi.m_servers_ip_start, 0x10000001);
- EXPECT_EQ(fi.m_servers_ip_end, 0x100003fc);
-
- printf(" start:%x end:%x \n",fi.m_servers_ip_start,fi.m_servers_ip_end);
-
}
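
The rewritten unit tests document the new tuple generator API: CTupleGeneratorSmart is now only a container of client and server pools, pools are added explicitly, and a template binds to a (client pool, server pool) pair by index. A condensed call sequence mirroring the tuple_gen_2 tests above (the numeric ranges are the ones used by the tests and carry no other meaning):

CTupleGeneratorSmart gen;
gen.Create(1 /* id */, 1 /* thread id */);
gen.add_client_pool(cdSEQ_DIST, 0x10000001, 0x1000000f, 64000, 4, NULL, 0, 0);
gen.add_server_pool(cdSEQ_DIST, 0x30000001, 0x40000001, 64000, 4, false);

CTupleTemplateGeneratorSmart template_1;
template_1.Create(&gen, 0 /* client pool idx */, 0 /* server pool idx */);

CTupleBase tuple;
template_1.GenerateTuple(tuple);   /* fills client/server IPs, their indices and the client port */
gen.Delete();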
diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
index a748178d..820fb3fa 100755
--- a/src/main_dpdk.cpp
+++ b/src/main_dpdk.cpp
@@ -4115,11 +4115,10 @@ int CGlobalPortCfg::start_send_master(){
CTupleGenYamlInfo * tg=&m_fl.m_yaml_info.m_tuple_gen;
- m_mg.set_ip( tg->m_clients_ip_start,
- tg->m_servers_ip_start,
- tg->m_dual_interface_mask
- );
-
+ m_mg.set_ip( tg->m_client_pool[0].get_ip_start(),
+ tg->m_server_pool[0].get_ip_start(),
+ tg->m_client_pool[0].getDualMask()
+ );
if ( CGlobalInfo::m_options.preview.getVMode() >0 ) {
m_fl.DumpCsv(stdout);
diff --git a/src/rpc-server/commands/trex_rpc_cmd_general.cpp b/src/rpc-server/commands/trex_rpc_cmd_general.cpp
index 106a167a..0c9f2c49 100644
--- a/src/rpc-server/commands/trex_rpc_cmd_general.cpp
+++ b/src/rpc-server/commands/trex_rpc_cmd_general.cpp
@@ -81,7 +81,7 @@ TrexRpcCmdGetVersion::_run(const Json::Value &params, Json::Value &result) {
#else
- section["version"] = "v0.0";
+ section["version"] = "v1.75";
section["build_date"] = __DATE__;
section["build_time"] = __TIME__;
section["built_by"] = "MOCK";
@@ -177,19 +177,7 @@ TrexRpcCmdGetSysInfo::_run(const Json::Value &params, Json::Value &result) {
section["ports"][i]["owner"] = port->get_owner();
- switch (port->get_state()) {
- case TrexStatelessPort::PORT_STATE_DOWN:
- section["ports"][i]["status"] = "down";
- break;
-
- case TrexStatelessPort::PORT_STATE_UP_IDLE:
- section["ports"][i]["status"] = "idle";
- break;
-
- case TrexStatelessPort::PORT_STATE_TRANSMITTING:
- section["ports"][i]["status"] = "transmitting";
- break;
- }
+ section["ports"][i]["status"] = port->get_state_as_string();
}
@@ -234,7 +222,7 @@ TrexRpcCmdAcquire::_run(const Json::Value &params, Json::Value &result) {
TrexStatelessPort *port = TrexStateless::get_instance().get_port_by_id(port_id);
if ( (!port->is_free_to_aquire()) && (port->get_owner() != new_owner) && (!force)) {
- generate_execute_err(result, "device is already taken by '" + port->get_owner() + "'");
+ generate_execute_err(result, "port is already taken by '" + port->get_owner() + "'");
}
port->set_owner(new_owner);
@@ -265,3 +253,36 @@ TrexRpcCmdRelease::_run(const Json::Value &params, Json::Value &result) {
return (TREX_RPC_CMD_OK);
}
+
+/**
+ * get port stats
+ *
+ */
+trex_rpc_cmd_rc_e
+TrexRpcCmdGetPortStats::_run(const Json::Value &params, Json::Value &result) {
+
+ uint8_t port_id = parse_port(params, result);
+
+ TrexStatelessPort *port = TrexStateless::get_instance().get_port_by_id(port_id);
+
+ if (port->get_state() == TrexStatelessPort::PORT_STATE_DOWN) {
+ generate_execute_err(result, "cannot get stats - port is down");
+ }
+
+ result["result"]["status"] = port->get_state_as_string();
+
+ result["result"]["tx_bps"] = Json::Value::UInt64(port->get_port_stats().tx_bps);
+ result["result"]["tx_pps"] = Json::Value::UInt64(port->get_port_stats().tx_pps);
+ result["result"]["total_tx_pkts"] = Json::Value::UInt64(port->get_port_stats().total_tx_pkts);
+ result["result"]["total_tx_bytes"] = Json::Value::UInt64(port->get_port_stats().total_tx_bytes);
+
+ result["result"]["rx_bps"] = Json::Value::UInt64(port->get_port_stats().rx_bps);
+ result["result"]["rx_pps"] = Json::Value::UInt64(port->get_port_stats().rx_pps);
+ result["result"]["total_rx_pkts"] = Json::Value::UInt64(port->get_port_stats().total_rx_pkts);
+ result["result"]["total_rx_bytes"] = Json::Value::UInt64(port->get_port_stats().total_rx_bytes);
+
+ result["result"]["tx_rx_error"] = Json::Value::UInt64(port->get_port_stats().tx_rx_errors);
+
+ return (TREX_RPC_CMD_OK);
+}
+
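get_port_stats is a new RPC command: the handler rejects a port in PORT_STATE_DOWN and otherwise copies the TrexPortStats counters into the reply. A hedged sketch of the client side built with jsoncpp; the method and result field names come from the handler above, while the JSON-RPC envelope and the "port_id" parameter name are assumptions based on the surrounding control-plane conventions:

Json::Value req;
req["jsonrpc"] = "2.0";
req["id"]      = 1;
req["method"]  = "get_port_stats";
req["params"]["port_id"] = 0;      /* consumed by parse_port() in the handler (assumed name) */

/* expected reply fields (values illustrative):
 *   result.status  -> "idle" | "transmitting" | "down"
 *   result.tx_bps, tx_pps, total_tx_pkts, total_tx_bytes
 *   result.rx_bps, rx_pps, total_rx_pkts, total_rx_bytes
 *   result.tx_rx_error
 */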
diff --git a/src/rpc-server/commands/trex_rpc_cmds.h b/src/rpc-server/commands/trex_rpc_cmds.h
index e261d1c6..5926a8d8 100644
--- a/src/rpc-server/commands/trex_rpc_cmds.h
+++ b/src/rpc-server/commands/trex_rpc_cmds.h
@@ -75,6 +75,12 @@ TREX_RPC_CMD_DEFINE(TrexRpcCmdRelease, "release", 1, true);
/**
+ * port commands
+ */
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetPortStats, "get_port_stats", 1, true);
+
+
+/**
* stream cmds
*/
TREX_RPC_CMD_DEFINE(TrexRpcCmdRemoveAllStreams, "remove_all_streams", 1, true);
@@ -100,5 +106,4 @@ TREX_RPC_CMD_DEFINE(TrexRpcCmdStartTraffic, "start_traffic", 1, true);
TREX_RPC_CMD_DEFINE(TrexRpcCmdStopTraffic, "stop_traffic", 1, true);
-
#endif /* __TREX_RPC_CMD_H__ */
diff --git a/src/rpc-server/trex_rpc_cmds_table.cpp b/src/rpc-server/trex_rpc_cmds_table.cpp
index 170f0de1..c1c546f3 100644
--- a/src/rpc-server/trex_rpc_cmds_table.cpp
+++ b/src/rpc-server/trex_rpc_cmds_table.cpp
@@ -40,6 +40,7 @@ TrexRpcCommandsTable::TrexRpcCommandsTable() {
register_command(new TrexRpcCmdGetOwner());
register_command(new TrexRpcCmdAcquire());
register_command(new TrexRpcCmdRelease());
+ register_command(new TrexRpcCmdGetPortStats());
/* stream commands */
register_command(new TrexRpcCmdAddStream());
diff --git a/src/stateless/trex_stateless.cpp b/src/stateless/trex_stateless.cpp
index 6a3169d4..0eb96f05 100644
--- a/src/stateless/trex_stateless.cpp
+++ b/src/stateless/trex_stateless.cpp
@@ -80,6 +80,7 @@ uint8_t TrexStateless::get_port_count() {
TrexStatelessPort::TrexStatelessPort(uint8_t port_id) : m_port_id(port_id) {
m_port_state = PORT_STATE_UP_IDLE;
clear_owner();
+ m_stats = {0};
}
@@ -121,12 +122,30 @@ TrexStreamTable * TrexStatelessPort::get_stream_table() {
return &m_stream_table;
}
+
+std::string
+TrexStatelessPort::get_state_as_string() {
+
+ switch (get_state()) {
+ case PORT_STATE_DOWN:
+ return "down";
+
+ case PORT_STATE_UP_IDLE:
+ return "idle";
+
+ case PORT_STATE_TRANSMITTING:
+ return "transmitting";
+ }
+
+ return "unknown";
+}
+
void
TrexStatelessPort::get_properties(string &driver, string &speed) {
/* take this from DPDK */
- driver = "Unknown Driver";
- speed = "Unknown Speed";
+ driver = "e1000";
+ speed = "1 Gbps";
}
diff --git a/src/stateless/trex_stateless_api.h b/src/stateless/trex_stateless_api.h
index e02e93da..7a9080aa 100644
--- a/src/stateless/trex_stateless_api.h
+++ b/src/stateless/trex_stateless_api.h
@@ -49,6 +49,20 @@ public:
class TrexStatelessPort {
public:
+ struct TrexPortStats {
+ uint64_t tx_pps;
+ uint64_t tx_bps;
+ uint64_t total_tx_pkts;
+ uint64_t total_tx_bytes;
+
+ uint64_t rx_pps;
+ uint64_t rx_bps;
+ uint64_t total_rx_pkts;
+ uint64_t total_rx_bytes;
+
+ uint64_t tx_rx_errors;
+ };
+
/**
* port state
*/
@@ -97,6 +111,12 @@ public:
}
/**
+ * port state as string
+ *
+ */
+ std::string get_state_as_string();
+
+ /**
* fill up properties of the port
*
* @author imarom (16-Sep-15)
@@ -149,6 +169,16 @@ public:
}
+ const TrexPortStats & get_port_stats(void) {
+        /* scramble the mock counters with random deltas */
+ m_stats.tx_bps += 1 + rand() % 100;
+ m_stats.tx_pps += 1 + rand() % 10;
+ m_stats.total_tx_bytes += 1 + rand() % 10;
+ m_stats.total_tx_pkts += 1 + rand() % 5;
+
+ return m_stats;
+ }
+
private:
std::string generate_handler();
@@ -158,6 +188,7 @@ private:
port_state_e m_port_state;
std::string m_owner;
std::string m_owner_handler;
+ TrexPortStats m_stats;
};
/**
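
TrexStatelessPort now carries a TrexPortStats block, and get_port_stats() is still a mock: each call bumps the TX counters by small random deltas, so successive reads are monotonic but not meaningful. A short usage sketch (the guard for a down port is omitted here; the RPC handler above shows the real check):

TrexStatelessPort *port = TrexStateless::get_instance().get_port_by_id(0);
const TrexStatelessPort::TrexPortStats &stats = port->get_port_stats();
printf("port 0 [%s]: tx %lu bps, %lu pps\n",
       port->get_state_as_string().c_str(),
       (unsigned long)stats.tx_bps,
       (unsigned long)stats.tx_pps);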
diff --git a/src/tuple_gen.cpp b/src/tuple_gen.cpp
index 0faa6b63..e408f275 100755
--- a/src/tuple_gen.cpp
+++ b/src/tuple_gen.cpp
@@ -1,6 +1,6 @@
/*
- Wenxian Li
+ Wenxian Li
Hanoh Haim
Cisco Systems, Inc.
*/
@@ -21,287 +21,364 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+
#include "tuple_gen.h"
#include <string.h>
#include "utl_yaml.h"
-
-
-
-/* simple tuple genertion for one low*/
-void CTupleGeneratorSmart::GenerateTuple(CTupleBase & tuple) {
- BP_ASSERT(m_was_init);
- Generate_client_server();
- m_was_generated = true;
- m_result_client_port = GenerateOneClientPort(m_client_ip);
- tuple.setClient(m_result_client_ip);
- tuple.setServer(m_result_server_ip);
- tuple.setClientPort(m_result_client_port);
- tuple.setClientMac(&m_result_client_mac);
-// printf(" alloc %x %d mac:%x,%x\n",m_result_client_ip,m_result_client_port, m_result_client_mac.mac[0], m_result_client_mac.mac[1]);
+void CServerPool::Create(IP_DIST_t dist_value,
+ uint32_t min_ip,
+ uint32_t max_ip,
+ double l_flow,
+ double t_cps) {
+ gen = new CIpPool();
+ gen->set_dist(dist_value);
+ uint32_t total_ip = max_ip - min_ip +1;
+ gen->m_ip_info.resize(total_ip);
+
+ if (total_ip > ((l_flow*t_cps/MAX_PORT))) {
+ for(int idx=0;idx<total_ip;idx++){
+ gen->m_ip_info[idx] = new CServerInfoL();
+ gen->m_ip_info[idx]->set_ip(min_ip+idx);
+ }
+ } else {
+ for(int idx=0;idx<total_ip;idx++){
+ gen->m_ip_info[idx] = new CServerInfo();
+ gen->m_ip_info[idx]->set_ip(min_ip+idx);
+ }
+ }
+ gen->CreateBase();
}
+void CClientPool::Create(IP_DIST_t dist_value,
+ uint32_t min_ip,
+ uint32_t max_ip,
+ double l_flow,
+ double t_cps,
+ CFlowGenList* fl_list,
+ bool has_mac_map,
+ uint16_t tcp_aging,
+ uint16_t udp_aging) {
+ assert(max_ip>=min_ip);
+ set_dist(dist_value);
+ uint32_t total_ip = max_ip - min_ip +1;
+ uint32_t avail_ip = total_ip;
+ if (has_mac_map && (fl_list!=NULL)) {
+ for(int idx=0;idx<total_ip;idx++){
+ mac_addr_align_t *mac_adr = NULL;
+ mac_adr = get_mac_addr_by_ip(fl_list, min_ip+idx);
+ if (mac_adr == NULL) {
+ avail_ip--;
+ }
+ }
+ }
+ if (avail_ip!=0) {
+ m_ip_info.resize(avail_ip);
+ } else {
+ printf("\n Error, empty mac file is configured.\n"
+ "Will ignore the mac file configuration.\n");
+ m_ip_info.resize(total_ip);
+ }
-/*
- * allocate base tuple with n exta ports, used by bundels SIP
- * for example need to allocat 3 ports for this C/S
- */
-void CTupleGeneratorSmart::GenerateTupleEx(CTupleBase & tuple,
- uint8_t extra_ports_no,
- uint16_t * extra_ports) {
- GenerateTuple(tuple) ;
- for (int idx=0;idx<extra_ports_no;idx++) {
- extra_ports[idx] = GenerateOneClientPort(m_client_ip);
+ if (total_ip > ((l_flow*t_cps/MAX_PORT))) {
+ if (has_mac_map) {
+ for(int idx=0;idx<total_ip;idx++){
+ mac_addr_align_t *mac_adr = NULL;
+ mac_adr = get_mac_addr_by_ip(fl_list, min_ip+idx);
+ if (mac_adr != NULL) {
+ m_ip_info[idx] = new CClientInfoL(has_mac_map);
+ m_ip_info[idx]->set_ip(min_ip+idx);
+ m_ip_info[idx]->set_mac(mac_adr);
+ }
+ }
+ } else {
+ for(int idx=0;idx<total_ip;idx++){
+ m_ip_info[idx] = new CClientInfoL(has_mac_map);
+ m_ip_info[idx]->set_ip(min_ip+idx);
+ }
+ }
+ } else {
+ if (has_mac_map) {
+ for(int idx=0;idx<total_ip;idx++){
+ mac_addr_align_t *mac_adr = NULL;
+ mac_adr = get_mac_addr_by_ip(fl_list, min_ip+idx);
+ if (mac_adr != NULL) {
+ m_ip_info[idx] = new CClientInfo(has_mac_map);
+ m_ip_info[idx]->set_ip(min_ip+idx);
+ m_ip_info[idx]->set_mac(mac_adr);
+ }
+ }
+ } else {
+ for(int idx=0;idx<total_ip;idx++){
+ m_ip_info[idx] = new CClientInfo(has_mac_map);
+ m_ip_info[idx]->set_ip(min_ip+idx);
+ }
+ }
+
}
+ m_tcp_aging = tcp_aging;
+ m_udp_aging = udp_aging;
+ CreateBase();
}
-void CTupleGeneratorSmart::Dump(FILE *fd){
- fprintf(fd," id: %x, %x:%x - %x \n client:%x - %x, server:%x-%x\n",m_id,m_result_client_ip,m_result_server_ip,m_result_client_port,m_min_client_ip, m_max_client_ip, m_min_server_ip, m_max_server_ip);
+void delay(int msec);
+
+bool CTupleGeneratorSmart::add_client_pool(IP_DIST_t client_dist,
+ uint32_t min_client,
+ uint32_t max_client,
+ double l_flow,
+ double t_cps,
+ CFlowGenList* fl_list,
+ uint16_t tcp_aging,
+ uint16_t udp_aging){
+ assert(max_client>=min_client);
+ CClientPool* pool = new CClientPool();
+ pool->Create(client_dist, min_client, max_client,
+ l_flow, t_cps, fl_list, has_mac_mapping,
+ tcp_aging, udp_aging);
+
+ m_client_pool.push_back(pool);
+ return(true);
+}
+
+bool CTupleGeneratorSmart::add_server_pool(IP_DIST_t server_dist,
+ uint32_t min_server,
+ uint32_t max_server,
+ double l_flow,
+ double t_cps,
+ bool is_bundling){
+ assert(max_server>=min_server);
+ CServerPoolBase* pool;
+ if (is_bundling)
+ pool = new CServerPool();
+ else
+ pool = new CServerPoolSimple();
+    // we currently support a mac mapping file only for clients
+ pool->Create(server_dist, min_server, max_server,
+ l_flow, t_cps);
+ m_server_pool.push_back(pool);
+ return(true);
}
-void delay(int msec);
bool CTupleGeneratorSmart::Create(uint32_t _id,
- uint32_t thread_id,
- IP_DIST_t dist,
- uint32_t min_client,
- uint32_t max_client,
- uint32_t min_server,
- uint32_t max_server,
- double l_flow,
- double t_cps,
- CFlowGenList* fl_list){
-
- m_active_alloc=0;
- if (dist>=cdMAX_DIST) {
- m_client_dist = cdSEQ_DIST;
- } else {
- m_client_dist = dist;
- }
- m_min_client_ip = min_client;
- m_max_client_ip = max_client;
- m_min_server_ip = min_server;
- m_max_server_ip = max_server;
- assert(m_max_client_ip>=m_min_client_ip);
- assert(m_max_server_ip>=m_min_server_ip);
- assert((m_max_client_ip- m_min_client_ip)<50000);
-
- uint32_t total_clients = getTotalClients();
- /*printf("\ntotal_clients:%d, longest_flow:%f sec, total_cps:%f\n",
- total_clients, l_flow, t_cps);*/
- m_client.resize(m_max_client_ip-m_min_client_ip+1);
- if (fl_list == NULL || !is_mac_info_conf(fl_list)) {
- if (total_clients > ((l_flow*t_cps/MAX_PORT))) {
- for (int idx=0;idx<m_client.size();idx++)
- m_client[idx] = new CClientInfoL();
- } else {
- for (int idx=0;idx<m_client.size();idx++)
- m_client[idx] = new CClientInfo();
- }
- } else {
- if (total_clients > ((l_flow*t_cps/MAX_PORT))) {
- for (int idx=0;idx<m_client.size();idx++) {
- m_client[idx] = new CClientInfoL(
- get_mac_addr_by_ip(fl_list, min_client+idx));
- }
- } else {
- for (int idx=0;idx<m_client.size();idx++)
- m_client[idx] = new CClientInfo(
- get_mac_addr_by_ip(fl_list, min_client+idx));
- }
- }
- m_was_generated = false;
+ uint32_t thread_id,
+ bool has_mac)
+{
m_thread_id = thread_id;
-
m_id = _id;
m_was_init=true;
- m_port_allocation_error=0;
+ has_mac_mapping = has_mac;
return(true);
}
void CTupleGeneratorSmart::Delete(){
- m_was_generated = false;
m_was_init=false;
- m_client_dist = cdSEQ_DIST;
+ has_mac_mapping = false;
- for (int idx=0;idx<m_client.size();idx++){
- delete m_client[idx];
+ for (int idx=0;idx<m_client_pool.size();idx++) {
+ m_client_pool[idx]->Delete();
+ delete m_client_pool[idx];
}
- m_client.clear();
-}
+ m_client_pool.clear();
-void CTupleGeneratorSmart::Generate_client_server(){
- if (m_was_generated == false) {
- /*first time */
- m_was_generated = true;
- m_cur_client_ip = m_min_client_ip;
- m_cur_server_ip = m_min_server_ip;
+ for (int idx=0;idx<m_server_pool.size();idx++) {
+ m_server_pool[idx]->Delete();
+ delete m_server_pool[idx];
}
+ m_server_pool.clear();
+}
- uint32_t client_ip;
- int i=0;
- for (;i<100;i++) {
- if (is_client_available(m_cur_client_ip)) {
- break;
- }
- if (m_cur_client_ip >= m_max_client_ip) {
- m_cur_client_ip = m_min_client_ip;
- } else {
- m_cur_client_ip++;
- }
- }
- if (i>=100) {
- printf(" ERROR ! sparse mac-ip files is not supported yet !\n");
- exit(-1);
- }
+void CTupleGenPoolYaml::Dump(FILE *fd){
+ fprintf(fd," dist : %d \n",m_dist);
+ fprintf(fd," IPs : %08x -%08x \n",m_ip_start,m_ip_end);
+ fprintf(fd," clients per gb : %d \n",m_number_of_clients_per_gb);
+ fprintf(fd," min clients : %d \n",m_min_clients);
+ fprintf(fd," tcp aging : %d sec \n",m_tcp_aging_sec);
+ fprintf(fd," udp aging : %d sec \n",m_udp_aging_sec);
+}
- m_client_ip = m_cur_client_ip;
- CClientInfoBase* client = get_client_by_ip(m_client_ip);
- memcpy(&m_result_client_mac,
- client->get_mac_addr(),
- sizeof(mac_addr_align_t));
- m_result_client_ip = m_client_ip;
- m_result_server_ip = m_cur_server_ip ;
-/*
-printf("ip:%x,mac:%x,%x,%x,%x,%x,%x, inused:%x\n",m_client_ip,
- m_result_client_mac.mac[0],
- m_result_client_mac.mac[1],
- m_result_client_mac.mac[2],
- m_result_client_mac.mac[3],
- m_result_client_mac.mac[4],
- m_result_client_mac.mac[5],
- m_result_client_mac.inused);
-*/
- m_cur_client_ip ++;
- m_cur_server_ip ++;
- if (m_cur_client_ip > m_max_client_ip) {
- m_cur_client_ip = m_min_client_ip;
+bool CTupleGenPoolYaml::is_valid(uint32_t num_threads,bool is_plugins){
+ if ( m_ip_start > m_ip_end ){
+ printf(" ERROR The ip_start must be bigger than ip_end \n");
+ return(false);
}
- if (m_cur_server_ip > m_max_server_ip) {
- m_cur_server_ip = m_min_server_ip;
+
+ uint32_t ips= (m_ip_end - m_ip_start +1);
+ if ( ips < num_threads ) {
+ printf(" ERROR The number of ips should be at least number of threads %d \n",num_threads);
+ return (false);
}
-}
-void CTupleGeneratorSmart::return_all_client_ports() {
- for(int idx=0;idx<m_client.size();++idx) {
- m_client.at(idx)->return_all_ports();
+ if (ips > 1000000) {
+ printf(" The number of clients requested is %d maximum supported : %d \n",ips,1000000);
+ return (false);
}
+ return (true);
}
-void CTupleGenYamlInfo::Dump(FILE *fd){
- fprintf(fd," dist : %d \n",m_client_dist);
- fprintf(fd," clients : %08x -%08x \n",m_clients_ip_start,m_clients_ip_end);
- fprintf(fd," servers : %08x -%08x \n",m_servers_ip_start,m_servers_ip_end);
- fprintf(fd," clients per gb : %d \n",m_number_of_clients_per_gb);
- fprintf(fd," min clients : %d \n",m_min_clients);
- fprintf(fd," tcp aging : %d sec \n",m_tcp_aging_sec);
- fprintf(fd," udp aging : %d sec \n",m_udp_aging_sec);
-}
+
+
+void operator >> (const YAML::Node& node, CTupleGenPoolYaml & fi) {
+ std::string tmp;
+ node["name"] >> fi.m_name;
+ node["distribution"] >> tmp ;
+ if (tmp == "random") {
+ fi.m_dist=cdRANDOM_DIST;
+ }else if (tmp == "normal") {
+ fi.m_dist=cdNORMAL_DIST;
+ } else {
+ fi.m_dist=cdSEQ_DIST;
+ }
+ utl_yaml_read_ip_addr(node,"ip_start",fi.m_ip_start);
+ utl_yaml_read_ip_addr(node,"ip_end",fi.m_ip_end);
+ fi.m_number_of_clients_per_gb = 0;
+
+ fi.m_min_clients = 0;
+ fi.m_is_bundling = false;
+ fi.m_tcp_aging_sec = 0;
+ fi.m_udp_aging_sec = 0;
+ fi.m_dual_interface_mask = 0;
+ try {
+ utl_yaml_read_uint32(node,"clients_per_gb",fi.m_number_of_clients_per_gb);
+ } catch ( const std::exception& e ) {
+ ;}
+ try {
+ utl_yaml_read_uint32(node,"min_clients",fi.m_min_clients);
+ } catch ( const std::exception& e ) {
+ ;}
+ try {
+ utl_yaml_read_ip_addr(node,"dual_port_mask",fi.m_dual_interface_mask);
+ } catch ( const std::exception& e ) {
+ ;}
+ try {
+ utl_yaml_read_uint16(node,"tcp_aging",fi.m_tcp_aging_sec);
+ } catch ( const std::exception& e ) {
+ ;}
+ try {
+ utl_yaml_read_uint16(node,"udp_aging",fi.m_udp_aging_sec);
+ } catch ( const std::exception& e ) {
+ ;}
+ try {
+ node["track_ports"] >> fi.m_is_bundling;
+ } catch ( const std::exception& e ) {
+ ;}
+}
+void copy_global_pool_para(CTupleGenPoolYaml & src, CTupleGenPoolYaml & dst) {
+ if (src.m_number_of_clients_per_gb == 0)
+ src.m_number_of_clients_per_gb = dst.m_number_of_clients_per_gb;
+ if (src.m_min_clients == 0)
+ src.m_min_clients = dst.m_min_clients;
+ if (src.m_dual_interface_mask == 0)
+ src.m_dual_interface_mask = dst.m_dual_interface_mask;
+ if (src.m_tcp_aging_sec == 0)
+ src.m_tcp_aging_sec = dst.m_tcp_aging_sec;
+ if (src.m_udp_aging_sec == 0)
+ src.m_udp_aging_sec = dst.m_udp_aging_sec;
+}
void operator >> (const YAML::Node& node, CTupleGenYamlInfo & fi) {
std::string tmp;
try {
- node["distribution"] >> tmp ;
- if (tmp == "seq" ) {
- fi.m_client_dist=cdSEQ_DIST;
- }else{
- if (tmp == "random") {
- fi.m_client_dist=cdRANDOM_DIST;
- }else{
- if (tmp == "normal") {
- fi.m_client_dist=cdNORMAL_DIST;
- }
- }
- }
+ CTupleGenPoolYaml c_pool;
+ CTupleGenPoolYaml s_pool;
+ node["distribution"] >> tmp ;
+ if (tmp == "random") {
+ c_pool.m_dist=cdRANDOM_DIST;
+ }else if (tmp == "normal") {
+ c_pool.m_dist=cdNORMAL_DIST;
+ } else {
+ c_pool.m_dist=cdSEQ_DIST;
+ }
+ s_pool.m_dist = c_pool.m_dist;
+ utl_yaml_read_ip_addr(node,"clients_start",c_pool.m_ip_start);
+ utl_yaml_read_ip_addr(node,"clients_end",c_pool.m_ip_end);
+ utl_yaml_read_ip_addr(node,"servers_start",s_pool.m_ip_start);
+ utl_yaml_read_ip_addr(node,"servers_end",s_pool.m_ip_end);
+ utl_yaml_read_uint32(node,"clients_per_gb",c_pool.m_number_of_clients_per_gb);
+ utl_yaml_read_uint32(node,"min_clients",c_pool.m_min_clients);
+ utl_yaml_read_ip_addr(node,"dual_port_mask",c_pool.m_dual_interface_mask);
+ utl_yaml_read_uint16(node,"tcp_aging",c_pool.m_tcp_aging_sec);
+ utl_yaml_read_uint16(node,"udp_aging",c_pool.m_udp_aging_sec);
+ s_pool.m_dual_interface_mask = c_pool.m_dual_interface_mask;
+ s_pool.m_is_bundling = false;
+ fi.m_client_pool.push_back(c_pool);
+ fi.m_server_pool.push_back(s_pool);
}catch ( const std::exception& e ) {
- fi.m_client_dist=cdSEQ_DIST;
+ printf("No default generator defined.\n");
}
- utl_yaml_read_ip_addr(node,"clients_start",fi.m_clients_ip_start);
- utl_yaml_read_ip_addr(node,"clients_end",fi.m_clients_ip_end);
- utl_yaml_read_ip_addr(node,"servers_start",fi.m_servers_ip_start);
- utl_yaml_read_ip_addr(node,"servers_end",fi.m_servers_ip_end);
- utl_yaml_read_uint32(node,"clients_per_gb",fi.m_number_of_clients_per_gb);
- utl_yaml_read_uint32(node,"min_clients",fi.m_min_clients);
- utl_yaml_read_ip_addr(node,"dual_port_mask",fi.m_dual_interface_mask);
- utl_yaml_read_uint16(node,"tcp_aging",fi.m_tcp_aging_sec);
- utl_yaml_read_uint16(node,"udp_aging",fi.m_udp_aging_sec);
-
+ try{
+ const YAML::Node& c_pool_info = node["generator_clients"];
+ for (uint16_t idx=0;idx<c_pool_info.size();idx++) {
+ CTupleGenPoolYaml pool;
+ try {
+ c_pool_info[idx] >> pool;
+ if (fi.m_client_pool.size()>0) {
+ copy_global_pool_para(pool, fi.m_client_pool[0]);
+ }
+ fi.m_client_pool.push_back(pool);
+ } catch ( const std::exception& e ) {
+ printf("client pool in YAML is wrong\n");
+ }
+ }
+ }catch ( const std::exception& e ) {
+ printf("no client generator pool configured, using default pool\n");
+ }
+ try {
+ const YAML::Node& s_pool_info = node["generator_servers"];
+ for (uint16_t idx=0;idx<s_pool_info.size();idx++) {
+ CTupleGenPoolYaml pool;
+ try {
+ s_pool_info[idx] >> pool;
+ } catch ( const std::exception& e ) {
+ printf("server pool in YAML is wrong\n");
+ }
+ if (fi.m_server_pool.size()>0) {
+ copy_global_pool_para(pool, fi.m_server_pool[0]);
+ }
+ fi.m_server_pool.push_back(pool);
+ }
+ }catch ( const std::exception& e ) {
+ printf("no server generator pool configured, using default pool\n");
+ }
}
bool CTupleGenYamlInfo::is_valid(uint32_t num_threads,bool is_plugins){
- if ( m_servers_ip_start > m_servers_ip_end ){
- printf(" ERROR The servers_ip_start must be bigger than servers_ip_end \n");
- return(false);
- }
-
- if ( m_clients_ip_start > m_clients_ip_end ){
- printf(" ERROR The clients_ip_start must be bigger than clients_ip_end \n");
- return(false);
+ for (int i=0;i<m_client_pool.size();i++) {
+ if (m_client_pool[i].is_valid(num_threads, is_plugins)==false)
+ return false;
}
- uint32_t servers= (m_servers_ip_end - m_servers_ip_start +1);
- if ( servers < num_threads ) {
- printf(" ERROR The number of servers should be at least number of threads %d \n",num_threads);
- return (false);
+ for (int i=0;i<m_server_pool.size();i++) {
+ if (m_server_pool[i].is_valid(num_threads, is_plugins)==false)
+ return false;
}
- uint32_t clients= (m_clients_ip_end - m_clients_ip_start +1);
- if ( clients < num_threads ) {
- printf(" ERROR The number of clients should be at least number of threads %d \n",num_threads);
- return (false);
- }
-
- /* defect for plugin */
- if (is_plugins) {
- if ( getTotalServers() < getTotalClients() ){
- printf(" Plugin is configured. in that case due to a limitation ( defect trex-54 ) \n");
- printf(" the number of servers should be bigger than number of clients \n");
- return (false);
- }
-
- /* update number of servers in a way that it would be exact multiplication */
- uint32_t mul=getTotalServers() / getTotalClients();
- uint32_t new_server_num=mul*getTotalClients();
- m_servers_ip_end = m_servers_ip_start + new_server_num-1 ;
-
- assert(getTotalServers() %getTotalClients() ==0);
- }
-
-/* if (clients > 00000) {
- printf(" The number of clients requested is %d maximum supported : %d \n",clients,100000);
- return (false);
- }
- */ return (true);
+ return true;
}
/* split the clients and servers by dual_port_id and thread_id:
   clients are split by threads and dual_port_id,
   servers are split by dual_port_id */
-void split_clients(uint32_t thread_id,
- uint32_t total_threads,
- uint32_t dual_port_id,
- CTupleGenYamlInfo & fi,
- CClientPortion & portion){
-
- uint32_t clients_chunk = fi.getTotalClients()/total_threads;
- // FIXME need to fix this when fixing the server
- uint32_t servers_chunk = fi.getTotalServers()/total_threads;
-
- assert(clients_chunk>0);
- assert(servers_chunk>0);
+void split_ips(uint32_t thread_id,
+ uint32_t total_threads,
+ uint32_t dual_port_id,
+ CTupleGenPoolYaml& poolinfo,
+ CIpPortion & portion){
- uint32_t dual_if_mask=(dual_port_id*fi.m_dual_interface_mask);
+ uint32_t chunks = poolinfo.getTotalIps()/total_threads;
- portion.m_client_start = fi.m_clients_ip_start + thread_id*clients_chunk + dual_if_mask;
- portion.m_client_end = portion.m_client_start + clients_chunk -1 ;
+ assert(chunks>0);
- portion.m_server_start = fi.m_servers_ip_start + thread_id*servers_chunk +dual_if_mask;
- portion.m_server_end = portion.m_server_start + servers_chunk -1;
+ uint32_t dual_if_mask=(dual_port_id*poolinfo.getDualMask());
+
+ portion.m_ip_start = poolinfo.get_ip_start() + thread_id*chunks + dual_if_mask;
+ portion.m_ip_end = portion.m_ip_start + chunks -1 ;
}
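
split_ips() replaces split_clients(): every pool is cut into equal per-thread chunks and then shifted by dual_port_id times the pool's dual-interface mask. A worked example that mirrors the split2 unit test above (512 addresses, 8 threads, mask 0x01000000):

CTupleGenPoolYaml pool;
pool.m_ip_start            = 0x20000000;
pool.m_ip_end              = 0x200001ff;   /* 512 addresses */
pool.m_dual_interface_mask = 0x01000000;

CIpPortion portion;
split_ips(3 /* thread_id */, 8 /* total_threads */, 1 /* dual_port_id */, pool, portion);
/* chunks = 512/8 = 64, so:
 *   portion.m_ip_start = 0x20000000 + 3*64 + 0x01000000 = 0x210000c0
 *   portion.m_ip_end   = 0x210000c0 + 64 - 1            = 0x210000ff
 */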
diff --git a/src/tuple_gen.h b/src/tuple_gen.h
index 96b9b01a..fb856538 100755
--- a/src/tuple_gen.h
+++ b/src/tuple_gen.h
@@ -1,8 +1,9 @@
#ifndef TUPLE_GEN_H_
#define TUPLE_GEN_H_
+
/*
- Wenxian Li
-
+ Wenxian Li
+
Cisco Systems, Inc.
*/
@@ -22,7 +23,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-
#include <stdio.h>
#include <stdint.h>
#include <string.h>
@@ -39,9 +39,14 @@ limitations under the License.
#include <yaml-cpp/yaml.h>
+#include <random>
+
+
+
/*
* Class that handle the client info
*/
+#define MAX_CLIENTS 1000000
#define MAX_PORT (64000)
#define MIN_PORT (1024)
#define ILLEGAL_PORT (0)
@@ -52,6 +57,8 @@ limitations under the License.
/*FIXME*/
#define VLAN_SIZE (2)
+#define FOREACH(vector) for(int i=0;i<vector.size();i++)
+
/* Client distribution */
@@ -62,14 +69,21 @@ typedef enum {
cdMAX_DIST = 3
} IP_DIST_t ;
+#define INUSED 0
+#define UNUSED 1
typedef struct mac_addr_align_ {
public:
uint8_t mac[6];
uint8_t inused;
uint8_t pad;
} mac_addr_align_t;
-#define INUSED 0
-#define UNUSED 1
+
+typedef struct mac_mapping_ {
+ mac_addr_align_t mac;
+ uint32_t ip;
+} mac_mapping_t;
+
+
/* For type 1, we generate ports by maintaining a 64K-bit port array for each client.
 * In this case, we cannot support a large number of clients due to memory exhaustion.
@@ -85,39 +99,31 @@ public:
#define TYPE2 1
#define MAX_TYPE 3
-class CClientInfoBase {
+
+class CIpInfoBase {
public:
+ virtual mac_addr_align_t* get_mac() { return NULL;}
+ virtual void set_mac(mac_addr_align_t*){;}
virtual uint16_t get_new_free_port() = 0;
virtual void return_port(uint16_t a) = 0;
virtual void return_all_ports() = 0;
- virtual bool is_client_available() = 0;
- virtual mac_addr_align_t* get_mac_addr() = 0;
+ uint32_t get_ip() {
+ return m_ip;
+ }
+ void set_ip(uint32_t ip) {
+ m_ip = ip;
+ }
+ public:
+ uint32_t m_ip;
};
//CClientInfo for large amount of clients support
-class CClientInfoL : public CClientInfoBase {
- mac_addr_align_t mac;
+class CIpInfoL : public CIpInfoBase {
private:
uint16_t m_curr_port;
public:
- CClientInfoL(mac_addr_align_t* mac_adr) {
- m_curr_port = MIN_PORT;
- if (mac_adr) {
- mac = *mac_adr;
- mac.inused = INUSED;
- } else {
- memset(&mac, 0, sizeof(mac_addr_align_t));
- mac.inused = UNUSED;
- }
- }
-
- CClientInfoL() {
+ CIpInfoL() {
m_curr_port = MIN_PORT;
- memset(&mac, 0, sizeof(mac_addr_align_t));
- mac.inused = INUSED;
- }
- mac_addr_align_t* get_mac_addr() {
- return &mac;
}
uint16_t get_new_free_port() {
if (m_curr_port>MAX_PORT) {
@@ -132,22 +138,13 @@ class CClientInfoL : public CClientInfoBase {
void return_all_ports() {
m_curr_port = MIN_PORT;
}
-
- bool is_client_available() {
- if (mac.inused == INUSED) {
- return true;
- } else {
- return false;
- }
- }
};
-class CClientInfo : public CClientInfoBase {
+class CIpInfo : public CIpInfoBase {
private:
std::bitset<MAX_PORT> m_bitmap_port;
uint16_t m_head_port;
- mac_addr_align_t mac;
friend class CClientInfoUT;
private:
@@ -200,27 +197,9 @@ class CClientInfo : public CClientInfoBase {
public:
- CClientInfo() {
+ CIpInfo() {
m_head_port = MIN_PORT;
m_bitmap_port.reset();
- memset(&mac, 0, sizeof(mac_addr_align_t));
- mac.inused = INUSED;
- }
- CClientInfo(mac_addr_align_t* mac_info) {
- m_head_port = MIN_PORT;
- m_bitmap_port.reset();
- if (mac_info) {
- mac = *mac_info;
- mac.inused = INUSED;
- } else {
- memset(&mac, 0, sizeof(mac_addr_align_t));
- mac.inused = UNUSED;
- }
-
- }
-
- mac_addr_align_t* get_mac_addr() {
- return &mac;
}
uint16_t get_new_free_port() {
@@ -251,30 +230,109 @@ class CClientInfo : public CClientInfoBase {
m_head_port = MIN_PORT;
m_bitmap_port.reset();
}
- bool is_client_available() {
- if (mac.inused == INUSED) {
- return true;
+};
+
+class CClientInfo : public CIpInfo {
+ public:
+ CClientInfo (bool has_mac) {
+ if (has_mac==true) {
+ m_mac = new mac_addr_align_t();
+ } else {
+ m_mac = NULL;
+ }
+ }
+ CClientInfo () {
+ m_mac = NULL;
+ }
+
+ mac_addr_align_t* get_mac() {
+ return m_mac;
+ }
+ void set_mac(mac_addr_align_t *mac) {
+ memcpy(m_mac, mac, sizeof(mac_addr_align_t));
+ }
+ ~CClientInfo() {
+ if (m_mac!=NULL){
+ delete m_mac;
+ }
+ }
+ private:
+ mac_addr_align_t *m_mac;
+};
+
+class CClientInfoL : public CIpInfoL {
+public:
+ CClientInfoL (bool has_mac) {
+ if (has_mac==true) {
+ m_mac = new mac_addr_align_t();
} else {
- return false;
+ m_mac = NULL;
}
}
+ CClientInfoL () {
+ m_mac = NULL;
+ }
+
+ mac_addr_align_t* get_mac() {
+ return m_mac;
+ }
+ void set_mac(mac_addr_align_t *mac) {
+ memcpy(m_mac, mac, sizeof(mac_addr_align_t));
+ }
+ ~CClientInfoL() {
+ if (m_mac!=NULL) {
+ delete m_mac;
+ }
+ }
+private:
+ mac_addr_align_t *m_mac;
+};
+class CServerInfo : public CIpInfo {
+ ;
};
+class CServerInfoL : public CIpInfoL {
+ ;
+};
+
+
class CTupleBase {
public:
+ CTupleBase() {
+ m_client_mac.inused = UNUSED;
+ }
uint32_t getClient() {
return m_client_ip;
}
void setClient(uint32_t ip) {
m_client_ip = ip;
}
+ uint32_t getClientId() {
+ return m_client_idx;
+ }
+ void setClientId(uint32_t id) {
+ m_client_idx = id;
+ }
+
uint32_t getServer(){
return m_server_ip;
}
void setServer(uint32_t ip) {
m_server_ip = ip;
}
+ uint32_t getServerId(){
+ return m_server_idx;
+ }
+ void setServerId(uint32_t id) {
+ m_server_idx = id;
+ }
+ uint16_t getServerPort() {
+ return m_server_port;
+ }
+ void setServerPort(uint16_t port) {
+ m_server_port = port;
+ }
uint16_t getClientPort() {
return m_client_port;
}
@@ -285,16 +343,21 @@ public:
return &m_client_mac;
}
void setClientMac(mac_addr_align_t* mac_info) {
- memcpy(&m_client_mac, mac_info, sizeof(mac_addr_align_t));
+ if (mac_info != NULL) {
+ memcpy(&m_client_mac, mac_info, sizeof(mac_addr_align_t));
+ m_client_mac.inused = INUSED;
+ } else {
+ m_client_mac.inused = UNUSED;
+ }
}
private:
uint32_t m_client_ip;
+ uint32_t m_client_idx;
uint32_t m_server_ip;
- uint16_t m_client_port;
- uint16_t pad1;
- uint32_t pad2;
+ uint32_t m_server_idx;
mac_addr_align_t m_client_mac;
- uint32_t pad3[3];
+ uint16_t m_client_port;
+ uint16_t m_server_port;
};
@@ -304,43 +367,259 @@ mac_addr_align_t * get_mac_addr_by_ip(CFlowGenList *fl_list,
uint32_t ip);
bool is_mac_info_conf(CFlowGenList *fl_list);
-/* generate for each template */
-class CTupleGeneratorSmart {
+class CIpPool {
+ public:
+ uint16_t GenerateOnePort(uint32_t idx) {
+ CIpInfoBase* ip_info = m_ip_info[idx];
+ uint16_t port;
+ port = ip_info->get_new_free_port();
+
+ //printf(" alloc extra %x %d \n",c_ip,port);
+ if (port==ILLEGAL_PORT) {
+ m_port_allocation_error++;
+ }
+ m_active_alloc++;
+ return (port);
+ }
+ bool is_valid_ip(uint32_t ip){
+ CIpInfoBase* ip_front = m_ip_info.front();
+ CIpInfoBase* ip_back = m_ip_info.back();
+ if ((ip>=ip_front->get_ip()) &&
+ (ip<=ip_back->get_ip())) {
+ return(true);
+ }
+ printf("invalid ip:%x, min_ip:%x, max_ip:%x, this:%x\n",
+ ip, ip_front->get_ip(),
+ ip_back->get_ip(),this);
+ return(false);
+ }
+
+ uint32_t get_curr_ip() {
+ return m_ip_info[m_cur_idx]->get_ip();
+ }
+ uint32_t get_ip(uint32_t idx) {
+ return m_ip_info[idx]->get_ip();
+ }
+ CIpInfoBase* get_ip_info_by_idx(uint32_t idx) {
+ return m_ip_info[idx];
+ }
+
+ void inc_cur_idx() {
+ switch (m_dist) {
+ case cdRANDOM_DIST:
+ m_cur_idx = get_random_idx();
+ break;
+ case cdSEQ_DIST :
+ default:
+ m_cur_idx++;
+ if (m_cur_idx >= m_ip_info.size())
+ m_cur_idx = 0;
+ }
+ }
+    // return the current ip index in this pool and advance per the configured distribution
+ uint32_t generate_ip() {
+ uint32_t res_idx = m_cur_idx;
+ inc_cur_idx();
+ return res_idx;
+ }
+ void set_dist(IP_DIST_t dist) {
+ if (dist>=cdMAX_DIST) {
+ m_dist = cdSEQ_DIST;
+ } else {
+ m_dist = dist;
+ }
+ }
+ void Delete() {
+ FOREACH(m_ip_info) {
+ delete m_ip_info[i];
+ }
+ m_ip_info.clear();
+ }
+ uint32_t get_total_ips() {
+ return m_ip_info.size();
+ }
+ void return_all_ports() {
+ FOREACH(m_ip_info) {
+ m_ip_info[i]->return_all_ports();
+ }
+ }
+ void FreePort(uint32_t id, uint16_t port) {
+ // assert(id<m_ip_info.size());
+ m_active_alloc--;
+ CIpInfoBase* client = m_ip_info[id];
+ client->return_port(port);
+ }
+
+ mac_addr_align_t * get_curr_mac() {
+ return m_ip_info[m_cur_idx]->get_mac();
+ }
+ mac_addr_align_t *get_mac(uint32_t idx) {
+ return m_ip_info[idx]->get_mac();
+ }
+
+ public:
+ std::vector<CIpInfoBase*> m_ip_info;
+ IP_DIST_t m_dist;
+ uint32_t m_cur_idx;
+ uint32_t m_active_alloc;
+ uint32_t m_port_allocation_error;
+ std::default_random_engine generator;
+ std::uniform_int_distribution<int> *rand_dis;
+ void CreateBase() {
+ switch (m_dist) {
+ case cdRANDOM_DIST:
+ rand_dis = new std::uniform_int_distribution<int>
+ (0,get_total_ips()-1);
+ break;
+ default:
+ break;
+ }
+ m_cur_idx = 0;
+ m_active_alloc = 0;
+ m_port_allocation_error = 0;
+ }
+ uint32_t get_random_idx() {
+ uint32_t res = (*rand_dis)(generator);
+ return (res);
+ }
+ bool IsFreePortRequired(void){
+ return(true);
+ }
+
+
+};
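A sketch of the CIpPool allocation cycle, assuming m_ip_info was already populated, set_dist()/CreateBase() were called, and ILLEGAL_PORT is the sentinel used above:

void ip_pool_cycle_sketch(CIpPool &pool) {
    uint32_t idx  = pool.generate_ip();        // next index, sequential or random per m_dist
    uint32_t ip   = pool.get_ip(idx);          // resolve the index to an IPv4 address
    uint16_t port = pool.GenerateOnePort(idx); // may return ILLEGAL_PORT and bump the error counter
    if (port != ILLEGAL_PORT) {
        /* ... build a flow from (ip, port) ... */
        pool.FreePort(idx, port);              // return the port once the flow ends
    }
    (void)ip;
}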
+
+class CClientPool : public CIpPool {
public:
- /* simple tuple genertion for one low*/
- void GenerateTuple(CTupleBase & tuple);
- /*
- * allocate base tuple with n exta ports, used by bundels SIP
- * for example need to allocat 3 ports for this C/S
- */
- void GenerateTupleEx(CTupleBase & tuple,uint8_t extra_ports_no,
- uint16_t * extra_ports);
+ void GenerateTuple(CTupleBase & tuple) {
+ uint32_t idx = generate_ip();
+ tuple.setClientId(idx);
+ tuple.setClient(get_ip(idx));
+ tuple.setClientMac(get_mac(idx));
+ tuple.setClientPort(GenerateOnePort(idx));
+ }
+ uint16_t get_tcp_aging() {
+ return m_tcp_aging;
+ }
+ uint16_t get_udp_aging() {
+ return m_udp_aging;
+ }
+ void Create(IP_DIST_t dist_value,
+ uint32_t min_ip,
+ uint32_t max_ip,
+ double l_flow,
+ double t_cps,
+ CFlowGenList* fl_list,
+ bool has_mac_map,
+ uint16_t tcp_aging,
+ uint16_t udp_aging);
+public:
+ uint16_t m_tcp_aging;
+ uint16_t m_udp_aging;
+};
+
+class CServerPoolBase {
+ public:
+ virtual void GenerateTuple(CTupleBase& tuple) = 0;
+ virtual uint16_t GenerateOnePort(uint32_t idx) = 0;
+ virtual void Delete() = 0;
+ virtual uint32_t get_total_ips()=0;
+ virtual void Create(IP_DIST_t dist_value,
+ uint32_t min_ip,
+ uint32_t max_ip,
+ double l_flow,
+ double t_cps) = 0;
+
+};
- /* free client port */
- void FreePort(uint32_t c_ip,
- uint16_t port){
- //printf(" free %x %d \n",c_ip,port);
- m_active_alloc--;
- CClientInfoBase* client = get_client_by_ip(c_ip);
- client->return_port(port);
+class CServerPoolSimple : public CServerPoolBase {
+public:
+ void Create(IP_DIST_t dist_value,
+ uint32_t min_ip,
+ uint32_t max_ip,
+ double l_flow,
+ double t_cps) {
+ m_max_server_ip = max_ip;
+ m_min_server_ip = min_ip;
+ m_cur_server_ip = min_ip;
+ }
+ void Delete() {
+ return ;
+ }
+ void GenerateTuple(CTupleBase& tuple) {
+ tuple.setServer(m_cur_server_ip);
+ m_cur_server_ip ++;
+ if (m_cur_server_ip > m_max_server_ip) {
+ m_cur_server_ip = m_min_server_ip;
+ }
+ }
+ uint16_t GenerateOnePort(uint32_t idx) {
+ // do nothing
+ return 0;
+ }
+ uint32_t get_total_ips() {
+ return (m_max_server_ip-m_min_server_ip+1);
}
+private:
+ uint32_t m_max_server_ip;
+ uint32_t m_min_server_ip;
+ uint32_t m_cur_server_ip;
+};
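A sketch of the simple (non-bundling) server pool, assuming cdSEQ_DIST from the distribution enum used above; it only round-robins IPs between min and max and keeps no per-server port state (the dist/flow/cps arguments are not stored):

void server_pool_simple_sketch(CTupleBase &t) {
    CServerPoolSimple pool;
    pool.Create(cdSEQ_DIST, 0x30000001, 0x30000003, 10.0 /*l_flow*/, 100.0 /*t_cps*/);
    for (int i = 0; i < 4; i++) {
        pool.GenerateTuple(t);   // 0x30000001, ...02, ...03, then wraps back to ...01
    }
    pool.Delete();               // no-op for the simple pool
}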
- /* return true if this type of generator require to free resource */
- bool IsFreePortRequired(void){
- return(true);
+class CServerPool : public CServerPoolBase {
+public:
+ CIpPool *gen;
+ void GenerateTuple(CTupleBase & tuple) {
+ uint32_t idx = gen->generate_ip();
+ tuple.setServerId(idx);
+ tuple.setServer(gen->get_ip(idx));
+ }
+ uint16_t GenerateOnePort(uint32_t idx) {
+ return gen->GenerateOnePort(idx);
+ }
+ void Create(IP_DIST_t dist_value,
+ uint32_t min_ip,
+ uint32_t max_ip,
+ double l_flow,
+ double t_cps);
+
+ void Delete() {
+ if (gen!=NULL) {
+ gen->Delete();
+ delete gen;
+ }
+ }
+ uint32_t get_total_ips() {
+ return gen->m_ip_info.size();
}
+};
+/* generate for each template */
+class CTupleGeneratorSmart {
+public:
    /* return the number of active sockets */
uint32_t ActiveSockets(void){
- return (m_active_alloc);
+ uint32_t total_active_alloc = 0;
+ FOREACH(m_client_pool) {
+ total_active_alloc += m_client_pool[i]->m_active_alloc;
+ }
+ return (total_active_alloc);
}
uint32_t getTotalClients(void){
- return (m_max_client_ip -m_min_client_ip +1);
+ uint32_t total_clients = 0;
+ FOREACH(m_client_pool) {
+ total_clients += m_client_pool[i]->get_total_ips();
+ }
+ return (total_clients);
}
uint32_t getTotalServers(void){
- return (m_max_server_ip -m_min_server_ip +1);
+ uint32_t total_servers = 0;
+ FOREACH(m_server_pool) {
+ total_servers += m_server_pool[i]->get_total_ips();
+ }
+ return total_servers;
}
uint32_t SocketsPerClient(void){
@@ -351,136 +630,103 @@ public:
return (SocketsPerClient() * getTotalClients());
}
+
+ void FreePort(uint8_t pool_idx, uint32_t id, uint16_t port) {
+ get_client_pool(pool_idx)->FreePort(id, port);
+ }
+
+ bool IsFreePortRequired(uint8_t pool_idx){
+ return(get_client_pool(pool_idx)->IsFreePortRequired());
+ }
+ uint16_t get_tcp_aging(uint8_t pool_idx) {
+ return (get_client_pool(pool_idx)->get_tcp_aging());
+ }
+ uint16_t get_udp_aging(uint8_t pool_idx) {
+ return (get_client_pool(pool_idx)->get_udp_aging());
+ }
public:
CTupleGeneratorSmart(){
m_was_init=false;
- m_client_dist = cdSEQ_DIST;
+ has_mac_mapping = false;
}
bool Create(uint32_t _id,
- uint32_t thread_id,
- IP_DIST_t dist,
- uint32_t min_client,
- uint32_t max_client,
- uint32_t min_server,
- uint32_t max_server,
- double longest_flow,
- double total_cps,
- CFlowGenList * fl_list = NULL);
+ uint32_t thread_id, bool has_mac=false);
void Delete();
- void Dump(FILE *fd);
-
- void SetClientDist(IP_DIST_t dist) {
- m_client_dist = dist;
- }
-
- IP_DIST_t GetClientDist() {
- return (m_client_dist);
- }
-
inline uint32_t GetThreadId(){
return ( m_thread_id );
}
- bool is_valid_client(uint32_t c_ip){
- if ((c_ip>=m_min_client_ip) && (c_ip<=m_max_client_ip)) {
- return(true);
- }
- printf("invalid client ip:%x, min_ip:%x, max_ip:%x\n",
- c_ip, m_min_client_ip, m_max_client_ip);
- return(false);
- }
-
- CClientInfoBase* get_client_by_ip(uint32_t c_ip){
- BP_ASSERT( is_valid_client(c_ip) );
- return m_client.at(c_ip-m_min_client_ip);
- }
-
- bool is_client_available (uint32_t c_ip) {
- CClientInfoBase* client = get_client_by_ip(c_ip);
- if (client) {
- return client->is_client_available();
- }
- return false;
- }
-
- uint16_t GenerateOneClientPort(uint32_t c_ip) {
- CClientInfoBase* client = get_client_by_ip(c_ip);
- uint16_t port;
- port = client->get_new_free_port();
-
- //printf(" alloc extra %x %d \n",c_ip,port);
- if (port==ILLEGAL_PORT) {
- m_port_allocation_error++;
- }
- m_active_alloc++;
- return (port);
- }
-
uint32_t getErrorAllocationCounter(){
- return ( m_port_allocation_error );
+ uint32_t total_alloc_error = 0;
+ FOREACH(m_client_pool) {
+ total_alloc_error += m_client_pool[i]->m_port_allocation_error;
+ }
+ return (total_alloc_error);
+ }
+
+ bool add_client_pool(IP_DIST_t client_dist,
+ uint32_t min_client,
+ uint32_t max_client,
+ double l_flow,
+ double t_cps,
+ CFlowGenList* fl_list,
+ uint16_t tcp_aging,
+ uint16_t udp_aging);
+ bool add_server_pool(IP_DIST_t server_dist,
+ uint32_t min_server,
+ uint32_t max_server,
+ double l_flow,
+ double t_cps,
+ bool is_bundling);
+ CClientPool* get_client_pool(uint8_t idx) {
+ return m_client_pool[idx];
+ }
+ uint8_t get_client_pool_num() {
+ return m_client_pool.size();
+ }
+ uint8_t get_server_pool_num() {
+ return m_server_pool.size();
+ }
+ CServerPoolBase* get_server_pool(uint8_t idx) {
+ return m_server_pool[idx];
}
-
-private:
- void return_all_client_ports();
-
-
- void Generate_client_server();
-
-
private:
- std::vector<CClientInfoBase*> m_client;
-
uint32_t m_id;
- bool m_was_generated;
- bool m_was_init;
-
- IP_DIST_t m_client_dist;
-
- uint32_t m_cur_server_ip;
- uint32_t m_cur_client_ip;
- // min-max client ip +1 and get back
- uint32_t m_min_client_ip;
- uint32_t m_max_client_ip;
-
- // min max server ip ( random )
- uint32_t m_min_server_ip;
- uint32_t m_max_server_ip;
-
uint32_t m_thread_id;
-
- // result of the generator FIXME need to clean this
- uint32_t m_client_ip;
- uint32_t m_result_client_ip;
- uint32_t m_result_server_ip;
- uint32_t m_active_alloc;
- mac_addr_align_t m_result_client_mac;
- uint16_t m_result_client_port;
-
- uint32_t m_port_allocation_error;
-
+ std::vector<CClientPool*> m_client_pool;
+ std::vector<CServerPoolBase*> m_server_pool;
+ bool m_was_init;
+ bool has_mac_mapping;
};
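A sketch of the new multi-pool setup on CTupleGeneratorSmart, assuming the declared (but not shown here) add_client_pool()/add_server_pool() implementations honor these signatures, and that fl_list is the caller's CFlowGenList (taken here as possibly NULL when no MAC mapping is used - an assumption):

void tuple_gen_setup_sketch(CFlowGenList *fl_list) {
    CTupleGeneratorSmart gen;
    gen.Create(0 /*id*/, 0 /*thread_id*/, false /*has_mac*/);
    gen.add_client_pool(cdSEQ_DIST, 0x10000001, 0x100000ff,
                        10.0, 100.0, fl_list, 2 /*tcp aging*/, 5 /*udp aging*/);
    gen.add_server_pool(cdSEQ_DIST, 0x30000001, 0x300000ff,
                        10.0, 100.0, true /*is_bundling*/);

    CTupleBase t;
    gen.get_client_pool(0)->GenerateTuple(t);   // fills client ip/idx/mac/port
    gen.get_server_pool(0)->GenerateTuple(t);   // fills server ip/idx
    gen.FreePort(0, t.getClientId(), t.getClientPort());
    gen.Delete();
}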
-
class CTupleTemplateGeneratorSmart {
public:
    /* simple tuple generation for one flow */
void GenerateTuple(CTupleBase & tuple){
if (m_w==1) {
            /* new client for each generated tuple */
- m_gen->GenerateTuple(tuple);
- m_cache_client_ip=tuple.getClient();
+ m_client_gen->GenerateTuple(tuple);
+ m_server_gen->GenerateTuple(tuple);
+ m_cache_client_ip = tuple.getClient();
+ m_cache_client_idx = tuple.getClientId();
}else{
if (m_cnt==0) {
- m_gen->GenerateTuple(tuple);
+ m_client_gen->GenerateTuple(tuple);
+ m_server_gen->GenerateTuple(tuple);
m_cache_client_ip = tuple.getClient();
+ m_cache_client_idx = tuple.getClientId();
m_cache_server_ip = tuple.getServer();
+ m_cache_server_idx = tuple.getServerId();
}else{
tuple.setServer(m_cache_server_ip);
+ tuple.setServerId(m_cache_server_idx);
tuple.setClient(m_cache_client_ip);
- tuple.setClientPort( m_gen->GenerateOneClientPort(m_cache_client_ip));
+ tuple.setClientId(m_cache_client_idx);
+ tuple.setClientPort(
+ m_client_gen->GenerateOnePort(m_cache_client_idx));
}
m_cnt++;
if (m_cnt>=m_w) {
@@ -493,7 +739,7 @@ public:
}
uint16_t GenerateOneSourcePort(){
- return ( m_gen->GenerateOneClientPort(m_cache_client_ip) );
+ return ( m_client_gen->GenerateOnePort(m_cache_client_idx) );
}
inline uint32_t GetThreadId(){
@@ -502,12 +748,13 @@ public:
public:
- bool Create( CTupleGeneratorSmart * gen
- ){
+ bool Create( CTupleGeneratorSmart * gen,uint8_t c_pool,uint8_t s_pool){
m_gen=gen;
m_is_single_server=false;
m_server_ip=0;
SetW(1);
+ m_client_gen = gen->get_client_pool(c_pool);
+ m_server_gen = gen->get_server_pool(s_pool);
return (true);
}
@@ -535,15 +782,21 @@ public:
return (m_is_single_server);
}
+ CTupleGeneratorSmart * get_gen() {
+ return m_gen;
+ }
private:
CTupleGeneratorSmart * m_gen;
- bool m_is_single_server;
+ CClientPool * m_client_gen;
+ CServerPoolBase * m_server_gen;
uint16_t m_w;
uint16_t m_cnt;
uint32_t m_server_ip;
uint32_t m_cache_client_ip;
+ uint32_t m_cache_client_idx;
uint32_t m_cache_server_ip;
-
+ uint32_t m_cache_server_idx;
+ bool m_is_single_server;
};
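A sketch of wiring a template generator to one client pool and one server pool by index; assumes SetW() is the public window setter already used inside Create(). With W>1 the cached client/server pair is reused and only a fresh source port is drawn:

void template_gen_sketch(CTupleGeneratorSmart *gen, CTupleBase &t) {
    CTupleTemplateGeneratorSmart tmpl_gen;
    tmpl_gen.Create(gen, 0 /*client pool idx*/, 0 /*server pool idx*/);
    tmpl_gen.SetW(4);                // reuse each client/server pair for 4 tuples
    for (int i = 0; i < 8; i++) {
        tmpl_gen.GenerateTuple(t);   // new pair at i==0 and i==4, cached otherwise
    }
}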
@@ -559,61 +812,70 @@ private:
- dual_interface_mask : 1.0.0.0 // each dual ports will add this to the pool of clients
#endif
-struct CTupleGenYamlInfo {
- CTupleGenYamlInfo(){
- m_client_dist=cdSEQ_DIST;
- m_clients_ip_start =0x11000000;
- m_clients_ip_end =0x21000000;
-
- m_servers_ip_start = 0x30000000;
- m_servers_ip_end = 0x40000000;
- m_number_of_clients_per_gb=10;
- m_min_clients=100;
- m_dual_interface_mask=0x10000000;
- m_tcp_aging_sec=2;
- m_udp_aging_sec=5;
- }
-
- IP_DIST_t m_client_dist;
- uint32_t m_clients_ip_start;
- uint32_t m_clients_ip_end;
-
- uint32_t m_servers_ip_start;
- uint32_t m_servers_ip_end;
+struct CTupleGenPoolYaml {
+ IP_DIST_t m_dist;
+ uint32_t m_ip_start;
+ uint32_t m_ip_end;
uint32_t m_number_of_clients_per_gb;
uint32_t m_min_clients;
uint32_t m_dual_interface_mask;
uint16_t m_tcp_aging_sec; /* 0 means there is no aging */
uint16_t m_udp_aging_sec;
+ std::string m_name;
+ bool m_is_bundling;
+ public:
+ uint32_t getTotalIps(void){
+ return ( m_ip_end-m_ip_start+1);
+ }
+ uint32_t getDualMask() {
+ return m_dual_interface_mask;
+ }
+ uint32_t get_ip_start() {
+ return m_ip_start;
+ }
+ bool is_valid(uint32_t num_threads,bool is_plugins);
+ void Dump(FILE *fd);
+};
+
+struct CTupleGenYamlInfo {
+ std::vector<CTupleGenPoolYaml> m_client_pool;
+ std::vector<CTupleGenPoolYaml> m_server_pool;
+
public:
- void Dump(FILE *fd);
- uint32_t getTotalClients(void){
- return ( m_clients_ip_end-m_clients_ip_start+1);
+ bool is_valid(uint32_t num_threads,bool is_plugins);
+ uint8_t get_server_pool_id(std::string name){
+ for (uint8_t i=0;i<m_server_pool.size();i++) {
+ if (m_server_pool[i].m_name==name)
+ return i;
+ }
+ return 0;
}
- uint32_t getTotalServers(void){
- return ( m_servers_ip_end-m_servers_ip_start+1);
+
+ uint8_t get_client_pool_id(std::string name){
+ for (uint8_t i=0;i<m_client_pool.size();i++) {
+ if (m_client_pool[i].m_name==name)
+ return i;
+ }
+ return 0;
}
+};
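A small sketch of resolving pools by name through the YAML info; "default" is a hypothetical pool name, and an unknown name currently falls back to index 0:

void pool_lookup_sketch(CTupleGenYamlInfo &info,
                        CTupleGeneratorSmart &gen,
                        CTupleBase &t) {
    uint8_t c_id = info.get_client_pool_id("default");  // hypothetical name
    uint8_t s_id = info.get_server_pool_id("default");
    gen.get_client_pool(c_id)->GenerateTuple(t);
    gen.get_server_pool(s_id)->GenerateTuple(t);
}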
- bool is_valid(uint32_t num_threads,bool is_plugins);
-};
+void operator >> (const YAML::Node& node, CTupleGenPoolYaml & fi) ;
void operator >> (const YAML::Node& node, CTupleGenYamlInfo & fi) ;
-struct CClientPortion {
- uint32_t m_client_start;
- uint32_t m_client_end;
- uint32_t m_server_start;
- uint32_t m_server_end;
+struct CIpPortion {
+ uint32_t m_ip_start;
+ uint32_t m_ip_end;
};
-
-void split_clients(uint32_t thread_id,
- uint32_t total_threads,
+void split_ips(uint32_t thread_id,
+ uint32_t total_threads,
uint32_t dual_port_id,
- CTupleGenYamlInfo & fi,
- CClientPortion & portion);
+ CTupleGenPoolYaml& poolinfo,
+ CIpPortion & portion);