-rwxr-xr-x  VERSION  3
-rwxr-xr-x  linux_dpdk/ws_main.py  18
-rw-r--r--  scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py  3
-rwxr-xr-x  scripts/automation/regression/misc_methods.py  8
-rwxr-xr-x  scripts/automation/regression/outer_packages.py  4
-rw-r--r--  scripts/automation/regression/setups/trex09/config.yaml  31
-rw-r--r--  scripts/automation/regression/setups/trex14/benchmark.yaml  26
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_client_pkg_test.py  31
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_general_test.py  38
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_imix_test.py  38
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_client_test.py  294
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_rx_test.py  32
-rwxr-xr-x  scripts/automation/regression/stateless_tests/trex_client_pkg_test.py  31
-rwxr-xr-x  scripts/automation/regression/trex_unit_test.py  61
-rwxr-xr-x  scripts/automation/trex_control_plane/client/outer_packages.py  31
-rwxr-xr-x  scripts/automation/trex_control_plane/client/trex_client.py  1209
-rw-r--r--  scripts/automation/trex_control_plane/doc/_templates/layout.html  17
-rwxr-xr-x  scripts/automation/trex_control_plane/doc/conf.py  2
-rw-r--r--  scripts/automation/trex_control_plane/doc_stl/_templates/layout.html  17
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/doc_stl/api/client_code.rst  36
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/doc_stl/api/field_engine.rst  14
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/doc_stl/api/index.rst  0
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/doc_stl/api/profile_code.rst  8
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/doc_stl/api/scapy_builder.rst  10
-rw-r--r--  scripts/automation/trex_control_plane/doc_stl/conf.py  2
-rw-r--r--  scripts/automation/trex_control_plane/doc_stl/index.rst  12
-rwxr-xr-x  scripts/automation/trex_control_plane/server/trex_launch_thread.py  4
-rwxr-xr-x  scripts/automation/trex_control_plane/server/trex_server.py  15
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/examples/stf_example.py  54
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/examples/stf_path.py  4
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/CCustomLogger.py (renamed from scripts/automation/trex_control_plane/stf/CCustomLogger.py)  0
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/__init__.py (renamed from scripts/automation/trex_control_plane/stf/__init__.py)  0
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/external_packages.py (renamed from scripts/automation/trex_control_plane/stf/external_packages.py)  0
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/general_utils.py (renamed from scripts/automation/trex_control_plane/stf/general_utils.py)  0
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/outer_packages.py (renamed from scripts/automation/trex_control_plane/stf/outer_packages.py)  10
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/text_opts.py (renamed from scripts/automation/trex_control_plane/stf/text_opts.py)  0
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py (renamed from scripts/automation/trex_control_plane/stf/trex_client.py)  62
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_daemon_server.py (renamed from scripts/automation/trex_control_plane/stf/trex_daemon_server.py)  0
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_exceptions.py (renamed from scripts/automation/trex_control_plane/stf/trex_exceptions.py)  0
-rw-r--r--  scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status.py (renamed from scripts/automation/trex_control_plane/stf/trex_status.py)  0
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status_e.py (renamed from scripts/automation/trex_control_plane/stf/trex_status_e.py)  0
-rw-r--r--  scripts/automation/trex_control_plane/stl/console/stl_path.py  7
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/console/trex_console.py  10
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_imix.py  19
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py  11
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_path.py  7
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_profile.py  3
-rw-r--r--  scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py  3
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py  293
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py  2
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py  25
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py  185
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py  80
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py  5
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py  43
-rwxr-xr-x [-rw-r--r--]  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py  117
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py  2
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py  2
-rw-r--r--  scripts/external_libs/texttable-0.8.4/texttable.py  5
-rwxr-xr-x  scripts/find_python.sh  65
-rw-r--r--  scripts/ko/4.2.3-300.fc23.x86_64/igb_uio.ko  bin 0 -> 249256 bytes
-rwxr-xr-x  scripts/run_functional_tests  46
-rwxr-xr-x  scripts/run_regression  2
-rwxr-xr-x  scripts/stl-sim  18
-rw-r--r--  scripts/stl/flow_stats.py  6
-rw-r--r--  scripts/stl/udp_1pkt_pcap.py  5
-rw-r--r--  scripts/stl/udp_1pkt_simple_burst.py  24
-rw-r--r--  scripts/stl/udp_3pkt_pcap.py  9
-rw-r--r--  scripts/stl/yaml/imix_1pkt_vm_minus.yaml  3
-rwxr-xr-x  scripts/t-rex-64  8
-rwxr-xr-x  scripts/trex-console  18
-rwxr-xr-x  src/bp_sim.h  411
-rwxr-xr-x  src/common/Network/Packet/EthernetHeader.h  3
-rwxr-xr-x  src/common/basic_utils.cpp  23
-rwxr-xr-x  src/common/basic_utils.h  4
-rw-r--r--  src/flow_stat.cpp  312
-rw-r--r--  src/flow_stat.h  21
-rw-r--r--  src/flow_stat_parser.cpp  67
-rw-r--r--  src/flow_stat_parser.h  36
-rw-r--r--  src/gtest/trex_stateless_gtest.cpp  2
-rw-r--r--  src/internal_api/trex_platform_api.h  5
-rw-r--r--  src/latency.h  1
-rwxr-xr-x  src/main.cpp  4
-rw-r--r--  src/main_dpdk.cpp  69
-rw-r--r--  src/main_dpdk.h  1
-rw-r--r--  src/rpc-server/commands/trex_rpc_cmd_general.cpp  44
-rw-r--r--  src/rpc-server/commands/trex_rpc_cmd_stream.cpp  27
-rw-r--r--  src/rpc-server/commands/trex_rpc_cmds.h  76
-rw-r--r--  src/rpc-server/trex_rpc_cmd.cpp  46
-rw-r--r--  src/rpc-server/trex_rpc_cmd_api.h  39
-rw-r--r--  src/rpc-server/trex_rpc_cmds_table.cpp  3
-rw-r--r--  src/rpc-server/trex_rpc_exception_api.h  10
-rw-r--r--  src/sim/trex_sim_stateless.cpp  1
-rw-r--r--  src/stateless/cp/trex_api_class.h  110
-rw-r--r--  src/stateless/cp/trex_exception.h  41
-rw-r--r--  src/stateless/cp/trex_stateless.cpp  3
-rw-r--r--  src/stateless/cp/trex_stateless.h  39
-rw-r--r--  src/stateless/cp/trex_stateless_port.cpp  40
-rw-r--r--  src/stateless/cp/trex_stateless_port.h  12
-rw-r--r--  src/stateless/cp/trex_stream.cpp  9
-rw-r--r--  src/stateless/cp/trex_streams_compiler.cpp  6
-rw-r--r--  src/stateless/dp/trex_stateless_dp_core.cpp  1
-rw-r--r--  src/stateless/rx/trex_stateless_rx_core.cpp  43
-rw-r--r--  src/stateless/rx/trex_stateless_rx_core.h  8
104 files changed, 2433 insertions, 2232 deletions
diff --git a/VERSION b/VERSION
index a08a8dd4..b893cb46 100755
--- a/VERSION
+++ b/VERSION
@@ -1,4 +1,5 @@
-v1.98
+v1.99
+
diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py
index 6310a8c2..b1e1f6bd 100755
--- a/linux_dpdk/ws_main.py
+++ b/linux_dpdk/ws_main.py
@@ -963,17 +963,25 @@ def release(bld, custom_dir = None):
os.system("cp -rv %s %s " %(src_file,dest_file));
os.system("chmod 755 %s " %(dest_file));
+ rel=get_build_num ()
+
# create client package
os.system('mkdir -p %s/trex_client/external_libs' % exec_p)
- os.system('touch %s/trex_client/__init__.py' % exec_p)
for ext_lib in client_external_libs:
os.system('cp ../scripts/external_libs/%s %s/trex_client/external_libs/ -r' % (ext_lib, exec_p))
os.system('cp ../scripts/automation/trex_control_plane/stf %s/trex_client/ -r' % exec_p)
- os.system('cp ../scripts/automation/trex_control_plane/stl/trex_stl_lib %s/trex_client/stl -r' % exec_p)
- shutil.make_archive(os.path.join(exec_p, 'trex_client'), 'gztar', exec_p, 'trex_client')
+ os.system('cp ../scripts/automation/trex_control_plane/stl %s/trex_client/ -r' % exec_p)
+ with open('%s/trex_client/stl/examples/stl_path.py' % exec_p) as f:
+ stl_path_content = f.read()
+ if 'STL_PROFILES_PATH' not in stl_path_content:
+ raise Exception('Could not find STL_PROFILES_PATH in stl/examples/stl_path.py')
+ stl_path_content = re.sub('STL_PROFILES_PATH.*?\n', "STL_PROFILES_PATH = os.path.join(os.pardir, 'profiles')\n", stl_path_content)
+ with open('%s/trex_client/stl/examples/stl_path.py' % exec_p, 'w') as f:
+ f.write(stl_path_content)
+ os.system('cp ../scripts/stl %s/trex_client/stl/profiles -r' % exec_p)
+ shutil.make_archive(os.path.join(exec_p, 'trex_client_%s' % rel), 'gztar', exec_p, 'trex_client')
os.system('rm -r %s/trex_client' % exec_p)
- rel=get_build_num ()
os.system('cd %s/..;tar --exclude="*.pyc" -zcvf %s/%s.tar.gz %s' %(exec_p,os.getcwd(),rel,rel))
os.system("mv %s/%s.tar.gz %s" % (os.getcwd(),rel,exec_p));
@@ -1007,7 +1015,7 @@ def publish_ext(bld, custom_source = None):
print cmd
os.system( cmd )
os.system("ssh -i %s -l %s %s 'cd %s/release/;rm be_latest; ln -P %s be_latest' " %(Env().get_trex_ex_web_key(),Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path(),release_name))
- #os.system("ssh -i %s -l %s %s 'cd %s/release/;rm latest; ln -P %s latest' " %(Env().get_trex_ex_web_key(),Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path(),release_name))
+ os.system("ssh -i %s -l %s %s 'cd %s/release/;rm latest; ln -P %s latest' " %(Env().get_trex_ex_web_key(),Env().get_trex_ex_web_user(),Env().get_trex_ex_web_srv(),Env().get_trex_ex_web_path(),release_name))
# publish latest passed regression package (or custom commit from --publish_commit option) as be_latest to trex-tgn.cisco.com and internal wiki
def publish_both(self):
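A note on the release() change above: the client package now ships the full stateless tree (not only trex_stl_lib), bundles the traffic profiles from scripts/stl under stl/profiles, rewrites STL_PROFILES_PATH in the packaged stl/examples/stl_path.py to point at that bundle, and names the archive after the build number. A rough, illustrative sanity check of the resulting tarball layout (paths derived from the cp calls above; check_archive is a hypothetical helper, not part of ws_main.py):

import tarfile

# Contents one would expect inside trex_client_<version>.tar.gz after this change.
EXPECTED_PREFIXES = [
    'trex_client/external_libs',             # client_external_libs copied in
    'trex_client/stf',                       # stateful API (trex_stf_lib + examples)
    'trex_client/stl',                       # full stateless tree, not only trex_stl_lib
    'trex_client/stl/examples/stl_path.py',  # STL_PROFILES_PATH rewritten to os.path.join(os.pardir, 'profiles')
    'trex_client/stl/profiles',              # copy of scripts/stl
]

def check_archive(path):
    """Rough sanity check of a built client package (illustrative only)."""
    with tarfile.open(path, 'r:gz') as tar:
        names = tar.getnames()
        for prefix in EXPECTED_PREFIXES:
            assert any(n.startswith(prefix) for n in names), 'missing %s' % prefix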
diff --git a/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
index a3fcd091..5d34e5df 100644
--- a/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
+++ b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
@@ -238,7 +238,8 @@ class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
d= pkt_builder.get_vm_data()
except CTRexPacketBuildException as e:
- assert_equal(str(e), "[errcode:-11] 'variable my_valn_err does not exists '")
+ error=str(e)
+ assert_equal(error.find("[errcode:-11]"),0);
def test_simple_tuple_gen(self):
vm = STLScVmRaw( [ STLVmTupleGen(name="tuple"), # define tuple gen
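The relaxed assertion above pins only the error-code prefix rather than the full message, so wording changes in the builder's error string no longer break the test. For example:

# Message in the shape of the old hard-coded expectation:
error = "[errcode:-11] 'variable my_valn_err does not exists '"
assert error.find("[errcode:-11]") == 0   # passes regardless of the wording after the code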
diff --git a/scripts/automation/regression/misc_methods.py b/scripts/automation/regression/misc_methods.py
index 54e3ba5d..6873622e 100755
--- a/scripts/automation/regression/misc_methods.py
+++ b/scripts/automation/regression/misc_methods.py
@@ -25,12 +25,12 @@ def mix_string (str):
# executes given command, returns tuple (return_code, stdout, stderr)
def run_command(cmd, background = False):
if background:
- print('Running command in background:', cmd)
+ print('Running command in background: %s' % cmd)
with open(os.devnull, 'w') as tempf:
subprocess.Popen(shlex.split(cmd), stdin=tempf, stdout=tempf, stderr=tempf)
return (None,)*3
else:
- print('Running command:', cmd)
+ print('Running command: %s' % cmd)
proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if stdout:
@@ -42,8 +42,8 @@ def run_command(cmd, background = False):
return (proc.returncode, stdout, stderr)
-def run_remote_command(host, command_string, background = False):
- cmd = 'ssh -tt %s \'sudo sh -ec "%s"\'' % (host, command_string)
+def run_remote_command(host, command_string, background = False, timeout = 20):
+ cmd = 'ssh -tt %s \'sudo%s sh -ec "%s"\'' % (host, (' timeout %s' % timeout) if (timeout and not background) else '', command_string)
return run_command(cmd, background)
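With the new timeout parameter, foreground remote commands are wrapped in `timeout` on the remote side, while background commands are left untouched. A quick illustration of the rendered command (host and command are placeholders):

host = 'csi-trex-09'                          # placeholder
command_string = './trex_daemon_server show'  # placeholder
timeout = 20
background = False

cmd = 'ssh -tt %s \'sudo%s sh -ec "%s"\'' % (
    host,
    (' timeout %s' % timeout) if (timeout and not background) else '',
    command_string)
# -> ssh -tt csi-trex-09 'sudo timeout 20 sh -ec "./trex_daemon_server show"'
# With background=True (or timeout=None) the ' timeout 20' part is omitted.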
diff --git a/scripts/automation/regression/outer_packages.py b/scripts/automation/regression/outer_packages.py
index bec9fe21..61ddc5cd 100755
--- a/scripts/automation/regression/outer_packages.py
+++ b/scripts/automation/regression/outer_packages.py
@@ -9,6 +9,7 @@ if not TREX_PATH or not os.path.isfile('%s/trex_daemon_server' % TREX_PATH):
TREX_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir))
PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(TREX_PATH, 'external_libs'))
PATH_TO_CTRL_PLANE = os.path.abspath(os.path.join(TREX_PATH, 'automation', 'trex_control_plane'))
+PATH_STF_API = os.path.abspath(os.path.join(PATH_TO_CTRL_PLANE, 'stf'))
PATH_STL_API = os.path.abspath(os.path.join(PATH_TO_CTRL_PLANE, 'stl'))
@@ -57,8 +58,9 @@ def import_module_list(modules_list):
def import_nightly_modules ():
sys.path.append(TREX_PATH)
- sys.path.append(PATH_TO_CTRL_PLANE)
+ #sys.path.append(PATH_TO_CTRL_PLANE)
sys.path.append(PATH_STL_API)
+ sys.path.append(PATH_STF_API)
import_module_list(NIGHTLY_MODULES)
diff --git a/scripts/automation/regression/setups/trex09/config.yaml b/scripts/automation/regression/setups/trex09/config.yaml
index 9820ce6e..585ca17a 100644
--- a/scripts/automation/regression/setups/trex09/config.yaml
+++ b/scripts/automation/regression/setups/trex09/config.yaml
@@ -35,33 +35,4 @@
trex:
hostname : csi-trex-09
cores : 2
-
-router:
- model : 1RU
- hostname : ASR1001_T-Rex
- ip_address : 10.56.199.247
- image : asr1001-universalk9.BLD_V155_1_S_XE314_THROTTLE_LATEST_20141112_090734-std.bin
- #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150121_110036-std.bin
- #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150324_100047-std.bin
- line_password : lab
- en_password : lab
- mgmt_interface : GigabitEthernet0/0/0
- clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg
- intf_masking : 255.255.255.0
- ipv6_mask : 64
- interfaces :
- - client :
- name : GigabitEthernet0/0/1
- src_mac_addr : 0000.0001.0000
- dest_mac_addr : 0000.0001.0000
- server :
- name : GigabitEthernet0/0/2
- src_mac_addr : 0000.0001.0000
- dest_mac_addr : 0000.0001.0000
- vrf_name : null
-
-tftp:
- hostname : ats-asr-srv-1
- ip_address : 10.56.128.23
- root_dir : /auto/avc-devtest/
- images_path : /images/1RU/
+ modes : ['loopback']
diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml
index 28e287bf..e602ad1a 100644
--- a/scripts/automation/regression/setups/trex14/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex14/benchmark.yaml
@@ -12,18 +12,20 @@ test_nbar_simple :
exp_max_latency : 1000
nbar_classification:
- rtp : 35.24
- http : 30.41
- oracle_sqlnet : 11.3
- ssl : 6.03
+ http : 32.58
+ rtp-audio : 21.21
+ oracle_sqlnet : 11.41
+ exchange : 11.22
+ rtp : 11.2
citrix : 5.65
- exchange : 4.99
+ rtsp : 2.87
dns : 1.96
smtp : 0.57
pop3 : 0.37
+ ssl : 0.28
sctp : 0.13
sip : 0.09
- unknown : 3.22
+ unknown : 0.45
test_rx_check :
multiplier : 13
@@ -109,15 +111,15 @@ test_static_routing_imix_asymmetric:
exp_latency : 1
test_ipv6_simple :
- multiplier : 18
- cores : 4
+ multiplier : 9
+ cores : 2
cpu_to_core_ratio : 30070000
cpu2core_custom_dev: YES
cpu2core_dev : 0.07
test_rx_check_sfr:
- multiplier : 15
+ multiplier : 10
cores : 3
rx_sample_rate : 16
# allow 0.03% errors, bad router
@@ -127,11 +129,11 @@ test_rx_check_http:
multiplier : 15000
cores : 1
rx_sample_rate : 16
- # allow 0.03% errors, bad router
+ # allow 0.03% errors, bad routerifconfig
error_tolerance : 0.03
test_rx_check_sfr_ipv6:
- multiplier : 15
+ multiplier : 10
cores : 3
rx_sample_rate : 16
# allow 0.03% errors, bad router
@@ -165,5 +167,5 @@ test_rx_check_http_negative:
test_jumbo:
- multiplier : 28
+ multiplier : 17
cores : 1
diff --git a/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py
new file mode 100755
index 00000000..e2040e73
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+from trex_general_test import CTRexGeneral_Test, CTRexScenario
+from misc_methods import run_command
+from nose.plugins.attrib import attr
+
+
+@attr('client_package')
+class CTRexClientPKG_Test(CTRexGeneral_Test):
+ """This class tests TRex client package"""
+
+ def setUp(self):
+ CTRexGeneral_Test.setUp(self)
+ self.unzip_client_package()
+
+ def run_client_package_stf_example(self, python_version):
+ commands = [
+ 'cd %s' % CTRexScenario.scripts_path,
+ 'source find_python.sh --%s' % python_version,
+ 'which $PYTHON',
+ 'cd trex_client/stf/examples',
+ '$PYTHON stf_example.py -s %s' % self.configuration.trex['trex_name'],
+ ]
+ return_code, _, stderr = run_command("bash -ce '%s'" % '; '.join(commands))
+ if return_code:
+ self.fail('Error in running stf_example using %s: %s' % (python_version, stderr))
+
+ def test_client_python2(self):
+ self.run_client_package_stf_example(python_version = 'python2')
+
+ def test_client_python3(self):
+ self.run_client_package_stf_example(python_version = 'python3')
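The commands list above is joined into a single bash -ce invocation, so a failure at any step aborts the test with its stderr. With placeholder values it expands to roughly:

scripts_path = '/path/to/trex/scripts'   # placeholder for CTRexScenario.scripts_path
server = 'csi-trex-09'                   # placeholder for self.configuration.trex['trex_name']

commands = [
    'cd %s' % scripts_path,
    'source find_python.sh --python2',
    'which $PYTHON',
    'cd trex_client/stf/examples',
    '$PYTHON stf_example.py -s %s' % server,
]
shell_cmd = "bash -ce '%s'" % '; '.join(commands)
# -> bash -ce 'cd /path/to/trex/scripts; source find_python.sh --python2; which $PYTHON;
#    cd trex_client/stf/examples; $PYTHON stf_example.py -s csi-trex-09'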
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index 21f5d8aa..5a13e5ff 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -37,6 +37,7 @@ import threading
from tests_exceptions import *
from platform_cmd_link import *
import unittest
+from glob import glob
def setUpModule(module):
pass
@@ -224,13 +225,21 @@ class CTRexGeneral_Test(unittest.TestCase):
#trex_exp_gbps = trex_exp_rate/(10**9)
if check_latency:
- # check that max latency does not exceed 1 msec in regular setup or 100ms in VM
- allowed_latency = 9999999 if self.is_VM else 1000
+ # check that max latency does not exceed 1 msec
+ if self.configuration.trex['trex_name'] == '10.56.217.210': # temporary workaround for latency issue in kiwi02, remove it ASAP. http://trex-tgn.cisco.com/youtrack/issue/trex-194
+ allowed_latency = 8000
+ elif self.is_VM:
+ allowed_latency = 9999999
+ else: # no excuses, check 1ms
+ allowed_latency = 1000
if max(trex_res.get_max_latency().values()) > allowed_latency:
self.fail('LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency)
- # check that avg latency does not exceed 1 msec in regular setup or 3ms in VM
- allowed_latency = 9999999 if self.is_VM else 1000
+ # check that avg latency does not exceed 1 msec
+ if self.is_VM:
+ allowed_latency = 9999999
+ else: # no excuses, check 1ms
+ allowed_latency = 1000
if max(trex_res.get_avg_latency().values()) > allowed_latency:
self.fail('LatencyError: Average latency exceeds %s (usec)' % allowed_latency)
@@ -253,6 +262,21 @@ class CTRexGeneral_Test(unittest.TestCase):
# e.args += ('T-Rex has crashed!')
# raise
+ def unzip_client_package(self):
+ client_pkg_files = glob('%s/trex_client*.tar.gz' % CTRexScenario.scripts_path)
+ if not len(client_pkg_files):
+ raise Exception('Could not find client package')
+ if len(client_pkg_files) > 1:
+ raise Exception('Found more than one client packages')
+ client_pkg_name = os.path.basename(client_pkg_files[0])
+ if not os.path.exists('%s/trex_client' % CTRexScenario.scripts_path):
+ print('\nUnzipping package')
+ return_code, _, stderr = misc_methods.run_command("sh -ec 'cd %s; tar -xzf %s'" % (CTRexScenario.scripts_path, client_pkg_name))
+ if return_code:
+ raise Exception('Could not untar the client package: %s' % stderr)
+ else:
+ print('\nClient package is untarred')
+
# We encountered error, don't fail the test immediately
def fail(self, reason = 'Unknown error'):
print 'Error: %s' % reason
@@ -291,14 +315,12 @@ class CTRexGeneral_Test(unittest.TestCase):
# def test_isInitialized(self):
# assert CTRexScenario.is_init == True
def tearDown(self):
- if not self.trex:
- return
- if not self.trex.is_idle():
+ if self.trex and not self.trex.is_idle():
print 'Warning: TRex is not idle at tearDown, trying to stop it.'
self.trex.force_kill(confirm = False)
if not self.skipping:
# print server logs of test run
- if CTRexScenario.server_logs:
+ if self.trex and CTRexScenario.server_logs:
try:
print termstyle.green('\n>>>>>>>>>>>>>>> Daemon log <<<<<<<<<<<<<<<')
daemon_log = self.trex.get_trex_daemon_log()
diff --git a/scripts/automation/regression/stateful_tests/trex_imix_test.py b/scripts/automation/regression/stateful_tests/trex_imix_test.py
index 43dea900..c93480c3 100755
--- a/scripts/automation/regression/stateful_tests/trex_imix_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_imix_test.py
@@ -4,6 +4,7 @@ from CPlatform import CStaticRouteConfig
from tests_exceptions import *
#import sys
import time
+from nose.tools import nottest
class CTRexIMIX_Test(CTRexGeneral_Test):
"""This class defines the IMIX testcase of the T-Rex traffic generator"""
@@ -50,20 +51,18 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
# the name intentionally not matches nose default pattern, including the test should be specified explicitly
def dummy(self):
- self.assertEqual(1, 2, 'boo')
- self.assertEqual(2, 2, 'boo')
- self.assertEqual(2, 3, 'boo')
- #print ''
- #print dir(self)
- #print locals()
- #print ''
- #print_r(unittest.TestCase)
- #print ''
- #print_r(self)
- print ''
- #print unittest.TestCase.shortDescription(self)
- #self.skip("I'm just a dummy test")
+ ret = self.trex.start_trex(
+ c = 1,
+ m = 1,
+ p = True,
+ nc = True,
+ d = 5,
+ f = 'cap2/imix_fast_1g.yaml',
+ l = 1000,
+ trex_development = True)
+ trex_res = self.trex.sample_to_run_finish()
+ print trex_res
def test_routing_imix (self):
# test initializtion
@@ -166,7 +165,7 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
self.check_CPU_benchmark(trex_res)
- def test_jumbo(self):
+ def test_jumbo(self, duration = 100):
if not self.is_loopback:
self.router.configure_basic_interfaces(mtu = 9216)
self.router.config_pbr(mode = "config")
@@ -179,7 +178,7 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
m = mult,
p = True,
nc = True,
- d = 100,
+ d = duration,
f = 'cap2/imix_9k.yaml',
l = 1000)
@@ -193,6 +192,15 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res, minimal_cpu = 0, maximal_cpu = 10)
+ # don't include it to regular nose search
+ @nottest
+ def test_warm_up(self):
+ try:
+ self._testMethodName = 'test_jumbo'
+ self.test_jumbo(duration = 30)
+ except Exception as e:
+ print('Ignoring this error: %s' % e)
+
def tearDown(self):
CTRexGeneral_Test.tearDown(self)
# remove nbar config here
diff --git a/scripts/automation/regression/stateless_tests/stl_client_test.py b/scripts/automation/regression/stateless_tests/stl_client_test.py
new file mode 100644
index 00000000..01a90250
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_client_test.py
@@ -0,0 +1,294 @@
+#!/router/bin/python
+from stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+import glob
+
+
+def get_error_in_percentage (golden, value):
+ return abs(golden - value) / float(golden)
+
+def get_stl_profiles ():
+ profiles_path = os.path.join(CTRexScenario.scripts_path, 'stl/')
+ profiles = glob.glob(profiles_path + "/*.py") + glob.glob(profiles_path + "yaml/*.yaml")
+
+ return profiles
+
+
+class STLClient_Test(CStlGeneral_Test):
+ """Tests for stateless client"""
+
+ def setUp(self):
+ CStlGeneral_Test.setUp(self)
+
+ if self.is_virt_nics:
+ self.percentage = 5
+ self.pps = 500
+ else:
+ self.percentage = 50
+ self.pps = 50000
+
+ # strict mode is only for 'wire only' connection
+ self.strict = True if self.is_loopback and not self.is_virt_nics else False
+
+ assert 'bi' in CTRexScenario.stl_ports_map
+
+ self.c = CTRexScenario.stl_trex
+
+ self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]
+
+ self.c.connect()
+ self.c.reset(ports = [self.tx_port, self.rx_port])
+
+ self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
+ self.profiles = get_stl_profiles()
+
+
+ @classmethod
+ def tearDownClass(cls):
+ # connect back at end of tests
+ if not cls.is_connected():
+ CTRexScenario.stl_trex.connect()
+
+
+ def verify (self, expected, got):
+ if self.strict:
+ assert expected == got
+ else:
+ assert get_error_in_percentage(expected, got) < 0.05
+
+
+ def test_basic_connect_disconnect (self):
+ try:
+ self.c.connect()
+ assert self.c.is_connected(), 'client should be connected'
+ self.c.disconnect()
+ assert not self.c.is_connected(), 'client should be disconnected'
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_basic_single_burst (self):
+
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXSingleBurst(total_pkts = 100,
+ percentage = self.percentage)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ self.verify(100, stats[self.tx_port]['opackets'])
+ self.verify(100, stats[self.rx_port]['ipackets'])
+
+ self.verify(100, stats[self.rx_port]['opackets'])
+ self.verify(100, stats[self.tx_port]['ipackets'])
+
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ #
+ def test_basic_multi_burst (self):
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXMultiBurst(pkts_per_burst = 10,
+ count = 20,
+ percentage = self.percentage)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ self.verify(200, stats[self.tx_port]['opackets'])
+ self.verify(200, stats[self.rx_port]['ipackets'])
+
+ self.verify(200, stats[self.rx_port]['opackets'])
+ self.verify(200, stats[self.tx_port]['ipackets'])
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ #
+ def test_basic_cont (self):
+ pps = self.pps
+ duration = 0.1
+ golden = pps * duration
+
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXCont(pps = pps)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port], duration = duration)
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ # cont. with duration should be quite precise - 5% error is relaxed enough
+
+ assert get_error_in_percentage(stats[self.tx_port]['opackets'], golden) < 0.05
+ assert get_error_in_percentage(stats[self.rx_port]['ipackets'], golden) < 0.05
+
+ assert get_error_in_percentage(stats[self.rx_port]['opackets'], golden) < 0.05
+ assert get_error_in_percentage(stats[self.tx_port]['ipackets'], golden) < 0.05
+
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_stress_connect_disconnect (self):
+ try:
+ for i in range(0, 100):
+ self.c.connect()
+ assert self.c.is_connected(), 'client should be connected'
+ self.c.disconnect()
+ assert not self.c.is_connected(), 'client should be disconnected'
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+
+ def test_stress_tx (self):
+ try:
+ s1 = STLStream(name = 'stress',
+ packet = self.pkt,
+ mode = STLTXCont(percentage = self.percentage))
+
+ # add both streams to ports
+ self.c.add_streams([s1], ports = [self.tx_port, self.rx_port])
+ for i in range(0, 100):
+
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.pause(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_paused(), 'port should be paused'
+ assert self.c.ports[self.rx_port].is_paused(), 'port should be paused'
+
+ self.c.resume(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.stop(ports = [self.tx_port, self.rx_port])
+
+ assert not self.c.ports[self.tx_port].is_active(), 'port should be idle'
+ assert not self.c.ports[self.rx_port].is_active(), 'port should be idle'
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_all_profiles (self):
+ # need promiscuous for this one...
+ if self.is_virt_nics or not self.is_loopback:
+ self.skip('skipping profile tests for virtual NICs')
+ return
+
+ try:
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = True)
+
+ for profile in self.profiles:
+ print("now testing profile {0}...\n".format(profile))
+
+ p1 = STLProfile.load(profile, port_id = self.tx_port)
+ p2 = STLProfile.load(profile, port_id = self.rx_port)
+
+ if p1.has_flow_stats():
+ print("profile needs RX caps - skipping...")
+ continue
+
+ self.c.add_streams(p1, ports = self.tx_port)
+ self.c.add_streams(p2, ports = self.rx_port)
+
+ self.c.clear_stats()
+
+ self.c.start(ports = [self.tx_port, self.rx_port], mult = "30%")
+ time.sleep(100 / 1000.0)
+
+ if p1.is_pauseable() and p2.is_pauseable():
+ self.c.pause(ports = [self.tx_port, self.rx_port])
+ time.sleep(100 / 1000.0)
+
+ self.c.resume(ports = [self.tx_port, self.rx_port])
+ time.sleep(100 / 1000.0)
+
+ self.c.stop(ports = [self.tx_port, self.rx_port])
+
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats, '{0} - no stats for TX port'.format(profile)
+ assert self.rx_port in stats, '{0} - no stats for RX port'.format(profile)
+
+ assert stats[self.tx_port]['opackets'] == stats[self.rx_port]['ipackets'], '{0} - number of TX packets differ from RX packets'.format(profile)
+
+ assert stats[self.rx_port]['opackets'] == stats[self.tx_port]['ipackets'], '{0} - number of TX packets differ from RX packets'.format(profile)
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ finally:
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False)
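Most checks in this file go through verify(), which demands exact counter matches only on 'wire only' loopback setups with physical NICs and otherwise tolerates up to 5% deviation. A tiny worked example of the relaxed path:

def get_error_in_percentage(golden, value):
    return abs(golden - value) / float(golden)

# strict mode (loopback over physical NICs): expected == got must hold exactly
# relaxed mode: anything within 5% of the golden value passes
assert get_error_in_percentage(100, 97) < 0.05        # 3% error  -> accepted
assert not (get_error_in_percentage(100, 90) < 0.05)  # 10% error -> rejected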
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
index 90082c59..bb682b6c 100644
--- a/scripts/automation/regression/stateless_tests/stl_rx_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -7,6 +7,9 @@ class STLRX_Test(CStlGeneral_Test):
"""Tests for RX feature"""
def setUp(self):
+ per_driver_params = {"rte_vmxnet3_pmd": [1, 50], "rte_ixgbe_pmd": [30, 5000], "rte_i40e_pmd": [80, 5000],
+ "rte_igb_pmd": [80, 500], "rte_em_pmd": [1, 50], "rte_virtio_pmd": [1, 50]}
+
CStlGeneral_Test.setUp(self)
assert 'bi' in CTRexScenario.stl_ports_map
@@ -14,11 +17,13 @@ class STLRX_Test(CStlGeneral_Test):
self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]
- cap = self.c.get_port_info(ports = self.rx_port)[0]['rx']['caps']
+ port_info = self.c.get_port_info(ports = self.rx_port)[0]
+ cap = port_info['rx']['caps']
if cap != 1:
self.skip('port {0} does not support RX'.format(self.rx_port))
-
+ self.rate_percent = per_driver_params[port_info['driver']][0]
+ self.total_pkts = per_driver_params[port_info['driver']][1]
self.c.reset(ports = [self.tx_port, self.rx_port])
self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
@@ -68,14 +73,14 @@ class STLRX_Test(CStlGeneral_Test):
# one simple stream on TX --> RX
def test_one_stream(self):
- total_pkts = 500000
+ total_pkts = self.total_pkts * 10
try:
s1 = STLStream(name = 'rx',
packet = self.pkt,
flow_stats = STLFlowStats(pg_id = 5),
mode = STLTXSingleBurst(total_pkts = total_pkts,
- percentage = 80
+ percentage = self.rate_percent
))
# add both streams to ports
@@ -94,21 +99,26 @@ class STLRX_Test(CStlGeneral_Test):
# one simple stream on TX --> RX
def test_multiple_streams(self):
- total_pkts = 500000
+ num_streams = 10
+ total_pkts = self.total_pkts / num_streams
+ if total_pkts == 0:
+ total_pkts = 1
+ percent = self.rate_percent / num_streams
+ if percent == 0:
+ percent = 1
try:
streams = []
exp = []
# 10 identical streams
- for pg_id in range(1, 10):
+ for pg_id in range(1, num_streams):
streams.append(STLStream(name = 'rx {0}'.format(pg_id),
packet = self.pkt,
flow_stats = STLFlowStats(pg_id = pg_id),
- mode = STLTXSingleBurst(total_pkts = total_pkts * pg_id,
- pps = total_pkts * pg_id)))
+ mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent)))
- exp.append({'pg_id': pg_id, 'total_pkts': total_pkts * pg_id, 'pkt_len': self.pkt.get_pkt_len()})
+ exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': self.pkt.get_pkt_len()})
# add both streams to ports
self.c.add_streams(streams, ports = [self.tx_port])
@@ -120,14 +130,14 @@ class STLRX_Test(CStlGeneral_Test):
assert False , '{0}'.format(e)
def test_1_stream_many_iterations (self):
- total_pkts = 50000
+ total_pkts = self.total_pkts
try:
s1 = STLStream(name = 'rx',
packet = self.pkt,
flow_stats = STLFlowStats(pg_id = 5),
mode = STLTXSingleBurst(total_pkts = total_pkts,
- percentage = 80
+ percentage = self.rate_percent
))
# add both streams to ports
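The per_driver_params table added in setUp keys on the DPDK driver name reported in port_info and supplies [rate_percent, total_pkts] per NIC type. For example (values taken from the dict above):

per_driver_params = {"rte_vmxnet3_pmd": [1, 50], "rte_ixgbe_pmd": [30, 5000], "rte_i40e_pmd": [80, 5000],
                     "rte_igb_pmd": [80, 500], "rte_em_pmd": [1, 50], "rte_virtio_pmd": [1, 50]}

driver = 'rte_ixgbe_pmd'                              # as reported in port_info['driver']
rate_percent, total_pkts = per_driver_params[driver]  # 30% of line rate, 5000 packets
# test_one_stream bursts total_pkts * 10 = 50000 packets at rate_percent, while
# test_multiple_streams divides both the packet count and the rate by the number
# of streams so weaker drivers are not overloaded.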
diff --git a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
new file mode 100755
index 00000000..6e2de230
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+from stl_general_test import CStlGeneral_Test, CTRexScenario
+from misc_methods import run_command
+from nose.plugins.attrib import attr
+
+
+@attr('client_package')
+class CTRexClientPKG_Test(CStlGeneral_Test):
+ """This class tests TRex client package"""
+
+ def setUp(self):
+ CStlGeneral_Test.setUp(self)
+ self.unzip_client_package()
+
+ def run_client_package_stf_example(self, python_version):
+ commands = [
+ 'cd %s' % CTRexScenario.scripts_path,
+ 'source find_python.sh --%s' % python_version,
+ 'which $PYTHON',
+ 'cd trex_client/stl/examples',
+ '$PYTHON stl_imix.py -s %s' % self.configuration.trex['trex_name'],
+ ]
+ return_code, _, stderr = run_command("bash -ce '%s'" % '; '.join(commands))
+ if return_code:
+ self.fail('Error in running stf_example using %s: %s' % (python_version, stderr))
+
+ def test_client_python2(self):
+ self.run_client_package_stf_example(python_version = 'python2')
+
+ def test_client_python3(self):
+ self.run_client_package_stf_example(python_version = 'python3')
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
index fb666382..2be3c051 100755
--- a/scripts/automation/regression/trex_unit_test.py
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -36,8 +36,8 @@ import misc_methods
from rednose import RedNose
import termstyle
from trex import CTRexScenario
-from stf.trex_client import *
-from stf.trex_exceptions import *
+from trex_stf_lib.trex_client import *
+from trex_stf_lib.trex_exceptions import *
from trex_stl_lib.api import *
import trex
import socket
@@ -67,10 +67,10 @@ STATEFUL_STOP_COMMAND = './trex_daemon_server stop; sleep 1; ./trex_daemon_serve
STATEFUL_RUN_COMMAND = 'rm /var/log/trex/trex_daemon_server.log; ./trex_daemon_server start; sleep 2; ./trex_daemon_server show'
TREX_FILES = ('_t-rex-64', '_t-rex-64-o', '_t-rex-64-debug', '_t-rex-64-debug-o')
-def trex_remote_command(trex_data, command, background = False, from_scripts = True):
+def trex_remote_command(trex_data, command, background = False, from_scripts = True, timeout = 20):
if from_scripts:
- return misc_methods.run_remote_command(trex_data['trex_name'], ('cd %s; ' % CTRexScenario.scripts_path)+ command, background)
- return misc_methods.run_remote_command(trex_data['trex_name'], command, background)
+ return misc_methods.run_remote_command(trex_data['trex_name'], ('cd %s; ' % CTRexScenario.scripts_path)+ command, background, timeout)
+ return misc_methods.run_remote_command(trex_data['trex_name'], command, background, timeout)
# 1 = running, 0 - not running
def check_trex_running(trex_data):
@@ -141,6 +141,15 @@ class CTRexTestConfiguringPlugin(Plugin):
parser.add_option('--no-ssh', '--no_ssh', action="store_true", default = False,
dest="no_ssh",
help="Flag to disable any ssh to server machine.")
+ parser.add_option('--collect', action="store_true", default = False,
+ dest="collect",
+ help="Alias to --collect-only.")
+ parser.add_option('--warmup', action="store_true", default = False,
+ dest="warmup",
+ help="Warm up the system for stateful: run 30 seconds 9k imix test without check of results.")
+ parser.add_option('--test-client-package', '--test_client_package', action="store_true", default = False,
+ dest="test_client_package",
+ help="Includes tests of client package.")
def configure(self, options, conf):
self.collect_only = options.collect_only
@@ -183,7 +192,7 @@ class CTRexTestConfiguringPlugin(Plugin):
new_path = '/tmp/trex-scripts'
rsync_template = 'rm -rf /tmp/trex-scripts; mkdir -p %s; rsync -Lc %s /tmp; tar -mxzf /tmp/%s -C %s; mv %s/v*.*/* %s'
rsync_command = rsync_template % (new_path, self.pkg, os.path.basename(self.pkg), new_path, new_path, new_path)
- return_code, stdout, stderr = trex_remote_command(self.configuration.trex, rsync_command, from_scripts = False)
+ return_code, stdout, stderr = trex_remote_command(self.configuration.trex, rsync_command, from_scripts = False, timeout = 300)
if return_code:
print('Failed copying')
sys.exit(-1)
@@ -272,6 +281,12 @@ if __name__ == "__main__":
nose_argv = ['', '-s', '-v', '--exe', '--rednose', '--detailed-errors']
+ test_client_package = False
+ if '--test-client-package' in sys.argv:
+ test_client_package = True
+
+ if '--collect' in sys.argv:
+ sys.argv.append('--collect-only')
if '--collect-only' in sys.argv: # this is a user trying simply to view the available tests. no need xunit.
CTRexScenario.is_test_list = True
xml_arg = ''
@@ -324,23 +339,31 @@ if __name__ == "__main__":
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_functional.xml')]
result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin])
if len(CTRexScenario.test_types['stateful_tests']):
- additional_args = ['--stf'] + CTRexScenario.test_types['stateful_tests']
+ additional_args = ['--stf']
+ if '--warmup' in sys.argv:
+ additional_args.append('stateful_tests/trex_imix_test.py:CTRexIMIX_Test.test_warm_up')
+ additional_args += CTRexScenario.test_types['stateful_tests']
+ if not test_client_package:
+ additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateful.xml')]
result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
if len(CTRexScenario.test_types['stateless_tests']):
additional_args = ['--stl', 'stateless_tests/stl_general_test.py:STLBasic_Test.test_connectivity'] + CTRexScenario.test_types['stateless_tests']
+ if not test_client_package:
+ additional_args.extend(['-a', '!client_package'])
if xml_arg:
additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateless.xml')]
result = nose.run(argv = nose_argv + additional_args, addplugins = [red_nose, config_plugin]) and result
- except Exception as e:
- result = False
- print(e)
+ #except Exception as e:
+ # result = False
+ # print(e)
finally:
save_setup_info()
- if (result == True and not CTRexScenario.is_test_list):
- print(termstyle.green("""
+ if not CTRexScenario.is_test_list:
+ if result == True:
+ print(termstyle.green("""
..::''''::..
.;'' ``;.
:: :: :: ::
@@ -358,8 +381,18 @@ if __name__ == "__main__":
/_/ /_/ |_/___/___(_)
"""))
- sys.exit(0)
- sys.exit(-1)
+ sys.exit(0)
+ else:
+ print(termstyle.red("""
+ /\_/\
+ ( o.o )
+ > ^ <
+
+This cat is sad, test failed.
+ """))
+ sys.exit(-1)
+
+
diff --git a/scripts/automation/trex_control_plane/client/outer_packages.py b/scripts/automation/trex_control_plane/client/outer_packages.py
deleted file mode 100755
index 2565be08..00000000
--- a/scripts/automation/trex_control_plane/client/outer_packages.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/router/bin/python
-
-import sys
-import os
-
-
-CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
-ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
-PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs'))
-
-CLIENT_MODULES = ['enum34-1.0.4',
- 'jsonrpclib-pelix-0.2.5',
- 'termstyle',
- 'rpc_exceptions-0.1',
- 'yaml-3.11'
- ]
-
-
-def import_client_modules():
- sys.path.append(ROOT_PATH)
- import_module_list(CLIENT_MODULES)
-
-
-def import_module_list(modules_list):
- assert(isinstance(modules_list, list))
- for p in modules_list:
- full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
- fix_path = os.path.normcase(full_path) # (CURRENT_PATH+p)
- sys.path.insert(1, full_path)
-
-import_client_modules()
diff --git a/scripts/automation/trex_control_plane/client/trex_client.py b/scripts/automation/trex_control_plane/client/trex_client.py
deleted file mode 100755
index dfd3dc01..00000000
--- a/scripts/automation/trex_control_plane/client/trex_client.py
+++ /dev/null
@@ -1,1209 +0,0 @@
-#!/router/bin/python
-
-import sys
-import os
-
-if __package__:
- from . import outer_packages
-else:
- import outer_packages
-
-import jsonrpclib
-from jsonrpclib import ProtocolError, AppError
-from common.trex_status_e import TRexStatus
-from common.trex_exceptions import *
-from common.trex_exceptions import exception_handler
-from client_utils.general_utils import *
-from enum import Enum
-import socket
-import errno
-import time
-import re
-import copy
-import binascii
-from collections import deque, OrderedDict
-from json import JSONDecoder
-from distutils.util import strtobool
-
-
-
-class CTRexClient(object):
- """
- This class defines the client side of the RESTfull interaction with TRex
- """
-
- def __init__(self, trex_host, max_history_size = 100, trex_daemon_port = 8090, trex_zmq_port = 4500, verbose = False):
- """
- Instantiate a TRex client object, and connecting it to listening daemon-server
-
- :parameters:
- trex_host : str
- a string of the TRex ip address or hostname.
- max_history_size : int
- a number to set the maximum history size of a single TRex run. Each sampling adds a new item to history.
-
- default value : **100**
- trex_daemon_port : int
- the port number on which the trex-daemon server can be reached
-
- default value: **8090**
- trex_zmq_port : int
- the port number on which trex's zmq module will interact with daemon server
-
- default value: **4500**
- verbose : bool
- sets a verbose output on supported class method.
-
- default value : **False**
-
- :raises:
- socket errors, in case server could not be reached.
-
- """
- try:
- self.trex_host = socket.gethostbyname(trex_host)
- except: # give it another try
- self.trex_host = socket.gethostbyname(trex_host)
- self.trex_daemon_port = trex_daemon_port
- self.trex_zmq_port = trex_zmq_port
- self.seq = None
- self.verbose = verbose
- self.result_obj = CTRexResult(max_history_size)
- self.decoder = JSONDecoder()
- self.trex_server_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = trex_daemon_port )
- self.__verbose_print("Connecting to TRex @ {trex_path} ...".format( trex_path = self.trex_server_path ) )
- self.history = jsonrpclib.history.History()
- self.server = jsonrpclib.Server(self.trex_server_path, history = self.history)
- self.check_server_connectivity()
- self.__verbose_print("Connection established successfully!")
- self._last_sample = time.time()
- self.__default_user = get_current_user()
-
-
- def add (self, x, y):
- try:
- return self.server.add(x,y)
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def start_trex (self, f, d, block_to_success = True, timeout = 40, user = None, trex_development = False, **trex_cmd_options):
- """
- Request to start a TRex run on server.
-
- :parameters:
- f : str
- a path (on server) for the injected traffic data (.yaml file)
- d : int
- the desired duration of the test. must be at least 30 seconds long.
- block_to_success : bool
- determine if this method blocks until TRex changes state from 'Starting' to either 'Idle' or 'Running'
-
- default value : **True**
- timeout : int
- maximum time (in seconds) to wait in blocking state until TRex changes state from 'Starting' to either 'Idle' or 'Running'
-
- default value: **40**
- user : str
- the identity of the the run issuer.
- trex_cmd_options : key, val
- sets desired TRex options using key=val syntax, separated by comma.
- for keys with no value, state key=True
-
- :return:
- **True** on success
-
- :raises:
- + :exc:`ValueError`, in case 'd' parameter inserted with wrong value.
- + :exc:`trex_exceptions.TRexError`, in case one of the trex_cmd_options raised an exception at server.
- + :exc:`trex_exceptions.TRexInUseError`, in case TRex is already taken.
- + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for another user than the one trying start TRex.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- user = user or self.__default_user
- try:
- d = int(d)
- if d < 30 and not trex_development: # test duration should be at least 30 seconds, unless trex_development flag is specified.
- raise ValueError
- except ValueError:
- raise ValueError('d parameter must be integer, specifying how long TRex run, and must be larger than 30 secs.')
-
- trex_cmd_options.update( {'f' : f, 'd' : d} )
- if not trex_cmd_options.get('l'):
- self.result_obj.latency_checked = False
- if 'k' in trex_cmd_options:
- timeout += int(trex_cmd_options['k']) # during 'k' seconds TRex stays in 'Starting' state
-
- self.result_obj.clear_results()
- try:
- issue_time = time.time()
- retval = self.server.start_trex(trex_cmd_options, user, block_to_success, timeout)
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- if retval!=0:
- self.seq = retval # update seq num only on successful submission
- return True
- else: # TRex is has been started by another user
- raise TRexInUseError('TRex is already being used by another user or process. Try again once TRex is back in IDLE state.')
-
- def stop_trex (self):
- """
- Request to stop a TRex run on server.
-
- The request is only valid if the stop initiator is the same client as the TRex run initiator.
-
- :parameters:
- None
-
- :return:
- + **True** on successful termination
- + **False** if request issued but TRex wasn't running.
-
- :raises:
- + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex ir running but started by another user.
- + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination).
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- try:
- return self.server.stop_trex(self.seq)
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def force_kill (self, confirm = True):
- """
- Force killing of running TRex process (if exists) on the server.
-
- .. tip:: This method is a safety method and **overrides any running or reserved resources**, and as such isn't designed to be used on a regular basis.
- Always consider using :func:`trex_client.CTRexClient.stop_trex` instead.
-
- In the end of this method, TRex will return to IDLE state with no reservation.
-
- :parameters:
- confirm : bool
- Prompt a user confirmation before continue terminating TRex session
-
- :return:
- + **True** on successful termination
- + **False** otherwise.
-
- :raises:
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- if confirm:
- prompt = "WARNING: This will terminate active TRex session indiscriminately.\nAre you sure? "
- sys.stdout.write('%s [y/n]\n' % prompt)
- while True:
- try:
- if strtobool(user_input().lower()):
- break
- else:
- return
- except ValueError:
- sys.stdout.write('Please respond with \'y\' or \'n\'.\n')
- try:
- return self.server.force_trex_kill()
- except AppError as err:
- # Silence any kind of application errors- by design
- return False
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def wait_until_kickoff_finish(self, timeout = 40):
- """
- Block the client application until TRex changes state from 'Starting' to either 'Idle' or 'Running'
-
- The request is only valid if the stop initiator is the same client as the TRex run initiator.
-
- :parameters:
- timeout : int
- maximum time (in seconds) to wait in blocking state until TRex changes state from 'Starting' to either 'Idle' or 'Running'
-
- :return:
- + **True** on successful termination
- + **False** if request issued but TRex wasn't running.
-
- :raises:
- + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination).
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- .. note:: Exceptions are throws only when start_trex did not block in the first place, i.e. `block_to_success` parameter was set to `False`
-
- """
-
- try:
- return self.server.wait_until_kickoff_finish(timeout)
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def is_running (self, dump_out = False):
- """
- Poll for TRex running status.
-
- If TRex is running, a history item will be added into result_obj and processed.
-
- .. tip:: This method is especially useful for iterating until TRex run is finished.
-
- :parameters:
- dump_out : dict
- if passed, the pointer object is cleared and the latest dump stored in it.
-
- :return:
- + **True** if TRex is running.
- + **False** if TRex is not running.
-
- :raises:
- + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination).
- + :exc:`TypeError`, in case JSON stream decoding error.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- try:
- res = self.get_running_info()
- if res == {}:
- return False
- if (dump_out != False) and (isinstance(dump_out, dict)): # save received dump to given 'dump_out' pointer
- dump_out.clear()
- dump_out.update(res)
- return True
- except TRexWarning as err:
- if err.code == -12: # TRex is either still at 'Starting' state or in Idle state, however NO error occured
- return False
- except TRexException:
- raise
- except ProtocolError as err:
- raise
- finally:
- self.prompt_verbose_data()
-
- def is_idle (self):
- """
- Poll for TRex running status, check if TRex is in Idle state.
-
- :parameters:
- None
-
- :return:
- + **True** if TRex is idle.
- + **False** if TRex is starting or running.
-
- :raises:
- + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination).
- + :exc:`TypeError`, in case JSON stream decoding error.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- try:
- if self.get_running_status()['state'] == TRexStatus.Idle:
- return True
- return False
- except TRexException:
- raise
- except ProtocolError as err:
- raise
- finally:
- self.prompt_verbose_data()
-
- def get_trex_files_path (self):
- """
- Fetches the local path in which files are stored when pushed to TRex server from client.
-
- :parameters:
- None
-
- :return:
- string representation of the desired path
-
- .. note:: The returned path represents a path on the TRex server **local machine**
-
- :raises:
- ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- try:
- return (self.server.get_files_path() + '/')
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def get_running_status (self):
- """
- Fetches the current TRex status.
-
- If available, a verbose data will accompany the state itself.
-
- :parameters:
- None
-
- :return:
- dictionary with 'state' and 'verbose' keys.
-
- :raises:
- ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- try:
- res = self.server.get_running_status()
- res['state'] = TRexStatus(res['state'])
- return res
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def get_running_info (self):
- """
- Performs single poll of TRex running data and process it into the result object (named `result_obj`).
-
- .. tip:: This method will throw an exception if TRex isn't running. Always consider using :func:`trex_client.CTRexClient.is_running` which handles a single poll operation in safer manner.
-
- :parameters:
- None
-
- :return:
- dictionary containing the most updated data dump from TRex.
-
- :raises:
- + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination).
- + :exc:`TypeError`, in case JSON stream decoding error.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- if not self.is_query_relevance():
- # if requested in timeframe smaller than the original sample rate, return the last known data without interacting with server
- return self.result_obj.get_latest_dump()
- else:
- try:
- latest_dump = self.decoder.decode( self.server.get_running_info() ) # latest dump is not a dict, but json string. decode it.
- self.result_obj.update_result_data(latest_dump)
- return latest_dump
- except TypeError as inst:
- raise TypeError('JSON-RPC data decoding failed. Check out incoming JSON stream.')
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def sample_until_condition (self, condition_func, time_between_samples = 5):
- """
- Automatically sets ongoing sampling of TRex data, with sampling rate described by time_between_samples.
-
- On each fetched dump, the condition_func is applied on the result objects, and if returns True, the sampling will stop.
-
- :parameters:
- condition_func : function
- function that operates on result_obj and checks if a condition has been met
-
- .. note:: `condition_finc` is applied on `CTRexResult` object. Make sure to design a relevant method.
- time_between_samples : int
- determines the time between each sample of the server
-
- default value : **5**
-
- :return:
- the first result object (see :class:`CTRexResult` for further details) of the TRex run on which the condition has been met.
-
- :raises:
- + :exc:`UserWarning`, in case the condition_func method condition hasn't been met
- + :exc:`trex_exceptions.TRexIncompleteRunError`, in case one of failed TRex run (unexpected termination).
- + :exc:`TypeError`, in case JSON stream decoding error.
- + ProtocolError, in case of error in JSON-RPC protocol.
- + :exc:`Exception`, in case the condition_func suffered from any kind of exception
-
- """
- # make sure TRex is running. raise exceptions here if any
- self.wait_until_kickoff_finish()
- try:
- while self.is_running():
- results = self.get_result_obj()
- if condition_func(results):
- # if condition satisfied, stop TRex and return result object
- self.stop_trex()
- return results
- time.sleep(time_between_samples)
- except TRexWarning:
- # means we're back to Idle state, and didn't meet our condition
- raise UserWarning("TRex results condition wasn't met during TRex run.")
- except Exception:
- # this could come from provided method 'condition_func'
- raise
-
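A minimal usage sketch of this sampling loop (host name and threshold are illustrative; it assumes the daemon is up and mirrors the CTRexClient usage shown in the stf_example.py script added later in this commit):

    client = CTRexClient('trex-host')   # illustrative daemon host
    client.start_trex(c = 1, m = 700, d = 60, f = 'cap2/http_simple.yaml')
    # condition_func receives a CTRexResult; stop as soon as any drops are observed
    first_hit = client.sample_until_condition(lambda res: (res.get_total_drops() or 0) > 0,
                                              time_between_samples = 2)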
- def sample_to_run_finish (self, time_between_samples = 5):
- """
- Automatically samples TRex data at the rate described by time_between_samples until the TRex run finishes.
-
- :parameters:
- time_between_samples : int
- determines the time between each sample of the server
-
- default value : **5**
-
- :return:
- the latest result object (see :class:`CTRexResult` for further details) with sampled data.
-
- :raises:
- + :exc:`UserWarning`, in case the condition_func method condition hasn't been met
- + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
- + :exc:`TypeError`, in case of a JSON stream decoding error.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- self.wait_until_kickoff_finish()
-
- try:
- while self.is_running():
- time.sleep(time_between_samples)
- except TRexWarning:
- pass
- results = self.get_result_obj()
- return results
-
- def sample_x_seconds (self, sample_time, time_between_samples = 5):
- """
- Automatically sets ongoing sampling of TRex data for sample_time seconds, with sampling rate described by time_between_samples.
- Does not stop the TRex afterwards!
-
- .. tip:: Useful for changing the device (Router, ASA etc.) configuration after a given time.
-
- :parameters:
- sample_time : int
- sample TRex for this number of seconds
-
- time_between_samples : int
- determines the time between each sample of the server
-
- default value : **5**
-
- :return:
- the first result object (see :class:`CTRexResult` for further details) of the TRex run after the given sample_time.
-
- :raises:
- + :exc:`UserWarning`, in case the TRex run ended before sample_time duration
- + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed TRex run (unexpected termination).
- + :exc:`TypeError`, in case of a JSON stream decoding error.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- # make sure TRex is running. raise exceptions here if any
- self.wait_until_kickoff_finish()
- elapsed_time = 0
- while self.is_running():
- if elapsed_time >= sample_time:
- return self.get_result_obj()
- time.sleep(time_between_samples)
- elapsed_time += time_between_samples
- raise UserWarning("TRex has stopped at %s seconds (before expected %s seconds)\nTry increasing test duration or decreasing sample_time" % (elapsed_time, sample_time))
-
- def get_result_obj (self, copy_obj = True):
- """
- Returns the result object of the trex_client instance.
-
- By default, returns a **copy** of the object (so that changes to the original object are masked).
-
- :parameters:
- copy_obj : bool
- False means that a reference to the original (possibly changing) object is passed
-
- default value : **True**
-
- :return:
- the latest result object (see :class:`CTRexResult` for further details) with sampled data.
-
- """
- if copy_obj:
- return copy.deepcopy(self.result_obj)
- else:
- return self.result_obj
-
- def is_reserved (self):
- """
- Checks if TRex is currently reserved to any user or not.
-
- :parameters:
- None
-
- :return:
- + **True** if TRex is reserved.
- + **False** otherwise.
-
- :raises:
- ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- try:
- return self.server.is_reserved()
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def get_trex_daemon_log (self):
- """
- Get TRex daemon log.
-
- :return:
- String representation of TRex daemon log
-
- :raises:
- + :exc:`trex_exceptions.TRexRequestDenied`, in case file could not be read.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- try:
- return binascii.a2b_base64(self.server.get_trex_daemon_log())
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def get_trex_log (self):
- """
- Get TRex CLI output log
-
- :return:
- String representation of TRex log
-
- :raises:
- + :exc:`trex_exceptions.TRexRequestDenied`, in case file could not be fetched at server side.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- try:
- return binascii.a2b_base64(self.server.get_trex_log())
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def get_trex_version (self):
- """
- Get TRex version details.
-
- :return:
- TRex details (Version, User, Date, Uuid, Git SHA) as an ordered dictionary
-
- :raises:
- + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex version could not be determined.
- + ProtocolError, in case of error in JSON-RPC protocol.
- + General Exception, in case one of the keys is missing in the response
- """
-
- try:
- version_dict = OrderedDict()
- result_lines = binascii.a2b_base64(self.server.get_trex_version()).split('\n')
- for line in result_lines:
- if not line:
- continue
- key, value = line.strip().split(':', 1)
- version_dict[key.strip()] = value.strip()
- for key in ('Version', 'User', 'Date', 'Uuid', 'Git SHA'):
- if key not in version_dict:
- raise Exception('get_trex_version: got server response without key: {0}'.format(key))
- return version_dict
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def reserve_trex (self, user = None):
- """
- Reserves the usage of TRex for a certain user.
-
- When TRex is reserved, no other user can start new TRex runs.
-
- :parameters:
- user : str
- a username of the desired owner of TRex
-
- default: currently logged-in user
-
- :return:
- **True** if reservation made successfully
-
- :raises:
- + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for a user other than the one trying to make the reservation.
- + :exc:`trex_exceptions.TRexInUseError`, in case TRex is currently running.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- username = user or self.__default_user
- try:
- return self.server.reserve_trex(user = username)
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def cancel_reservation (self, user = None):
- """
- Cancels a current reservation of TRex to a certain user.
-
- When TRex is reserved, no other user can start new TRex runs.
-
-
- :parameters:
- user : str
- a username of the desired owner of TRex
-
- default: currently logged-in user
-
- :return:
- + **True** if reservation canceled successfully,
- + **False** if there was no reservation at all.
-
- :raises:
- + :exc:`trex_exceptions.TRexRequestDenied`, in case TRex is reserved for a user other than the one trying to cancel the reservation.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
-
- username = user or self.__default_user
- try:
- return self.server.cancel_reservation(user = username)
- except AppError as err:
- self._handle_AppError_exception(err.args[0])
- except ProtocolError:
- raise
- finally:
- self.prompt_verbose_data()
-
- def push_files (self, filepaths):
- """
- Pushes a file (or a list of files) to be stored locally on the server.
-
- :parameters:
- filepaths : str or list
- a path to a file to be pushed to the server.
- if a list of paths is passed, all of them will be pushed to the server
-
- :return:
- + **True** if file(s) copied successfully.
- + **False** otherwise.
-
- :raises:
- + :exc:`IOError`, in case specified file wasn't found or could not be accessed.
- + ProtocolError, in case of error in JSON-RPC protocol.
-
- """
- paths_list = None
- if isinstance(filepaths, str):
- paths_list = [filepaths]
- elif isinstance(filepaths, list):
- paths_list = filepaths
- else:
- raise TypeError("filepaths argument must be of type str or list")
-
- for filepath in paths_list:
- try:
- if not os.path.exists(filepath):
- raise IOError(errno.ENOENT, "The requested `{fname}` file wasn't found. Operation aborted.".format(
- fname = filepath) )
- else:
- filename = os.path.basename(filepath)
- with open(filepath, 'rb') as f:
- file_content = f.read()
- self.server.push_file(filename, binascii.b2a_base64(file_content))
- finally:
- self.prompt_verbose_data()
- return True
-
- def is_query_relevance(self):
- """
- Checks whether enough time has passed between two consecutive server queries (asking for live running data).
-
- .. note:: The allowed minimum time between each two consecutive samples is 0.5 seconds.
-
- :parameters:
- None
-
- :return:
- + **True** if more than 0.5 seconds have passed since the last server query.
- + **False** otherwise.
-
- """
- cur_time = time.time()
- if cur_time-self._last_sample < 0.5:
- return False
- else:
- self._last_sample = cur_time
- return True
-
- def call_server_mathod_safely (self, method_to_call):
- try:
- return method_to_call()
- except socket.error as e:
- if e.errno == errno.ECONNREFUSED:
- raise SocketError(errno.ECONNREFUSED, "Connection from TRex server was refused. Please make sure the server is up.")
-
- def check_server_connectivity (self):
- """
- Checks for valid server connectivity.
- """
- try:
- socket.gethostbyname(self.trex_host)
- return self.server.connectivity_check()
- except socket.gaierror as e:
- raise socket.gaierror(e.errno, "Could not resolve server hostname. Please make sure hostname entered correctly.")
- except socket.error as e:
- if e.errno == errno.ECONNREFUSED:
- raise socket.error(errno.ECONNREFUSED, "Connection from TRex server was refused. Please make sure the server is up.")
- finally:
- self.prompt_verbose_data()
-
- def prompt_verbose_data(self):
- """
- This method prints any available verbose data, but only if the `verbose` option has been turned on.
- """
- if self.verbose:
- print ('\n')
- print ("(*) JSON-RPC request:", self.history.request)
- print ("(*) JSON-RPC response:", self.history.response)
-
- def __verbose_print(self, print_str):
- """
- This private method prints the `print_str` string only if the self.verbose flag is turned on.
-
- :parameters:
- print_str : str
- a string to be printed
-
- :returns:
- None
- """
- if self.verbose:
- print (print_str)
-
-
-
- def _handle_AppError_exception(self, err):
- """
- This private method triggers the generation of a TRex-dedicated exception in case a general ProtocolError has been raised.
- """
- # handle known exceptions based on known error codes.
- # if error code is not known, raise ProtocolError
- raise exception_handler.gen_exception(err)
-
-
-class CTRexResult(object):
- """
- A class containing all results received from TRex.
-
- On top of containing the results, this class offers easier data access and extended result-processing options
- """
- def __init__(self, max_history_size):
- """
- Instantiate a TRex result object
-
- :parameters:
- max_history_size : int
- a number to set the maximum history size of a single TRex run. Each sampling adds a new item to history.
-
- """
- self._history = deque(maxlen = max_history_size)
- self.clear_results()
- self.latency_checked = True
-
- def __repr__(self):
- return ("Is valid history? {arg}\n".format( arg = self.is_valid_hist() ) +
- "Done warmup? {arg}\n".format( arg = self.is_done_warmup() ) +
- "Expected tx rate: {arg}\n".format( arg = self.get_expected_tx_rate() ) +
- "Current tx rate: {arg}\n".format( arg = self.get_current_tx_rate() ) +
- "Maximum latency: {arg}\n".format( arg = self.get_max_latency() ) +
- "Average latency: {arg}\n".format( arg = self.get_avg_latency() ) +
- "Average window latency: {arg}\n".format( arg = self.get_avg_window_latency() ) +
- "Total drops: {arg}\n".format( arg = self.get_total_drops() ) +
- "Drop rate: {arg}\n".format( arg = self.get_drop_rate() ) +
- "History size so far: {arg}\n".format( arg = len(self._history) ) )
-
- def get_expected_tx_rate (self):
- """
- Fetches the expected TX rate in various units representation
-
- :parameters:
- None
-
- :return:
- dictionary containing the expected TX rate, where the key is the measurement units, and the value is the measurement value.
-
- """
- return self._expected_tx_rate
-
- def get_current_tx_rate (self):
- """
- Fetches the current TX rate in various units representation
-
- :parameters:
- None
-
- :return:
- dictionary containing the current TX rate, where the key is the measurement units, and the value is the measurement value.
-
- """
- return self._current_tx_rate
-
- def get_max_latency (self):
- """
- Fetches the maximum latency measured on each of the interfaces
-
- :parameters:
- None
-
- :return:
- dictionary containing the maximum latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
-
- """
- return self._max_latency
-
- def get_avg_latency (self):
- """
- Fetches the average latency measured on each of the interfaces from the start of TRex run
-
- :parameters:
- None
-
- :return:
- dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
-
- The `all` key represents the average of all interfaces' average
-
- """
- return self._avg_latency
-
- def get_avg_window_latency (self):
- """
- Fetches the average latency measured on each of the interfaces from all the samples currently stored in the window.
-
- :parameters:
- None
-
- :return:
- dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
-
- The `all` key represents the average of all interfaces' average
-
- """
- return self._avg_window_latency
-
- def get_total_drops (self):
- """
- Fetches the total number of drops identified from the moment the TRex run began.
-
- :parameters:
- None
-
- :return:
- total drops count (as int)
-
- """
- return self._total_drops
-
- def get_drop_rate (self):
- """
- Fetches the most recent drop rate in pkts/sec units.
-
- :parameters:
- None
-
- :return:
- current drop rate (as float)
-
- """
- return self._drop_rate
-
- def is_valid_hist (self):
- """
- Checks if the result object contains valid data.
-
- :parameters:
- None
-
- :return:
- + **True** if history is valid.
- + **False** otherwise.
-
- """
- return self.valid
-
- def set_valid_hist (self, valid_stat = True):
- """
- Sets the result object's validity status.
-
- :parameters:
- valid_stat : bool
- defines the validity status
-
- default value : **True**
-
- :return:
- None
-
- """
- self.valid = valid_stat
-
- def is_done_warmup (self):
- """
- Checks if the latest TRex results' TX rate indicates that TRex has reached its expected TX rate.
-
- :parameters:
- None
-
- :return:
- + **True** if expected TX-rate has been reached.
- + **False** otherwise.
-
- """
- return self._done_warmup
-
- def get_last_value (self, tree_path_to_key, regex = None):
- """
- A dynamic getter from the latest sampled data item stored in the result object.
-
- :parameters:
- tree_path_to_key : str
- defines a path to desired data.
-
- .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
- | Use '[i]' to access the i'th indexed object of an array.
-
- regex : str
- apply a regex to filter results out of a multiple-results set.
-
- Filter applies only to keys of dictionary type.
-
- default value : **None**
-
- :return:
- + a list of values relevant to the specified path
- + None if no results were fetched or the history isn't valid.
-
- """
- if not self.is_valid_hist():
- return None
- else:
- return CTRexResult.__get_value_by_path(self._history[len(self._history)-1], tree_path_to_key, regex)
-
- def get_value_list (self, tree_path_to_key, regex = None, filter_none = True):
- """
- A dynamic getter from all sampled data items stored in the result object.
-
- :parameters:
- tree_path_to_key : str
- defines a path to desired data.
-
- .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
- | Use '[i]' to access the i'th indexed object of an array.
-
- regex : str
- apply a regex to filter results out of a multiple-results set.
-
- Filter applies only to keys of dictionary type.
-
- default value : **None**
-
- filter_none : bool
- specify if None results should be filtered out or not.
-
- default value : **True**
-
- :return:
- + a list of values relevant to the specified path. Each item on the list refers to a single server sample.
- + None if no results were fetched or the history isn't valid.
- """
-
- if not self.is_valid_hist():
- return None
- else:
- raw_list = list( map(lambda x: CTRexResult.__get_value_by_path(x, tree_path_to_key, regex), self._history) )
- if filter_none:
- return list (filter(lambda x: x!=None, raw_list) )
- else:
- return raw_list
-
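A short sketch of the two getters above in action (the path and regex literals are taken from the stf_example.py script added later in this commit; `result` is assumed to hold at least one sample):

    tx_pkts_per_port = result.get_last_value('trex-global.data', 'opackets-*')
    cpu_util_history = result.get_value_list('trex-global.data.m_cpu_util')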
- def get_latest_dump(self):
- """
- A getter to the latest sampled data item stored in the result object.
-
- :parameters:
- None
-
- :return:
- + a dictionary of the latest data item
- + an empty dictionary if history is empty.
-
- """
- history_size = len(self._history)
- if history_size != 0:
- return self._history[len(self._history) - 1]
- else:
- return {}
-
- def update_result_data (self, latest_dump):
- """
- Integrates a `latest_dump` dictionary into the CTRexResult object.
-
- :parameters:
- latest_dump : dict
- a dictionary with the items desired to be integrated into the object history and stats
-
- :return:
- None
-
- """
- # add latest dump to history
- if latest_dump != {}:
- self._history.append(latest_dump)
- if not self.valid:
- self.valid = True
-
- # parse important fields and calculate averages and others
- if self._expected_tx_rate is None:
- # get the expected data only once since it doesn't change
- self._expected_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_expected_\w+")
-
- self._current_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_(?!expected_)\w+")
- if not self._done_warmup and self._expected_tx_rate is not None:
- # check for up to 2% change between expected and actual
- if (self._current_tx_rate['m_tx_bps']/self._expected_tx_rate['m_tx_expected_bps'] > 0.98):
- self._done_warmup = True
-
- # handle latency data
- if self.latency_checked:
- latency_pre = "trex-latency"
- self._max_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), "max-")#None # TBC
- # support old typo
- if self._max_latency is None:
- latency_pre = "trex-latecny"
- self._max_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), "max-")
-
- self._avg_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), "avg-")#None # TBC
- self._avg_latency = CTRexResult.__avg_all_and_rename_keys(self._avg_latency)
-
- avg_win_latency_list = self.get_value_list("{latency}.data".format(latency = latency_pre), "avg-")
- self._avg_window_latency = CTRexResult.__calc_latency_win_stats(avg_win_latency_list)
-
- tx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_tx_pkts")
- rx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_rx_pkts")
- if tx_pkts is not None and rx_pkts is not None:
- self._total_drops = tx_pkts - rx_pkts
- self._drop_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_rx_drop_bps")
-
- def clear_results (self):
- """
- Clears all results and sets the history's validity to `False`
-
- :parameters:
- None
-
- :return:
- None
-
- """
- self.valid = False
- self._done_warmup = False
- self._expected_tx_rate = None
- self._current_tx_rate = None
- self._max_latency = None
- self._avg_latency = None
- self._avg_window_latency = None
- self._total_drops = None
- self._drop_rate = None
- self._history.clear()
-
- @staticmethod
- def __get_value_by_path (dct, tree_path, regex = None):
- try:
- for i, p in re.findall(r'(\d+)|([\w|-]+)', tree_path):
- dct = dct[p or int(i)]
- if regex is not None and isinstance(dct, dict):
- res = {}
- for key,val in dct.items():
- match = re.match(regex, key)
- if match:
- res[key]=val
- return res
- else:
- return dct
- except (KeyError, TypeError):
- return None
-
- @staticmethod
- def __calc_latency_win_stats (latency_win_list):
- res = {'all' : None }
- port_dict = {'all' : []}
- list( map(lambda x: CTRexResult.__update_port_dict(x, port_dict), latency_win_list) )
-
- # finally, calculate averages for each list
- res['all'] = float("%.3f" % (sum(port_dict['all'])/float(len(port_dict['all']))) )
- port_dict.pop('all')
- for port, avg_list in port_dict.items():
- res[port] = float("%.3f" % (sum(avg_list)/float(len(avg_list))) )
-
- return res
-
- @staticmethod
- def __update_port_dict (src_avg_dict, dest_port_dict):
- all_list = src_avg_dict.values()
- dest_port_dict['all'].extend(all_list)
- for key, val in src_avg_dict.items():
- reg_res = re.match("avg-(\d+)", key)
- if reg_res:
- tmp_key = "port"+reg_res.group(1)
- if tmp_key in dest_port_dict:
- dest_port_dict[tmp_key].append(val)
- else:
- dest_port_dict[tmp_key] = [val]
-
- @staticmethod
- def __avg_all_and_rename_keys (src_dict):
- res = {}
- all_list = src_dict.values()
- res['all'] = float("%.3f" % (sum(all_list)/float(len(all_list))) )
- for key, val in src_dict.items():
- reg_res = re.match("avg-(\d+)", key)
- if reg_res:
- tmp_key = "port"+reg_res.group(1)
- res[tmp_key] = val # don't touch original fields values
- return res
-
-
-
-if __name__ == "__main__":
- pass
-
diff --git a/scripts/automation/trex_control_plane/doc/_templates/layout.html b/scripts/automation/trex_control_plane/doc/_templates/layout.html
new file mode 100644
index 00000000..8c1c709c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/_templates/layout.html
@@ -0,0 +1,17 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script>
+ (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+ m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+ })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+
+ ga('create', 'UA-75220362-1', 'auto');
+ ga('send', 'pageview');
+
+</script>
+{% endblock %}
+
+
diff --git a/scripts/automation/trex_control_plane/doc/conf.py b/scripts/automation/trex_control_plane/doc/conf.py
index a2641ffc..ec133a1c 100755
--- a/scripts/automation/trex_control_plane/doc/conf.py
+++ b/scripts/automation/trex_control_plane/doc/conf.py
@@ -20,7 +20,7 @@ import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('../stf'))
+sys.path.insert(0, os.path.abspath('../stf/trex_stf_lib'))
sys.path.insert(0, os.path.abspath('../client_utils'))
sys.path.insert(0, os.path.abspath('../examples'))
sys.path.insert(0, os.path.abspath('../common'))
diff --git a/scripts/automation/trex_control_plane/doc_stl/_templates/layout.html b/scripts/automation/trex_control_plane/doc_stl/_templates/layout.html
new file mode 100644
index 00000000..8c1c709c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc_stl/_templates/layout.html
@@ -0,0 +1,17 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script>
+ (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+ m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+ })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+
+ ga('create', 'UA-75220362-1', 'auto');
+ ga('send', 'pageview');
+
+</script>
+{% endblock %}
+
+
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst b/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst
index 8736e98d..4ae2b9fd 100644..100755
--- a/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst
+++ b/scripts/automation/trex_control_plane/doc_stl/api/client_code.rst
@@ -2,16 +2,26 @@
Client Module
==================
-TRex Client is an object to access TRex server. It is per user. Each user can own number of interfaces.
-Multi user can interact with one TRex server each user should own a different set of interfaces.
+The TRex Client provides access to the TRex server.
+
+**Client and interfaces**
+
+Multiple users can interact with one TRex server. Each user "owns" a different set of interfaces.
The protocol is JSON-RPC2 over ZMQ transport.
-The API has two type of API
+In addition to the Python API, a console-based API interface is also available.
+
+Python-like example::
+
+ c.start(ports = [0, 1], mult = "5mpps", duration = 10)
+
+Console-like example::
+
+ c.start_line (" -f stl/udp_1pkt_simple.py -m 10mpps --port 0 1 ")
-1. Normal API
-2. xx_line: this api get a line like the Console and parse it and call the low level api
-Example1::
+
+Example 1 - Typical Python API::
c = STLClient(username = "itay",server = "10.0.0.10", verbose_level = LoggerApi.VERBOSE_HIGH)
@@ -49,7 +59,8 @@ STLClient snippet
-----------------
-Example1::
+.. code-block:: python
+ :caption: Example 1: Minimal example of client interacting with the TRex server
c = STLClient()
@@ -75,8 +86,9 @@ Example1::
c.disconnect()
+.. code-block:: python
+ :caption: Example 2: Client can execute other functions while the TRex server is generating traffic
-Example2: wait while doing somthing::
c = STLClient()
try:
@@ -99,7 +111,10 @@ Example2: wait while doing somthing::
c.disconnect()
-Example3: Console like::
+
+.. code-block:: python
+ :caption: Example 3: Console-like API interface
+
def simple ():
@@ -148,7 +163,8 @@ Example3: Console like::
finally:
c.disconnect()
-Example4: Load profile from a file::
+
+Example 4: Load profile from a file::
def simple ():
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/field_engine.rst b/scripts/automation/trex_control_plane/doc_stl/api/field_engine.rst
index d134b0b9..541e195f 100644..100755
--- a/scripts/automation/trex_control_plane/doc_stl/api/field_engine.rst
+++ b/scripts/automation/trex_control_plane/doc_stl/api/field_engine.rst
@@ -2,19 +2,19 @@
Field Engine modules
=======================
-The Field Engine (FE) has limited number of instructions/operation for supporting most use cases.
+The Field Engine (FE) has a limited number of instructions/operations to support most use cases.
There is a plan to add LuaJIT to be more flexible at the cost of performance.
-The FE can allocate stream variables in a Stream context, write a stream variable to a packet offset, change packet size, etc.
+The FE can allocate stream variables in a stream context, write a stream variable to a packet offset, change packet size, and so on.
-*Some examples for what can be done:*
+*Examples of Field Engine uses:*
* Change ipv4.tos 1-10
-* Change packet size to be random in the range 64-9K
-* Create range of flows (change src_ip, dest_ip, src_port, dest_port)
+* Change packet size to a random value in the range 64 to 9K
+* Create a range of flows (change src_ip, dest_ip, src_port, dest_port)
* Update IPv4 checksum
-Snippet will create SYN Attack::
+The following snippet creates a SYN attack::
# create attack from random src_ip from 16.0.0.0-18.0.0.254 and random src_port 1025-65000
# attack 48.0.0.1 server
@@ -60,7 +60,7 @@ Snippet will create SYN Attack::
STLScVmRaw class
----------------
-Aggregate a raw instructions objects
+Aggregate raw instruction objects
.. autoclass:: trex_stl_lib.trex_stl_packet_builder_scapy.STLScVmRaw
:members:
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/index.rst b/scripts/automation/trex_control_plane/doc_stl/api/index.rst
index a3c8ad5a..a3c8ad5a 100644..100755
--- a/scripts/automation/trex_control_plane/doc_stl/api/index.rst
+++ b/scripts/automation/trex_control_plane/doc_stl/api/index.rst
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/profile_code.rst b/scripts/automation/trex_control_plane/doc_stl/api/profile_code.rst
index b61f05e4..9afa9df2 100644..100755
--- a/scripts/automation/trex_control_plane/doc_stl/api/profile_code.rst
+++ b/scripts/automation/trex_control_plane/doc_stl/api/profile_code.rst
@@ -2,13 +2,13 @@
Traffic profile modules
=======================
-TRex STLProfile profile include a list of STLStream. The profile is a ``program`` of streams with a relation betwean the streams.
-Each stream can trigger another stream. Stream can be given a name for a full examples see here Manual_.
+The TRex STLProfile traffic profile includes a number of streams. The profile is a ``program`` of related streams.
+Each stream can trigger another stream. Each stream can be named. For a full set of examples, see Manual_.
-.. _Manual: ../draft_trex_stateless1.html
+.. _Manual: ../trex_stateless.html
-for example::
+Example::
def create_stream (self):
diff --git a/scripts/automation/trex_control_plane/doc_stl/api/scapy_builder.rst b/scripts/automation/trex_control_plane/doc_stl/api/scapy_builder.rst
index 5544df63..2c5790bf 100644..100755
--- a/scripts/automation/trex_control_plane/doc_stl/api/scapy_builder.rst
+++ b/scripts/automation/trex_control_plane/doc_stl/api/scapy_builder.rst
@@ -2,15 +2,15 @@
Packet builder modules
=======================
-The packet builder module objective is to build a template packet for a stream and to create a Field engine program to change fields in the packet.
+The packet builder module is used for building a template packet for a stream, and creating a Field Engine program to change fields in the packet.
-**Some examples for what can be done:**
+**Examples:**
-* Build a IP/UDP/DNS packet and create a range of src_ip = 10.0.0.1-10.0.0.255
-* Build a IP/UDP packets in IMIX sizes
+* Build an IP/UDP/DNS packet with a src_ip range of 10.0.0.1 to 10.0.0.255
+* Build IP/UDP packets in IMIX sizes
-for example this snippet will create SYN Attack::
+For example, this snippet creates a SYN attack::
# create attack from random src_ip from 16.0.0.0-18.0.0.254 and random src_port 1025-65000
# attack 48.0.0.1 server
diff --git a/scripts/automation/trex_control_plane/doc_stl/conf.py b/scripts/automation/trex_control_plane/doc_stl/conf.py
index fe6b27bc..45738b6e 100644
--- a/scripts/automation/trex_control_plane/doc_stl/conf.py
+++ b/scripts/automation/trex_control_plane/doc_stl/conf.py
@@ -69,7 +69,7 @@ author = u'TRex team, Cisco Systems Inc.'
# The short X.Y version.
version = '1.94'
# The full version, including alpha/beta/rc tags.
-release = '1.7.1'
+release = '2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/scripts/automation/trex_control_plane/doc_stl/index.rst b/scripts/automation/trex_control_plane/doc_stl/index.rst
index 97a1037a..aa2abd75 100644
--- a/scripts/automation/trex_control_plane/doc_stl/index.rst
+++ b/scripts/automation/trex_control_plane/doc_stl/index.rst
@@ -20,19 +20,23 @@ How to Install
| Put it at any place you like, preferably same place as your scripts.
| (If it's not at same place as your scripts, you will need to ensure trex_client directory is in sys.path)
-Un-pack it using command: tar -xzf trex_client.tar.gz
+Un-pack it using command::
-How to pyATS/v2.0
+ tar -xzf trex_client.tar.gz
+
+
+How to pyATS
==================
.. sectionauthor:: David Shen
pyATS Compatibility
-Trex only supports python2 for now, so it only works for **Python2** pyats.
+TRex supports both Python2 and Python3 pyATS.
-* Install python2 pyats
+* Install python2/python3 pyats
/auto/pyats/bin/pyats-install --python2
+ /auto/pyats/bin/pyats-install --python3
* setenv TREX_PATH to the trex stateless lib path
setenv TREX_PATH <your path>/automation/trex_control_plane/stl
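A minimal sketch for making the library importable inside a pyATS job once TREX_PATH is exported as above (the import is the standard trex_stl_lib entry point; everything else is illustrative):

    import os, sys
    sys.path.append(os.environ['TREX_PATH'])
    from trex_stl_lib.api import *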
diff --git a/scripts/automation/trex_control_plane/server/trex_launch_thread.py b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
index 59c382ea..74ce1750 100755
--- a/scripts/automation/trex_control_plane/server/trex_launch_thread.py
+++ b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
@@ -51,8 +51,8 @@ class AsynchronousTRexSession(threading.Thread):
self.trexObj.set_verbose_status("TRex run failed due to wrong input parameters, or due to readability issues.\n\nTRex command: {cmd}\n\nRun output:\n{output}".format(
cmd = self.cmd, output = self.load_trex_output(self.export_path)))
self.trexObj.errcode = -11
- elif (self.session.returncode is not None and self.session.returncode < 0) or ( (self.time_stamps['run_time'] < self.duration) and (not self.stoprequest.is_set()) ):
- if (self.session.returncode is not None and self.session.returncode < 0):
+ elif (self.session.returncode is not None and self.session.returncode != 0) or ( (self.time_stamps['run_time'] < self.duration) and (not self.stoprequest.is_set()) ):
+ if (self.session.returncode is not None and self.session.returncode != 0):
logger.debug("Failed TRex run due to session return code ({ret_code})".format( ret_code = self.session.returncode ) )
elif ( (self.time_stamps['run_time'] < self.duration) and not self.stoprequest.is_set()):
logger.debug("Failed TRex run due to running time ({runtime}) combined with no-stopping request.".format( runtime = self.time_stamps['run_time'] ) )
diff --git a/scripts/automation/trex_control_plane/server/trex_server.py b/scripts/automation/trex_control_plane/server/trex_server.py
index bf788d35..3f8bc374 100755
--- a/scripts/automation/trex_control_plane/server/trex_server.py
+++ b/scripts/automation/trex_control_plane/server/trex_server.py
@@ -39,7 +39,7 @@ class CTRexServer(object):
TREX_START_CMD = './t-rex-64'
DEFAULT_FILE_PATH = '/tmp/trex_files/'
- def __init__(self, trex_path, trex_files_path, trex_host='0.0.0.0', trex_daemon_port=8090, trex_zmq_port=4500):
+ def __init__(self, trex_path, trex_files_path, trex_host='0.0.0.0', trex_daemon_port=8090, trex_zmq_port=4500, trex_nice=-19):
"""
Parameters
----------
@@ -68,6 +68,12 @@ class CTRexServer(object):
self.start_lock = threading.Lock()
self.__reservation = None
self.zmq_monitor = ZmqMonitorSession(self.trex, self.trex_zmq_port) # intiate single ZMQ monitor thread for server usage
+ self.trex_nice = int(trex_nice)
+ if self.trex_nice < -20 or self.trex_nice > 19:
+ err = "Parameter 'nice' should be integer in range [-20, 19]"
+ print(err)
+ logger.error(err)
+ raise Exception(err)
def add(self, x, y):
print "server function add ",x,y
@@ -366,7 +372,8 @@ class CTRexServer(object):
else:
trex_cmd_options += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
- cmd = "{run_command} -f {gen_file} -d {duration} --iom {io} {cmd_options} --no-key > {export}".format( # -- iom 0 disables the periodic log to the screen (not needed)
+ cmd = "{nice}{run_command} -f {gen_file} -d {duration} --iom {io} {cmd_options} --no-key > {export}".format( # -- iom 0 disables the periodic log to the screen (not needed)
+ nice = '' if self.trex_nice == 0 else 'nice -n %s ' % self.trex_nice,
run_command = self.TREX_START_CMD,
gen_file = f,
duration = d,
@@ -508,6 +515,8 @@ trex_daemon_server [options]
action="store", help="Specify a hostname to be registered as the TRex server.\n"
"Default is to bind all IPs using '0.0.0.0'.",
metavar="HOST", default = '0.0.0.0')
+ parser.add_argument('-n', '--nice', dest='nice', action="store", default = -19, type = int,
+ help="Determine the priority TRex process [-20, 19] (lower = higher priority)\nDefault is -19.")
return parser
trex_parser = generate_trex_parser()
@@ -517,7 +526,7 @@ def do_main_program ():
args = trex_parser.parse_args()
server = CTRexServer(trex_path = args.trex_path, trex_files_path = args.files_path,
trex_host = args.trex_host, trex_daemon_port = args.daemon_port,
- trex_zmq_port = args.zmq_port)
+ trex_zmq_port = args.zmq_port, trex_nice = args.nice)
server.start()
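For illustration, a sketch of how the new trex_nice setting shapes the launch command, using the same prefix logic as the hunk above (paths and values are placeholders):

    trex_nice = -19
    nice = '' if trex_nice == 0 else 'nice -n %s ' % trex_nice
    # yields e.g. "nice -n -19 ./t-rex-64 -f <gen_file> -d <duration> --iom 0 ... --no-key > <export>"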
diff --git a/scripts/automation/trex_control_plane/stf/examples/stf_example.py b/scripts/automation/trex_control_plane/stf/examples/stf_example.py
new file mode 100755
index 00000000..f57435bf
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/examples/stf_example.py
@@ -0,0 +1,54 @@
+import argparse
+import stf_path
+from trex_stf_lib.trex_client import CTRexClient
+from pprint import pprint
+
+# sample TRex stateful run
+# assuming server daemon is running.
+
+def minimal_stateful_test(server):
+ print('Connecting to %s' % server)
+ trex_client = CTRexClient(server)
+
+ print('Connected, start TRex')
+ trex_client.start_trex(
+ c = 1,
+ m = 700,
+ f = 'cap2/http_simple.yaml',
+ d = 30,
+ l = 1000,
+ )
+
+ print('Sample until end')
+ result = trex_client.sample_to_run_finish()
+
+ print('Test results:')
+ print(result)
+
+ print('TX by ports:')
+ tx_ptks_dict = result.get_last_value('trex-global.data', 'opackets-*')
+ print(' | '.join(['%s: %s' % (k.split('-')[-1], tx_ptks_dict[k]) for k in sorted(tx_ptks_dict.keys())]))
+
+ print('RX by ports:')
+ rx_ptks_dict = result.get_last_value('trex-global.data', 'ipackets-*')
+ print(' | '.join(['%s: %s' % (k.split('-')[-1], rx_ptks_dict[k]) for k in sorted(rx_ptks_dict.keys())]))
+
+ print('CPU utilization:')
+ print(result.get_value_list('trex-global.data.m_cpu_util'))
+
+ #Dump of *latest* result sample, uncomment to see it all
+ #print('Latest result dump:')
+ #pprint(result.get_latest_dump())
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description="Example for TRex Stateful, assuming server daemon is running.")
+ parser.add_argument('-s', '--server',
+ dest='server',
+ help='Remote trex address',
+ default='127.0.0.1',
+ type = str)
+ args = parser.parse_args()
+
+ minimal_stateful_test(args.server)
+
diff --git a/scripts/automation/trex_control_plane/stf/examples/stf_path.py b/scripts/automation/trex_control_plane/stf/examples/stf_path.py
new file mode 100755
index 00000000..bb401148
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stf/examples/stf_path.py
@@ -0,0 +1,4 @@
+import sys
+
+# FIXME to the right path for trex_stf_lib
+sys.path.insert(0, "../")
diff --git a/scripts/automation/trex_control_plane/stf/CCustomLogger.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/CCustomLogger.py
index ecf7d519..ecf7d519 100755
--- a/scripts/automation/trex_control_plane/stf/CCustomLogger.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/CCustomLogger.py
diff --git a/scripts/automation/trex_control_plane/stf/__init__.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/__init__.py
index 5a1da046..5a1da046 100755
--- a/scripts/automation/trex_control_plane/stf/__init__.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/__init__.py
diff --git a/scripts/automation/trex_control_plane/stf/external_packages.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/external_packages.py
index 7353c397..7353c397 100755
--- a/scripts/automation/trex_control_plane/stf/external_packages.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/external_packages.py
diff --git a/scripts/automation/trex_control_plane/stf/general_utils.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/general_utils.py
index d2521f02..d2521f02 100755
--- a/scripts/automation/trex_control_plane/stf/general_utils.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/general_utils.py
diff --git a/scripts/automation/trex_control_plane/stf/outer_packages.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/outer_packages.py
index 5e29f8d6..f8d50ce6 100755
--- a/scripts/automation/trex_control_plane/stf/outer_packages.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/outer_packages.py
@@ -5,8 +5,8 @@ import os
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
-PARENT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, 'external_libs'))
-SCRIPTS_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, os.pardir, 'external_libs'))
+PACKAGE_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, 'external_libs'))
+SCRIPTS_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, os.pardir, os.pardir, 'external_libs'))
CLIENT_MODULES = ['enum34-1.0.4',
'jsonrpclib-pelix-0.2.5',
@@ -22,9 +22,9 @@ def import_module_list(ext_libs_path):
raise Exception('Library %s is absent in path %s' % (p, ext_libs_path))
sys.path.insert(1, full_path)
-if os.path.exists(PARENT_PATH):
- import_module_list(PARENT_PATH)
+if os.path.exists(PACKAGE_PATH):
+ import_module_list(PACKAGE_PATH)
elif os.path.exists(SCRIPTS_PATH):
import_module_list(SCRIPTS_PATH)
else:
- raise Exception('Could not find external libs in path: %s' % [PARENT_PATH, SCRIPTS_PATH])
+ raise Exception('Could not find external libs in path: %s' % [PACKAGE_PATH, SCRIPTS_PATH])
diff --git a/scripts/automation/trex_control_plane/stf/text_opts.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/text_opts.py
index 78a0ab1f..78a0ab1f 100755
--- a/scripts/automation/trex_control_plane/stf/text_opts.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/text_opts.py
diff --git a/scripts/automation/trex_control_plane/stf/trex_client.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
index 919253d1..074d9060 100755
--- a/scripts/automation/trex_control_plane/stf/trex_client.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
@@ -39,7 +39,7 @@ class CTRexClient(object):
This class defines the client side of the RESTfull interaction with TRex
"""
- def __init__(self, trex_host, max_history_size = 100, trex_daemon_port = 8090, trex_zmq_port = 4500, verbose = False):
+ def __init__(self, trex_host, max_history_size = 100, filtered_latency_amount = 0.001, trex_daemon_port = 8090, trex_zmq_port = 4500, verbose = False):
"""
Instantiate a TRex client object, and connecting it to listening daemon-server
@@ -50,6 +50,12 @@ class CTRexClient(object):
a number to set the maximum history size of a single TRex run. Each sampling adds a new item to history.
default value : **100**
+
+ filtered_latency_amount : float
+ Ignore high latency for this amount of packets (by default, 99.9% of measurements are taken into account).
+
+ default value : **0.001**
+
trex_daemon_port : int
the port number on which the trex-daemon server can be reached
@@ -75,7 +81,7 @@ class CTRexClient(object):
self.trex_zmq_port = trex_zmq_port
self.seq = None
self.verbose = verbose
- self.result_obj = CTRexResult(max_history_size)
+ self.result_obj = CTRexResult(max_history_size, filtered_latency_amount)
self.decoder = JSONDecoder()
self.trex_server_path = "http://{hostname}:{port}/".format( hostname = self.trex_host, port = trex_daemon_port )
self.__verbose_print("Connecting to TRex @ {trex_path} ...".format( trex_path = self.trex_server_path ) )
@@ -834,22 +840,25 @@ class CTRexResult(object):
Ontop to containing the results, this class offers easier data access and extended results processing options
"""
- def __init__(self, max_history_size):
+ def __init__(self, max_history_size, filtered_latency_amount = 0.001):
"""
Instatiate a TRex result object
:parameters:
max_history_size : int
- a number to set the maximum history size of a single TRex run. Each sampling adds a new item to history.
+ A number to set the maximum history size of a single TRex run. Each sampling adds a new item to history.
+ filtered_latency_amount : float
+ Ignore high latency for this amount of packets (by default, 99.9% are taken into account).
"""
self._history = deque(maxlen = max_history_size)
self.clear_results()
self.latency_checked = True
+ self.filtered_latency_amount = filtered_latency_amount
def __repr__(self):
return ("Is valid history? {arg}\n".format( arg = self.is_valid_hist() ) +
- "Done warmup? {arg}\n".format( arg = self.is_done_warmup() ) +
+ "Done warmup? {arg}\n".format( arg = self.is_done_warmup() ) +
"Expected tx rate: {arg}\n".format( arg = self.get_expected_tx_rate() ) +
"Current tx rate: {arg}\n".format( arg = self.get_current_tx_rate() ) +
"Maximum latency: {arg}\n".format( arg = self.get_max_latency() ) +
@@ -1107,22 +1116,16 @@ class CTRexResult(object):
self._current_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_(?!expected_)\w+")
if not self._done_warmup and self._expected_tx_rate is not None:
# check for up to 2% change between expected and actual
- if (self._current_tx_rate['m_tx_bps']/self._expected_tx_rate['m_tx_expected_bps'] > 0.98):
+ if (self._current_tx_rate['m_tx_bps'] > 0.98 * self._expected_tx_rate['m_tx_expected_bps']):
self._done_warmup = True
-
+
# handle latency data
if self.latency_checked:
- latency_pre = "trex-latency"
- self._max_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), "max-")#None # TBC
- # support old typo
- if self._max_latency is None:
- latency_pre = "trex-latecny"
- self._max_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), "max-")
-
- self._avg_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), "avg-")#None # TBC
- self._avg_latency = CTRexResult.__avg_all_and_rename_keys(self._avg_latency)
-
- avg_win_latency_list = self.get_value_list("{latency}.data".format(latency = latency_pre), "avg-")
+ latency_per_port = self.get_last_value("trex-latecny-v2.data", "port-")
+ self._max_latency = self.__get_filtered_max_latency(latency_per_port, self.filtered_latency_amount)
+ avg_latency = self.get_last_value("trex-latecny.data", "avg-")
+ self._avg_latency = CTRexResult.__avg_all_and_rename_keys(avg_latency)
+ avg_win_latency_list = self.get_value_list("trex-latecny.data", "avg-")
self._avg_window_latency = CTRexResult.__calc_latency_win_stats(avg_win_latency_list)
tx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_tx_pkts")
@@ -1209,6 +1212,29 @@ class CTRexResult(object):
res[tmp_key] = val # don't touch original fields values
return res
+ @staticmethod
+ def __get_filtered_max_latency (src_dict, filtered_latency_amount = 0.001):
+ result = {}
+ for port, data in src_dict.items():
+ if port.startswith('port-'):
+ max_port = 'max-%s' % port[5:]
+ res = data['hist']
+ if not len(res['histogram']):
+ result[max_port] = 0
+ continue
+ hist_last_keys = deque([res['histogram'][-1]['key']], maxlen = 2)
+ sum_high = 0.0
+
+ for i, elem in enumerate(reversed(res['histogram'])):
+ sum_high += elem['val']
+ hist_last_keys.append(elem['key'])
+ if sum_high / res['cnt'] >= filtered_latency_amount:
+ break
+ result[max_port] = sum(hist_last_keys) / len(hist_last_keys)
+ else:
+ return {}
+ return result
+
if __name__ == "__main__":
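A hedged construction sketch for the new filtered_latency_amount knob (the host is illustrative; 0.005 means the top 0.5% of latency samples are ignored when deriving the max latency):

    client = CTRexClient('trex-host', filtered_latency_amount = 0.005)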
diff --git a/scripts/automation/trex_control_plane/stf/trex_daemon_server.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_daemon_server.py
index 9784d42a..9784d42a 100755
--- a/scripts/automation/trex_control_plane/stf/trex_daemon_server.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_daemon_server.py
diff --git a/scripts/automation/trex_control_plane/stf/trex_exceptions.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_exceptions.py
index 0de38411..0de38411 100755
--- a/scripts/automation/trex_control_plane/stf/trex_exceptions.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_exceptions.py
diff --git a/scripts/automation/trex_control_plane/stf/trex_status.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status.py
index f132720c..f132720c 100644
--- a/scripts/automation/trex_control_plane/stf/trex_status.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status.py
diff --git a/scripts/automation/trex_control_plane/stf/trex_status_e.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status_e.py
index 79a25acc..79a25acc 100755
--- a/scripts/automation/trex_control_plane/stf/trex_status_e.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_status_e.py
diff --git a/scripts/automation/trex_control_plane/stl/console/stl_path.py b/scripts/automation/trex_control_plane/stl/console/stl_path.py
new file mode 100644
index 00000000..f15c666e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/console/stl_path.py
@@ -0,0 +1,7 @@
+import sys, os
+
+# FIXME to the right path for trex_stl_lib
+sys.path.insert(0, "../")
+
+STL_PROFILES_PATH = os.path.join(os.pardir, 'profiles')
+
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_console.py b/scripts/automation/trex_control_plane/stl/console/trex_console.py
index 8c71065c..da4c4486 100755
--- a/scripts/automation/trex_control_plane/stl/console/trex_console.py
+++ b/scripts/automation/trex_control_plane/stl/console/trex_console.py
@@ -30,14 +30,20 @@ import os
import sys
import tty, termios
+try:
+ import stl_path
+except:
+ from . import stl_path
from trex_stl_lib.api import *
from trex_stl_lib.utils.text_opts import *
from trex_stl_lib.utils.common import user_input, get_current_user
from trex_stl_lib.utils import parsing_opts
-
-from . import trex_tui
+try:
+ import trex_tui
+except:
+ from . import trex_tui
from functools import wraps
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
index 56fd3cfd..bc7990aa 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix.py
@@ -12,11 +12,12 @@ import argparse
# and attach it to both sides and inject
# at a certain rate for some time
# finally it checks that all packets arrived
-def imix_test (server):
+def imix_test (server, mult):
# create client
c = STLClient(server = server)
+
passed = True
@@ -27,6 +28,7 @@ def imix_test (server):
# take all the ports
c.reset()
+
# map ports - identify the routes
table = stl_map_ports(c)
@@ -37,7 +39,8 @@ def imix_test (server):
print("Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1))
# load IMIX profile
- profile = STLProfile.load_py('../../../../stl/imix.py')
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'imix.py')
+ profile = STLProfile.load_py(profile_file)
streams = profile.get_streams()
# add both streams to ports
@@ -47,9 +50,8 @@ def imix_test (server):
# clear the stats before injecting
c.clear_stats()
- # choose rate and start traffic for 10 seconds on 5 mpps
+ # choose rate and start traffic for 10 seconds
duration = 10
- mult = "30%"
print("Injecting {0} <--> {1} on total rate of '{2}' for {3} seconds".format(dir_0, dir_1, mult, duration))
c.start(ports = (dir_0 + dir_1), mult = mult, duration = duration, total = True)
@@ -96,10 +98,8 @@ def imix_test (server):
if passed:
print("\nTest has passed :-)\n")
- sys.exit(0)
else:
print("\nTest has failed :-(\n")
- sys.exit(-1)
parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic")
parser.add_argument('-s', '--server',
@@ -107,8 +107,13 @@ parser.add_argument('-s', '--server',
help='Remote trex address',
default='127.0.0.1',
type = str)
+parser.add_argument('-m', '--mult',
+ dest='mult',
+ help='Multiplier of traffic, see Stateless help for more info',
+ default='30%',
+ type = str)
args = parser.parse_args()
# run the tests
-imix_test(args.server)
+imix_test(args.server, args.mult)
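With the multiplier now exposed on the command line, the example can be driven like this (server address and rate are illustrative; '30%' stays the default):

    python stl_imix.py --server 10.0.0.10 --mult 10mpps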
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py
index 05a8777b..956b910a 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir.py
@@ -29,14 +29,15 @@ def imix_test (server):
# take all the ports
c.reset()
- dir_0 = [0]
- dir_1 = [1]
+ dir_0 = [0]
+ dir_1 = [1]
print "Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1)
# load IMIX profile
- profile1 = STLProfile.load_py('../../../../stl/imix.py', direction=0)
- profile2 = STLProfile.load_py('../../../../stl/imix.py', direction=1)
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'imix.py')
+ profile1 = STLProfile.load_py(profile_file, direction=0)
+ profile2 = STLProfile.load_py(profile_file, direction=1)
stream1 = profile1.get_streams()
stream2 = profile2.get_streams()
@@ -96,10 +97,8 @@ def imix_test (server):
if passed:
print "\nTest has passed :-)\n"
- sys.exit(0)
else:
print "\nTest has failed :-(\n"
- sys.exit(-1)
parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic")
parser.add_argument('-s', '--server',
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_path.py b/scripts/automation/trex_control_plane/stl/examples/stl_path.py
index 8f400d23..f190aab1 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_path.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_path.py
@@ -1,4 +1,7 @@
-import sys
+import sys, os
-# FIXME to the write path for trex_stl_lib
+# FIXME to the right path for trex_stl_lib
sys.path.insert(0, "../")
+
+STL_PROFILES_PATH = os.path.join(os.pardir, os.pardir, os.pardir, os.pardir, 'stl')
+
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_profile.py b/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
index 3ae5f855..16d5238e 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_profile.py
@@ -18,8 +18,7 @@ def simple ():
# prepare our ports
c.reset(ports = my_ports)
-
- profile_file = "../../../../stl/udp_1pkt_simple.py"
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'hlt', 'udp_1pkt_simple.py')
try:
profile = STLProfile.load(profile_file)
diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py b/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
index 03909e65..1d4ef250 100644
--- a/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
+++ b/scripts/automation/trex_control_plane/stl/examples/stl_simple_console_like.py
@@ -29,9 +29,10 @@ def simple ():
print(c.get_port_info(my_ports))
c.ping()
+ profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'udp_1pkt_simple.py')
print("start")
- c.start_line (" -f ../../../../stl/udp_1pkt_simple.py -m 10mpps --port 0 1 ")
+ c.start_line (" -f %s -m 10mpps --port 0 1 " % profile_file)
time.sleep(2);
c.pause_line("--port 0 1");
time.sleep(2);
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
index 25e35423..bddc4ad0 100644..100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
@@ -40,15 +40,15 @@ class LoggerApi(object):
# implemented by specific logger
def write(self, msg, newline = True):
- raise Exception("implement this")
+ raise Exception("Implement this")
# implemented by specific logger
def flush(self):
- raise Exception("implement this")
+ raise Exception("Implement this")
def set_verbose (self, level):
if not level in range(self.VERBOSE_QUIET, self.VERBOSE_HIGH + 1):
- raise ValueError("bad value provided for logger")
+ raise ValueError("Bad value provided for logger")
self.level = level
@@ -146,7 +146,7 @@ class AsyncEventHandler(object):
def on_async_dead (self):
if self.client.connected:
- msg = 'lost connection to server'
+ msg = 'Lost connection to server'
self.__add_event_log(msg, 'local', True)
self.client.connected = False
@@ -319,12 +319,12 @@ class AsyncEventHandler(object):
############################ #############################
class CCommLink(object):
- """describes the connectivity of the stateless client method"""
- def __init__(self, server="localhost", port=5050, virtual=False, prn_func = None):
+ """Describes the connectivity of the stateless client method"""
+ def __init__(self, server="localhost", port=5050, virtual=False, client = None):
self.virtual = virtual
self.server = server
self.port = port
- self.rpc_link = JsonRpcClient(self.server, self.port, prn_func)
+ self.rpc_link = JsonRpcClient(self.server, self.port, client)
@property
def is_connected(self):
@@ -347,25 +347,25 @@ class CCommLink(object):
if not self.virtual:
return self.rpc_link.disconnect()
- def transmit(self, method_name, params={}):
+ def transmit(self, method_name, params = None, api_class = 'core'):
if self.virtual:
self._prompt_virtual_tx_msg()
- _, msg = self.rpc_link.create_jsonrpc_v2(method_name, params)
+ _, msg = self.rpc_link.create_jsonrpc_v2(method_name, params, api_class)
print(msg)
return
else:
- return self.rpc_link.invoke_rpc_method(method_name, params)
+ return self.rpc_link.invoke_rpc_method(method_name, params, api_class)
def transmit_batch(self, batch_list):
if self.virtual:
self._prompt_virtual_tx_msg()
print([msg
- for _, msg in [self.rpc_link.create_jsonrpc_v2(command.method, command.params)
+ for _, msg in [self.rpc_link.create_jsonrpc_v2(command.method, command.params, command.api_class)
for command in batch_list]])
else:
batch = self.rpc_link.create_batch()
for command in batch_list:
- batch.add(command.method, command.params)
+ batch.add(command.method, command.params, command.api_class)
# invoke the batch
return batch.invoke()
@@ -380,7 +380,7 @@ class CCommLink(object):
############################ #############################
class STLClient(object):
- """TRex Stateless client object- gives operations per TRex/user"""
+ """TRex Stateless client object - gives operations per TRex/user"""
def __init__(self,
username = common.get_current_user(),
@@ -391,7 +391,7 @@ class STLClient(object):
logger = None,
virtual = False):
"""
- Set the connection setting
+ Configure the connection settings
:parameters:
username : string
@@ -449,7 +449,7 @@ class STLClient(object):
self.comm_link = CCommLink(server,
sync_port,
virtual,
- self.logger)
+ self)
# async event handler manager
self.event_handler = AsyncEventHandler(self)
@@ -481,7 +481,11 @@ class STLClient(object):
self.flow_stats)
-
+ # API classes
+ self.api_vers = [ {'type': 'core', 'major': 1, 'minor':0 }
+ ]
+ self.api_h = {'core': None}
+
############# private functions - used by the class itself ###########
# some preprocessing for port argument
@@ -496,11 +500,11 @@ class STLClient(object):
port_id_list = [port_id_list]
if not isinstance(port_id_list, list):
- raise ValueError("bad port id list: {0}".format(port_id_list))
+ raise ValueError("Bad port id list: {0}".format(port_id_list))
for port_id in port_id_list:
if not isinstance(port_id, int) or (port_id < 0) or (port_id > self.get_port_count()):
- raise ValueError("bad port id {0}".format(port_id))
+ raise ValueError("Bad port id {0}".format(port_id))
return port_id_list
@@ -668,6 +672,7 @@ class STLClient(object):
return rc
+
# connect to server
def __connect(self):
@@ -686,12 +691,22 @@ class STLClient(object):
if not rc:
return rc
+
+ # API sync
+ rc = self._transmit("api_sync", params = {'api_vers': self.api_vers}, api_class = None)
+ if not rc:
+ return rc
+
+ # decode
+ for api in rc.data()['api_vers']:
+ self.api_h[ api['type'] ] = api['api_h']
+
+
# version
rc = self._transmit("get_version")
if not rc:
return rc
-
self.server_version = rc.data()
self.global_stats.server_version = rc.data()
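A minimal runnable sketch of the decode step above, with a hypothetical server reply (the real handler value is whatever the server returns)::

    api_h = {'core': None}

    reply = {'api_vers': [{'type': 'core', 'api_h': 'SPhxQ8l4'}]}   # hypothetical reply payload

    for api in reply['api_vers']:
        api_h[api['type']] = api['api_h']

    assert api_h['core'] == 'SPhxQ8l4'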
@@ -727,7 +742,7 @@ class STLClient(object):
# connect async channel
- self.logger.pre_cmd("connecting to publisher server on {0}:{1}".format(self.connection_info['server'], self.connection_info['async_port']))
+ self.logger.pre_cmd("Connecting to publisher server on {0}:{1}".format(self.connection_info['server'], self.connection_info['async_port']))
rc = self.async_client.connect()
self.logger.post_cmd(rc)
@@ -765,7 +780,7 @@ class STLClient(object):
if clear_flow_stats:
self.flow_stats.clear_stats()
- self.logger.log_cmd("clearing stats on port(s) {0}:".format(port_id_list))
+ self.logger.log_cmd("Clearing stats on port(s) {0}:".format(port_id_list))
return RC
@@ -817,8 +832,8 @@ class STLClient(object):
# transmit request on the RPC link
- def _transmit(self, method_name, params={}):
- return self.comm_link.transmit(method_name, params)
+ def _transmit(self, method_name, params = None, api_class = 'core'):
+ return self.comm_link.transmit(method_name, params, api_class)
# transmit batch request on the RPC link
def _transmit_batch(self, batch_list):
@@ -852,8 +867,26 @@ class STLClient(object):
return RC_OK()
-
-
+ # remove all RX filters in a safe manner
+ def _remove_rx_filters (self, ports, rx_delay_ms):
+
+ # get the enabled RX ports
+ rx_ports = [port_id for port_id in ports if self.ports[port_id].has_rx_enabled()]
+
+ if not rx_ports:
+ return RC_OK()
+
+ # block while any RX-configured port has not yet had its delay expired
+ while any([not self.ports[port_id].has_rx_delay_expired(rx_delay_ms) for port_id in rx_ports]):
+ time.sleep(0.01)
+
+ # remove RX filters
+ rc = RC()
+ for port_id in rx_ports:
+ rc.add(self.ports[port_id].remove_rx_filters())
+
+ return rc
+
#################################
# ------ private methods ------ #
@@ -914,13 +947,13 @@ class STLClient(object):
# return verbose level of the logger
def get_verbose (self):
"""
- get the verbose mode
+ Get the verbose mode
:parameters:
none
:return:
- get the verbose mode as Bool
+ Get the verbose mode as Bool
:raises:
None
@@ -934,10 +967,10 @@ class STLClient(object):
is_all_ports_acquired
:parameters:
- none
+ None
:return:
- return True if all ports are acquired
+ Returns True if all ports are acquired
:raises:
None
@@ -951,7 +984,7 @@ class STLClient(object):
"""
:parameters:
- none
+ None
:return:
is_connected
@@ -969,10 +1002,10 @@ class STLClient(object):
"""
:parameters:
- none
+ None
:return:
- connection dict
+ Connection dict
:raises:
None
@@ -987,10 +1020,10 @@ class STLClient(object):
"""
:parameters:
- none
+ None
:return:
- connection dict
+ Connection dict
:raises:
None
@@ -1004,10 +1037,10 @@ class STLClient(object):
"""
:parameters:
- none
+ None
:return:
- connection dict
+ Connection dict
:raises:
None
@@ -1021,10 +1054,10 @@ class STLClient(object):
"""
:parameters:
- none
+ None
:return:
- connection dict
+ Connection dict
:raises:
None
@@ -1038,10 +1071,10 @@ class STLClient(object):
"""
:parameters:
- none
+ None
:return:
- connection dict
+ Connection dict
:raises:
None
@@ -1065,10 +1098,10 @@ class STLClient(object):
"""
:parameters:
- none
+ None
:return:
- connection dict
+ Connection dict
:raises:
None
@@ -1089,6 +1122,7 @@ class STLClient(object):
for port_id, port_obj in self.ports.items()
if port_obj.is_active()]
+
# get paused ports
def get_paused_ports (self):
return [port_id
@@ -1189,9 +1223,9 @@ class STLClient(object):
:parameters:
stop_traffic : bool
- tries to stop traffic before disconnecting
+ Attempts to stop traffic before disconnecting.
release_ports : bool
- tries to release all the acquired ports
+ Attempts to release all the acquired ports.
"""
@@ -1217,9 +1251,9 @@ class STLClient(object):
:parameters:
ports : list
- ports to execute the command
+ Ports on which to execute the command
force : bool
- force acquire the ports
+ Force acquire the ports.
:raises:
+ :exc:`STLError`
@@ -1252,7 +1286,7 @@ class STLClient(object):
:parameters:
ports : list
- ports to execute the command
+ Ports on which to execute the command
:raises:
+ :exc:`STLError`
@@ -1285,7 +1319,7 @@ class STLClient(object):
self.logger.pre_cmd( "Pinging the server on '{0}' port '{1}': ".format(self.connection_info['server'],
self.connection_info['sync_port']))
- rc = self._transmit("ping")
+ rc = self._transmit("ping", api_class = None)
self.logger.post_cmd(rc)
@@ -1295,7 +1329,7 @@ class STLClient(object):
@__api_check(True)
def get_active_pgids(self):
"""
- Get active group ids
+ Get active group IDs
:parameters:
None
@@ -1319,11 +1353,11 @@ class STLClient(object):
@__api_check(True)
def reset(self, ports = None):
"""
- force acquire ports, stop the traffic, remove all streams and clear stats
+ Force acquire ports, stop the traffic, remove all streams and clear stats
:parameters:
ports : list
- ports to execute the command
+ Ports on which to execute the command
:raises:
@@ -1336,7 +1370,7 @@ class STLClient(object):
ports = self._validate_port_list(ports)
self.acquire(ports, force = True)
- self.stop(ports)
+ self.stop(ports, rx_delay_ms = 0)
self.remove_all_streams(ports)
self.clear_stats(ports)
@@ -1348,7 +1382,7 @@ class STLClient(object):
:parameters:
ports : list
- ports to execute the command
+ Ports on which to execute the command
:raises:
@@ -1371,16 +1405,16 @@ class STLClient(object):
@__api_check(True)
def add_streams (self, streams, ports = None):
"""
- add a list of streams to port(s)
+ Add a list of streams to port(s)
:parameters:
ports : list
- ports to execute the command
+ Ports on which to execute the command
streams: list
- streams to attach (or profile)
+ Streams to attach (or profile)
:returns:
- list of stream IDs in order of the stream list
+ List of stream IDs in order of the stream list
:raises:
+ :exc:`STLError`
@@ -1416,13 +1450,13 @@ class STLClient(object):
@__api_check(True)
def remove_streams (self, stream_id_list, ports = None):
"""
- remove a list of streams from ports
+ Remove a list of streams from ports
:parameters:
ports : list
- ports to execute the command
+ Ports on which to execute the command
stream_id_list: list
- stream id list to remove
+ Stream id list to remove
:raises:
@@ -1460,27 +1494,29 @@ class STLClient(object):
duration = -1,
total = False):
"""
- start traffic on port(s)
+ Start traffic on port(s)
:parameters:
ports : list
- ports to execute command
+ Ports on which to execute the command
mult : str
- multiplier in a form of pps, bps, or line util in %
- examples: "5kpps", "10gbps", "85%", "32mbps"
+ Multiplier in a form of pps, bps, or line util in %
+ Examples: "5kpps", "10gbps", "85%", "32mbps"
force : bool
- imply stopping the port of active and also
- forces a profile that exceeds the L1 BW
+ If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
+ True: Force start
+ False: Do not force start
duration : int
- limit the run for time in seconds
- -1 means unlimited
+ Limit the run time (seconds)
+ -1 = unlimited
total : bool
- should the B/W be divided by the ports
- or duplicated for each
+ Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
+ True: Divide bandwidth among the ports
+ False: Duplicate
:raises:
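A usage sketch (not part of the patch), assuming a connected and acquired STLClient instance ``c``::

    # 10 Gbps split across ports 0 and 1 (total = True), for 10 seconds
    c.start(ports = [0, 1], mult = "10gbps", duration = 10, total = True)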
@@ -1535,13 +1571,20 @@ class STLClient(object):
@__api_check(True)
- def stop (self, ports = None):
+ def stop (self, ports = None, rx_delay_ms = 10):
"""
- stop port(s)
+ Stop port(s)
:parameters:
ports : list
- ports to execute the command
+ Ports on which to execute the command
+
+ rx_delay_ms : int
+ Time to wait (in milliseconds) until RX filters are removed.
+ This value should reflect the time it takes transmitted packets
+ to arrive at the destination.
+ After this time the RX filters will be removed.
:raises:
+ :exc:`STLError`
@@ -1561,27 +1604,35 @@ class STLClient(object):
if not rc:
raise STLError(rc)
+ # remove any RX filters
+ rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
+ if not rc:
+ raise STLError(rc)
+
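A usage sketch of the new parameter, assuming traffic is running on client ``c``::

    # wait up to 50 ms after TX ends before removing the RX (flow stats) filters
    c.stop(ports = [0, 1], rx_delay_ms = 50)

    # rx_delay_ms = 0 skips the wait - this is what reset() now passes
    c.stop(ports = [0, 1], rx_delay_ms = 0)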
@__api_check(True)
def update (self, ports = None, mult = "1", total = False, force = False):
"""
- update traffic on port(s)
+ Update traffic on port(s)
:parameters:
ports : list
- ports to execute command
+ Ports on which to execute the command
mult : str
- multiplier in a form of pps, bps, or line util in %
- and also with +/-
- examples: "5kpps+", "10gbps-", "85%", "32mbps", "20%+"
+ Multiplier in a form of pps, bps, or line util in %
+ Can also specify +/-
+ Examples: "5kpps+", "10gbps-", "85%", "32mbps", "20%+"
force : bool
- forces a profile that exceeds the L1 BW
+ If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
+ True: Force start
+ False: Do not force start
total : bool
- should the B/W be divided by the ports
- or duplicated for each
+ Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
+ True: Divide bandwidth among the ports
+ False: Duplicate
:raises:
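A usage sketch, assuming ports 0-1 are already transmitting on client ``c``::

    c.update(ports = [0, 1], mult = "5kpps+")   # add 5 kpps to the current rate
    c.update(ports = [0, 1], mult = "50%")      # set the rate to 50% of the line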
@@ -1619,11 +1670,11 @@ class STLClient(object):
@__api_check(True)
def pause (self, ports = None):
"""
- pause traffic on port(s). works only for ports that are active and all streams are in cont mode
+ Pause traffic on port(s). Works only for ports that are active, and only if all streams are in Continuous mode.
:parameters:
ports : list
- ports to execute command
+ Ports on which to execute the command
:raises:
+ :exc:`STLError`
@@ -1644,11 +1695,11 @@ class STLClient(object):
@__api_check(True)
def resume (self, ports = None):
"""
- resume traffic on port(s)
+ Resume traffic on port(s)
:parameters:
ports : list
- ports to execute command
+ Ports on which to execute the command
:raises:
+ :exc:`STLError`
@@ -1671,23 +1722,24 @@ class STLClient(object):
@__api_check(True)
def validate (self, ports = None, mult = "1", duration = "-1", total = False):
"""
- validate port(s) configuration
+ Validate port(s) configuration
:parameters:
ports : list
- ports to execute command
+ Ports on which to execute the command
mult : str
- multiplier in a form of pps, bps, or line util in %
- examples: "5kpps", "10gbps", "85%", "32mbps"
+ Multiplier in a form of pps, bps, or line util in %
+ Examples: "5kpps", "10gbps", "85%", "32mbps"
duration : int
- limit the run for time in seconds
- -1 means unlimited
+ Limit the run time (seconds)
+ -1 = unlimited
total : bool
- should the B/W be divided by the ports
- or duplicated for each
+ Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
+ True: Divide bandwidth among the ports
+ False: Duplicate
:raises:
+ :exc:`STLError`
@@ -1723,17 +1775,17 @@ class STLClient(object):
@__api_check(False)
def clear_stats (self, ports = None, clear_global = True, clear_flow_stats = True):
"""
- clear stats on port(s)
+ Clear stats on port(s)
:parameters:
ports : list
- ports to execute command
+ Ports on which to execute the command
clear_global : bool
- clear the global stats
+ Clear the global stats
clear_flow_stats : bool
- clear the flow stats
+ Clear the flow stats
:raises:
+ :exc:`STLError`
@@ -1757,11 +1809,11 @@ class STLClient(object):
@__api_check(True)
def is_traffic_active (self, ports = None):
"""
- retrun if specify port(s) has traffic
+ Return if specified port(s) have traffic
:parameters:
ports : list
- ports to execute command
+ Ports on which to execute the command
:raises:
@@ -1778,17 +1830,25 @@ class STLClient(object):
@__api_check(True)
- def wait_on_traffic (self, ports = None, timeout = 60):
+ def wait_on_traffic (self, ports = None, timeout = 60, rx_delay_ms = 10):
"""
- block until specify port(s) traffic has ended
+ Block until traffic on specified port(s) has ended
:parameters:
ports : list
- ports to execute command
+ Ports on which to execute the command
timeout : int
timeout in seconds
+ rx_delay_ms : int
+ Time to wait (in milliseconds) until RX filters are removed.
+ This value should reflect the time it takes transmitted packets
+ to arrive at the destination.
+ After this time the RX filters will be removed.
+
+
:raises:
+ :exc:`STLTimeoutError` - in case timeout has expired
+ :exc:`STLError`
@@ -1807,14 +1867,19 @@ class STLClient(object):
if time.time() > expr:
raise STLTimeoutError(timeout)
+ # remove any RX filters
+ rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
+ if not rc:
+ raise STLError(rc)
+
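A usage sketch combining a bounded run with the new RX-filter cleanup, assuming client ``c``::

    c.start(ports = [0], mult = "1mpps", duration = 5)

    # blocks until the 5-second run ends, then waits rx_delay_ms before
    # removing the RX filters; raises STLTimeoutError if 60 seconds pass
    c.wait_on_traffic(ports = [0], timeout = 60, rx_delay_ms = 10)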
@__api_check(True)
def set_port_attr (self, ports = None, promiscuous = None):
"""
- set port(s) attributes
+ Set port attributes
:parameters:
- promiscuous - set this to True or False
+ promiscuous - True or False
:raises:
None
@@ -1825,7 +1890,7 @@ class STLClient(object):
ports = self._validate_port_list(ports)
# check arguments
- validate_type('promiscuous', promiscuous, (bool, NoneType))
+ validate_type('promiscuous', promiscuous, (bool, type(None)))
# build attributes
attr_dict = {}
@@ -1845,7 +1910,7 @@ class STLClient(object):
def clear_events (self):
"""
- clear all events
+ Clear all events
:parameters:
None
@@ -1921,7 +1986,7 @@ class STLClient(object):
@__console
def start_line (self, line):
- '''Start selected traffic in specified ports on TRex\n'''
+ '''Start selected traffic on specified ports on TRex\n'''
# define a parser
parser = parsing_opts.gen_parser(self,
"start",
@@ -2008,7 +2073,7 @@ class STLClient(object):
@__console
def stop_line (self, line):
- '''Stop active traffic in specified ports on TRex\n'''
+ '''Stop active traffic on specified ports on TRex\n'''
parser = parsing_opts.gen_parser(self,
"stop",
self.stop_line.__doc__,
@@ -2061,7 +2126,7 @@ class STLClient(object):
@__console
def pause_line (self, line):
- '''Pause active traffic in specified ports on TRex\n'''
+ '''Pause active traffic on specified ports on TRex\n'''
parser = parsing_opts.gen_parser(self,
"pause",
self.pause_line.__doc__,
@@ -2086,7 +2151,7 @@ class STLClient(object):
@__console
def resume_line (self, line):
- '''Resume active traffic in specified ports on TRex\n'''
+ '''Resume active traffic on specified ports on TRex\n'''
parser = parsing_opts.gen_parser(self,
"resume",
self.resume_line.__doc__,
@@ -2130,7 +2195,7 @@ class STLClient(object):
@__console
def show_stats_line (self, line):
- '''Fetch statistics from TRex server by port\n'''
+ '''Get statistics from TRex server by port\n'''
# define a parser
parser = parsing_opts.gen_parser(self,
"stats",
@@ -2161,7 +2226,7 @@ class STLClient(object):
@__console
def show_streams_line(self, line):
- '''Fetch streams statistics from TRex server by port\n'''
+ '''Get stream statistics from TRex server by port\n'''
# define a parser
parser = parsing_opts.gen_parser(self,
"streams",
@@ -2190,7 +2255,7 @@ class STLClient(object):
@__console
def validate_line (self, line):
- '''validates port(s) stream configuration\n'''
+ '''Validates port(s) stream configuration\n'''
parser = parsing_opts.gen_parser(self,
"validate",
@@ -2208,7 +2273,7 @@ class STLClient(object):
@__console
def push_line (self, line):
- '''Push a PCAP file '''
+ '''Push a pcap file '''
parser = parsing_opts.gen_parser(self,
"push",
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
index d6d66ec3..ed0c393d 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_ext.py
@@ -9,7 +9,7 @@ TREX_STL_EXT_PATH = os.environ.get('TREX_STL_EXT_PATH')
# take default
if not TREX_STL_EXT_PATH or not os.path.exists(TREX_STL_EXT_PATH):
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
- TREX_STL_EXT_PATH = os.path.normpath(os.path.join(CURRENT_PATH, os.pardir, 'external_libs'))
+ TREX_STL_EXT_PATH = os.path.normpath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, 'external_libs'))
if not os.path.exists(TREX_STL_EXT_PATH):
# ../../../../external_libs
TREX_STL_EXT_PATH = os.path.normpath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, os.pardir, os.pardir, 'external_libs'))
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
index 166fd64e..bd5ba8e7 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
@@ -26,9 +26,9 @@ class BatchMessage(object):
self.rpc_client = rpc_client
self.batch_list = []
- def add (self, method_name, params={}):
+ def add (self, method_name, params = None, api_class = 'core'):
- id, msg = self.rpc_client.create_jsonrpc_v2(method_name, params, encode = False)
+ id, msg = self.rpc_client.create_jsonrpc_v2(method_name, params, api_class, encode = False)
self.batch_list.append(msg)
def invoke(self, block = False):
@@ -46,8 +46,9 @@ class JsonRpcClient(object):
MSG_COMPRESS_THRESHOLD = 4096
MSG_COMPRESS_HEADER_MAGIC = 0xABE85CEA
- def __init__ (self, default_server, default_port, logger):
- self.logger = logger
+ def __init__ (self, default_server, default_port, client):
+ self.client = client
+ self.logger = client.logger
self.connected = False
# default values
@@ -93,14 +94,18 @@ class JsonRpcClient(object):
def create_batch (self):
return BatchMessage(self)
- def create_jsonrpc_v2 (self, method_name, params = {}, encode = True):
+ def create_jsonrpc_v2 (self, method_name, params = None, api_class = 'core', encode = True):
msg = {}
msg["jsonrpc"] = "2.0"
msg["method"] = method_name
+ msg["id"] = next(self.id_gen)
- msg["params"] = params
+ msg["params"] = params if params is not None else {}
- msg["id"] = next(self.id_gen)
+ # if this RPC has an API class - add its handler
+ if api_class:
+ msg["params"]["api_h"] = self.client.api_h[api_class]
+
if encode:
return id, json.dumps(msg)
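For illustration, a runnable sketch of the message shape produced by the ``api_h`` injection above, with hypothetical id and handler values::

    msg = {"jsonrpc": "2.0", "method": "start_traffic", "id": 17, "params": {}}

    api_h = {'core': 'SPhxQ8l4'}      # hypothetical handler obtained from api_sync
    api_class = 'core'

    if api_class:
        msg["params"]["api_h"] = api_h[api_class]

    # 'ping' and 'api_sync' itself are sent with api_class = None, so no api_h is attached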
@@ -108,11 +113,11 @@ class JsonRpcClient(object):
return id, msg
- def invoke_rpc_method (self, method_name, params = {}):
+ def invoke_rpc_method (self, method_name, params = None, api_class = 'core'):
if not self.connected:
return RC_ERR("Not connected to server")
- id, msg = self.create_jsonrpc_v2(method_name, params)
+ id, msg = self.create_jsonrpc_v2(method_name, params, api_class)
return self.send_msg(msg)
@@ -273,7 +278,7 @@ class JsonRpcClient(object):
self.connected = True
- rc = self.invoke_rpc_method('ping')
+ rc = self.invoke_rpc_method('ping', api_class = None)
if not rc:
self.connected = False
return rc
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
index a7064853..f8517a47 100644..100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py
@@ -35,7 +35,7 @@ def safe_ord (c):
elif type(c) is int:
return c
else:
- raise TypeError("cannot convert: {0} of type: {1}".format(c, type(c)))
+ raise TypeError("Cannot convert: {0} of type: {1}".format(c, type(c)))
def _buffer_to_num(str_buffer):
validate_type('str_buffer', str_buffer, bytes)
@@ -48,18 +48,18 @@ def _buffer_to_num(str_buffer):
def ipv4_str_to_num (ipv4_buffer):
validate_type('ipv4_buffer', ipv4_buffer, bytes)
- assert len(ipv4_buffer)==4, 'size of ipv4_buffer is not 4'
+ assert len(ipv4_buffer)==4, 'Size of ipv4_buffer is not 4'
return _buffer_to_num(ipv4_buffer)
def mac_str_to_num (mac_buffer):
validate_type('mac_buffer', mac_buffer, bytes)
- assert len(mac_buffer)==6, 'size of mac_buffer is not 6'
+ assert len(mac_buffer)==6, 'Size of mac_buffer is not 6'
return _buffer_to_num(mac_buffer)
def is_valid_ipv4(ip_addr):
"""
- return buffer in network order
+ Return buffer in network order
"""
if type(ip_addr) == bytes and len(ip_addr) == 4:
return ip_addr
@@ -72,12 +72,12 @@ def is_valid_ipv4(ip_addr):
except AttributeError: # no inet_pton here, sorry
return socket.inet_aton(ip_addr)
except socket.error: # not a valid address
- raise CTRexPacketBuildException(-10,"not valid ipv4 format");
+ raise CTRexPacketBuildException(-10,"Not valid ipv4 format");
def is_valid_ipv6(ipv6_addr):
"""
- return buffer in network order
+ Return buffer in network order
"""
if type(ipv6_addr) == bytes and len(ipv6_addr) == 16:
return ipv6_addr
@@ -107,12 +107,12 @@ class CTRexScFieldRangeBase(CTRexScriptsBase):
self.field_name =field_name
self.field_type =field_type
if not self.field_type in CTRexScFieldRangeBase.FILED_TYPES :
- raise CTRexPacketBuildException(-12, 'field type should be in %s' % FILED_TYPES);
+ raise CTRexPacketBuildException(-12, 'Field type should be in %s' % FILED_TYPES);
class CTRexScFieldRangeValue(CTRexScFieldRangeBase):
"""
- range of field value
+ Range of field values
"""
def __init__(self, field_name,
field_type,
@@ -123,14 +123,14 @@ class CTRexScFieldRangeValue(CTRexScFieldRangeBase):
self.min_value =min_value;
self.max_value =max_value;
if min_value > max_value:
- raise CTRexPacketBuildException(-12, 'min is greater than max');
+ raise CTRexPacketBuildException(-12, 'Invalid range: min is greater than max.');
if min_value == max_value:
- raise CTRexPacketBuildException(-13, "min value is equal to max value, you can't use this type of range");
+ raise CTRexPacketBuildException(-13, "Invalid range: min value is equal to max value.");
class CTRexScIpv4SimpleRange(CTRexScFieldRangeBase):
"""
- range of ipv4 ip
+ Range of ipv4 ip
"""
def __init__(self, field_name, field_type, min_ip, max_ip):
super(CTRexScIpv4SimpleRange, self).__init__(field_name,field_type)
@@ -144,7 +144,7 @@ class CTRexScIpv4SimpleRange(CTRexScFieldRangeBase):
class CTRexScIpv4TupleGen(CTRexScriptsBase):
"""
- range tuple
+ Range tuple
"""
FLAGS_ULIMIT_FLOWS =1
@@ -166,7 +166,7 @@ class CTRexScIpv4TupleGen(CTRexScriptsBase):
class CTRexScTrimPacketSize(CTRexScriptsBase):
"""
- trim packet size. field type is CTRexScFieldRangeBase.FILED_TYPES = ["inc","dec","rand"]
+ Trim packet size. Field type is CTRexScFieldRangeBase.FILED_TYPES = ["inc","dec","rand"]
"""
def __init__(self,field_type="rand",min_pkt_size=None, max_pkt_size=None):
super(CTRexScTrimPacketSize, self).__init__()
@@ -183,11 +183,11 @@ class CTRexScTrimPacketSize(CTRexScriptsBase):
class STLScVmRaw(CTRexScriptsBase):
"""
- raw instructions
+ Raw instructions
"""
def __init__(self,list_of_commands=None,split_by_field=None):
"""
- include a list of a basic instructions objects
+ Include a list of basic instruction objects.
:parameters:
list_of_commands : list
@@ -197,7 +197,7 @@ class STLScVmRaw(CTRexScriptsBase):
by which field to split to threads
- The following example will split the generated traffic by "ip_src" variable
+ The following example splits the generated traffic by the "ip_src" variable.
.. code-block:: python
:caption: Split by
@@ -241,7 +241,7 @@ class STLScVmRaw(CTRexScriptsBase):
class CTRexVmInsBase(object):
"""
- instruction base
+ Instruction base
"""
def __init__(self, ins_type):
self.type = ins_type
@@ -334,7 +334,7 @@ class CTRexVmEngine(object):
def __init__(self):
"""
- inlcude list of instruction
+ Include a list of instructions.
"""
super(CTRexVmEngine, self).__init__()
self.ins=[]
@@ -390,7 +390,7 @@ class CTRexScapyPktUtl(object):
def get_pkt_layers(self):
"""
- return string 'IP:UDP:TCP'
+ Return string 'IP:UDP:TCP'
"""
l=self.get_list_iter ();
l1=map(lambda p: p.name,l );
@@ -398,7 +398,7 @@ class CTRexScapyPktUtl(object):
def _layer_offset(self, name, cnt = 0):
"""
- return offset of layer e.g 'IP',1 will return offfset of layer ip:1
+ Return offset of layer. Example: 'IP',1 returns offset of layer ip:1
"""
save_cnt=cnt
for pkt in self.pkt_iter ():
@@ -413,7 +413,7 @@ class CTRexScapyPktUtl(object):
def layer_offset(self, name, cnt = 0):
"""
- return offset of layer e.g 'IP',1 will return offfset of layer ip:1
+ Return offset of layer. Example: 'IP',1 returns offset of layer ip:1
"""
save_cnt=cnt
for pkt in self.pkt_iter ():
@@ -427,7 +427,7 @@ class CTRexScapyPktUtl(object):
def get_field_offet(self, layer, layer_cnt, field_name):
"""
- return offset of layer e.g 'IP',1 will return offfset of layer ip:1
+ Return offset of layer. Example: 'IP',1 returns offset of layer ip:1
"""
t=self._layer_offset(layer,layer_cnt);
l_offset=t[1];
@@ -439,11 +439,11 @@ class CTRexScapyPktUtl(object):
if f.name == field_name:
return (l_offset+f.offset,f.get_size_bytes ());
- raise CTRexPacketBuildException(-11, "no layer %s-%d." % (name, save_cnt, field_name));
+ raise CTRexPacketBuildException(-11, "No layer %s-%d." % (name, save_cnt, field_name));
def get_layer_offet_by_str(self, layer_des):
"""
- return layer offset by string
+ Return layer offset by string.
:parameters:
@@ -469,21 +469,21 @@ class CTRexScapyPktUtl(object):
def get_field_offet_by_str(self, field_des):
"""
- return field_des (offset,size) layer:cnt.field
- for example
+ Return field_des (offset,size) layer:cnt.field
+ Example:
802|1Q.vlan get 802.1Q->valn replace | with .
IP.src
IP:0.src (first IP.src like IP.src)
- for example IP:1.src for internal IP
+ Example: IP:1.src for internal IP
- return (offset, size) as tuple
+ Return (offset, size) as tuple.
"""
s=field_des.split(".");
if len(s)!=2:
- raise CTRexPacketBuildException(-11, ("field desription should be layer:cnt.field e.g IP.src or IP:1.src"));
+ raise CTRexPacketBuildException(-11, ("Field description should be layer:cnt.field. Example: IP.src or IP:1.src"));
layer_ex = s[0].replace("|",".")
@@ -514,7 +514,7 @@ class CTRexScapyPktUtl(object):
class CTRexVmDescBase(object):
"""
- instruction base
+ Instruction base
"""
def __init__(self):
pass;
@@ -534,37 +534,37 @@ class CTRexVmDescBase(object):
def get_var_ref (self):
'''
- virtual function return a ref var name
+ Virtual function returns a ref var name.
'''
return None
def get_var_name(self):
'''
- virtual function return the varible name if exists
+ Virtual function returns the variable name if it exists.
'''
return None
def compile(self,parent):
'''
- virtual function to take parent than has function name_to_offset
+ Virtual function to take parent that has function name_to_offset.
'''
pass;
def valid_fv_size (size):
if not (size in CTRexVmInsFlowVar.VALID_SIZES):
- raise CTRexPacketBuildException(-11,("flow var has not valid size %d ") % size );
+ raise CTRexPacketBuildException(-11,("Flow var has invalid size %d ") % size );
def valid_fv_ops (op):
if not (op in CTRexVmInsFlowVar.OPERATIONS):
- raise CTRexPacketBuildException(-11,("flow var does not have a valid op %s ") % op );
+ raise CTRexPacketBuildException(-11,("Flow var has invalid op %s ") % op );
def convert_val (val):
if is_integer(val):
return val
if type(val) == str:
return ipv4_str_to_num (is_valid_ipv4(val))
- raise CTRexPacketBuildException(-11,("init val not valid %s ") % val );
+ raise CTRexPacketBuildException(-11,("Init val invalid %s ") % val );
def check_for_int (val):
validate_type('val', val, int)
@@ -574,31 +574,32 @@ class STLVmFlowVar(CTRexVmDescBase):
def __init__(self, name, init_value=None, min_value=0, max_value=255, size=4, step=1,op="inc"):
"""
- Flow variable instruction. Allocate a variable on a stream context. The size of the variable could be determined
- The operation can be inc, dec and random. In case of increment and decrement operation, it is possible to set the "step" size.
- Initialization value, minimum and maximum value could be set too.
+ Flow variable instruction. Allocates a variable on a stream context. The size argument determines the variable size.
+ The operation can be inc, dec, and random.
+ For increment and decrement operations, can set the "step" size.
+ For all operations, can set initialization value, minimum and maximum value.
:parameters:
name : string
- The name of the stream variable
+ Name of the stream variable
init_value : int
- The init value of the variable. in case of None it will be min_value
+ Init value of the variable. If not specified, it will be min_value
min_value : int
- The min value
+ Min value
max_value : int
- The max value
+ Max value
size : int
- the number of bytes of the variable. could be 1,2,4,8 for uint8_t, uint16_t, uint32_t, uint64_t
+ Number of bytes of the variable. Possible values: 1,2,4,8 for uint8_t, uint16_t, uint32_t, uint64_t
step : int
- step in case of "inc","dec" operation
+ Step in case of "inc" or "dec" operations
op : string
- could be "inc", "dec", "random"
+ Possible values: "inc", "dec", "random"
.. code-block:: python
:caption: Example1
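A minimal constructor sketch using the parameters documented above (IPv4 strings assumed acceptable for the value arguments, as handled by convert_val)::

    # 4-byte variable stepping through 16.0.0.1 .. 16.0.0.254
    vm_var = STLVmFlowVar(name = "ip_src",
                          min_value = "16.0.0.1",
                          max_value = "16.0.0.254",
                          size = 4,
                          step = 1,
                          op = "inc")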
@@ -657,7 +658,7 @@ class STLVmFlowVar(CTRexVmDescBase):
class STLVmFixIpv4(CTRexVmDescBase):
def __init__(self, offset):
"""
- Fix IPv4 header checksum. should be added if the packet header was changed and there is a need to fix he checksum
+ Fix IPv4 header checksum. Use this if the packet header has changed and it is necessary to fix the checksum.
:parameters:
offset : uint16_t or string
@@ -694,26 +695,28 @@ class STLVmWrFlowVar(CTRexVmDescBase):
def __init__(self, fv_name, pkt_offset, offset_fixup=0, add_val=0, is_big=True):
"""
Write a stream variable into a packet field.
- The write is done in size of the stream variable.
- In case there is a need to change the write have a look into the command `STLVmWrMaskFlowVar`.
- The Field name/offset can be given by name in this format ``header[:id].field``.
+ The write position is determined by the packet offset + offset fixup. The size of the write is determined by the stream variable.
+ Example: Offset 10, fixup 0, variable size 4. This function writes at 10, 11, 12, and 13.
+
+ For information about changing the write size, offset, or fixup, see the `STLVmWrMaskFlowVar` command.
+ The Field name/offset can be given by name in the following format: ``header[:id].field``.
:parameters:
fv_name : string
- the stream variable to write to a packet offset
+ Stream variable to write to a packet offset.
pkt_offset : string or in
- the name of the field or offset in byte from packet start.
+ Name of the field or offset in bytes from packet start.
offset_fixup : int
- how many bytes to go forward. In case of a negative value go backward
+ Number of bytes to move forward. If negative, move backward.
add_val : int
- value to add to stream variable before writing it to packet field. can be used as a constant offset
+ Value to add to the stream variable before writing it to the packet field. Can be used as a constant offset.
is_big : bool
- how to write the variable to the the packet. is it big-edian or little edian
+ How to write the variable to the packet. True=big-endian, False=little-endian
.. code-block:: python
:caption: Example3
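A minimal Field Engine sketch tying the instructions above together (variable, write, checksum fix)::

    vm = STLScVmRaw([ STLVmFlowVar(name = "ip_src", min_value = "16.0.0.1",
                                   max_value = "16.0.0.254", size = 4, op = "inc"),

                      # write the 4-byte variable into the IPv4 source address field
                      STLVmWrFlowVar(fv_name = "ip_src", pkt_offset = "IP.src"),

                      # the header changed, so fix the IPv4 checksum
                      STLVmFixIpv4(offset = "IP")
                    ])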
@@ -759,7 +762,7 @@ class STLVmWrMaskFlowVar(CTRexVmDescBase):
"""
Write a stream variable into a packet field with some operations.
- Using this instruction the variable size and the field could be with different size.
+ Using this instruction, the variable and the packet field can have different sizes.
Pseudocode of this code::
@@ -805,7 +808,7 @@ class STLVmWrMaskFlowVar(CTRexVmDescBase):
is_big : bool
how to write the variable to the the packet. is it big-edian or little edian
- Example 1- casting from uint16_t (var) to uint8_t (pkt)::
+ Example 1 - Cast from uint16_t (var) to uint8_t (pkt)::
base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
@@ -824,7 +827,7 @@ class STLVmWrMaskFlowVar(CTRexVmDescBase):
pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
- Example 2- change MSB of uint16_t variable::
+ Example 2 - Change MSB of uint16_t variable::
vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src",
@@ -841,7 +844,7 @@ class STLVmWrMaskFlowVar(CTRexVmDescBase):
- Example 3- Every 2 packet change the MAC (shift right)::
+ Example 3 - Every 2 packets, change the MAC (shift right)::
vm = STLScVmRaw( [ STLVmFlowVar(name="mac_src",
min_value=1,
@@ -893,12 +896,12 @@ class STLVmWrMaskFlowVar(CTRexVmDescBase):
class STLVmTrimPktSize(CTRexVmDescBase):
"""
- Trim packet size by stream variable size. This instruction will only change the total packet size and will not fix up the fields to match the new size.
+ Trim the packet size by the stream variable size. This instruction only changes the total packet size, and does not repair the fields to match the new size.
:parameters:
fv_name : string
- the stream variable name. the value from this variable would be the new total packet size.
+ Stream variable name. The value of this variable is the new total packet size.
For Example::
@@ -959,8 +962,8 @@ class STLVmTrimPktSize(CTRexVmDescBase):
class STLVmTupleGen(CTRexVmDescBase):
def __init__(self,name, ip_min="0.0.0.1", ip_max="0.0.0.10", port_min=1025, port_max=65535, limit_flows=100000, flags=0):
"""
- Generate a struct with two varibles. ``var_name.ip`` as uint32_t and ``var_name.port`` as uint16_t
- The variable are dependent. When the ip variable is wrapped the port is getting increment.
+ Generate a struct with two variables: ``var_name.ip`` as uint32_t and ``var_name.port`` as uint16_t
+ The variables are dependent. When the ip variable value reaches its maximum, the port is incremented.
For:
@@ -1001,22 +1004,22 @@ class STLVmTupleGen(CTRexVmDescBase):
:parameters:
name : string
- The name of the stream struct.
+ Name of the stream struct.
ip_min : string or int
- The min value of the ip value. It can be in IPv4 format
+ Min value of the ip value. Number or IPv4 format.
ip_max : string or int
- The max value of the ip value. It can be in IPv4 format
+ Max value of the ip value. Number or IPv4 format.
port_min : int
- min value for port variable
+ Min value of port variable.
port_max : int
- max value for port variable
+ Max value of port variable.
limit_flows : int
- The limit of number of flows
+ Limit of number of flows.
flags : 0
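A minimal sketch pairing the tuple generator with the write instructions described earlier::

    vm = STLScVmRaw([ STLVmTupleGen(name = "tuple",
                                    ip_min = "16.0.0.1", ip_max = "16.0.0.10",
                                    port_min = 1025, port_max = 65535,
                                    limit_flows = 100000),

                      STLVmWrFlowVar(fv_name = "tuple.ip", pkt_offset = "IP.src"),
                      STLVmFixIpv4(offset = "IP"),
                      STLVmWrFlowVar(fv_name = "tuple.port", pkt_offset = "UDP.sport")
                    ])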
@@ -1093,10 +1096,10 @@ class STLPktBuilder(CTrexPktBuilderInterface):
def __init__(self, pkt = None, pkt_buffer = None, vm = None, path_relative_to_profile = False, build_raw = False, remove_fcs = True):
"""
- This class defines a way to build a template packet, and Field Engine using scapy package.
- Using this class the user can also define how TRex will handle the packet by specifying the Field engine setting.
- pkt could be Scapy pkt or pcap file name
- When path_relative_to_profile is a True load pcap file from a path relative to the profile
+ This class defines a method for building a template packet and Field Engine using the Scapy package.
+ Using this class the user can also define how TRex will handle the packet by specifying the Field engine settings.
+ The pkt can be a Scapy pkt or pcap file name.
+ If using a pcap file, and path_relative_to_profile is True, then the function loads the pcap file from a path relative to the profile.
.. code-block:: python
@@ -1134,22 +1137,22 @@ class STLPktBuilder(CTrexPktBuilderInterface):
:parameters:
pkt : string,
- Scapy or pcap file filename a scapy packet
+ Scapy object or pcap filename.
pkt_buffer : bytes
- a packet as buffer
+ Packet as buffer.
vm : list or base on :class:`trex_stl_lib.trex_stl_packet_builder_scapy.STLScVmRaw`
- a list of instructions to manipolate packet fields
+ List of instructions to manipulate packet fields.
path_relative_to_profile : bool
- in case pkt is pcap file, do we want to load it relative to profile file
+ If pkt is a pcap file, determines whether to load it relative to the profile file.
build_raw : bool
- Do we want to build scapy in case buffer was given. good for cases we want offset to be taken from scapy
+ If a buffer is specified (by pkt_buffer), determines whether to build Scapy. Useful in cases where it is necessary to take the offset from Scapy.
remove_fcs : bool
- in case of buffer do we want to remove fcs
+ If a buffer is specified (by pkt_buffer), determines whether to remove FCS.
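A minimal sketch of building a template packet plus a Field Engine program (Scapy layers as used throughout this file)::

    base_pkt = Ether()/IP(src = "16.0.0.1", dst = "48.0.0.1")/UDP(dport = 12, sport = 1025)
    pad = 'x' * (60 - len(base_pkt))

    # 'vm' here is an STLScVmRaw program such as the sketches above
    pkt = STLPktBuilder(pkt = base_pkt / pad, vm = vm)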
@@ -1171,7 +1174,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
if pkt != None and pkt_buffer != None:
- raise CTRexPacketBuildException(-15, "packet builder cannot be provided with both pkt and pkt_buffer")
+ raise CTRexPacketBuildException(-15, "Packet builder cannot be provided with both pkt and pkt_buffer.")
# process packet
if pkt != None:
@@ -1183,7 +1186,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
# process VM
if vm != None:
if not isinstance(vm, (STLScVmRaw, list)):
- raise CTRexPacketBuildException(-14, "bad value for variable vm")
+ raise CTRexPacketBuildException(-14, "Bad value for variable vm.")
self.add_command(vm if isinstance(vm, STLScVmRaw) else STLScVmRaw(vm))
@@ -1219,7 +1222,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
def dump_pkt(self, encode = True):
"""
- Dumps the packet as a decimal array of bytes (each item x gets value between 0-255)
+ Dumps the packet as a decimal array of bytes (each item x gets value in range 0-255)
:parameters:
encode : bool
@@ -1254,7 +1257,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
def pkt_layers_desc (self):
"""
- return layer description like this IP:TCP:Pyload
+ Return layer description in this format: IP:TCP:Payload
"""
pkt_buf = self._get_pkt_as_str()
@@ -1274,13 +1277,13 @@ class STLPktBuilder(CTrexPktBuilderInterface):
def set_pcap_file (self, pcap_file):
"""
- load raw pcap file into a buffer. load only the first packet
+ Load raw pcap file into a buffer. Loads only the first packet.
:parameters:
pcap_file : file_name
:raises:
- + :exc:`AssertionError`, in case packet is empty.
+ + :exc:`AssertionError`, if packet is empty.
"""
f_path = self._get_pcap_file_path (pcap_file)
@@ -1293,7 +1296,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
self.pkt_raw = pkt[0]
break
if not was_set :
- raise CTRexPacketBuildException(-14, "no buffer inside the pcap file {0}".format(f_path))
+ raise CTRexPacketBuildException(-14, "No buffer inside the pcap file {0}".format(f_path))
def to_pkt_dump(self):
p = self.pkt
@@ -1312,7 +1315,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
"""
Scapy packet
- For Example::
+ Example::
pkt =Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/('x'*10)
@@ -1401,7 +1404,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
if var_names :
for var_name in var_names:
if var_name in vars:
- raise CTRexPacketBuildException(-11,("variable %s define twice ") % (var_name) );
+ raise CTRexPacketBuildException(-11,("Variable %s defined twice ") % (var_name) );
else:
vars[var_name]=1
@@ -1410,7 +1413,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
var_name = desc.get_var_ref()
if var_name :
if not var_name in vars:
- raise CTRexPacketBuildException(-11,("variable %s does not exists ") % (var_name) );
+ raise CTRexPacketBuildException(-11,("Variable %s does not exist ") % (var_name) );
desc.compile(self);
for desc in obj.commands:
@@ -1439,7 +1442,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
# regular scapy packet
elif not self.pkt:
# should not reach here
- raise CTRexPacketBuildException(-11, 'empty packet')
+ raise CTRexPacketBuildException(-11, 'Empty packet')
if self.remove_fcs and self.pkt.lastlayer().name == 'Padding':
self.pkt.lastlayer().underlayer.remove_payload()
@@ -1469,7 +1472,7 @@ class STLPktBuilder(CTrexPktBuilderInterface):
if self.pkt_raw:
return self.pkt_raw
- raise CTRexPacketBuildException(-11, 'empty packet');
+ raise CTRexPacketBuildException(-11, 'Empty packet');
def _add_tuple_gen(self,tuple_gen):
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
index 47124114..89ad2663 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
@@ -7,8 +7,8 @@ from .trex_stl_types import *
from . import trex_stl_stats
import base64
-import time
import copy
+from datetime import datetime, timedelta
StreamOnPort = namedtuple('StreamOnPort', ['compiled_stream', 'metadata'])
@@ -61,6 +61,8 @@ class Port(object):
self.port_stats = trex_stl_stats.CPortStats(self)
self.next_available_id = 1
+ self.tx_stopped_ts = None
+ self.has_rx_streams = False
def err(self, msg):
@@ -79,8 +81,7 @@ class Port(object):
"session_id": self.session_id,
"force": force}
- command = RpcCmdData("acquire", params)
- rc = self.transmit(command.method, command.params)
+ rc = self.transmit("acquire", params)
if rc.good():
self.handler = rc.data()
return self.ok()
@@ -92,8 +93,7 @@ class Port(object):
params = {"port_id": self.port_id,
"handler": self.handler}
- command = RpcCmdData("release", params)
- rc = self.transmit(command.method, command.params)
+ rc = self.transmit("release", params)
self.handler = None
if rc.good():
@@ -117,8 +117,7 @@ class Port(object):
def sync(self):
params = {"port_id": self.port_id}
- command = RpcCmdData("get_port_status", params)
- rc = self.transmit(command.method, command.params)
+ rc = self.transmit("get_port_status", params)
if rc.bad():
return self.err(rc.err())
@@ -147,8 +146,7 @@ class Port(object):
# sync the streams
params = {"port_id": self.port_id}
- command = RpcCmdData("get_all_streams", params)
- rc = self.transmit(command.method, command.params)
+ rc = self.transmit("get_all_streams", params)
if rc.bad():
return self.err(rc.err())
@@ -222,7 +220,7 @@ class Port(object):
"stream_id": stream_id,
"stream": stream_json}
- cmd = RpcCmdData('add_stream', params)
+ cmd = RpcCmdData('add_stream', params, 'core')
batch.append(cmd)
@@ -239,6 +237,9 @@ class Port(object):
'rate' : streams_list[i].get_rate()}
ret.add(RC_OK(data = stream_id))
+
+ self.has_rx_streams = self.has_rx_streams or streams_list[i].has_flow_stats()
+
else:
ret.add(RC(*single_rc))
@@ -271,7 +272,7 @@ class Port(object):
"port_id": self.port_id,
"stream_id": stream_id}
- cmd = RpcCmdData('remove_stream', params)
+ cmd = RpcCmdData('remove_stream', params, 'core')
batch.append(cmd)
@@ -283,6 +284,9 @@ class Port(object):
self.state = self.STATE_STREAMS if (len(self.streams) > 0) else self.STATE_IDLE
+ # recheck if any RX stats streams present on the port
+ self.has_rx_streams = any([stream.has_flow_stats() for stream in self.streams])
+
return self.ok() if rc else self.err(rc.err())
@@ -305,6 +309,7 @@ class Port(object):
self.streams = {}
self.state = self.STATE_IDLE
+ self.has_rx_streams = False
return self.ok()
@@ -351,7 +356,7 @@ class Port(object):
# stop traffic
# with force ignores the cached state and sends the command
def stop (self, force = False):
-
+
if not self.is_acquired():
return self.err("port is not owned")
@@ -360,7 +365,6 @@ class Port(object):
if (self.state == self.STATE_IDLE) or (self.state == self.state == self.STATE_STREAMS):
return self.ok()
-
params = {"handler": self.handler,
"port_id": self.port_id}
@@ -370,8 +374,56 @@ class Port(object):
self.state = self.STATE_STREAMS
+ # timestamp for last tx
+ self.tx_stopped_ts = datetime.now()
+
+ return self.ok()
+
+
+ # return True if port has any stream configured with RX stats
+ def has_rx_enabled (self):
+ return self.has_rx_streams
+
+
+ # return true if rx_delay_ms has passed since the last port stop
+ def has_rx_delay_expired (self, rx_delay_ms):
+ assert(self.has_rx_enabled())
+
+ # if active - it's not safe to remove RX filters
+ if self.is_active():
+ return False
+
+ # either no timestamp present or time has already passed
+ return not self.tx_stopped_ts or (datetime.now() - self.tx_stopped_ts) > timedelta(milliseconds = rx_delay_ms)
+
+
+
+ def remove_rx_filters (self):
+ assert(self.has_rx_enabled())
+
+ if not self.is_acquired():
+ return self.err("port is not owned")
+
+ if self.state == self.STATE_DOWN:
+ return self.err("Unable to remove RX filters - port is down")
+
+ if self.state == self.STATE_TX:
+ return self.err("Unable to remove RX filters - port is transmitting")
+
+ if self.state == self.STATE_IDLE:
+ return self.ok()
+
+
+ params = {"handler": self.handler,
+ "port_id": self.port_id}
+
+ rc = self.transmit("remove_rx_filters", params)
+ if rc.bad():
+ return self.err(rc.err())
+
return self.ok()
+
def pause (self):
if not self.is_acquired():
@@ -597,6 +649,8 @@ class Port(object):
################# events handler ######################
def async_event_port_job_done (self):
+ # until thread is locked - order is important
+ self.tx_stopped_ts = datetime.now()
self.state = self.STATE_STREAMS
# rest of the events are used for TUI / read only sessions
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
index 18678e3e..1d89a599 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_sim.py
@@ -39,7 +39,7 @@ class BpSimException(Exception):
# stateless simulation
class STLSim(object):
- def __init__ (self, bp_sim_path = None, handler = 0, port_id = 0):
+ def __init__ (self, bp_sim_path = None, handler = 0, port_id = 0, api_h = "dummy"):
if not bp_sim_path:
# auto find scripts
@@ -54,6 +54,7 @@ class STLSim(object):
# dummies
self.handler = handler
+ self.api_h = api_h
self.port_id = port_id
@@ -62,6 +63,7 @@ class STLSim(object):
"jsonrpc": "2.0",
"method": "start_traffic",
"params": {"handler": self.handler,
+ "api_h" : self.api_h,
"force": force,
"port_id": self.port_id,
"mul": parsing_opts.decode_multiplier(mult),
@@ -168,6 +170,7 @@ class STLSim(object):
"jsonrpc": "2.0",
"method": "add_stream",
"params": {"handler": self.handler,
+ "api_h": self.api_h,
"port_id": self.port_id,
"stream_id": stream_id,
"stream": stream_json}
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
index 18c49d4e..a4bb64db 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
@@ -3,9 +3,8 @@
from .utils import text_tables
from .utils.text_opts import format_text, format_threshold, format_num
-from .trex_stl_async_client import CTRexAsyncStats
-
from collections import namedtuple, OrderedDict, deque
+import sys
import copy
import datetime
import time
@@ -520,6 +519,10 @@ class CTRexStats(object):
value = abs(v)
arrow = u'\u25b2' if v > 0 else u'\u25bc'
+
+ if sys.version_info < (3,0):
+ arrow = arrow.encode('utf-8')
+
color = up_color if v > 0 else down_color
# change in 1% is not meaningful
@@ -529,22 +532,22 @@ class CTRexStats(object):
elif value > 5:
if show_value:
- return format_text(u"{0}{0}{0} {1:.2f}%".format(arrow,v), color)
+ return format_text("{0}{0}{0} {1:.2f}%".format(arrow,v), color)
else:
- return format_text(u"{0}{0}{0}".format(arrow), color)
+ return format_text("{0}{0}{0}".format(arrow), color)
elif value > 2:
if show_value:
- return format_text(u"{0}{0} {1:.2f}%".format(arrow,v), color)
+ return format_text("{0}{0} {1:.2f}%".format(arrow,v), color)
else:
- return format_text(u"{0}{0}".format(arrow), color)
+ return format_text("{0}{0}".format(arrow), color)
else:
if show_value:
- return format_text(u"{0} {1:.2f}%".format(arrow,v), color)
+ return format_text("{0} {1:.2f}%".format(arrow,v), color)
else:
- return format_text(u"{0}".format(arrow), color)
+ return format_text("{0}".format(arrow), color)
@@ -595,21 +598,21 @@ class CGlobalStats(CTRexStats):
("version", "{ver}, UUID: {uuid}".format(ver=self.server_version.get("version", "N/A"),
uuid="N/A")),
- ("cpu_util", u"{0}% {1}".format( format_threshold(self.get("m_cpu_util"), [85, 100], [0, 85]),
+ ("cpu_util", "{0}% {1}".format( format_threshold(self.get("m_cpu_util"), [85, 100], [0, 85]),
self.get_trend_gui("m_cpu_util", use_raw = True))),
(" ", ""),
- ("total_tx_L2", u"{0} {1}".format( self.get("m_tx_bps", format=True, suffix="b/sec"),
+ ("total_tx_L2", "{0} {1}".format( self.get("m_tx_bps", format=True, suffix="b/sec"),
self.get_trend_gui("m_tx_bps"))),
- ("total_tx_L1", u"{0} {1}".format( self.get("m_tx_bps_L1", format=True, suffix="b/sec"),
+ ("total_tx_L1", "{0} {1}".format( self.get("m_tx_bps_L1", format=True, suffix="b/sec"),
self.get_trend_gui("m_tx_bps_L1"))),
- ("total_rx", u"{0} {1}".format( self.get("m_rx_bps", format=True, suffix="b/sec"),
+ ("total_rx", "{0} {1}".format( self.get("m_rx_bps", format=True, suffix="b/sec"),
self.get_trend_gui("m_rx_bps"))),
- ("total_pps", u"{0} {1}".format( self.get("m_tx_pps", format=True, suffix="pkt/sec"),
+ ("total_pps", "{0} {1}".format( self.get("m_tx_pps", format=True, suffix="pkt/sec"),
self.get_trend_gui("m_tx_pps"))),
(" ", ""),
@@ -721,24 +724,24 @@ class CPortStats(CTRexStats):
"----": " ",
"-----": " ",
- "Tx bps L1": u"{0} {1}".format(self.get_trend_gui("m_total_tx_bps_L1", show_value = False),
+ "Tx bps L1": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps_L1", show_value = False),
self.get("m_total_tx_bps_L1", format = True, suffix = "bps")),
- "Tx bps L2": u"{0} {1}".format(self.get_trend_gui("m_total_tx_bps", show_value = False),
+ "Tx bps L2": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps", show_value = False),
self.get("m_total_tx_bps", format = True, suffix = "bps")),
- "Line Util.": u"{0} {1}".format(self.get_trend_gui("m_percentage", show_value = False),
+ "Line Util.": "{0} {1}".format(self.get_trend_gui("m_percentage", show_value = False),
format_text(
self.get("m_percentage", format = True, suffix = "%") if self._port_obj else "",
- 'bold')),
+ 'bold')) if self._port_obj else "",
- "Rx bps": u"{0} {1}".format(self.get_trend_gui("m_total_rx_bps", show_value = False),
+ "Rx bps": "{0} {1}".format(self.get_trend_gui("m_total_rx_bps", show_value = False),
self.get("m_total_rx_bps", format = True, suffix = "bps")),
- "Tx pps": u"{0} {1}".format(self.get_trend_gui("m_total_tx_pps", show_value = False),
+ "Tx pps": "{0} {1}".format(self.get_trend_gui("m_total_tx_pps", show_value = False),
self.get("m_total_tx_pps", format = True, suffix = "pps")),
- "Rx pps": u"{0} {1}".format(self.get_trend_gui("m_total_rx_pps", show_value = False),
+ "Rx pps": "{0} {1}".format(self.get_trend_gui("m_total_rx_pps", show_value = False),
self.get("m_total_rx_pps", format = True, suffix = "pps")),
"opackets" : self.get_rel("opackets"),
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
index 4f8ce3e6..3ce876ad 100644..100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
@@ -21,22 +21,22 @@ class STLTXMode(object):
def __init__ (self, pps = None, bps_L1 = None, bps_L2 = None, percentage = None):
"""
- Speed could be in packet per second (pps) or L2/L1 bps or port precent
- only one of them is valid.
+ Speed can be given in packets per second (pps), L2/L1 bps, or port percent
+ Use only one unit.
you can enter pps =10000 oe bps_L1=10
:parameters:
pps : float
- packet per second
+ Packets per second
bps_L1 : float
- bit per second L1 (with IPG)
+ Bits per second L1 (with IPG)
bps_L2 : float
- bit per second L2 (Ethernet-FCS)
+ Bits per second L2 (Ethernet-FCS)
percentage : float
- link interface precent 0-100 e.g. 10 is 10%% of the port link setup
+ Link interface percent (0-100). Example: 10 is 10% of the port link setup
.. code-block:: python
:caption: STLTXMode Example
@@ -95,11 +95,11 @@ class STLTXMode(object):
# continuous mode
class STLTXCont(STLTXMode):
- """ continuous mode """
+ """ Continuous mode """
def __init__ (self, **kwargs):
"""
- continuous mode
+ Continuous mode
see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate
@@ -124,11 +124,11 @@ class STLTXSingleBurst(STLTXMode):
def __init__ (self, total_pkts = 1, **kwargs):
"""
- single burst mode
+ Single burst mode
:parameters:
total_pkts : int
- how many packets for this burst
+ Number of packets for this burst
see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate
@@ -154,7 +154,7 @@ class STLTXSingleBurst(STLTXMode):
# multi burst mode
class STLTXMultiBurst(STLTXMode):
- """ Multi burst mode """
+ """ Multi-burst mode """
def __init__ (self,
pkts_per_burst = 1,
@@ -162,18 +162,18 @@ class STLTXMultiBurst(STLTXMode):
count = 1,
**kwargs):
"""
- Multi burst mode
+ Multi-burst mode
:parameters:
pkts_per_burst: int
- how many packets per burst
+ Number of packets per burst
ibg : float
- inter burst gap in usec 1000,000.0 is 1 sec
+ Inter-burst gap in usec. 1,000,000.0 is 1 sec
count : int
- how many bursts
+ Number of bursts
see :class:`trex_stl_lib.trex_stl_streams.STLTXMode` for rate
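A minimal sketch of the three modes, each given exactly one rate unit as required::

    STLTXCont(pps = 1000)                               # continuous at 1000 packets/sec

    STLTXSingleBurst(total_pkts = 500, bps_L2 = 10e6)   # one burst of 500 packets at 10 Mbps L2

    STLTXMultiBurst(pkts_per_burst = 100,
                    ibg = 1000000.0,                    # 1 sec between bursts
                    count = 5,
                    percentage = 10)                    # 10% of the port link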
@@ -230,7 +230,7 @@ class STLFlowStats(object):
def to_json (self):
- """ dump as json"""
+ """ Dump as json"""
return dict(self.fields)
@staticmethod
@@ -238,7 +238,7 @@ class STLFlowStats(object):
return {'enabled' : False}
class STLStream(object):
- """ One stream object, include mode, Field Engine mode packet template and Rx stats
+ """ One stream object. Includes mode, Field Engine mode packet template and Rx stats
.. code-block:: python
:caption: STLStream Example
@@ -277,42 +277,42 @@ class STLStream(object):
:parameters:
name : string
- The name of the stream. Needed if this stream is dependent on another stream and another stream need to refer to this stream by its name.
+ Name of the stream. Required if this stream is dependent on another stream, and another stream needs to refer to this stream by name.
packet : STLPktBuilder see :class:`trex_stl_lib.trex_stl_packet_builder_scapy.STLPktBuilder`
- The template packet and field engine program e.g. packet = STLPktBuilder(pkt = base_pkt/pad)
+ Template packet and field engine program. Example: packet = STLPktBuilder(pkt = base_pkt/pad)
mode : :class:`trex_stl_lib.trex_stl_streams.STLTXCont` or :class:`trex_stl_lib.trex_stl_streams.STLTXSingleBurst` or :class:`trex_stl_lib.trex_stl_streams.STLTXMultiBurst`
enabled : bool
- if the stream is enabled.
+ Indicates whether the stream is enabled.
self_start : bool
- In case it is False another stream will activate it
+ If False, another stream activates it.
isg : float
- Inter stream gap in usec. time to wait until stream will send the first packet
+ Inter-stream gap in usec. Time to wait until the stream sends the first packet.
flow_stats : :class:`trex_stl_lib.trex_stl_streams.STLFlowStats`
- Per stream statistic object see STLFlowStats
+ Per-stream statistics object. See STLFlowStats
next : string
- The name of the stream to activate
+ Name of the stream to activate.
stream_id :
- for HLTAPI usage
+ For use by HLTAPI.
action_count : uint16_t
- In case there is a next stream how many loops until stopping. Default is zero, which mean unlimited
+ If there is a next stream, number of loops before stopping. Default: 0 (unlimited).
random_seed: uint16_t
- If given the seed for this stream will be this value. Good in case you need a deterministic random value
+ If given, the seed for this stream will be this value. Useful if you need a deterministic random value.
mac_src_override_by_pkt : bool
- Template packet will set src MAC
+ Template packet sets src MAC.
mac_dst_override_mode=None : STLStreamDstMAC_xx
- Template packet will set dst MAC
+ Template packet sets dst MAC.
"""
@@ -326,7 +326,7 @@ class STLStream(object):
validate_type('random_seed',random_seed,int);
if (type(mode) == STLTXCont) and (next != None):
- raise STLError("continuous stream cannot have a next stream ID")
+ raise STLError("Continuous stream cannot have a next stream ID")
# tag for the stream and next - can be anything
self.name = name
@@ -412,7 +412,7 @@ class STLStream(object):
def to_json (self):
"""
- return json format
+ Return json format
"""
return dict(self.fields)
@@ -430,6 +430,10 @@ class STLStream(object):
return self.next
+ def has_flow_stats (self):
+ """ Return True if stream was configured with flow stats """
+ return self.fields['flow_stats']['enabled']
+
def get_pkt (self):
""" Get packet as string """
return self.pkt
@@ -444,7 +448,7 @@ class STLStream(object):
def get_pkt_type (self):
- """ Get packet description for example IP:UDP """
+ """ Get packet description. Example: IP:UDP """
if self.packet_desc == None:
self.packet_desc = STLPktBuilder.pkt_layers_desc_from_buffer(self.get_pkt())
@@ -472,7 +476,7 @@ class STLStream(object):
return self.get_rate_from_field(self.fields['mode']['rate'])
def to_pkt_dump (self):
- """ print packet description from scapy """
+ """ Print packet description from Scapy """
if self.name:
print("Stream Name: ",self.name)
scapy_b = self.scapy_pkt_builder;
@@ -484,7 +488,7 @@ class STLStream(object):
def to_yaml (self):
- """ convert to YAML """
+ """ Convert to YAML """
y = {}
if self.name:
@@ -506,7 +510,7 @@ class STLStream(object):
# returns the Python code (text) to build this stream, inside the code it will be in variable "stream"
def to_code (self):
- """ convert to Python code as profile """
+ """ Convert to Python code as profile """
packet = Ether(self.pkt)
layer = packet
while layer: # remove checksums
@@ -621,7 +625,7 @@ class STLStream(object):
return r'\x{0:02x}'.format(ord(match.group()))
def dump_to_yaml (self, yaml_file = None):
- """ print as yaml """
+ """ Print as yaml """
yaml_dump = yaml.dump([self.to_yaml()], default_flow_style = False)
# write to file if provided
@@ -642,7 +646,7 @@ class YAMLLoader(object):
packet_type = set(packet_dict).intersection(['binary', 'pcap'])
if len(packet_type) != 1:
- raise STLError("packet section must contain either 'binary' or 'pcap'")
+ raise STLError("Packet section must contain either 'binary' or 'pcap'")
if 'binary' in packet_type:
try:
@@ -707,7 +711,7 @@ class YAMLLoader(object):
pg_id = flow_stats_obj.get('stream_id')
if pg_id == None:
- raise STLError("enabled RX stats section must contain 'stream_id' field")
+ raise STLError("Enabled RX stats section must contain 'stream_id' field")
return STLFlowStats(pg_id = pg_id)
@@ -822,16 +826,21 @@ class STLProfile(object):
def get_streams (self):
- """ Get the list of stream"""
+ """ Get the list of streams"""
return self.streams
def __str__ (self):
return '\n'.join([str(stream) for stream in self.streams])
+ def is_pauseable (self):
+ return all([x.get_mode() == "Continuous" for x in self.get_streams()])
+
+ def has_flow_stats (self):
+ return any([x.has_flow_stats() for x in self.get_streams()])
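A usage sketch for the two new helpers (the profile path is a placeholder):

    profile = STLProfile.load_py("stl/my_profile.py")
    if profile.is_pauseable():          # True only if every stream is continuous
        print("profile can be paused")
    if profile.has_flow_stats():        # True if any stream carries flow stats
        print("profile uses flow stat counters")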
@staticmethod
def load_yaml (yaml_file):
- """ load from YAML file a profile with number of streams"""
+ """ Load (from YAML file) a profile with a number of streams"""
# check filename
if not os.path.isfile(yaml_file):
@@ -866,11 +875,11 @@ class STLProfile(object):
@staticmethod
def load_py (python_file, direction = 0, port_id = 0, **kwargs):
- """ load from Python profile """
+ """ Load from Python profile """
# check filename
if not os.path.isfile(python_file):
- raise STLError("file '{0}' does not exists".format(python_file))
+ raise STLError("File '{0}' does not exist".format(python_file))
basedir = os.path.dirname(python_file)
sys.path.append(basedir)
@@ -883,7 +892,7 @@ class STLProfile(object):
t = STLProfile.get_module_tunables(module)
for arg in kwargs:
if not arg in t:
- raise STLError("profile {0} does not support tunable '{1}' - supported tunables are: '{2}'".format(python_file, arg, t))
+ raise STLError("Profile {0} does not support tunable '{1}' - supported tunables are: '{2}'".format(python_file, arg, t))
streams = module.register().get_streams(direction = direction,
port_id = port_id,
@@ -910,26 +919,26 @@ class STLProfile(object):
# loop_count = 0 means loop forever
@staticmethod
def load_pcap (pcap_file, ipg_usec = None, speedup = 1.0, loop_count = 1, vm = None):
- """ Convert a pcap file with a number of packets to a list of connected streams
+ """ Convert a pcap file with a number of packets to a list of connected streams.
packet1->packet2->packet3 etc
:parameters:
pcap_file : string
- The name of the pcap file
+ Name of the pcap file
ipg_usec : float
- The inter packet gap in usec. in case of None IPG is taken from pcap file
+ Inter-packet gap in usec. If not specified, the IPG is taken from the pcap file
speedup : float
- By which factor to get IPG smaller so we will send pcap file in speedup
+ Factor by which the IPG is divided when reading the pcap file, so the capture is replayed that much faster
loop_count : uint16_t
- how many loops to repeat the pcap file
+ Number of loops to repeat the pcap file
vm : list
- A list of Field engine instructions
+ List of Field engine instructions
:return: STLProfile
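A hedged usage sketch for load_pcap (the pcap path is a placeholder):

    # replay sample.pcap 3 times with a fixed 10 usec inter-packet gap
    profile = STLProfile.load_pcap("sample.pcap",
                                   ipg_usec = 10.0,
                                   loop_count = 3)
    print(profile)     # one STLStream per packet, chained packet1->packet2->...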
@@ -987,12 +996,12 @@ class STLProfile(object):
@staticmethod
def load (filename, direction = 0, port_id = 0, **kwargs):
- """ load a profile by its type supported type are
+ """ Load a profile by its type. Supported types are:
* py
* yaml
* pcap file that is converted to a profile automatically
- :parameters:
+ :Parameters:
filename : string as filename
direction : profile's direction (if supported by the profile)
port_id : which port ID this profile is being loaded to
@@ -1024,7 +1033,7 @@ class STLProfile(object):
return profile.meta
def dump_as_pkt (self):
- """ dump the profile as scapy packet. in case it is raw convert to scapy and dump it"""
+ """ Dump the profile as Scapy packet. If the packet is raw, convert it to Scapy before dumping it."""
cnt=0;
for stream in self.streams:
print("=======================")
@@ -1034,7 +1043,7 @@ class STLProfile(object):
stream.to_pkt_dump()
def dump_to_yaml (self, yaml_file = None):
- """ convert it to yaml """
+ """ Convert the profile to yaml """
yaml_list = [stream.to_yaml() for stream in self.streams]
yaml_str = yaml.dump(yaml_list, default_flow_style = False)
@@ -1046,7 +1055,7 @@ class STLProfile(object):
return yaml_str
def dump_to_code (self, profile_file = None):
- """ convert it to Python native profile. yeah this is cool """
+ """ Convert the profile to Python native profile. """
profile_dump = '''# !!! Auto-generated code !!!
from trex_stl_lib.api import *
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
index e5305c78..cd15b831 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py
@@ -4,7 +4,7 @@ from .utils.text_opts import *
from .trex_stl_exceptions import *
import types
-RpcCmdData = namedtuple('RpcCmdData', ['method', 'params'])
+RpcCmdData = namedtuple('RpcCmdData', ['method', 'params', 'api_class'])
TupleRC = namedtuple('RCT', ['rc', 'data', 'is_warn'])
class RpcResponseStatus(namedtuple('RpcResponseStatus', ['success', 'id', 'msg'])):
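Because RpcCmdData gains an api_class field here, any code building it now supplies three values; an illustrative sketch (the method name and api_class value are placeholders, not part of this commit):

    cmd = RpcCmdData(method = 'ping', params = {}, api_class = None)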
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
index 8917cd28..4b7e9b3e 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_tables.py
@@ -1,3 +1,4 @@
+import sys
from texttable import Texttable
from .text_opts import format_text
@@ -23,6 +24,7 @@ def generate_trex_stats_table():
def print_table_with_header(texttable_obj, header="", untouched_header=""):
header = header.replace("_", " ").title() + untouched_header
print(format_text(header, 'cyan', 'underline') + "\n")
+
print((texttable_obj.draw() + "\n"))
if __name__ == "__main__":
diff --git a/scripts/external_libs/texttable-0.8.4/texttable.py b/scripts/external_libs/texttable-0.8.4/texttable.py
index 2224ad77..71ef0ea6 100644
--- a/scripts/external_libs/texttable-0.8.4/texttable.py
+++ b/scripts/external_libs/texttable-0.8.4/texttable.py
@@ -590,10 +590,7 @@ class Texttable:
array = []
for c in cell.split('\n'):
try:
- if sys.version >= '3.0':
- c = str(c)
- else:
- c = unicode(c, 'utf')
+ c = str(c)
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (c, strerror))
if sys.version >= '3.0':
diff --git a/scripts/find_python.sh b/scripts/find_python.sh
index e9607fe5..9a9717c1 100755
--- a/scripts/find_python.sh
+++ b/scripts/find_python.sh
@@ -1,21 +1,27 @@
#!/bin/bash
-# if no variable of $PYTHON is define - we try to find it
-function find_python {
+# function finds python2
+function find_python2 {
# two candidates - machine python and cisco linux python
+
+ if [ -n "$PYTHON" ]; then #
+ return;
+ fi
+
+
MACHINE_PYTHON=python
CEL_PYTHON=/router/bin/python-2.7.4
# try the machine python
PYTHON=$MACHINE_PYTHON
- PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver < 27)"`
+ PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver < 27)" > /dev/null 2>&1 `
if [ $? -eq 0 ]; then
return
fi
# try the CEL python
PYTHON=$CEL_PYTHON
- PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver < 27)"`
+ PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver < 27)" > /dev/null 2>&1 `
if [ $? -eq 0 ]; then
return
fi
@@ -24,27 +30,56 @@ function find_python {
exit -1
}
+# function finds python3
function find_python3 {
+
+ if [ -n "$PYTHON3" ]; then
+ PYTHON=$PYTHON3
+ return;
+ fi
+
MACHINE_PYTHON=python3
ITAY_PYTHON=/auto/proj-pcube-b/apps/PL-b/tools/python3.4/bin/python3
- PYTHON3=$MACHINE_PYTHON
- PCHECK=`$PYTHON3 -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver != 34)"`
+ PYTHON=$MACHINE_PYTHON
+ PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver != 34)" > /dev/null 2>&1 `
if [ $? -eq 0 ]; then
return
fi
- PYTHON3=$ITAY_PYTHON
- PCHECK=`$PYTHON3 -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver != 34)"`
+ PYTHON=$ITAY_PYTHON
+ PCHECK=`$PYTHON -c "import sys; ver = sys.version_info[0] * 10 + sys.version_info[1];sys.exit(ver != 34)" > /dev/null 2>&1 `
if [ $? -eq 0 ]; then
return
fi
- echo "*** $PYTHON3 - python version does not match, 3.4 is required"
+ echo "*** $PYTHON - python version does not match, 3.4 is required"
exit -1
}
-if [ -z "$PYTHON" ]; then
- find_python
-fi
+case "$1" in
+ "--python2") # we want python2
+ find_python2
+ ;;
+ "--python3") # we want python3
+ find_python3
+ ;;
+ *)
+ if [ -z "$PYTHON" ]; then # no python env. var
+ case $USER in
+ imarom|hhaim|ybrustin|ibarnea) # dev users, 70% python3 30% python2
+ case $(($RANDOM % 10)) in
+ [7-9])
+ find_python2
+ ;;
+ *)
+ find_python3
+ ;;
+ esac
+ ;;
+ *) # default is python2
+ find_python2
+ ;;
+ esac
+ fi
+ ;;
+esac
+
-if [ -z "$PYTHON3" ]; then
- find_python3
-fi
diff --git a/scripts/ko/4.2.3-300.fc23.x86_64/igb_uio.ko b/scripts/ko/4.2.3-300.fc23.x86_64/igb_uio.ko
new file mode 100644
index 00000000..174a9a21
--- /dev/null
+++ b/scripts/ko/4.2.3-300.fc23.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/run_functional_tests b/scripts/run_functional_tests
index 6e6a00a1..9ec1bd39 100755
--- a/scripts/run_functional_tests
+++ b/scripts/run_functional_tests
@@ -1,26 +1,36 @@
#!/bin/bash
-source find_python.sh
-cd automation/regression
-# Python 2
-echo Python2 test
-$PYTHON trex_unit_test.py --functional $@
-if [ $? -eq 0 ]; then
- printf "\n$PYTHON test succeeded\n\n"
-else
- printf "\n*** $PYTHON test failed\n\n"
- exit -1
+if [[ $@ =~ '--python2' || ! $@ =~ '--python3' ]]; then
+ source find_python.sh --python2
+ cd automation/regression
+
+ # Python 2
+ echo Python2 test
+ $PYTHON trex_unit_test.py --functional $@
+ if [ $? -eq 0 ]; then
+ printf "\n$PYTHON test succeeded\n\n"
+ else
+ printf "\n*** $PYTHON test failed\n\n"
+ exit -1
+ fi
+ cd -
fi
-# Python 3
-echo Python3 test
-$PYTHON3 trex_unit_test.py --functional $@
-if [ $? -eq 0 ]; then
- printf "\n$PYTHON3 test succeeded\n\n"
-else
- printf "\n*** $PYTHON3 test failed\n\n"
- exit -1
+if [[ $@ =~ '--python3' || ! $@ =~ '--python2' ]]; then
+ source find_python.sh --python3
+ cd automation/regression
+
+ # Python 3
+ echo Python3 test
+ $PYTHON trex_unit_test.py --functional $@
+ if [ $? -eq 0 ]; then
+ printf "\n$PYTHON test succeeded\n\n"
+ else
+ printf "\n*** $PYTHON test failed\n\n"
+ exit -1
+ fi
+ cd -
fi
diff --git a/scripts/run_regression b/scripts/run_regression
index 5bb33652..02746bab 100755
--- a/scripts/run_regression
+++ b/scripts/run_regression
@@ -1,6 +1,6 @@
#!/bin/bash
-source find_python.sh
+source find_python.sh --python2
cd automation/regression
$PYTHON trex_unit_test.py --exclude functional $@
diff --git a/scripts/stl-sim b/scripts/stl-sim
index 57fe4fa8..198d1275 100755
--- a/scripts/stl-sim
+++ b/scripts/stl-sim
@@ -1,6 +1,20 @@
#!/bin/bash
-source find_python.sh
+INPUT_ARGS=$@
+
+if [[ $@ =~ '--python2' ]]; then
+ INPUT_ARGS=${@//--python2/}
+ source find_python.sh --python2
+fi
+
+if [[ $@ =~ '--python3' ]]; then
+ INPUT_ARGS=${@//--python3/}
+ source find_python.sh --python3
+fi
+
+if [ -z "$PYTHON" ]; then
+ source find_python.sh
+fi
export PYTHONPATH=automation/trex_control_plane/stl
-$PYTHON -m trex_stl_lib.trex_stl_sim -p $PWD $@
+$PYTHON -m trex_stl_lib.trex_stl_sim -p $PWD $INPUT_ARGS
diff --git a/scripts/stl/flow_stats.py b/scripts/stl/flow_stats.py
index 69e1166c..cbb5ac21 100644
--- a/scripts/stl/flow_stats.py
+++ b/scripts/stl/flow_stats.py
@@ -1,15 +1,17 @@
from trex_stl_lib.api import *
+import os
# stream from pcap file. continues pps 10 in sec
+CP = os.path.join(os.path.dirname(__file__))
class STLS1(object):
def get_streams (self, direction = 0, **kwargs):
- return [STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_64B_no_crc.pcap"), # path relative to pwd
+ return [STLStream(packet = STLPktBuilder(pkt = os.path.join(CP, "yaml/udp_64B_no_crc.pcap")), # path relative to pwd
mode = STLTXCont(pps=1000),
flow_stats = STLFlowStats(pg_id = 7)),
- STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_594B_no_crc.pcap"), # path relative to pwd
+ STLStream(packet = STLPktBuilder(pkt = os.path.join(CP, "yaml/udp_594B_no_crc.pcap")), # path relative to pwd
mode = STLTXCont(pps=5000),
flow_stats = STLFlowStats(pg_id = 12))
]
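Loading this profile with the helper added earlier in this commit confirms its flow-stat configuration (path shown relative to the scripts directory):

    profile = STLProfile.load_py("stl/flow_stats.py")
    assert profile.has_flow_stats()     # both streams carry STLFlowStats (pg_id 7 and 12)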
diff --git a/scripts/stl/udp_1pkt_pcap.py b/scripts/stl/udp_1pkt_pcap.py
index 9fb0e269..2a364810 100644
--- a/scripts/stl/udp_1pkt_pcap.py
+++ b/scripts/stl/udp_1pkt_pcap.py
@@ -1,11 +1,14 @@
from trex_stl_lib.api import *
+import os
# stream from pcap file. continues pps 10 in sec
+CP = os.path.join(os.path.dirname(__file__))
+
class STLS1(object):
def get_streams (self, direction = 0, **kwargs):
- return [STLStream(packet = STLPktBuilder(pkt ="stl/yaml/udp_64B_no_crc.pcap"), # path relative to pwd
+ return [STLStream(packet = STLPktBuilder(pkt = os.path.join(CP, "yaml/udp_64B_no_crc.pcap")),
mode = STLTXCont(pps=10)) ] #rate continues, could be STLTXSingleBurst,STLTXMultiBurst
diff --git a/scripts/stl/udp_1pkt_simple_burst.py b/scripts/stl/udp_1pkt_simple_burst.py
new file mode 100644
index 00000000..bf485ab0
--- /dev/null
+++ b/scripts/stl/udp_1pkt_simple_burst.py
@@ -0,0 +1,24 @@
+from trex_stl_lib.api import *
+
+class STLS1(object):
+
+ def create_stream (self):
+ return STLStream(
+ packet =
+ STLPktBuilder(
+ pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/
+ UDP(dport=12,sport=1025)/(10*'x')
+ ),
+ mode = STLTXSingleBurst(total_pkts = 1))
+
+ def get_streams (self, direction = 0, **kwargs):
+ # create 1 stream
+ return [ self.create_stream() ]
+
+
+# dynamic load - used for trex console or simulator
+def register():
+ return STLS1()
+
+
+
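A sketch of the dynamic-load convention this new profile follows (the plain import assumes scripts/stl is on sys.path, which is an assumption, not shown in this commit):

    import udp_1pkt_simple_burst
    streams = udp_1pkt_simple_burst.register().get_streams(direction = 0)   # one single-burst stream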
diff --git a/scripts/stl/udp_3pkt_pcap.py b/scripts/stl/udp_3pkt_pcap.py
index fd2c609e..19ff46bc 100644
--- a/scripts/stl/udp_3pkt_pcap.py
+++ b/scripts/stl/udp_3pkt_pcap.py
@@ -1,26 +1,29 @@
from trex_stl_lib.api import *
+import os
# stream from pcap file. continues pps 10 in sec
+CP = os.path.join(os.path.dirname(__file__))
+
class STLS1(object):
def create_stream (self):
return STLProfile( [ STLStream( isg = 10.0, # star in delay
name ='S0',
- packet = STLPktBuilder(pkt ="stl/yaml/udp_64B_no_crc.pcap"),
+ packet = STLPktBuilder(pkt = os.path.join(CP, "yaml/udp_64B_no_crc.pcap")),
mode = STLTXSingleBurst( pps = 10, total_pkts = 10),
next = 'S1'), # point to next stream
STLStream( self_start = False, # stream is disabled enable trow S0
name ='S1',
- packet = STLPktBuilder(pkt ="stl/yaml/udp_594B_no_crc.pcap"),
+ packet = STLPktBuilder(pkt = os.path.join(CP, "yaml/udp_594B_no_crc.pcap")),
mode = STLTXSingleBurst( pps = 10, total_pkts = 20),
next = 'S2' ),
STLStream( self_start = False, # stream is disabled enable trow S0
name ='S2',
- packet = STLPktBuilder(pkt ="stl/yaml/udp_1518B_no_crc.pcap"),
+ packet = STLPktBuilder(pkt = os.path.join(CP, "yaml/udp_1518B_no_crc.pcap")),
mode = STLTXSingleBurst( pps = 10, total_pkts = 30 )
)
]).get_streams()
diff --git a/scripts/stl/yaml/imix_1pkt_vm_minus.yaml b/scripts/stl/yaml/imix_1pkt_vm_minus.yaml
index e83cfdd0..6d5345df 100644
--- a/scripts/stl/yaml/imix_1pkt_vm_minus.yaml
+++ b/scripts/stl/yaml/imix_1pkt_vm_minus.yaml
@@ -18,7 +18,8 @@
"min_value" : 1000,
"name" : "l3_src",
"op" : "inc",
- "size" : 2,
+ "step": 1,
+ "size" : 4,
"type" : "flow_var"
},
{
diff --git a/scripts/t-rex-64 b/scripts/t-rex-64
index 0516d7da..c18db43f 100755
--- a/scripts/t-rex-64
+++ b/scripts/t-rex-64
@@ -19,14 +19,22 @@ saveterm="$(stty -g)"
# if we have a new core run optimized trex
if cat /proc/cpuinfo | grep -q avx ; then
./_$(basename $0) $INPUT_ARGS
+ RESULT=$?
if [ $? -eq 132 ]; then
echo " WARNING this program is optimized for the new Intel processors. "
echo " try the ./t-rex-64-o application that should work for any Intel processor but might be slower. "
echo " try to run t-rex-64-o .. "
./_t-rex-64-o $INPUT_ARGS
+ RESULT=$?
fi
else
./_t-rex-64-o $INPUT_ARGS
+ RESULT=$?
fi
stty $saveterm
+if [ $RESULT -ne 0 ]; then
+ exit $RESULT
+fi
+
+
diff --git a/scripts/trex-console b/scripts/trex-console
index ea253fdd..0fcf656a 100755
--- a/scripts/trex-console
+++ b/scripts/trex-console
@@ -1,9 +1,23 @@
#!/bin/bash
-source find_python.sh
+INPUT_ARGS=$@
+
+if [[ $@ =~ '--python2' ]]; then
+ INPUT_ARGS=${@//--python2/}
+ source find_python.sh --python2
+fi
+
+if [[ $@ =~ '--python3' ]]; then
+ INPUT_ARGS=${@//--python3/}
+ source find_python.sh --python3
+fi
+
+if [ -z "$PYTHON" ]; then
+ source find_python.sh
+fi
export PYTHONPATH=automation/trex_control_plane/stl
printf "\nUsing '$PYTHON' as Python interpeter\n\n"
-$PYTHON -m console.trex_console $@
+$PYTHON -m console.trex_console $INPUT_ARGS
diff --git a/src/bp_sim.h b/src/bp_sim.h
index 4b1a88e3..cd85e82b 100755
--- a/src/bp_sim.h
+++ b/src/bp_sim.h
@@ -41,7 +41,7 @@ limitations under the License.
#include <common/Network/Packet/IPv6Header.h>
#include <common/Network/Packet/EthernetHeader.h>
#include <math.h>
-#include <common/bitMan.h>
+#include <common/bitMan.h>
#include <yaml-cpp/yaml.h>
#include "trex_defs.h"
#include "os_time.h"
@@ -97,7 +97,7 @@ public:
MIN_VM_V6=1 // IPv6 addressing
};
uint8_t m_cmd;
- uint8_t m_flags;
+ uint8_t m_flags;
uint16_t m_start_0;
uint16_t m_stop_1;
uint16_t m_add_pkt_len; /* request more length for mbuf packet the size */
@@ -116,16 +116,16 @@ public:
uint16_t m_server_port;
};
-/* this command replace IP in 2 diffrent location and port
+/* this command replace IP in 2 diffrent location and port
-c = 10.1.1.2
-o = 10.1.1.2
+c = 10.1.1.2
+o = 10.1.1.2
m = audio 102000
==>
-c = xx.xx.xx.xx
-o = xx.xx.xx.xx
+c = xx.xx.xx.xx
+o = xx.xx.xx.xx
m = audio yyyy
*/
@@ -248,7 +248,7 @@ class CFlowGenListPerThread ;
/* callback */
void on_node_first(uint8_t plugin_id,CGenNode * node,
- CFlowYamlInfo * template_info,
+ CFlowYamlInfo * template_info,
CTupleTemplateGeneratorSmart * tuple_gen,
CFlowGenListPerThread * flow_gen
);
@@ -259,7 +259,7 @@ rte_mbuf_t * on_node_generate_mbuf(uint8_t plugin_id,CGenNode * node,CFlowPk
class CPreviewMode ;
struct CGenNode;
-/* represent the virtual interface
+/* represent the virtual interface
*/
/* counters per side */
@@ -276,7 +276,7 @@ public:
uint64_t m_tx_drop;
uint64_t m_tx_queue_full;
uint64_t m_tx_alloc_error;
- tx_per_flow_t m_tx_per_flow[MAX_FLOW_STATS];
+ tx_per_flow_t m_tx_per_flow[MAX_FLOW_STATS];
CPerTxthreadTemplateInfo m_template;
public:
@@ -309,10 +309,10 @@ public:
void CVirtualIFPerSideStats::Dump(FILE *fd){
#define DP_B(f) if (f) printf(" %-40s : %lu \n",#f,f)
- DP_B(m_tx_pkt);
+ DP_B(m_tx_pkt);
DP_B(m_tx_rx_check_pkt);
- DP_B(m_tx_bytes);
- DP_B(m_tx_drop);
+ DP_B(m_tx_bytes);
+ DP_B(m_tx_drop);
DP_B(m_tx_alloc_error);
DP_B(m_tx_queue_full);
m_template.Dump(fd);
@@ -342,17 +342,17 @@ public:
/**
* send one packet
- *
+ *
* @param node
- *
- * @return
+ *
+ * @return
*/
virtual int send_node(CGenNode * node) =0;
/**
* send one packet to a specific dir. flush all packets
- *
+ *
* @param dir
* @param m
*/
@@ -361,26 +361,29 @@ public:
/**
- * flush all pending packets into the stream
- *
- * @return
+ * flush all pending packets into the stream
+ *
+ * @return
*/
virtual int flush_tx_queue(void)=0;
-
+ // read all packets from rx_queue on dp core
+ virtual void flush_dp_rx_queue(void) {};
+ // read all packets from rx queue
+ virtual void flush_rx_queue(void) {};
/**
* update the source and destination mac-addr of a given mbuf by global database
- *
+ *
* @param dir
* @param m
- *
- * @return
+ *
+ * @return
*/
virtual int update_mac_addr_from_global_cfg(pkt_dir_t dir, uint8_t * p)=0;
/**
* translate a port_id to the correct dir on the core
- *
+ *
*/
virtual pkt_dir_t port_id_to_dir(uint8_t port_id) {
return (CS_INVALID);
@@ -602,13 +605,13 @@ public:
}
}
- bool get_is_rx_check_enable(){
- return (btGetMaskBit32(m_flags,31,31) ? true:false);
- }
+ bool get_is_rx_check_enable(){
+ return (btGetMaskBit32(m_flags,31,31) ? true:false);
+ }
- void set_rx_check_enable(bool enable){
- btSetMaskBit32(m_flags,31,31,enable?1:0);
- }
+ void set_rx_check_enable(bool enable){
+ btSetMaskBit32(m_flags,31,31,enable?1:0);
+ }
bool get_mac_ip_features_enable(){
@@ -693,7 +696,7 @@ public:
u.m_mac.dest[3]=1;
u.m_mac.src[3]=1;
}
- union {
+ union {
mac_align_t m_mac;
uint8_t m_data[16];
} u;
@@ -717,10 +720,10 @@ public:
};
enum trex_learn_mode_e {
- LEARN_MODE_DISABLED=0,
- LEARN_MODE_TCP_ACK=1,
- LEARN_MODE_IP_OPTION=2,
- LEARN_MODE_MAX=LEARN_MODE_IP_OPTION
+ LEARN_MODE_DISABLED=0,
+ LEARN_MODE_TCP_ACK=1,
+ LEARN_MODE_IP_OPTION=2,
+ LEARN_MODE_MAX=LEARN_MODE_IP_OPTION
};
public:
@@ -736,7 +739,7 @@ public:
m_expected_portd = 4; /* should be at least the number of ports found in the system but could be less */
m_vlan_port[0]=100;
m_vlan_port[1]=100;
- m_rx_check_sample=0;
+ m_rx_check_sample=0;
m_rx_check_hops = 0;
m_io_mode=1;
m_run_flags=0;
@@ -759,12 +762,12 @@ public:
uint32_t m_latency_rate; /* pkt/sec for each thread/port zero disable */
uint32_t m_latency_mask;
uint32_t m_latency_prev;
- uint16_t m_rx_check_sample; /* the sample rate of flows */
+ uint16_t m_rx_check_sample; /* the sample rate of flows */
uint16_t m_rx_check_hops;
uint16_t m_zmq_port;
uint16_t m_telnet_port;
uint16_t m_expected_portd;
- uint16_t m_io_mode; //0,1,2 0 disable, 1- normal , 2 - short
+ uint16_t m_io_mode; //0,1,2 0 disable, 1- normal , 2 - short
uint16_t m_run_flags;
uint8_t m_mac_splitter;
uint8_t m_l_pkt_mode;
@@ -782,7 +785,7 @@ public:
std::string out_file;
std::string prefix;
-
+
CMacAddrCfg m_mac_addr[TREX_MAX_PORTS];
uint8_t * get_src_mac_addr(int if_index){
@@ -861,7 +864,7 @@ public:
void Dump(FILE *fd);
public:
- uint32_t m_mbuf[MBUF_SIZE]; // relative to traffic norm to 2x10G ports
+ uint32_t m_mbuf[MBUF_SIZE]; // relative to traffic norm to 2x10G ports
uint32_t m_num_cores;
};
@@ -869,28 +872,28 @@ public:
typedef uint8_t socket_id_t;
typedef uint8_t port_id_t;
/* the real phsical thread id */
-typedef uint8_t physical_thread_id_t;
+typedef uint8_t physical_thread_id_t;
-typedef uint8_t virtual_thread_id_t;
-/*
+typedef uint8_t virtual_thread_id_t;
+/*
+
+ virtual thread 0 (v0)- is always the master
- virtual thread 0 (v0)- is always the master
-
-for 2 dual ports ( 2x2 =4 ports) the virtual thread looks like that
+for 2 dual ports ( 2x2 =4 ports) the virtual thread looks like that
-----------------
DEFAULT:
-----------------
(0,1) (2,3)
dual-if0 dual-if-1
v1 v2
- v3 v4
+ v3 v4
v5 v6
- v7 v8
-
- rx is v9
+ v7 v8
+
+ rx is v9
- */
+ */
#define MAX_SOCKETS_SUPPORTED (4)
#define MAX_THREADS_SUPPORTED (120)
@@ -904,12 +907,12 @@ public:
/* is socket enabled */
virtual bool is_sockets_enable(socket_id_t socket)=0;
-
+
/* number of main active sockets. socket #0 is always used */
virtual socket_id_t max_num_active_sockets()=0;
virtual ~CPlatformSocketInfoBase() {}
-
+
public:
/* which socket to allocate memory to each port */
virtual socket_id_t port_to_socket(port_id_t port)=0;
@@ -949,7 +952,7 @@ public:
/* is socket enabled */
bool is_sockets_enable(socket_id_t socket);
-
+
/* number of main active sockets. socket #0 is always used */
socket_id_t max_num_active_sockets();
@@ -995,7 +998,7 @@ public:
/* is socket enabled */
bool is_sockets_enable(socket_id_t socket);
-
+
/* number of main active sockets. socket #0 is always used */
socket_id_t max_num_active_sockets();
@@ -1033,7 +1036,7 @@ private:
bool m_sockets_enable[MAX_SOCKETS_SUPPORTED];
uint32_t m_sockets_enabled;
socket_id_t m_socket_per_dual_if[(TREX_MAX_PORTS >> 1)];
-
+
uint32_t m_max_threads_per_dual_if;
uint32_t m_num_dual_if;
@@ -1058,7 +1061,7 @@ public:
/* is socket enabled */
bool is_sockets_enable(socket_id_t socket);
-
+
/* number of main active sockets. socket #0 is always used */
socket_id_t max_num_active_sockets();
@@ -1141,15 +1144,15 @@ public:
public:
rte_mempool_t * m_small_mbuf_pool; /* pool for start packets */
- rte_mempool_t * m_mbuf_pool_128;
- rte_mempool_t * m_mbuf_pool_256;
- rte_mempool_t * m_mbuf_pool_512;
- rte_mempool_t * m_mbuf_pool_1024;
- rte_mempool_t * m_mbuf_pool_2048;
- rte_mempool_t * m_mbuf_pool_4096;
- rte_mempool_t * m_mbuf_pool_9k;
+ rte_mempool_t * m_mbuf_pool_128;
+ rte_mempool_t * m_mbuf_pool_256;
+ rte_mempool_t * m_mbuf_pool_512;
+ rte_mempool_t * m_mbuf_pool_1024;
+ rte_mempool_t * m_mbuf_pool_2048;
+ rte_mempool_t * m_mbuf_pool_4096;
+ rte_mempool_t * m_mbuf_pool_9k;
- rte_mempool_t * m_mbuf_global_nodes;
+ rte_mempool_t * m_mbuf_global_nodes;
uint32_t m_pool_id;
};
@@ -1167,16 +1170,16 @@ public:
return ( m_mem_pool[socket].pktmbuf_alloc_small() );
}
-
+
/**
- * try to allocate small buffers too
- * _alloc allocate big buffers only
- *
+ * try to allocate small buffers too
+ * _alloc allocate big buffers only
+ *
* @param socket
* @param size
- *
- * @return
+ *
+ * @return
*/
static inline rte_mbuf_t * pktmbuf_alloc(socket_id_t socket,uint16_t size){
if (size<FIRST_PKT_SIZE) {
@@ -1232,7 +1235,7 @@ public:
public:
static CRteMemPool m_mem_pool[MAX_SOCKETS_SUPPORTED];
- static uint32_t m_nodes_pool_size;
+ static uint32_t m_nodes_pool_size;
static CParserOption m_options;
static CGlobalMemory m_memory_cfg;
static CPlatformSocketInfo m_socket;
@@ -1320,19 +1323,19 @@ struct CFlowYamlInfo {
m_server_pool_idx = 0;
m_cap_mode=false;
}
-
+
std::string m_name;
std::string m_client_pool_name;
std::string m_server_pool_name;
- double m_k_cps; //k CPS
- double m_restart_time; /* restart time of this template */
- dsec_t m_ipg_sec; // ipg in sec
+ double m_k_cps; //k CPS
+ double m_restart_time; /* restart time of this template */
+ dsec_t m_ipg_sec; // ipg in sec
dsec_t m_rtt_sec; // rtt in sec
- uint32_t m_w;
+ uint32_t m_w;
uint32_t m_wlength;
uint32_t m_limit;
uint32_t m_flowcnt;
- uint8_t m_plugin_id; /* 0 - default , 1 - RTSP160 , 2- RTSP250 */
+ uint8_t m_plugin_id; /* 0 - default , 1 - RTSP160 , 2- RTSP250 */
uint8_t m_client_pool_idx;
uint8_t m_server_pool_idx;
bool m_one_app_server;
@@ -1418,7 +1421,7 @@ public:
NODE_FLAGS_LEARN_MSG_PROCESSED =0x10, /* got NAT msg */
NODE_FLAGS_LATENCY =0x20, /* got NAT msg */
- NODE_FLAGS_INIT_START_FROM_SERVER_SIDE = 0x40,
+ NODE_FLAGS_INIT_START_FROM_SERVER_SIDE = 0x40,
NODE_FLAGS_ALL_FLOW_SAME_PORT_SIDE = 0x80,
NODE_FLAGS_INIT_START_FROM_SERVER_SIDE_SERVER_ADDR = 0x100 /* init packet start from server side with server addr */
};
@@ -1434,8 +1437,8 @@ public:
uint16_t m_src_port;
uint16_t m_flags; /* BIT 0 - DIR ,
- BIT 1 - mbug_cache
- BIT 2 - SAMPLE DUPLICATE */
+ BIT 1 - mbug_cache
+ BIT 2 - SAMPLE DUPLICATE */
double m_time; /* can't change this header - size 16 bytes*/
@@ -1512,7 +1515,7 @@ public:
/* is it possible to cache MBUF */
inline void update_next_pkt_in_flow(void);
- inline void reset_pkt_in_flow(void);
+ inline void reset_pkt_in_flow(void);
inline uint8_t get_plugin_id(void){
return ( m_template_info->m_plugin_id);
}
@@ -1567,8 +1570,8 @@ public:
/* direction for TCP/UDP port */
inline pkt_dir_t cur_pkt_port_addr_dir();
/* from which interface dir to get out */
- inline pkt_dir_t cur_interface_dir();
-
+ inline pkt_dir_t cur_interface_dir();
+
inline void set_mbuf_cache_dir(pkt_dir_t dir){
if (dir) {
@@ -1597,19 +1600,19 @@ public:
public:
- inline void set_rx_check(){
- m_flags |= NODE_FLAGS_SAMPLE_RX_CHECK;
- }
+ inline void set_rx_check(){
+ m_flags |= NODE_FLAGS_SAMPLE_RX_CHECK;
+ }
- inline bool is_rx_check_enabled(){
- return ((m_flags & NODE_FLAGS_SAMPLE_RX_CHECK)?true:false);
- }
+ inline bool is_rx_check_enabled(){
+ return ((m_flags & NODE_FLAGS_SAMPLE_RX_CHECK)?true:false);
+ }
public:
inline void set_nat_first_state(){
btSetMaskBit16(m_flags,4,3,1);
- m_type=FLOW_PKT_NAT;
+ m_type=FLOW_PKT_NAT;
}
inline bool is_nat_first_state(){
@@ -1665,8 +1668,8 @@ public:
bool is_external_is_eq_to_internal_ip(){
/* this API is used to check TRex itself */
- if ( (get_nat_ipv4_addr() == m_src_ip ) &&
- (get_nat_ipv4_port()==m_src_port) &&
+ if ( (get_nat_ipv4_addr() == m_src_ip ) &&
+ (get_nat_ipv4_port()==m_src_port) &&
( get_nat_ipv4_addr_server() == m_dest_ip) ) {
return (true);
}else{
@@ -1704,7 +1707,7 @@ struct CGenNodeDeferPort {
uint16_t m_ports[DEFER_CLIENTS_NUM];
uint8_t m_pool_idx[DEFER_CLIENTS_NUM];
public:
- void init(void){
+ void init(void){
m_type=CGenNode::FLOW_DEFER_PORT_RELEASE;
m_cnt=0;
}
@@ -1724,7 +1727,7 @@ public:
} __rte_cache_aligned ;
-/* run time verification of objects size and offsets
+/* run time verification of objects size and offsets
need to clean this up and derive this objects from base object but require too much refactoring right now
hhaim
*/
@@ -1817,19 +1820,19 @@ public:
/**
* send one packet
- *
+ *
* @param node
- *
- * @return
+ *
+ * @return
*/
virtual int send_node(CGenNode * node);
/**
- * flush all pending packets into the stream
- *
- * @return
+ * flush all pending packets into the stream
+ *
+ * @return
*/
virtual int flush_tx_queue(void);
@@ -1858,7 +1861,7 @@ public:
/**
* same as regular STL but no I/O (dry run)
- *
+ *
* @author imarom (07-Jan-16)
*/
class CErfIFStlNull : public CErfIFStl {
@@ -1959,7 +1962,7 @@ public:
int open_file(std::string file_name,
CPreviewMode * preview);
int close_file(CFlowGenListPerThread * thread);
- int flush_file(dsec_t max_time,
+ int flush_file(dsec_t max_time,
dsec_t d_time,
bool always,
CFlowGenListPerThread * thread,
@@ -2012,7 +2015,7 @@ public:
CPreviewMode m_preview_mode;
uint64_t m_cnt;
uint64_t m_limit;
- CTimeHistogram m_realtime_his;
+ CTimeHistogram m_realtime_his;
};
@@ -2128,7 +2131,7 @@ inline bool CFlowKey::operator ==(const CFlowKey& rhs) const{
#define IS_PCAP_TIMING 7
-// 8-12 is used
+// 8-12 is used
#define FLOW_ID 8
@@ -2164,9 +2167,9 @@ public:
}
private:
- // per direction info
+ // per direction info
uint16_t m_dir_pkt_num; // pkt id
- uint16_t m_max_dir_flow_pkts;
+ uint16_t m_max_dir_flow_pkts;
};
@@ -2219,7 +2222,7 @@ public:
}
/**
* start from zero 0,1,2,.. , it is on global flow if you have couple of flows it will count all of the flows
- *
+ *
* flow FlowPktNum
* 0 0
* 0 1
@@ -2227,8 +2230,8 @@ public:
* 1 0
* 1 1
* 2 0
- *
- * @return
+ *
+ * @return
*/
inline uint32_t getFlowPktNum(){
return ( m_flow_pkt_num);
@@ -2252,7 +2255,7 @@ public:
}
- /* return true if this packet in diff direction from prev flow packet ,
+ /* return true if this packet in diff direction from prev flow packet ,
if true need to choose RTT else IPG for inter packet gap */
inline bool IsRtt(){
return (btGetMaskBit32(m_flags,IS_RTT,IS_RTT) ? true:false);
@@ -2313,7 +2316,7 @@ public:
inline void SetId(uint16_t _id){
btSetMaskBit32(m_flags,31,16,_id);
-
+
}
inline uint16_t getId(){
return ( ( uint16_t)btGetMaskBit32(m_flags,31,16));
@@ -2328,7 +2331,7 @@ public:
return (btGetMaskBit32(m_flags,IS_LAST_PKT_S,IS_LAST_PKT_E) ? true:false);
}
- // there could be couple of flows per template in case of plugin
+ // there could be couple of flows per template in case of plugin
inline void SetMaxPktsPerFlow(uint32_t pkts){
assert(pkts<65000);
m_max_flow_pkts=pkts;
@@ -2336,7 +2339,7 @@ public:
inline uint16_t GetMaxPktsPerFlow(){
return ( m_max_flow_pkts );
}
- // there could be couple of flows per template in case of plugin
+ // there could be couple of flows per template in case of plugin
inline void SetMaxFlowTimeout(double sec){
//assert (sec<65000);
sec = sec*2.0+5.0;
@@ -2369,12 +2372,12 @@ public:
private:
uint32_t m_flags;
- uint16_t m_flow_pkt_num; // packet number inside the flow
- uint8_t m_plugin_id; // packet number inside the flow
+ uint16_t m_flow_pkt_num; // packet number inside the flow
+ uint8_t m_plugin_id; // packet number inside the flow
uint8_t m_pad;
uint16_t m_max_flow_pkts; // how many packet per this flow getFlowId()
- uint16_t m_max_flow_aging; // maximum aging in sec
- CPacketDescriptorPerDir m_per_dir[CS_NUM]; // per direction info
+ uint16_t m_max_flow_aging; // maximum aging in sec
+ CPacketDescriptorPerDir m_per_dir[CS_NUM]; // per direction info
};
@@ -2427,7 +2430,7 @@ public:
class CPacketIndication {
public:
- dsec_t m_cap_ipg; /* ipg from cap file */
+ dsec_t m_cap_ipg; /* ipg from cap file */
CCapPktRaw * m_packet;
CFlow * m_flow;
@@ -2437,10 +2440,10 @@ public:
IPv6Header * m_ipv6;
} l3;
bool m_is_ipv6;
- union {
+ union {
TCPHeader * m_tcp;
UDPHeader * m_udp;
- ICMPHeader * m_icmp;
+ ICMPHeader * m_icmp;
} l4;
uint8_t * m_payload;
uint16_t m_payload_len;
@@ -2489,10 +2492,10 @@ public:
/**
- * return the application ipv4/ipv6 option offset
+ * return the application ipv4/ipv6 option offset
* if learn bit is ON , it is always the first options ( IPV6/IPV4)
- *
- * @return
+ *
+ * @return
*/
uint32_t getIpAppOptionOffset(){
if ( is_ipv6() ) {
@@ -2585,7 +2588,7 @@ class CPacketParser {
public:
bool Create();
void Delete();
- bool ProcessPacket(CPacketIndication * pkt_indication,
+ bool ProcessPacket(CPacketIndication * pkt_indication,
CCapPktRaw * raw_packet);
public:
CCPacketParserCounters m_counter;
@@ -2706,12 +2709,12 @@ public:
inline rte_mbuf_t * do_generate_new_mbuf_ex(CGenNode * node,CFlowInfo * flow_info);
inline rte_mbuf_t * do_generate_new_mbuf_ex_big(CGenNode * node,CFlowInfo * flow_info);
inline rte_mbuf_t * do_generate_new_mbuf_ex_vm(CGenNode * node,
- CFlowInfo * flow_info, int16_t * s_size);
+ CFlowInfo * flow_info, int16_t * s_size);
public:
- /* push the number of bytes into the packets and make more room
- should be used by NAT feature that should have ipv4 option in the first packet
- this function should not be called in runtime, only when template is loaded due to it heavey cost of operation ( malloc/free memory )
+ /* push the number of bytes into the packets and make more room
+ should be used by NAT feature that should have ipv4 option in the first packet
+ this function should not be called in runtime, only when template is loaded due to it heavey cost of operation ( malloc/free memory )
*/
char * push_ipv4_option_offline(uint8_t bytes);
char * push_ipv6_option_offline(uint8_t bytes);
@@ -2720,10 +2723,10 @@ public:
/**
* mark this packet as learn packet
- * should
+ * should
* 1. push ipv4 option ( 8 bytes)
- * 2. mask the packet as learn
- * 3. update the option pointer
+ * 2. mask the packet as learn
+ * 3. update the option pointer
*/
void mask_as_learn();
@@ -2750,7 +2753,7 @@ private:
public:
CPacketIndication m_pkt_indication;
- CCapPktRaw * m_packet;
+ CCapPktRaw * m_packet;
rte_mbuf_t * m_big_mbuf[MAX_SOCKETS_SUPPORTED]; /* allocate big mbug per socket */
};
@@ -2764,10 +2767,10 @@ inline void CFlowPktInfo::update_pkt_info2(char *p,
int update_len ,
CGenNode * node
){
- IPHeader * ipv4=
+ IPHeader * ipv4=
(IPHeader *)(p + m_pkt_indication.getFastIpOffsetFast());
- EthernetHeader * et =
+ EthernetHeader * et =
(EthernetHeader * )(p + m_pkt_indication.getFastEtherOffset());
(void)et;
@@ -2820,7 +2823,7 @@ inline void CFlowPktInfo::update_pkt_info2(char *p,
m_tcp->setSourcePort(flow_info->server_port);
}
}
-
+
}else {
if ( m_pkt_indication.m_desc.IsUdp() ){
UDPHeader * m_udp =(UDPHeader *)(p +m_pkt_indication.getFastTcpOffset() );
@@ -2848,10 +2851,10 @@ inline void CFlowPktInfo::update_pkt_info2(char *p,
inline void CFlowPktInfo::update_pkt_info(char *p,
CGenNode * node){
- IPHeader * ipv4=
+ IPHeader * ipv4=
(IPHeader *)(p + m_pkt_indication.getFastIpOffsetFast());
- EthernetHeader * et =
+ EthernetHeader * et =
(EthernetHeader * )(p + m_pkt_indication.getFastEtherOffset());
(void)et;
@@ -2863,7 +2866,7 @@ inline void CFlowPktInfo::update_pkt_info(char *p,
if ( unlikely (m_pkt_indication.is_ipv6())) {
-
+
// Update the IPv6 address
IPv6Header *ipv6= (IPv6Header *)ipv4;
@@ -2886,22 +2889,22 @@ inline void CFlowPktInfo::update_pkt_info(char *p,
ipv4->setTimeToLive(TTL_RESERVE_DUPLICATE);
/* first ipv4 option add the info in case of learn packet, usualy only the first packet */
- if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_IP_OPTION)) {
- CNatOption *lpNat =(CNatOption *)ipv4->getOption();
- lpNat->set_fid(node->get_short_fid());
- lpNat->set_thread_id(node->get_thread_id());
- } else {
- // This method only work on first TCP SYN
- if (ipv4->getProtocol() == IPPROTO_TCP) {
- TCPHeader *tcp = (TCPHeader *)(((uint8_t *)ipv4) + ipv4->getHeaderLength());
- if (tcp->getSynFlag()) {
- tcp->setAckNumber(CNatRxManager::calc_tcp_ack_val(node->get_short_fid(), node->get_thread_id()));
- }
+ if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_IP_OPTION)) {
+ CNatOption *lpNat =(CNatOption *)ipv4->getOption();
+ lpNat->set_fid(node->get_short_fid());
+ lpNat->set_thread_id(node->get_thread_id());
+ } else {
+ // This method only work on first TCP SYN
+ if (ipv4->getProtocol() == IPPROTO_TCP) {
+ TCPHeader *tcp = (TCPHeader *)(((uint8_t *)ipv4) + ipv4->getHeaderLength());
+ if (tcp->getSynFlag()) {
+ tcp->setAckNumber(CNatRxManager::calc_tcp_ack_val(node->get_short_fid(), node->get_thread_id()));
+ }
#ifdef NAT_TRACE_
- printf(" %.3f : flow_id: %x thread_id %x TCP ack %x\n",now_sec(), node->get_short_fid(), node->get_thread_id(), tcp->getAckNumber());
+ printf(" %.3f : flow_id: %x thread_id %x TCP ack %x\n",now_sec(), node->get_short_fid(), node->get_thread_id(), tcp->getAckNumber());
#endif
- }
- }
+ }
+ }
}
/* in all cases update the ip using the outside ip */
@@ -3005,7 +3008,7 @@ inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex(CGenNode * node,
BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
- memcpy(p,m_packet->raw,len);
+ memcpy(p,m_packet->raw,len);
update_pkt_info2(p,flow_info,0,node);
@@ -3014,7 +3017,7 @@ inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex(CGenNode * node,
return(m);
}
-
+
inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex_big(CGenNode * node,
CFlowInfo * flow_info){
rte_mbuf_t * m;
@@ -3029,7 +3032,7 @@ inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex_big(CGenNode * node,
BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
- memcpy(p,m_packet->raw,len);
+ memcpy(p,m_packet->raw,len);
update_pkt_info2(p,flow_info,0,node);
@@ -3055,7 +3058,7 @@ inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex_vm(CGenNode * node,
/* alloc big buffer to update it*/
m = CGlobalInfo::pktmbuf_alloc(node->get_socket_id(), len);
- assert(m);
+ assert(m);
/* append the additional bytes requested and update later */
char *p=rte_pktmbuf_append(m, len);
@@ -3063,7 +3066,7 @@ inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex_vm(CGenNode * node,
BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
/* copy the headers until the payload */
- memcpy(p, m_packet->raw, m_pkt_indication.getPayloadOffset() );
+ memcpy(p, m_packet->raw, m_pkt_indication.getPayloadOffset() );
CMiniVM vm;
vm.m_pkt_info = this;
vm.m_pyload_mbuf_ptr = p+m_pkt_indication.getPayloadOffset();
@@ -3077,7 +3080,7 @@ inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex_vm(CGenNode * node,
/* update IP length , and TCP checksum , we can accelerate this using hardware ! */
uint16_t pkt_adjust = vm.m_new_pkt_size - m_packet->pkt_len;
update_pkt_info2(p,flow_info,pkt_adjust,node);
-
+
/* return change in packet size due to packet tranforms */
*s_size = vm.m_new_pkt_size - m_packet->pkt_len;
@@ -3110,7 +3113,7 @@ inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf(CGenNode * node){
BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
- memcpy(p,m_packet->raw,len);
+ memcpy(p,m_packet->raw,len);
update_pkt_info(p,node);
@@ -3119,7 +3122,7 @@ inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf(CGenNode * node){
return m;
}
-
+
inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_big(CGenNode * node){
rte_mbuf_t * m;
uint16_t len = m_packet->pkt_len;
@@ -3133,7 +3136,7 @@ inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_big(CGenNode * node){
BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
- memcpy(p,m_packet->raw,len);
+ memcpy(p,m_packet->raw,len);
update_pkt_info(p,node);
@@ -3206,15 +3209,15 @@ public:
class CCapFileFlowInfo {
public:
enum load_cap_file_err {
- kOK = 0,
- kFileNotExist,
- kNegTimestamp,
- kNoSyn,
- kTCPOffsetTooBig,
- kNoTCPFromServer,
- kPktNotSupp,
- kPktProcessFail,
- kCapFileErr
+ kOK = 0,
+ kFileNotExist,
+ kNegTimestamp,
+ kNoSyn,
+ kTCPOffsetTooBig,
+ kNoTCPFromServer,
+ kPktNotSupp,
+ kPktProcessFail,
+ kCapFileErr
};
bool Create();
@@ -3253,7 +3256,7 @@ public:
return (m_total_errors);
}
- // return the cap file length in sec
+ // return the cap file length in sec
double get_cap_file_length_sec();
void get_total_memory(CCCapFileMemoryUsage & memory);
@@ -3287,8 +3290,8 @@ public:
// IPv4 addressing
// IPv6 addressing
- std::vector <uint16_t> m_src_ipv6;
- std::vector <uint16_t> m_dst_ipv6;
+ std::vector <uint16_t> m_src_ipv6;
+ std::vector <uint16_t> m_dst_ipv6;
bool m_ipv6_set;
// new section
@@ -3342,7 +3345,7 @@ public:
double duration_sec;
double m_cps;
double m_mb_sec;
- double m_mB_sec;
+ double m_mB_sec;
double m_c_flows;
double m_pps ;
double m_total_Mbytes ;
@@ -3367,7 +3370,7 @@ public:
class CFlowGeneratorRecPerThread {
public:
- bool Create(CTupleGeneratorSmart * global_gen,
+ bool Create(CTupleGeneratorSmart * global_gen,
CFlowYamlInfo * info,
CFlowsYamlInfo * yaml_flow_info,
CCapFileFlowInfo * flow_info,
@@ -3388,11 +3391,11 @@ public:
CCapFileFlowInfo * m_flow_info;
CFlowYamlInfo * m_info;
CFlowsYamlInfo * m_flows_info;
- CPolicer m_policer;
+ CPolicer m_policer;
uint16_t m_id ;
uint32_t m_thread_id;
bool m_tuple_gen_was_set;
-} __rte_cache_aligned;
+} __rte_cache_aligned;
@@ -3405,16 +3408,16 @@ public:
uint16_t _id);
void Delete();
public:
-
+
void Dump(FILE *fd);
void getFlowStats(CFlowStats * stats);
public:
CCapFileFlowInfo m_flow_info;
CFlowYamlInfo * m_info;
CFlowsYamlInfo * m_flows_info;
- CPolicer m_policer;
+ CPolicer m_policer;
uint16_t m_id;
-private:
+private:
void fixup_ipg_if_needed();
};
@@ -3423,7 +3426,7 @@ public:
CPPSMeasure(){
reset();
}
- //reset
+ //reset
void reset(void){
m_start=false;
m_last_time_msec=0;
@@ -3453,7 +3456,7 @@ public:
class CBwMeasure {
public:
CBwMeasure();
- //reset
+ //reset
void reset(void);
//add packet size
double add(uint64_t size);
@@ -3498,7 +3501,7 @@ public:
friend class CNodeGenerator;
friend class CPluginCallbackSimple;
friend class CCapFileFlowInfo;
-
+
typedef CGenericMap<flow_id_t,CGenNode> flow_id_node_t;
bool Create(uint32_t thread_id,
@@ -3518,23 +3521,23 @@ public:
m_node_gen.set_vif(v_if);
}
- /* return the dual port ID this thread is attached to in 4 ports configuration
- there are 2 dual-ports
+ /* return the dual port ID this thread is attached to in 4 ports configuration
+ there are 2 dual-ports
thread 0 - dual 0
thread 1 - dual 1
thread 2 - dual 0
thread 3 - dual 1
-
- */
+
+ */
uint32_t getDualPortId();
public :
double get_total_kcps();
double get_total_kcps(uint8_t pool_idx, bool is_client);
double get_delta_flow_is_sec();
- double get_longest_flow();
- double get_longest_flow(uint8_t pool_idx, bool is_client);
+ double get_longest_flow();
+ double get_longest_flow(uint8_t pool_idx, bool is_client);
void inc_current_template(void);
int generate_flows_roundrobin(bool *done);
int reschedule_flow(CGenNode *node);
@@ -3627,9 +3630,9 @@ public:
CFlowGenList * m_flow_list;
rte_mempool_t * m_node_pool;
- std::vector<CFlowGeneratorRecPerThread *> m_cap_gen;
+ std::vector<CFlowGeneratorRecPerThread *> m_cap_gen;
- CFlowsYamlInfo m_yaml_info;
+ CFlowsYamlInfo m_yaml_info;
CTupleGeneratorSmart m_smart_gen;
@@ -3644,7 +3647,7 @@ public:
double m_stop_time_sec;
CPreviewMode m_preview_mode;
-public:
+public:
CFlowGenStats m_stats;
CBwMeasure m_mb_sec;
CCpuUtlDp m_cpu_dp_u;
@@ -3663,7 +3666,7 @@ private:
bool m_terminated_by_master;
private:
- uint8_t m_cacheline_pad[RTE_CACHE_LINE_SIZE][19]; // improve prefech
+ uint8_t m_cacheline_pad[RTE_CACHE_LINE_SIZE][19]; // improve prefech
} __rte_cache_aligned ;
inline CGenNode * CFlowGenListPerThread::create_node(void){
@@ -3726,7 +3729,7 @@ public:
public:
std::vector<CFlowGeneratorRec *> m_cap_gen; /* global info */
CFlowsYamlInfo m_yaml_info; /* global yaml*/
- std::vector<CFlowGenListPerThread *> m_threads_info;
+ std::vector<CFlowGenListPerThread *> m_threads_info;
CFlowGenListMac m_mac_info;
};
@@ -3761,19 +3764,19 @@ inline void CCapFileFlowInfo::generate_flow(CTupleTemplateGeneratorSmart * tup
node->m_flow_info = this;
node->m_flags=0;
node->m_template_info =template_info;
- node->m_tuple_gen = tuple_gen->get_gen();
+ node->m_tuple_gen = tuple_gen->get_gen();
node->m_src_ip= tuple.getClient();
node->m_dest_ip = tuple.getServer();
node->m_src_idx = tuple.getClientId();
node->m_dest_idx = tuple.getServerId();
node->m_src_port = tuple.getClientPort();
- memcpy(&node->m_src_mac,
- tuple.getClientMac(),
+ memcpy(&node->m_src_mac,
+ tuple.getClientMac(),
sizeof(mac_addr_align_t));
node->m_plugin_info =(void *)0;
if ( unlikely( CGlobalInfo::is_learn_mode() ) ){
- // check if flow is two direction
+ // check if flow is two direction
if ( lp->m_pkt_indication.m_desc.IsBiDirectionalFlow() ) {
/* we are in learn mode */
CFlowGenListPerThread * lpThread=gen->Parent();
@@ -3822,7 +3825,7 @@ inline void CFlowGeneratorRecPerThread::generate_flow(CNodeGenerator * gen,
uint64_t flow_id,
CGenNode * node){
- m_flow_info->generate_flow(&tuple_gen,
+ m_flow_info->generate_flow(&tuple_gen,
gen,
time,
flow_id,
@@ -3869,7 +3872,7 @@ inline void CGenNode::update_next_pkt_in_flow(void){
m_pkt_info = m_flow_info->GetPacket((pkt_index-1));
}
-inline void CGenNode::reset_pkt_in_flow(void){
+inline void CGenNode::reset_pkt_in_flow(void){
m_pkt_info = m_flow_info->GetPacket(0);
}
@@ -3898,7 +3901,7 @@ public:
class CPluginCallbackSimple : public CPluginCallback {
public:
virtual void on_node_first(uint8_t plugin_id,CGenNode * node,
- CFlowYamlInfo * template_info,
+ CFlowYamlInfo * template_info,
CTupleTemplateGeneratorSmart * tuple_gen,
CFlowGenListPerThread * flow_gen);
virtual void on_node_last(uint8_t plugin_id,CGenNode * node);
@@ -3950,7 +3953,7 @@ inline pkt_dir_t CGenNode::cur_interface_dir(){
return (is_eligible_from_server_side()?SERVER_SIDE:CLIENT_SIDE);
}else{
return ( is_init ?CLIENT_SIDE:SERVER_SIDE);
- }
+ }
}
diff --git a/src/common/Network/Packet/EthernetHeader.h b/src/common/Network/Packet/EthernetHeader.h
index 87d1ed91..c9dcdbe2 100755
--- a/src/common/Network/Packet/EthernetHeader.h
+++ b/src/common/Network/Packet/EthernetHeader.h
@@ -1,5 +1,5 @@
/*
-Copyright (c) 2015-2015 Cisco Systems, Inc.
+Copyright (c) 2015-2016 Cisco Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@ limitations under the License.
#include "PacketHeaderBase.h"
#include "MacAddress.h"
+#define ETH_HDR_LEN 14
/**
* This class encapsulates an ethernet header.
diff --git a/src/common/basic_utils.cpp b/src/common/basic_utils.cpp
index 34c37755..4f5578a6 100755
--- a/src/common/basic_utils.cpp
+++ b/src/common/basic_utils.cpp
@@ -17,6 +17,7 @@ limitations under the License.
#include <ctype.h>
#include <stdio.h>
#include <string>
+#include <sstream>
bool utl_is_file_exists (const std::string& name) {
if (FILE *file = fopen(name.c_str(), "r")) {
@@ -175,3 +176,25 @@ void utl_macaddr_to_str(const uint8_t *macaddr, std::string &output) {
}
}
+
+/**
+ * generate a random connection handler
+ *
+ */
+std::string
+utl_generate_random_str(unsigned int &seed, int len) {
+ std::stringstream ss;
+
+ static const char alphanum[] =
+ "0123456789"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz";
+
+ /* generate 8 bytes of random handler */
+ for (int i = 0; i < len; ++i) {
+ ss << alphanum[rand_r(&seed) % (sizeof(alphanum) - 1)];
+ }
+
+ return (ss.str());
+}
+
diff --git a/src/common/basic_utils.h b/src/common/basic_utils.h
index 77282eea..63e858ab 100755
--- a/src/common/basic_utils.h
+++ b/src/common/basic_utils.h
@@ -21,8 +21,6 @@ limitations under the License.
#include <stdio.h>
#include <string>
-
-
/**
* the round must be power 2 e.g 2,4,8...
*
@@ -87,6 +85,8 @@ bool utl_is_file_exists (const std::string& name) ;
void utl_macaddr_to_str(const uint8_t *macaddr, std::string &output);
+std::string utl_generate_random_str(unsigned int &seed, int len);
+
#endif
diff --git a/src/flow_stat.cpp b/src/flow_stat.cpp
index 778c92b9..13f8eb16 100644
--- a/src/flow_stat.cpp
+++ b/src/flow_stat.cpp
@@ -18,6 +18,32 @@
See the License for the specific language governing permissions and
limitations under the License.
*/
+
+/*
+Important classes in this file:
+CFlowStatUserIdInfo - Information about one packet group id
+CFlowStatUserIdMap - Mapping between packet group id (chosen by user) and hardware counter id
+CFlowStatHwIdMap - Mapping between hardware id and packet group id
+CFlowStatRuleMgr - API to users of the file
+
+General idea of operation:
+For each stream needing flow statistics, the user provides a packet group id (pg_id). Several streams can share the same pg_id.
+We maintain a reference count.
+When doing start_stream, for the first stream in pg_id, hw_id is associated with the pg_id, and relevant hardware rules are
+inserted (on supported hardware). When stopping all streams with the pg_id, the hw_id <--> pg_id mapping is removed, hw_id is
+returned to the free hw_id pool, and hardware rules are removed. Counters for the pg_id are kept.
+If streams are started again, a new hw_id is assigned and the counters continue from where they stopped. Only when deleting
+all streams using a certain pg_id is the information about this pg_id freed.
+
+For each stream we keep state in the m_rx_check.m_hw_id field. Since we keep reference counts for certain structs, we want to
+protect against illegal operations, like starting a stream that is already started or stopping one that is already stopped...
+State machine is:
+stream_init: HW_ID_INIT
+stream_add: HW_ID_FREE
+stream_start: legal hw_id (range is 0..MAX_FLOW_STATS)
+stream_stop: HW_ID_FREE
+stream_del: HW_ID_INIT
+ */
#include <sstream>
#include <string>
#include <iostream>
@@ -26,6 +52,7 @@
#include "internal_api/trex_platform_api.h"
#include "trex_stateless.h"
#include "trex_stateless_messaging.h"
+#include "trex_stateless_rx_core.h"
#include "trex_stream.h"
#include "flow_stat_parser.h"
#include "flow_stat.h"
@@ -33,8 +60,8 @@
#define FLOW_STAT_ADD_ALL_PORTS 255
-static const uint16_t FREE_HW_ID = UINT16_MAX;
-static bool no_stat_supported = true;
+static const uint16_t HW_ID_INIT = UINT16_MAX;
+static const uint16_t HW_ID_FREE = UINT16_MAX - 1;
inline std::string methodName(const std::string& prettyFunction)
{
@@ -48,6 +75,11 @@ inline std::string methodName(const std::string& prettyFunction)
#define __METHOD_NAME__ methodName(__PRETTY_FUNCTION__)
#ifdef __DEBUG_FUNC_ENTRY__
#define FUNC_ENTRY (std::cout << __METHOD_NAME__ << std::endl);
+#ifdef __STREAM_DUMP__
+#define stream_dump(stream) stream->Dump(stderr)
+#else
+#define stream_dump(stream)
+#endif
#else
#define FUNC_ENTRY
#endif
@@ -107,7 +139,7 @@ int CFlowStatUserIdInfo::add_stream(uint8_t proto) {
#endif
if (proto != m_proto)
- return -1;
+ throw TrexException("Can't use same pg_id for streams with different l4 protocol");
m_ref_count++;
@@ -147,7 +179,7 @@ uint16_t CFlowStatUserIdMap::get_hw_id(uint32_t user_id) {
CFlowStatUserIdInfo *cf = find_user_id(user_id);
if (cf == NULL) {
- return FREE_HW_ID;
+ return HW_ID_FREE;
} else {
return cf->get_hw_id();
}
@@ -198,7 +230,7 @@ int CFlowStatUserIdMap::add_stream(uint32_t user_id, uint8_t proto) {
if (! c_user_id) {
c_user_id = add_user_id(user_id, proto);
if (! c_user_id)
- return -1;
+ throw TrexException("Failed adding statistic counter - Failure in add_stream");
return 0;
} else {
return c_user_id->add_stream(proto);
@@ -214,11 +246,11 @@ int CFlowStatUserIdMap::del_stream(uint32_t user_id) {
c_user_id = find_user_id(user_id);
if (! c_user_id) {
- return -1;
+ throw TrexException("Trying to delete stream which does not exist");
}
if (c_user_id->del_stream() == 0) {
- // ref count of this port became 0. can release this entry.
+ // ref count of this entry became 0. can release this entry.
m_map.erase(user_id);
delete c_user_id;
}
@@ -237,13 +269,13 @@ int CFlowStatUserIdMap::start_stream(uint32_t user_id, uint16_t hw_id) {
if (! c_user_id) {
fprintf(stderr, "%s Error: Trying to associate hw id %d to user_id %d but it does not exist\n"
, __func__, hw_id, user_id);
- return -1;
+ throw TrexException("Internal error: Trying to associate non exist group id");
}
if (c_user_id->is_hw_id()) {
- fprintf(stderr, "%s Error: Trying to associate hw id %d to user_id %d but it is already associate to %u\n"
+ fprintf(stderr, "%s Error: Trying to associate hw id %d to user_id %d but it is already associated to %u\n"
, __func__, hw_id, user_id, c_user_id->get_hw_id());
- return -1;
+ throw TrexException("Internal error: Trying to associate used packet group id to different hardware counter");
}
c_user_id->set_hw_id(hw_id);
c_user_id->add_started_stream();
@@ -260,9 +292,9 @@ int CFlowStatUserIdMap::start_stream(uint32_t user_id) {
c_user_id = find_user_id(user_id);
if (! c_user_id) {
- fprintf(stderr, "%s Error: Trying to start stream on user_id %d but it does not exist\n"
+ fprintf(stderr, "%s Error: Trying to start stream on pg_id %d but it does not exist\n"
, __func__, user_id);
- return -1;
+ throw TrexException("Trying to start stream with non exist packet group id");
}
c_user_id->add_started_stream();
@@ -281,9 +313,9 @@ int CFlowStatUserIdMap::stop_stream(uint32_t user_id) {
c_user_id = find_user_id(user_id);
if (! c_user_id) {
- fprintf(stderr, "%s Error: Trying to stop stream on user_id %d but it does not exist\n"
+ fprintf(stderr, "%s Error: Trying to stop stream on pg_id %d but it does not exist\n"
, __func__, user_id);
- return -1;
+ throw TrexException("Trying to stop stream with non exist packet group id");
}
return c_user_id->stop_started_stream();
@@ -332,7 +364,7 @@ uint16_t CFlowStatUserIdMap::unmap(uint32_t user_id) {
CFlowStatHwIdMap::CFlowStatHwIdMap() {
m_num_free = MAX_FLOW_STATS;
for (int i = 0; i < MAX_FLOW_STATS; i++) {
- m_map[i] = FREE_HW_ID;
+ m_map[i] = HW_ID_FREE;
}
}
@@ -357,11 +389,11 @@ std::ostream& operator<<(std::ostream& os, const CFlowStatHwIdMap& cf) {
uint16_t CFlowStatHwIdMap::find_free_hw_id() {
for (int i = 0; i < MAX_FLOW_STATS; i++) {
- if (m_map[i] == FREE_HW_ID)
+ if (m_map[i] == HW_ID_FREE)
return i;
}
- return FREE_HW_ID;
+ return HW_ID_FREE;
}
void CFlowStatHwIdMap::map(uint16_t hw_id, uint32_t user_id) {
@@ -378,7 +410,7 @@ void CFlowStatHwIdMap::unmap(uint16_t hw_id) {
std::cout << __METHOD_NAME__ << " hw id:" << hw_id << std::endl;
#endif
- m_map[hw_id] = FREE_HW_ID;
+ m_map[hw_id] = HW_ID_FREE;
m_num_free++;
}
@@ -388,6 +420,34 @@ CFlowStatRuleMgr::CFlowStatRuleMgr() {
m_max_hw_id = -1;
m_num_started_streams = 0;
m_ring_to_rx = NULL;
+ m_capabilities = 0;
+ m_parser = NULL;
+ m_rx_core = NULL;
+}
+
+CFlowStatRuleMgr::~CFlowStatRuleMgr() {
+ if (m_parser)
+ delete m_parser;
+}
+
+void CFlowStatRuleMgr::create() {
+ uint16_t num_counters, capabilities;
+ TrexStateless *tstateless = get_stateless_obj();
+ assert(tstateless);
+
+ m_api = tstateless->get_platform_api();
+ assert(m_api);
+ m_api->get_interface_stat_info(0, num_counters, capabilities);
+ m_api->get_port_num(m_num_ports);
+ for (uint8_t port = 0; port < m_num_ports; port++) {
+ assert(m_api->reset_hw_flow_stats(port) == 0);
+ }
+ m_ring_to_rx = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
+ assert(m_ring_to_rx);
+ m_rx_core = get_rx_sl_core_obj();
+ m_parser = m_api->get_flow_stat_parser();
+ assert(m_parser);
+ m_capabilities = capabilities;
}
std::ostream& operator<<(std::ostream& os, const CFlowStatRuleMgr& cf) {
@@ -397,110 +457,111 @@ std::ostream& operator<<(std::ostream& os, const CFlowStatRuleMgr& cf) {
return os;
}
-int CFlowStatRuleMgr::compile_stream(const TrexStream * stream, Cxl710Parser &parser) {
+int CFlowStatRuleMgr::compile_stream(const TrexStream * stream, CFlowStatParser *parser) {
#ifdef __DEBUG_FUNC_ENTRY__
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << " en:";
std::cout << stream->m_rx_check.m_enabled << std::endl;
#endif
- // currently we support only IP ID rule types
- // all our ports are the same type, so testing port 0 is enough
- uint16_t num_counters, capabilities;
- m_api->get_interface_stat_info(0, num_counters, capabilities);
- if ((capabilities & TrexPlatformApi::IF_STAT_IPV4_ID) == 0) {
- return -2;
- }
-
- if (parser.parse(stream->m_pkt.binary, stream->m_pkt.len) != 0) {
+ if (parser->parse(stream->m_pkt.binary, stream->m_pkt.len) != 0) {
// if we could not parse the packet, but no stat count needed, it is probably OK.
if (stream->m_rx_check.m_enabled) {
fprintf(stderr, "Error: %s - Compilation failed\n", __func__);
- return -1;
+ throw TrexException("Failed parsing given packet for flow stat. Probably bad packet format.");
} else {
return 0;
}
}
- if (!parser.is_fdir_supported()) {
+ if (!parser->is_stat_supported()) {
if (stream->m_stream_id <= 0) {
- // rx stat not needed. Do nothing.
+ // flow stat not needed. Do nothing.
return 0;
} else {
- // rx stat needed, but packet format is not supported
- fprintf(stderr, "Error: %s - Unsupported packet format for rx stat\n", __func__);
- return -1;
+ // flow stat needed, but packet format is not supported
+ fprintf(stderr, "Error: %s - Unsupported packet format for flow stat\n", __func__);
+ throw TrexException("Unsupported packet format for flow stat on given interface type");
}
}
return 0;
}
-int CFlowStatRuleMgr::add_stream(const TrexStream * stream) {
+void CFlowStatRuleMgr::copy_state(TrexStream * from, TrexStream * to) {
+ to->m_rx_check.m_hw_id = from->m_rx_check.m_hw_id;
+}
+void CFlowStatRuleMgr::init_stream(TrexStream * stream) {
+ stream->m_rx_check.m_hw_id = HW_ID_INIT;
+}
+
+int CFlowStatRuleMgr::add_stream(TrexStream * stream) {
#ifdef __DEBUG_FUNC_ENTRY__
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << std::endl;
+ stream_dump(stream);
#endif
+ if (! stream->m_rx_check.m_enabled) {
+ return 0;
+ }
+
// Init everything here, and not in the constructor, since we rely on other objects
// By the time a stream is added everything else is initialized.
if (! m_api ) {
- TrexStateless *tstateless = get_stateless_obj();
- m_api = tstateless->get_platform_api();
- uint16_t num_counters, capabilities;
- m_api->get_interface_stat_info(0, num_counters, capabilities);
- if ((capabilities & TrexPlatformApi::IF_STAT_IPV4_ID) == 0) {
- // All our interfaces are from the same type. If statistics not supported.
- // no operation will work
- return -1;
- } else {
- no_stat_supported = false;
- }
- m_api->get_port_num(m_num_ports);
- for (uint8_t port = 0; port < m_num_ports; port++) {
- assert(m_api->reset_hw_flow_stats(port) == 0);
- }
- m_ring_to_rx = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
+ create();
}
- if (no_stat_supported)
- return -ENOTSUP;
+ //??? put back assert(stream->m_rx_check.m_hw_id == HW_ID_INIT);
- Cxl710Parser parser;
- int ret;
+ uint16_t rule_type = TrexPlatformApi::IF_STAT_IPV4_ID; // In the future need to get it from the stream;
- if (! stream->m_rx_check.m_enabled) {
- return 0;
+ if ((m_capabilities & rule_type) == 0) {
+ fprintf(stderr, "Error: %s - rule type not supported by interface\n", __func__);
+ throw TrexException("Interface does not support given rule type");
}
- if ((ret = compile_stream(stream, parser)) < 0)
- return ret;
+ // compile_stream throws exception if something goes wrong
+ compile_stream(stream, m_parser);
uint8_t l4_proto;
- if (parser.get_l4_proto(l4_proto) < 0) {
- printf("Error: %s failed finding l4 proto\n", __func__);
- return -1;
+ if (m_parser->get_l4_proto(l4_proto) < 0) {
+ fprintf(stderr, "Error: %s failed finding l4 proto\n", __func__);
+ throw TrexException("Failed determining l4 proto for packet");
}
- return m_user_id_map.add_stream(stream->m_rx_check.m_pg_id, l4_proto);
+ // throws exception if there is an error
+ m_user_id_map.add_stream(stream->m_rx_check.m_pg_id, l4_proto);
+
+ stream->m_rx_check.m_hw_id = HW_ID_FREE;
+ return 0;
}
-int CFlowStatRuleMgr::del_stream(const TrexStream * stream) {
+int CFlowStatRuleMgr::del_stream(TrexStream * stream) {
#ifdef __DEBUG_FUNC_ENTRY__
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << std::endl;
+ stream_dump(stream);
#endif
- if (no_stat_supported)
- return -ENOTSUP;
-
if (! stream->m_rx_check.m_enabled) {
return 0;
}
- if (m_user_id_map.is_started(stream->m_rx_check.m_pg_id)) {
- std::cerr << "Error: Trying to delete flow statistics stream " << stream->m_rx_check.m_pg_id
- << " which is not stopped." << std::endl;
- return -1;
+ if (! m_api)
+ throw TrexException("Called del_stream, but no stream was added");
+
+ // we got del_stream command for a stream which has valid hw_id.
+ // Probably someone forgot to call stop
+ if(stream->m_rx_check.m_hw_id < MAX_FLOW_STATS) {
+ stop_stream(stream);
+ }
+
+ // calling del for same stream twice, or for a stream which was never "added"
+ if(stream->m_rx_check.m_hw_id == HW_ID_INIT) {
+ return 0;
}
+ // Throws exception in case of error
+ m_user_id_map.del_stream(stream->m_rx_check.m_pg_id);
+ stream->m_rx_check.m_hw_id = HW_ID_INIT;
- return m_user_id_map.del_stream(stream->m_rx_check.m_pg_id);
+ return 0;
}
// called on all streams, when stream start to transmit
@@ -509,46 +570,73 @@ int CFlowStatRuleMgr::del_stream(const TrexStream * stream) {
// If stream does not need flow stat counting, make sure it does not interfere with
// other streams that do need stat counting.
// Might change the IP ID of the stream packet
-int CFlowStatRuleMgr::start_stream(TrexStream * stream, uint16_t &ret_hw_id) {
+int CFlowStatRuleMgr::start_stream(TrexStream * stream) {
#ifdef __DEBUG_FUNC_ENTRY__
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << std::endl;
+ stream_dump(stream);
#endif
- Cxl710Parser parser;
int ret;
-
- if (no_stat_supported)
- return -ENOTSUP;
-
- if ((ret = compile_stream(stream, parser)) < 0)
- return ret;
+ // Streams which do not need statistics might be started before any stream that does
+ // need statistics, so start_stream might be called before add_stream
+ if (! m_api ) {
+ create();
+ }
// first handle streams that do not need rx stat
if (! stream->m_rx_check.m_enabled) {
- // no need for stat count
+ try {
+ compile_stream(stream, m_parser);
+ } catch (const TrexException &) {
+ // If no statistics needed, and we can't parse the stream, that's OK.
+ return 0;
+ }
+
uint16_t ip_id;
- if (parser.get_ip_id(ip_id) < 0) {
- return 0; // if we could not find and ip id, no need to fix
+ if (m_parser->get_ip_id(ip_id) < 0) {
+ return 0; // if we could not find the ip id, no need to fix
}
// verify no reserved IP_ID used, and change if needed
if (ip_id >= IP_ID_RESERVE_BASE) {
- if (parser.set_ip_id(ip_id & 0xefff) < 0) {
- return -1;
+ if (m_parser->set_ip_id(ip_id & 0xefff) < 0) {
+ throw TrexException("Stream IP ID in reserved range. Failed changing it");
}
}
return 0;
}
- uint16_t hw_id;
// from here, we know the stream needs rx stat
+
+ // Starting a stream which was never added
+ if (stream->m_rx_check.m_hw_id == HW_ID_INIT) {
+ add_stream(stream);
+ }
+
+ if (stream->m_rx_check.m_hw_id < MAX_FLOW_STATS) {
+ throw TrexException("Starting a stream which was already started");
+ }
+
+ uint16_t rule_type = TrexPlatformApi::IF_STAT_IPV4_ID; // In the future, need to get it from the stream;
+
+ if ((m_capabilities & rule_type) == 0) {
+ fprintf(stderr, "Error: %s - rule type not supported by interface\n", __func__);
+ throw TrexException("Interface does not support given rule type");
+ }
+
+ // compile_stream throws exception if something goes wrong
+ if ((ret = compile_stream(stream, m_parser)) < 0)
+ return ret;
+
+ uint16_t hw_id;
+
if (m_user_id_map.is_started(stream->m_rx_check.m_pg_id)) {
m_user_id_map.start_stream(stream->m_rx_check.m_pg_id); // just increase ref count;
hw_id = m_user_id_map.get_hw_id(stream->m_rx_check.m_pg_id); // can't fail if we got here
} else {
hw_id = m_hw_id_map.find_free_hw_id();
- if (hw_id == FREE_HW_ID) {
+ if (hw_id == HW_ID_FREE) {
printf("Error: %s failed finding free hw_id\n", __func__);
- return -1;
+ throw TrexException("Failed allocating statistic counter. Probably all are used.");
} else {
if (hw_id > m_max_hw_id) {
m_max_hw_id = hw_id;
@@ -557,19 +645,43 @@ int CFlowStatRuleMgr::start_stream(TrexStream * stream, uint16_t &ret_hw_id) {
m_user_id_map.start_stream(user_id, hw_id);
m_hw_id_map.map(hw_id, user_id);
add_hw_rule(hw_id, m_user_id_map.l4_proto(user_id));
+ // clear hardware counters. Just in case we have garbage from previous iteration
+ rx_per_flow_t rx_counter;
+ tx_per_flow_t tx_counter;
+ for (uint8_t port = 0; port < m_num_ports; port++) {
+ m_api->get_flow_stats(port, &rx_counter, (void *)&tx_counter, hw_id, hw_id, true);
+ }
}
}
- parser.set_ip_id(IP_ID_RESERVE_BASE + hw_id);
+ m_parser->set_ip_id(IP_ID_RESERVE_BASE + hw_id);
- ret_hw_id = hw_id;
+ // saving given hw_id on stream for use by tx statistics count
+ stream->m_rx_check.m_hw_id = hw_id;
#ifdef __DEBUG_FUNC_ENTRY__
- std::cout << "exit:" << __METHOD_NAME__ << " hw_id:" << ret_hw_id << std::endl;
+ std::cout << "exit:" << __METHOD_NAME__ << " hw_id:" << hw_id << std::endl;
+ stream_dump(stream);
#endif
if (m_num_started_streams == 0) {
send_start_stop_msg_to_rx(true); // First transmitting stream. Rx core should start reading packets;
+
+ // wait to make sure that message is acknowledged. RX core might be in deep sleep mode, and we want to
+ // start transmitting packets only after it is working, otherwise, packets will get lost.
+ if (m_rx_core) { // in simulation, m_rx_core will be NULL
+ int count = 0;
+ while (!m_rx_core->is_working()) {
+ delay(1);
+ count++;
+ if (count == 100) {
+ throw TrexException("Critical error!! - RX core failed to start");
+ }
+ }
+ }
+ } else {
+ // make sure rx core is working. If not, we got really confused somehow.
+ assert(m_rx_core->is_working());
}
m_num_started_streams++;
return 0;
@@ -583,17 +695,25 @@ int CFlowStatRuleMgr::add_hw_rule(uint16_t hw_id, uint8_t proto) {
return 0;
}
-int CFlowStatRuleMgr::stop_stream(const TrexStream * stream) {
+int CFlowStatRuleMgr::stop_stream(TrexStream * stream) {
#ifdef __DEBUG_FUNC_ENTRY__
std::cout << __METHOD_NAME__ << " user id:" << stream->m_rx_check.m_pg_id << std::endl;
+ stream_dump(stream);
#endif
- if (no_stat_supported)
- return -ENOTSUP;
-
if (! stream->m_rx_check.m_enabled) {
return 0;
}
+ if (! m_api)
+ throw TrexException("Called stop_stream, but no stream was added");
+
+ if (stream->m_rx_check.m_hw_id >= MAX_FLOW_STATS) {
+ // We allow stopping while already stopped. Will not hurt us.
+ return 0;
+ }
+
+ stream->m_rx_check.m_hw_id = HW_ID_FREE;
+
if (m_user_id_map.stop_stream(stream->m_rx_check.m_pg_id) == 0) {
// last stream associated with the entry stopped transmitting.
// remove user_id <--> hw_id mapping
@@ -601,7 +721,7 @@ int CFlowStatRuleMgr::stop_stream(const TrexStream * stream) {
uint16_t hw_id = m_user_id_map.get_hw_id(stream->m_rx_check.m_pg_id);
if (hw_id >= MAX_FLOW_STATS) {
fprintf(stderr, "Error: %s got wrong hw_id %d from unmap\n", __func__, hw_id);
- return -1;
+ throw TrexException("Internal error in stop_stream. Got bad hw_id");
} else {
// update counters, and reset before unmapping
CFlowStatUserIdInfo *p_user_id = m_user_id_map.find_user_id(m_hw_id_map.get_user_id(hw_id));
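Taken together, the flow_stat.cpp changes above replace the single FREE_HW_ID sentinel with a small per-stream state machine kept in m_rx_check.m_hw_id: HW_ID_INIT for a stream that was never added, HW_ID_FREE for a stream that was added but is not transmitting, and a real counter index below MAX_FLOW_STATS while it transmits. A minimal sketch of that lifecycle, using only the CFlowStatRuleMgr entry points shown above (the helper itself is not part of the patch):

    // Sketch only - the hw_id lifecycle implemented by CFlowStatRuleMgr in this patch
    #include "flow_stat.h"

    void flow_stat_lifecycle_sketch(CFlowStatRuleMgr &mgr, TrexStream *s) {
        mgr.init_stream(s);    // m_rx_check.m_hw_id = HW_ID_INIT   (never added)
        mgr.add_stream(s);     // m_rx_check.m_hw_id = HW_ID_FREE   (added, idle)
        mgr.start_stream(s);   // m_rx_check.m_hw_id < MAX_FLOW_STATS (counting)
        mgr.stop_stream(s);    // back to HW_ID_FREE; counter may be released
        mgr.del_stream(s);     // back to HW_ID_INIT
        // every failure along the way now throws TrexException instead of returning -1
    }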
diff --git a/src/flow_stat.h b/src/flow_stat.h
index 83f076de..06b54d70 100644
--- a/src/flow_stat.h
+++ b/src/flow_stat.h
@@ -37,6 +37,8 @@
typedef std::map<uint32_t, uint16_t> flow_stat_map_t;
typedef std::map<uint32_t, uint16_t>::iterator flow_stat_map_it_t;
+class CRxCoreStateless;
+
class tx_per_flow_t_ {
public:
tx_per_flow_t_() {
@@ -104,7 +106,7 @@ typedef class tx_per_flow_t_ tx_per_flow_t;
typedef class tx_per_flow_t_ rx_per_flow_t;
class CPhyEthIF;
-class Cxl710Parser;
+class CFlowStatParser;
class CFlowStatUserIdInfo {
public:
@@ -198,16 +200,20 @@ class CFlowStatRuleMgr {
};
CFlowStatRuleMgr();
+ ~CFlowStatRuleMgr();
friend std::ostream& operator<<(std::ostream& os, const CFlowStatRuleMgr& cf);
- int add_stream(const TrexStream * stream);
- int del_stream(const TrexStream * stream);
- int start_stream(TrexStream * stream, uint16_t &ret_hw_id);
- int stop_stream(const TrexStream * stream);
+ void copy_state(TrexStream * from, TrexStream * to);
+ void init_stream(TrexStream * stream);
+ int add_stream(TrexStream * stream);
+ int del_stream(TrexStream * stream);
+ int start_stream(TrexStream * stream);
+ int stop_stream(TrexStream * stream);
int get_active_pgids(flow_stat_active_t &result);
bool dump_json(std::string & json, bool baseline);
private:
- int compile_stream(const TrexStream * stream, Cxl710Parser &parser);
+ void create();
+ int compile_stream(const TrexStream * stream, CFlowStatParser *parser);
int add_hw_rule(uint16_t hw_id, uint8_t proto);
void send_start_stop_msg_to_rx(bool is_start);
@@ -216,9 +222,12 @@ class CFlowStatRuleMgr {
CFlowStatUserIdMap m_user_id_map; // map user ids to hw ids
uint8_t m_num_ports; // How many ports are being used
const TrexPlatformApi *m_api;
+ const CRxCoreStateless *m_rx_core;
int m_max_hw_id; // max hw id we ever used
uint32_t m_num_started_streams; // How many started (transmitting) streams we have
CNodeRing *m_ring_to_rx; // handle for sending messages to Rx core
+ CFlowStatParser *m_parser;
+ uint16_t m_capabilities;
};
#endif
diff --git a/src/flow_stat_parser.cpp b/src/flow_stat_parser.cpp
index 52824f73..8cb41fb7 100644
--- a/src/flow_stat_parser.cpp
+++ b/src/flow_stat_parser.cpp
@@ -25,38 +25,42 @@
#include <common/Network/Packet/EthernetHeader.h>
#include <flow_stat_parser.h>
-Cxl710Parser::Cxl710Parser() {
- reset();
-}
-
-void Cxl710Parser::reset() {
+void CFlowStatParser::reset() {
m_ipv4 = 0;
m_l4_proto = 0;
- m_fdir_supported = false;
+ m_stat_supported = false;
}
-int Cxl710Parser::parse(uint8_t *p, uint16_t len) {
+int CFlowStatParser::parse(uint8_t *p, uint16_t len) {
EthernetHeader *ether = (EthernetHeader *)p;
+ int min_len = ETH_HDR_LEN + IPV4_HDR_LEN;
+ reset();
+
+ if (len < min_len)
+ return -1;
switch( ether->getNextProtocol() ) {
case EthernetHeader::Protocol::IP :
- m_ipv4 = (IPHeader *)(p + 14);
- m_fdir_supported = true;
+ m_ipv4 = (IPHeader *)(p + ETH_HDR_LEN);
+ m_stat_supported = true;
break;
case EthernetHeader::Protocol::VLAN :
+ min_len += 4;
+ if (len < min_len)
+ return -1;
switch ( ether->getVlanProtocol() ){
case EthernetHeader::Protocol::IP:
m_ipv4 = (IPHeader *)(p + 18);
- m_fdir_supported = true;
+ m_stat_supported = true;
break;
default:
- m_fdir_supported = false;
+ m_stat_supported = false;
return -1;
}
break;
default:
- m_fdir_supported = false;
+ m_stat_supported = false;
return -1;
break;
}
@@ -64,7 +68,7 @@ int Cxl710Parser::parse(uint8_t *p, uint16_t len) {
return 0;
}
-int Cxl710Parser::get_ip_id(uint16_t &ip_id) {
+int CFlowStatParser::get_ip_id(uint16_t &ip_id) {
if (! m_ipv4)
return -1;
@@ -73,18 +77,18 @@ int Cxl710Parser::get_ip_id(uint16_t &ip_id) {
return 0;
}
-int Cxl710Parser::set_ip_id(uint16_t new_id) {
+int CFlowStatParser::set_ip_id(uint16_t new_id) {
if (! m_ipv4)
return -1;
// Updating checksum, not recalculating, so if someone put bad checksum on purpose, it will stay bad
- m_ipv4->updateCheckSum(m_ipv4->getId(), PKT_NTOHS(new_id));
+ m_ipv4->updateCheckSum(PKT_NTOHS(m_ipv4->getId()), PKT_NTOHS(new_id));
m_ipv4->setId(new_id);
return 0;
}
-int Cxl710Parser::get_l4_proto(uint8_t &proto) {
+int CFlowStatParser::get_l4_proto(uint8_t &proto) {
if (! m_ipv4)
return -1;
@@ -96,7 +100,7 @@ int Cxl710Parser::get_l4_proto(uint8_t &proto) {
static const uint16_t TEST_IP_ID = 0xabcd;
static const uint8_t TEST_L4_PROTO = 0x11;
-int Cxl710Parser::test() {
+int CFlowStatParser::test() {
uint16_t ip_id = 0;
uint8_t l4_proto;
uint8_t test_pkt[] = {
@@ -107,7 +111,7 @@ int Cxl710Parser::test() {
0x0a, 0xbc, 0x08, 0x00, // vlan
// IP header
0x45,0x02,0x00,0x30,
- 0x00,0x00,0x40,0x00,
+ 0x01,0x02,0x40,0x00,
0xff, TEST_L4_PROTO, 0xbd,0x04,
0x10,0x0,0x0,0x1,
0x30,0x0,0x0,0x1,
@@ -124,14 +128,37 @@ int Cxl710Parser::test() {
assert(m_ipv4->isChecksumOK() == true);
assert(get_l4_proto(l4_proto) == 0);
assert(l4_proto == TEST_L4_PROTO);
- assert(m_fdir_supported == true);
+ assert(m_stat_supported == true);
reset();
// bad packet
test_pkt[16] = 0xaa;
assert (parse(test_pkt, sizeof(test_pkt)) == -1);
- assert(m_fdir_supported == false);
+ assert(m_stat_supported == false);
+
+ return 0;
+}
+
+// On the 82599 10G card we do not support VLANs
+int C82599Parser::parse(uint8_t *p, uint16_t len) {
+ EthernetHeader *ether = (EthernetHeader *)p;
+ int min_len = ETH_HDR_LEN + IPV4_HDR_LEN;
+ reset();
+
+ if (len < min_len)
+ return -1;
+
+ switch( ether->getNextProtocol() ) {
+ case EthernetHeader::Protocol::IP :
+ m_ipv4 = (IPHeader *)(p + ETH_HDR_LEN);
+ m_stat_supported = true;
+ break;
+ default:
+ m_stat_supported = false;
+ return -1;
+ break;
+ }
return 0;
}
diff --git a/src/flow_stat_parser.h b/src/flow_stat_parser.h
index 606a1bec..8c9e1418 100644
--- a/src/flow_stat_parser.h
+++ b/src/flow_stat_parser.h
@@ -19,19 +19,33 @@
limitations under the License.
*/
-class Cxl710Parser {
+#ifndef __FLOW_STAT_PARSER_H__
+#define __FLOW_STAT_PARSER_H__
+
+// Basic flow stat parser. Relevant for xl710/x710/x350 cards
+#include "common/Network/Packet/IPHeader.h"
+
+class CFlowStatParser {
public:
- Cxl710Parser();
- void reset();
- int parse(uint8_t *pkt, uint16_t len);
- bool is_fdir_supported() {return m_fdir_supported == true;};
- int get_ip_id(uint16_t &ip_id);
- int set_ip_id(uint16_t ip_id);
- int get_l4_proto(uint8_t &proto);
- int test();
+ virtual ~CFlowStatParser() {};
+ virtual void reset();
+ virtual int parse(uint8_t *pkt, uint16_t len);
+ virtual bool is_stat_supported() {return m_stat_supported == true;};
+ virtual int get_ip_id(uint16_t &ip_id);
+ virtual int set_ip_id(uint16_t ip_id);
+ virtual int get_l4_proto(uint8_t &proto);
+ virtual int test();
- private:
+ protected:
IPHeader *m_ipv4;
- bool m_fdir_supported;
+ bool m_stat_supported;
uint8_t m_l4_proto;
};
+
+class C82599Parser : public CFlowStatParser {
+ public:
+ ~C82599Parser() {};
+ int parse(uint8_t *pkt, uint16_t len);
+};
+
+#endif
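The parser rename above turns the xl710-specific Cxl710Parser into a small hierarchy: CFlowStatParser accepts IPv4 with or without a VLAN tag, while C82599Parser narrows that to untagged IPv4 only. Callers hold a base-class pointer obtained from the platform API. A rough usage sketch; the method names and IP_ID_RESERVE_BASE come from this patch, the helper function is illustrative:

    // Illustrative only: mark a packet with a flow-stat hw_id via the parser API
    #include "flow_stat.h"          // IP_ID_RESERVE_BASE
    #include "flow_stat_parser.h"

    int mark_packet_sketch(CFlowStatParser *parser, uint8_t *pkt, uint16_t len, uint16_t hw_id) {
        if (parser->parse(pkt, len) != 0 || !parser->is_stat_supported())
            return -1;                                 // format not supported on this NIC
        uint16_t ip_id;
        if (parser->get_ip_id(ip_id) < 0)
            return -1;                                 // no IPv4 header found
        // set_ip_id() patches the IPv4 checksum incrementally, so a deliberately
        // bad checksum stays bad (see flow_stat_parser.cpp above)
        return parser->set_ip_id(IP_ID_RESERVE_BASE + hw_id);
    }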
diff --git a/src/gtest/trex_stateless_gtest.cpp b/src/gtest/trex_stateless_gtest.cpp
index c3dfcb95..a5cf3307 100644
--- a/src/gtest/trex_stateless_gtest.cpp
+++ b/src/gtest/trex_stateless_gtest.cpp
@@ -3581,7 +3581,7 @@ class rx_stat_pkt_parse : public testing::Test {
TEST_F(rx_stat_pkt_parse, x710_parser) {
- Cxl710Parser parser;
+ CFlowStatParser parser;
parser.test();
}
diff --git a/src/internal_api/trex_platform_api.h b/src/internal_api/trex_platform_api.h
index dbca5a8a..90eaa7c7 100644
--- a/src/internal_api/trex_platform_api.h
+++ b/src/internal_api/trex_platform_api.h
@@ -26,6 +26,7 @@ limitations under the License.
#include <vector>
#include <string>
#include <string.h>
+#include "flow_stat_parser.h"
#include "trex_defs.h"
/**
@@ -34,6 +35,7 @@ limitations under the License.
* @author imarom (06-Oct-15)
*/
+
class TrexPlatformGlobalStats {
public:
TrexPlatformGlobalStats() {
@@ -151,6 +153,7 @@ public:
virtual bool get_promiscuous(uint8_t port_id) const = 0;
virtual void flush_dp_messages() const = 0;
virtual int get_active_pgids(flow_stat_active_t &result) const = 0;
+ virtual CFlowStatParser *get_flow_stat_parser() const = 0;
virtual ~TrexPlatformApi() {}
};
@@ -180,6 +183,7 @@ public:
bool get_promiscuous(uint8_t port_id) const;
void flush_dp_messages() const;
int get_active_pgids(flow_stat_active_t &result) const;
+ CFlowStatParser *get_flow_stat_parser() const;
};
@@ -241,6 +245,7 @@ public:
void flush_dp_messages() const {
}
int get_active_pgids(flow_stat_active_t &result) const {return 0;}
+ CFlowStatParser *get_flow_stat_parser() const {return new CFlowStatParser();}
private:
int m_dp_core_count;
diff --git a/src/latency.h b/src/latency.h
index 3dd1cc36..f5f90cf9 100644
--- a/src/latency.h
+++ b/src/latency.h
@@ -22,6 +22,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
#include <bp_sim.h>
+#include <flow_stat.h>
#define L_PKT_SUBMODE_NO_REPLY 1
#define L_PKT_SUBMODE_REPLY 2
diff --git a/src/main.cpp b/src/main.cpp
index 6a6b5721..3c68990c 100755
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -253,6 +253,10 @@ TrexStateless * get_stateless_obj() {
return m_sim_statelss_obj;
}
+CRxCoreStateless * get_rx_sl_core_obj() {
+ return NULL;
+}
+
void set_stateless_obj(TrexStateless *obj) {
m_sim_statelss_obj = obj;
}
diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
index ee408c63..1f415958 100644
--- a/src/main_dpdk.cpp
+++ b/src/main_dpdk.cpp
@@ -148,6 +148,7 @@ public:
virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) { return -1;}
virtual int get_stat_counters_num() {return 0;}
virtual int get_rx_stat_capabilities() {return 0;}
+ virtual CFlowStatParser *get_flow_stat_parser();
};
@@ -281,6 +282,7 @@ public:
virtual int wait_for_stable_link();
virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
virtual int get_rx_stat_capabilities() {return TrexPlatformApi::IF_STAT_IPV4_ID;}
+ virtual CFlowStatParser *get_flow_stat_parser();
};
class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase10G {
@@ -332,9 +334,12 @@ public:
// disabling flow control on 40G using DPDK API causes the interface to malfunction
virtual bool flow_control_disable_supported(){return false;}
virtual bool hw_rx_stat_supported(){return true;}
+ virtual CFlowStatParser *get_flow_stat_parser();
+
private:
virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl, uint16_t ip_id, int queue, uint16_t stat_idx);
virtual int configure_rx_filter_rules_statfull(CPhyEthIF * _if);
+
private:
uint8_t m_if_per_card;
};
@@ -1231,6 +1236,10 @@ void CPhyEthIFStats::Dump(FILE *fd){
DP_A(rx_nombuf);
}
+// only on VM do we have rx queues on DP cores
+void CPhyEthIF::flush_dp_rx_queue(void) {
+}
+
// Clear the RX queue of an interface, dropping all packets
void CPhyEthIF::flush_rx_queue(void){
@@ -1735,8 +1744,8 @@ public:
virtual int send_node(CGenNode * node);
virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m);
+ virtual void flush_dp_rx_queue(void);
virtual int flush_tx_queue(void);
-
__attribute__ ((noinline)) void flush_rx_queue();
__attribute__ ((noinline)) void update_mac_addr(CGenNode * node,uint8_t *p);
@@ -1804,6 +1813,11 @@ bool CCoreEthIF::Create(uint8_t core_id,
return (true);
}
+// On VM, we get the packets in dp core, so just call general flush_rx_queue
+void CCoreEthIF::flush_dp_rx_queue(void) {
+ flush_rx_queue();
+}
+
// This function is only relevant if we are in VM. In this case, we only have one rx queue. Can't have
// rules to drop queue 0, and pass queue 1 to RX core, like in other cases.
// We receive all packets in the same core that transmitted, and handle them to RX core.
@@ -2699,7 +2713,7 @@ public:
CFlowGenList m_fl;
bool m_fl_was_init;
volatile uint8_t m_signal[BP_MAX_CORES] __rte_cache_aligned ; // Signal to main core when DP thread finished
- volatile bool m_rx_running; // Signal main core when RX thread finished
+ volatile bool m_sl_rx_running; // Signal main core when RX thread finished
CLatencyManager m_mg; // statefull RX core
CRxCoreStateless m_rx_sl; // stateless RX core
CTrexGlobalIoMode m_io_modes;
@@ -2793,7 +2807,9 @@ void CGlobalTRex::try_stop_all_cores(){
TrexStatelessDpQuit * dp_msg= new TrexStatelessDpQuit();
TrexStatelessRxQuit * rx_msg= new TrexStatelessRxQuit();
send_message_all_dp(dp_msg);
- send_message_to_rx(rx_msg);
+ if (get_is_stateless()) {
+ send_message_to_rx(rx_msg);
+ }
delete dp_msg;
// no need to delete rx_msg. Deleted by receiver
bool all_core_finished = false;
@@ -3804,16 +3820,16 @@ int CGlobalTRex::run_in_master() {
int CGlobalTRex::run_in_rx_core(void){
if (get_is_stateless()) {
- m_rx_running = true;
+ m_sl_rx_running = true;
m_rx_sl.start();
+ m_sl_rx_running = false;
} else {
if ( CGlobalInfo::m_options.is_rx_enabled() ){
- m_rx_running = true;
+ m_sl_rx_running = false;
m_mg.start(0);
}
}
- m_rx_running = false;
return (0);
}
@@ -3905,7 +3921,7 @@ bool CGlobalTRex::is_all_cores_finished() {
return false;
}
}
- if (m_rx_running)
+ if (m_sl_rx_running)
return false;
return true;
@@ -4116,11 +4132,14 @@ bool CCoreEthIF::process_rx_pkt(pkt_dir_t dir,
return (send);
}
-
TrexStateless * get_stateless_obj() {
return g_trex.m_trex_stateless;
}
+CRxCoreStateless * get_rx_sl_core_obj() {
+ return &g_trex.m_rx_sl;
+}
+
static int latency_one_lcore(__attribute__((unused)) void *dummy)
{
CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
@@ -4274,12 +4293,19 @@ int core_mask_sanity(uint32_t wanted_core_mask) {
wanted_core_num = num_set_bits(wanted_core_mask);
calc_core_num = num_set_bits(calc_core_mask);
+ if (calc_core_num == 1) {
+ printf ("Error: You have only 1 core available. Minimum configuration requires 2 cores\n");
+ printf(" If you are running on VM, consider adding more cores if possible\n");
+ return -1;
+ }
if (wanted_core_num > calc_core_num) {
printf("Error: You have %d threads available, but you asked for %d threads.\n", calc_core_num, wanted_core_num);
printf(" Calculation is: -c <num>(%d) * dual ports (%d) + 1 master thread %s"
, CGlobalInfo::m_options.preview.getCores(), CGlobalInfo::m_options.get_expected_dual_ports()
, get_is_rx_thread_enabled() ? "+1 latency thread (because of -l flag)\n" : "\n");
- printf(" Maybe try smaller -c <num>.\n");
+ if (CGlobalInfo::m_options.preview.getCores() > 1)
+ printf(" Maybe try smaller -c <num>.\n");
+ printf(" If you are running on VM, consider adding more cores if possible\n");
return -1;
}
@@ -4483,7 +4509,7 @@ int main_test(int argc , char * argv[]){
g_trex.reset_counters();
}
- g_trex.m_rx_running = false;
+ g_trex.m_sl_rx_running = false;
if ( get_is_stateless() ) {
g_trex.start_master_stateless();
@@ -4537,6 +4563,12 @@ int CTRexExtendedDriverBase::configure_drop_queue(CPhyEthIF * _if) {
return (rte_eth_dev_rx_queue_stop(port_id, 0));
}
+CFlowStatParser *CTRexExtendedDriverBase::get_flow_stat_parser() {
+ CFlowStatParser *parser = new CFlowStatParser();
+ assert (parser);
+ return parser;
+}
+
void wait_x_sec(int sec) {
int i;
printf(" wait %d sec ", sec);
@@ -4940,6 +4972,12 @@ int CTRexExtendedDriverBase10G::wait_for_stable_link(){
return (0);
}
+CFlowStatParser *CTRexExtendedDriverBase10G::get_flow_stat_parser() {
+ CFlowStatParser *parser = new C82599Parser();
+ assert (parser);
+ return parser;
+}
+
////////////////////////////////////////////////////////////////////////////////
void CTRexExtendedDriverBase40G::clear_extended_stats(CPhyEthIF * _if){
rte_eth_stats_reset(_if->get_port_id());
@@ -5167,6 +5205,12 @@ int CTRexExtendedDriverBase40G::wait_for_stable_link(){
return (0);
}
+CFlowStatParser *CTRexExtendedDriverBase40G::get_flow_stat_parser() {
+ CFlowStatParser *parser = new CFlowStatParser();
+ assert (parser);
+ return parser;
+}
+
/////////////////////////////////////////////////////////////////////
@@ -5407,3 +5451,8 @@ void TrexDpdkPlatformApi::flush_dp_messages() const {
int TrexDpdkPlatformApi::get_active_pgids(flow_stat_active_t &result) const {
return g_trex.m_trex_stateless->m_rx_flow_stat.get_active_pgids(result);
}
+
+CFlowStatParser *TrexDpdkPlatformApi::get_flow_stat_parser() const {
+ return CTRexExtendedDriverDb::Ins()->get_drv()
+ ->get_flow_stat_parser();
+}
diff --git a/src/main_dpdk.h b/src/main_dpdk.h
index ff1ea784..a9bfed39 100644
--- a/src/main_dpdk.h
+++ b/src/main_dpdk.h
@@ -122,6 +122,7 @@ class CPhyEthIF {
CPhyEthIFStats & get_stats(){
return ( m_stats );
}
+ void flush_dp_rx_queue(void);
void flush_rx_queue(void);
int add_rx_flow_stat_rule(uint8_t type, uint16_t proto, uint16_t id);
int del_rx_flow_stat_rule(uint8_t type, uint16_t proto, uint16_t id);
diff --git a/src/rpc-server/commands/trex_rpc_cmd_general.cpp b/src/rpc-server/commands/trex_rpc_cmd_general.cpp
index f054c0ed..f7a23188 100644
--- a/src/rpc-server/commands/trex_rpc_cmd_general.cpp
+++ b/src/rpc-server/commands/trex_rpc_cmd_general.cpp
@@ -38,6 +38,50 @@ limitations under the License.
using namespace std;
/**
+ * API sync
+ */
+trex_rpc_cmd_rc_e
+TrexRpcCmdAPISync::_run(const Json::Value &params, Json::Value &result) {
+ const Json::Value &api_vers = parse_array(params, "api_vers", result);
+
+ Json::Value api_ver_rc = Json::arrayValue;
+
+ /* for every element in the list - generate the appropriate API handler */
+ for (const auto &api_ver : api_vers) {
+ Json::Value single_rc;
+
+ /* only those are supported */
+ const std::string type = parse_choice(api_ver, "type", {"core"}, result);
+
+ int major = parse_int(api_ver, "major", result);
+ int minor = parse_int(api_ver, "minor", result);
+ APIClass::type_e api_type;
+
+ /* decode type of API */
+ if (type == "core") {
+ api_type = APIClass::API_CLASS_TYPE_CORE;
+ }
+
+ single_rc["type"] = type;
+
+ /* this section might throw an exception in case versions do not match */
+ try {
+ single_rc["api_h"] = get_stateless_obj()->verify_api(api_type, major, minor);
+
+ } catch (const TrexAPIException &e) {
+ generate_execute_err(result, e.what());
+ }
+
+ /* add to the response */
+ api_ver_rc.append(single_rc);
+ }
+
+ result["result"]["api_vers"] = api_ver_rc;
+
+ return (TREX_RPC_CMD_OK);
+}
+
+/**
* ping command
*/
trex_rpc_cmd_rc_e
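The new api_sync handler above expects an "api_vers" array whose entries carry "type", "major" and "minor", and answers with a per-class "api_h" handler string that later commands must echo back. A hedged sketch of the client-side request built with jsoncpp; the version numbers are illustrative, and only "core" is accepted as a type:

    #include <json/json.h>

    Json::Value build_api_sync_params_sketch() {
        Json::Value ver;
        ver["type"]  = "core";   // the only class the handler above accepts
        ver["major"] = 1;        // illustrative - must match the server's major
        ver["minor"] = 0;

        Json::Value params;
        params["api_vers"] = Json::arrayValue;
        params["api_vers"].append(ver);
        // the reply carries result["api_vers"][i]["api_h"], which the client then
        // attaches as "api_h" to every subsequent command of that API class
        return params;
    }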
diff --git a/src/rpc-server/commands/trex_rpc_cmd_stream.cpp b/src/rpc-server/commands/trex_rpc_cmd_stream.cpp
index 68bebeb6..40719325 100644
--- a/src/rpc-server/commands/trex_rpc_cmd_stream.cpp
+++ b/src/rpc-server/commands/trex_rpc_cmd_stream.cpp
@@ -545,7 +545,11 @@ TrexRpcCmdStartTraffic::_run(const Json::Value &params, Json::Value &result) {
std::string type = parse_choice(mul_obj, "type", TrexPortMultiplier::g_types, result);
std::string op = parse_string(mul_obj, "op", result);
double value = parse_double(mul_obj, "value", result);
-
+
+ if ( value <=0 ){
+ generate_parse_err(result, "multiplier can't be zero");
+ }
+
if (op != "abs") {
generate_parse_err(result, "start message can only specify absolute speed rate");
}
@@ -586,6 +590,27 @@ TrexRpcCmdStopTraffic::_run(const Json::Value &params, Json::Value &result) {
}
/***************************
+ * remove all hardware filters
+ *
+ **************************/
+trex_rpc_cmd_rc_e
+TrexRpcCmdRemoveRXFilters::_run(const Json::Value &params, Json::Value &result) {
+
+ uint8_t port_id = parse_port(params, result);
+ TrexStatelessPort *port = get_stateless_obj()->get_port_by_id(port_id);
+
+ try {
+ port->remove_rx_filters();
+ } catch (const TrexException &ex) {
+ generate_execute_err(result, ex.what());
+ }
+
+ result["result"] = Json::objectValue;
+
+ return (TREX_RPC_CMD_OK);
+}
+
+/***************************
* get all streams
*
**************************/
diff --git a/src/rpc-server/commands/trex_rpc_cmds.h b/src/rpc-server/commands/trex_rpc_cmds.h
index c4b01b85..428bdd7b 100644
--- a/src/rpc-server/commands/trex_rpc_cmds.h
+++ b/src/rpc-server/commands/trex_rpc_cmds.h
@@ -36,33 +36,39 @@ class TrexStream;
* syntactic sugar for creating a simple command
*/
-#define TREX_RPC_CMD_DEFINE_EXTENDED(class_name, cmd_name, param_count, needs_ownership, ext) \
- class class_name : public TrexRpcCommand { \
- public: \
- class_name () : TrexRpcCommand(cmd_name, param_count, needs_ownership) {} \
- protected: \
- virtual trex_rpc_cmd_rc_e _run(const Json::Value &params, Json::Value &result); \
- ext \
+#define TREX_RPC_CMD_DEFINE_EXTENDED(class_name, cmd_name, param_count, needs_ownership, api_type, ext) \
+ class class_name : public TrexRpcCommand { \
+ public: \
+ class_name () : TrexRpcCommand(cmd_name, param_count, needs_ownership, api_type) {} \
+ protected: \
+ virtual trex_rpc_cmd_rc_e _run(const Json::Value &params, Json::Value &result); \
+ ext \
}
-#define TREX_RPC_CMD_DEFINE(class_name, cmd_name, param_count, needs_ownership) TREX_RPC_CMD_DEFINE_EXTENDED(class_name, cmd_name, param_count, needs_ownership, ;)
+#define TREX_RPC_CMD_DEFINE(class_name, cmd_name, param_count, needs_ownership, api_type) TREX_RPC_CMD_DEFINE_EXTENDED(class_name, cmd_name, param_count, needs_ownership, api_type, ;)
/**
* test cmds
*/
-TREX_RPC_CMD_DEFINE(TrexRpcCmdTestAdd, "test_add", 2, false);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdTestSub, "test_sub", 2, false);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdTestAdd, "test_add", 2, false, APIClass::API_CLASS_TYPE_NO_API);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdTestSub, "test_sub", 2, false, APIClass::API_CLASS_TYPE_NO_API);
+
+/**
+ * api_sync and ping commands are always present and valid (no API handshake required)
+ */
+TREX_RPC_CMD_DEFINE(TrexRpcCmdAPISync, "api_sync", 1, false, APIClass::API_CLASS_TYPE_NO_API);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdPing, "ping", 0, false, APIClass::API_CLASS_TYPE_NO_API);
/**
* general cmds
*/
-TREX_RPC_CMD_DEFINE(TrexRpcCmdPing, "ping", 0, false);
-TREX_RPC_CMD_DEFINE(TrexRpcPublishNow, "publish_now", 2, false);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdGetCmds, "get_supported_cmds", 0, false);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdGetVersion, "get_version", 0, false);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdGetActivePGIds, "get_active_pgids",0, false);
-TREX_RPC_CMD_DEFINE_EXTENDED(TrexRpcCmdGetSysInfo, "get_system_info", 0, false,
+TREX_RPC_CMD_DEFINE(TrexRpcPublishNow, "publish_now", 2, false, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetCmds, "get_supported_cmds", 0, false, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetVersion, "get_version", 0, false, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetActivePGIds, "get_active_pgids", 0, false, APIClass::API_CLASS_TYPE_CORE);
+
+TREX_RPC_CMD_DEFINE_EXTENDED(TrexRpcCmdGetSysInfo, "get_system_info", 0, false, APIClass::API_CLASS_TYPE_CORE,
std::string get_cpu_model();
void get_hostname(std::string &hostname);
@@ -72,25 +78,25 @@ void get_hostname(std::string &hostname);
/**
* ownership
*/
-TREX_RPC_CMD_DEFINE(TrexRpcCmdGetOwner, "get_owner", 1, false);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdAcquire, "acquire", 4, false);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdRelease, "release", 1, true);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetOwner, "get_owner", 1, false, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdAcquire, "acquire", 4, false, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdRelease, "release", 1, true, APIClass::API_CLASS_TYPE_CORE);
/**
* port commands
*/
-TREX_RPC_CMD_DEFINE(TrexRpcCmdGetPortStats, "get_port_stats", 1, false);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdGetPortStatus, "get_port_status", 1, false);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdSetPortAttr, "set_port_attr", 3, false);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetPortStats, "get_port_stats", 1, false, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetPortStatus, "get_port_status", 1, false, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdSetPortAttr, "set_port_attr", 3, false, APIClass::API_CLASS_TYPE_CORE);
/**
* stream cmds
*/
-TREX_RPC_CMD_DEFINE(TrexRpcCmdRemoveAllStreams, "remove_all_streams", 1, true);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdRemoveStream, "remove_stream", 2, true);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdRemoveAllStreams, "remove_all_streams", 1, true, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdRemoveStream, "remove_stream", 2, true, APIClass::API_CLASS_TYPE_CORE);
-TREX_RPC_CMD_DEFINE_EXTENDED(TrexRpcCmdAddStream, "add_stream", 3, true,
+TREX_RPC_CMD_DEFINE_EXTENDED(TrexRpcCmdAddStream, "add_stream", 3, true, APIClass::API_CLASS_TYPE_CORE,
/* extended part */
std::unique_ptr<TrexStream> allocate_new_stream(const Json::Value &section, uint8_t port_id, uint32_t stream_id, Json::Value &result);
@@ -107,20 +113,22 @@ void parse_vm_instr_write_mask_flow_var(const Json::Value &inst, std::unique_ptr
);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdGetStreamList, "get_stream_list", 1, false);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdGetAllStreams, "get_all_streams", 1, false);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetStreamList, "get_stream_list", 1, false, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetAllStreams, "get_all_streams", 1, false, APIClass::API_CLASS_TYPE_CORE);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdGetStream, "get_stream", 3, false);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdGetStream, "get_stream", 3, false, APIClass::API_CLASS_TYPE_CORE);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdStartTraffic, "start_traffic", 4, true);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdStopTraffic, "stop_traffic", 1, true);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdPauseTraffic, "pause_traffic", 1, true);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdResumeTraffic, "resume_traffic", 1, true);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdStartTraffic, "start_traffic", 4, true, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdStopTraffic, "stop_traffic", 1, true, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdRemoveRXFilters, "remove_rx_filters", 1, true, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdPauseTraffic, "pause_traffic", 1, true, APIClass::API_CLASS_TYPE_CORE);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdResumeTraffic, "resume_traffic", 1, true, APIClass::API_CLASS_TYPE_CORE);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdUpdateTraffic, "update_traffic", 3, true);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdUpdateTraffic, "update_traffic", 3, true, APIClass::API_CLASS_TYPE_CORE);
-TREX_RPC_CMD_DEFINE(TrexRpcCmdValidate, "validate", 2, false);
+TREX_RPC_CMD_DEFINE(TrexRpcCmdValidate, "validate", 2, false, APIClass::API_CLASS_TYPE_CORE);
#endif /* __TREX_RPC_CMD_H__ */
+
diff --git a/src/rpc-server/trex_rpc_cmd.cpp b/src/rpc-server/trex_rpc_cmd.cpp
index caf161e3..902e63c7 100644
--- a/src/rpc-server/trex_rpc_cmd.cpp
+++ b/src/rpc-server/trex_rpc_cmd.cpp
@@ -23,6 +23,32 @@ limitations under the License.
#include <trex_stateless.h>
#include <trex_stateless_port.h>
+/**
+ * method name and params
+ *
+ */
+TrexRpcCommand::TrexRpcCommand(const std::string &method_name,
+ int param_count,
+ bool needs_ownership,
+ APIClass::type_e type) : m_name(method_name),
+ m_param_count(param_count),
+ m_needs_ownership(needs_ownership) {
+
+ /* if needs ownership - another field is needed (handler) */
+ if (m_needs_ownership) {
+ m_param_count++;
+ }
+
+ /* API verification */
+ m_api_type = type;
+
+ if (type != APIClass::API_CLASS_TYPE_NO_API) {
+ m_api_handler = get_stateless_obj()->get_api_handler(type);
+ m_param_count++;
+ }
+
+}
+
trex_rpc_cmd_rc_e
TrexRpcCommand::run(const Json::Value &params, Json::Value &result) {
trex_rpc_cmd_rc_e rc;
@@ -30,12 +56,18 @@ TrexRpcCommand::run(const Json::Value &params, Json::Value &result) {
/* the internal run can throw a parser error / other error */
try {
- check_param_count(params, m_param_count, result);
+ /* verify API handler is correct (version mismatch) */
+ if ( (m_api_type != APIClass::API_CLASS_TYPE_NO_API) && !g_test_override_api ) {
+ verify_api_handler(params, result);
+ }
+ /* verify ownership */
if (m_needs_ownership && !g_test_override_ownership) {
verify_ownership(params, result);
}
+ check_param_count(params, m_param_count, result);
+
/* run the command itself*/
rc = _run(params, result);
@@ -72,6 +104,17 @@ TrexRpcCommand::verify_ownership(const Json::Value &params, Json::Value &result)
}
}
+void
+TrexRpcCommand::verify_api_handler(const Json::Value &params, Json::Value &result) {
+ std::string api_handler = parse_string(params, "api_h", result);
+
+ if (m_api_handler != api_handler) {
+ std::stringstream ss;
+ ss << "API verification failed - API handler provided mismatch for class: '" << APIClass::type_to_name(m_api_type) << "'";
+ generate_execute_err(result, ss.str());
+ }
+}
+
uint8_t
TrexRpcCommand::parse_port(const Json::Value &params, Json::Value &result) {
uint8_t port_id = parse_byte(params, "port_id", result);
@@ -281,3 +324,4 @@ TrexRpcCommand::generate_execute_err(Json::Value &result, const std::string &msg
* by default this is off
*/
bool TrexRpcCommand::g_test_override_ownership = false;
+bool TrexRpcCommand::g_test_override_api = false;
diff --git a/src/rpc-server/trex_rpc_cmd_api.h b/src/rpc-server/trex_rpc_cmd_api.h
index 7e694768..25920c6c 100644
--- a/src/rpc-server/trex_rpc_cmd_api.h
+++ b/src/rpc-server/trex_rpc_cmd_api.h
@@ -27,6 +27,8 @@ limitations under the License.
#include <json/json.h>
#include <trex_rpc_exception_api.h>
+#include "trex_api_class.h"
+
/**
* describe different types of rc for run()
*/
@@ -68,16 +70,10 @@ public:
/**
* method name and params
*/
- TrexRpcCommand(const std::string &method_name, int param_count, bool needs_ownership) :
- m_name(method_name),
- m_param_count(param_count),
- m_needs_ownership(needs_ownership) {
-
- /* if needs ownership - another field is needed (handler) */
- if (m_needs_ownership) {
- m_param_count++;
- }
- }
+ TrexRpcCommand(const std::string &method_name,
+ int param_count,
+ bool needs_ownership,
+ APIClass::type_e type);
/**
* entry point for executing RPC command
@@ -99,6 +95,10 @@ public:
g_test_override_ownership = enable;
}
+ static void test_set_override_api(bool enable) {
+ g_test_override_api = enable;
+ }
+
virtual ~TrexRpcCommand() {}
protected:
@@ -131,11 +131,18 @@ protected:
void check_param_count(const Json::Value &params, int expected, Json::Value &result);
/**
+ * verify API handler
+ *
+ */
+ void verify_api_handler(const Json::Value &params, Json::Value &result);
+
+ /**
* verify ownership
*
*/
void verify_ownership(const Json::Value &params, Json::Value &result);
+
/**
* validate port id
*
@@ -360,11 +367,13 @@ protected:
const char * json_type_to_name(const Json::Value &value);
/* RPC command name */
- std::string m_name;
- int m_param_count;
- bool m_needs_ownership;
-
- static bool g_test_override_ownership;
+ std::string m_name;
+ int m_param_count;
+ bool m_needs_ownership;
+ std::string m_api_handler;
+ APIClass::type_e m_api_type;
+ static bool g_test_override_ownership;
+ static bool g_test_override_api;
};
#endif /* __TREX_RPC_CMD_API_H__ */
diff --git a/src/rpc-server/trex_rpc_cmds_table.cpp b/src/rpc-server/trex_rpc_cmds_table.cpp
index e1bd3eee..924503f2 100644
--- a/src/rpc-server/trex_rpc_cmds_table.cpp
+++ b/src/rpc-server/trex_rpc_cmds_table.cpp
@@ -33,6 +33,7 @@ TrexRpcCommandsTable::TrexRpcCommandsTable() {
/* general */
+ register_command(new TrexRpcCmdAPISync());
register_command(new TrexRpcCmdPing());
register_command(new TrexRpcPublishNow());
register_command(new TrexRpcCmdGetCmds());
@@ -61,6 +62,8 @@ TrexRpcCommandsTable::TrexRpcCommandsTable() {
register_command(new TrexRpcCmdResumeTraffic());
register_command(new TrexRpcCmdUpdateTraffic());
+ register_command(new TrexRpcCmdRemoveRXFilters());
+
register_command(new TrexRpcCmdValidate());
}
diff --git a/src/rpc-server/trex_rpc_exception_api.h b/src/rpc-server/trex_rpc_exception_api.h
index e349b980..ebc9b411 100644
--- a/src/rpc-server/trex_rpc_exception_api.h
+++ b/src/rpc-server/trex_rpc_exception_api.h
@@ -25,17 +25,19 @@ limitations under the License.
#include <string>
#include <stdexcept>
+#include "trex_exception.h"
+
/**
* generic exception for RPC errors
*
*/
-class TrexRpcException : public std::runtime_error
-{
+class TrexRpcException : public TrexException {
+
public:
- TrexRpcException() : std::runtime_error("") {
+ TrexRpcException() : TrexException("") {
}
- TrexRpcException(const std::string &what) : std::runtime_error(what) {
+ TrexRpcException(const std::string &what) : TrexException(what) {
}
};
diff --git a/src/sim/trex_sim_stateless.cpp b/src/sim/trex_sim_stateless.cpp
index ffe377f4..fa13401d 100644
--- a/src/sim/trex_sim_stateless.cpp
+++ b/src/sim/trex_sim_stateless.cpp
@@ -117,6 +117,7 @@ SimStateless::SimStateless() {
/* override ownership checks */
TrexRpcCommand::test_set_override_ownership(true);
+ TrexRpcCommand::test_set_override_api(true);
}
diff --git a/src/stateless/cp/trex_api_class.h b/src/stateless/cp/trex_api_class.h
new file mode 100644
index 00000000..78933d23
--- /dev/null
+++ b/src/stateless/cp/trex_api_class.h
@@ -0,0 +1,110 @@
+/*
+ Itay Marom
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#ifndef __TREX_API_CLASS_H__
+#define __TREX_API_CLASS_H__
+
+#include <assert.h>
+
+#include "common/basic_utils.h"
+#include "trex_exception.h"
+
+/**
+ * API exception
+ *
+ * @author imarom (03-Apr-16)
+ */
+class TrexAPIException : public TrexException {
+public:
+ TrexAPIException(const std::string &what) : TrexException(what) {
+ }
+};
+
+/**
+ * define an API class
+ *
+ * @author imarom (03-Apr-16)
+ */
+class APIClass {
+public:
+
+ enum type_e {
+ API_CLASS_TYPE_CORE = 0,
+ API_CLASS_TYPE_MAX,
+
+ API_CLASS_TYPE_NO_API
+ };
+
+ static const char * type_to_name(type_e type) {
+ switch (type) {
+ case API_CLASS_TYPE_CORE:
+ return "core";
+ default:
+ assert(0);
+ }
+ }
+
+ APIClass() {
+ /* invalid */
+ m_type = API_CLASS_TYPE_MAX;
+ }
+
+ void init(type_e type, int major, int minor) {
+ m_type = type;
+ m_major = major;
+ m_minor = minor;
+
+ unsigned int seed = time(NULL);
+ m_handler = utl_generate_random_str(seed, 8);
+ }
+
+ std::string & verify_api(int major, int minor) {
+ std::stringstream ss;
+ ss << "API type '" << type_to_name(m_type) << "': ";
+
+ assert(m_type < API_CLASS_TYPE_MAX);
+
+ /* for now a simple major check */
+ if (major < m_major) {
+ ss << "server has a major newer API version - server: '" << m_major << "', client: '" << major << "'";
+ throw TrexAPIException(ss.str());
+ }
+
+ if (major > m_major) {
+ ss << "server has an older API version - server: '" << m_major << "', client: '" << major << "'";
+ throw TrexAPIException(ss.str());
+ }
+
+ return get_api_handler();
+ }
+
+ std::string & get_api_handler() {
+ return m_handler;
+ }
+
+private:
+ type_e m_type;
+ int m_major;
+ int m_minor;
+ std::string m_handler;
+
+};
+
+#endif /* __TREX_API_CLASS_H__ */
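trex_stateless.cpp (further down in this patch) registers exactly one such class, the core API at version 1.0. A short sketch of how a server-side caller would use it; the try/catch mirrors what TrexRpcCmdAPISync does above, and the version values are illustrative:

    #include "trex_api_class.h"   // the new header above

    void api_class_usage_sketch() {
        APIClass core_api;
        core_api.init(APIClass::API_CLASS_TYPE_CORE, 1, 0);   // server-side version

        try {
            // returns the random 8-character handler when the client's major matches
            std::string handler = core_api.verify_api(1, 0);
            (void)handler;
        } catch (const TrexAPIException &e) {
            // major mismatch in either direction - report e.what() back to the client
        }
    }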
diff --git a/src/stateless/cp/trex_exception.h b/src/stateless/cp/trex_exception.h
new file mode 100644
index 00000000..b9e20761
--- /dev/null
+++ b/src/stateless/cp/trex_exception.h
@@ -0,0 +1,41 @@
+/*
+ Itay Marom
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#ifndef __TREX_EXCEPTION_H__
+#define __TREX_EXCEPTION_H__
+
+#include <stdexcept>
+#include <string>
+
+/**
+ * generic exception for errors
+ * TODO: move this to a better place
+ */
+class TrexException : public std::runtime_error
+{
+public:
+ TrexException() : std::runtime_error("") {
+
+ }
+ TrexException(const std::string &what) : std::runtime_error(what) {
+ }
+};
+
+#endif /* __TREX_EXCEPTION_H__ */
diff --git a/src/stateless/cp/trex_stateless.cpp b/src/stateless/cp/trex_stateless.cpp
index 9df57a50..f6f81b96 100644
--- a/src/stateless/cp/trex_stateless.cpp
+++ b/src/stateless/cp/trex_stateless.cpp
@@ -53,6 +53,8 @@ TrexStateless::TrexStateless(const TrexStatelessCfg &cfg) {
m_platform_api = cfg.m_platform_api;
m_publisher = cfg.m_publisher;
+ /* API core version */
+ m_api_classes[APIClass::API_CLASS_TYPE_CORE].init(APIClass::API_CLASS_TYPE_CORE, 1, 0);
}
/**
@@ -175,3 +177,4 @@ TrexStateless::generate_publish_snapshot(std::string &snapshot) {
snapshot = writer.write(root);
}
+
diff --git a/src/stateless/cp/trex_stateless.h b/src/stateless/cp/trex_stateless.h
index 6e5e0c44..b506da61 100644
--- a/src/stateless/cp/trex_stateless.h
+++ b/src/stateless/cp/trex_stateless.h
@@ -27,27 +27,18 @@ limitations under the License.
#include <mutex>
-#include <trex_stream.h>
-#include <trex_stateless_port.h>
-#include <trex_rpc_server_api.h>
-#include <publisher/trex_publisher.h>
+#include "trex_stream.h"
+#include "trex_stateless_port.h"
+#include "trex_rpc_server_api.h"
-#include <flow_stat.h>
-#include <internal_api/trex_platform_api.h>
+#include "publisher/trex_publisher.h"
+#include "internal_api/trex_platform_api.h"
-/**
- * generic exception for errors
- * TODO: move this to a better place
- */
-class TrexException : public std::runtime_error
-{
-public:
- TrexException() : std::runtime_error("") {
+#include "flow_stat.h"
- }
- TrexException(const std::string &what) : std::runtime_error(what) {
- }
-};
+
+#include "trex_exception.h"
+#include "trex_api_class.h"
class TrexStatelessPort;
@@ -81,6 +72,7 @@ public:
} m_stats;
};
+
/**
* config object for stateless object
*
@@ -167,6 +159,14 @@ public:
return m_rpc_server;
}
+ const std::string & verify_api(APIClass::type_e type, int major, int minor) {
+ return m_api_classes[type].verify_api(major, minor);
+ }
+
+ const std::string & get_api_handler(APIClass::type_e type) {
+ return m_api_classes[type].get_api_handler();
+ }
+
CFlowStatRuleMgr m_rx_flow_stat;
protected:
@@ -187,6 +187,8 @@ protected:
TrexPublisher *m_publisher;
+ /* API */
+ APIClass m_api_classes[APIClass::API_CLASS_TYPE_MAX];
};
/**
@@ -197,6 +199,7 @@ protected:
* @return TrexStateless&
*/
TrexStateless * get_stateless_obj();
+CRxCoreStateless * get_rx_sl_core_obj();
#endif /* __TREX_STATELESS_H__ */
diff --git a/src/stateless/cp/trex_stateless_port.cpp b/src/stateless/cp/trex_stateless_port.cpp
index 90589d7a..2239f3f6 100644
--- a/src/stateless/cp/trex_stateless_port.cpp
+++ b/src/stateless/cp/trex_stateless_port.cpp
@@ -272,6 +272,22 @@ TrexStatelessPort::stop_traffic(void) {
}
/**
+ * remove all RX filters from port
+ *
+ * @author imarom (28-Mar-16)
+ */
+void
+TrexStatelessPort::remove_rx_filters(void) {
+ /* only valid when IDLE or with streams and not TXing */
+ verify_state(PORT_STATE_STREAMS);
+
+ for (auto entry : m_stream_table) {
+ get_stateless_obj()->m_rx_flow_stat.stop_stream(entry.second);
+ }
+
+}
+
+/**
* when a port stops, perform various actions
*
*/
@@ -287,9 +303,6 @@ TrexStatelessPort::common_port_stop_actions(bool async) {
get_stateless_obj()->get_publisher()->publish_event(TrexPublisher::EVENT_PORT_STOPPED, data);
}
- for (auto entry : m_stream_table) {
- get_stateless_obj()->m_rx_flow_stat.stop_stream(entry.second);
- }
}
void
@@ -768,26 +781,5 @@ TrexPortOwner::TrexPortOwner() {
m_seed = time(NULL);
}
-/**
- * generate a random connection handler
- *
- */
-std::string
-TrexPortOwner::generate_handler() {
- std::stringstream ss;
-
- static const char alphanum[] =
- "0123456789"
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- "abcdefghijklmnopqrstuvwxyz";
-
- /* generate 8 bytes of random handler */
- for (int i = 0; i < 8; ++i) {
- ss << alphanum[rand_r(&m_seed) % (sizeof(alphanum) - 1)];
- }
-
- return (ss.str());
-}
-
const std::string TrexPortOwner::g_unowned_name = "<FREE>";
const std::string TrexPortOwner::g_unowned_handler = "";
diff --git a/src/stateless/cp/trex_stateless_port.h b/src/stateless/cp/trex_stateless_port.h
index 7e1838d4..2167e735 100644
--- a/src/stateless/cp/trex_stateless_port.h
+++ b/src/stateless/cp/trex_stateless_port.h
@@ -21,6 +21,7 @@ limitations under the License.
#ifndef __TREX_STATELESS_PORT_H__
#define __TREX_STATELESS_PORT_H__
+#include "common/basic_utils.h"
#include "internal_api/trex_platform_api.h"
#include "trex_dp_port_events.h"
#include "trex_stream.h"
@@ -65,7 +66,7 @@ public:
m_owner_name = owner_name;
/* internal data */
- m_handler = generate_handler();
+ m_handler = utl_generate_random_str(m_seed, 8);
m_is_free = false;
}
@@ -83,7 +84,6 @@ public:
private:
- std::string generate_handler();
/* is this port owned by someone ? */
bool m_is_free;
@@ -178,6 +178,14 @@ public:
void stop_traffic(void);
/**
+ * remove all RX filters
+ * valid only when port is stopped
+ *
+ * @author imarom (28-Mar-16)
+ */
+ void remove_rx_filters(void);
+
+ /**
* pause traffic
* throws TrexException in case of an error
*/
diff --git a/src/stateless/cp/trex_stream.cpp b/src/stateless/cp/trex_stream.cpp
index 9c7898a8..e3f0ba7c 100644
--- a/src/stateless/cp/trex_stream.cpp
+++ b/src/stateless/cp/trex_stream.cpp
@@ -106,6 +106,15 @@ void TrexStream::Dump(FILE *fd){
}
}
+ if (m_rx_check.m_enabled) {
+ fprintf(fd, " Flow stat enabled:\n");
+ fprintf(fd, " seq check %s latency check %s packet group id %d hw_id %d\n"
+ , m_rx_check.m_seq_enabled ? "enabled":"disabled"
+ , m_rx_check.m_latency ? "enabled":"disabled", m_rx_check.m_pg_id, m_rx_check.m_hw_id
+ );
+ } else {
+ fprintf(fd, " Flow stat disabled\n");
+ }
fprintf(fd," rate :\n\n");
fprintf(fd," pps : %f\n", m_rate.get_pps());
diff --git a/src/stateless/cp/trex_streams_compiler.cpp b/src/stateless/cp/trex_streams_compiler.cpp
index 563236c2..d6971d68 100644
--- a/src/stateless/cp/trex_streams_compiler.cpp
+++ b/src/stateless/cp/trex_streams_compiler.cpp
@@ -477,8 +477,10 @@ TrexStreamsCompiler::compile_stream(TrexStream *stream,
TrexStream *fixed_rx_flow_stat_stream = stream->clone(true);
- // not checking for errors. We assume that if add_stream succeeded, start_stream will too.
- get_stateless_obj()->m_rx_flow_stat.start_stream(fixed_rx_flow_stat_stream, fixed_rx_flow_stat_stream->m_rx_check.m_hw_id);
+ get_stateless_obj()->m_rx_flow_stat.start_stream(fixed_rx_flow_stat_stream);
+ // CFlowStatRuleMgr keeps state of the stream object. We duplicated the stream here (in order not
+ // to change the packet kept in the stream). We want the state to be saved in the original stream.
+ get_stateless_obj()->m_rx_flow_stat.copy_state(fixed_rx_flow_stat_stream, stream);
/* can this stream be split to many cores ? */
if (!stream->is_splitable(dp_core_count)) {
diff --git a/src/stateless/dp/trex_stateless_dp_core.cpp b/src/stateless/dp/trex_stateless_dp_core.cpp
index ba25f61d..f125a46a 100644
--- a/src/stateless/dp/trex_stateless_dp_core.cpp
+++ b/src/stateless/dp/trex_stateless_dp_core.cpp
@@ -399,6 +399,7 @@ TrexStatelessDpCore::idle_state_loop() {
int counter = 0;
while (m_state == STATE_IDLE) {
+ m_core->m_node_gen.m_v_if->flush_dp_rx_queue();
bool had_msg = periodic_check_for_cp_messages();
if (had_msg) {
counter = 0;
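With the added call, the DP core drains its RX queue on every idle iteration instead of letting packets accumulate while no streams are active. A reduced sketch of the resulting loop shape (only the names in the diff are real; the deep-sleep back-off is elided):

/* sketch only - idle loop after the change: flush RX, then poll the CP ring */
while (m_state == STATE_IDLE) {
    m_core->m_node_gen.m_v_if->flush_dp_rx_queue();   /* drop anything queued on RX */
    if (periodic_check_for_cp_messages()) {
        counter = 0;        /* a CP message arrived - reset the sleep back-off */
        continue;
    }
    /* ... deep-sleep back-off as in the original loop ... */
}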
diff --git a/src/stateless/rx/trex_stateless_rx_core.cpp b/src/stateless/rx/trex_stateless_rx_core.cpp
index ab7c08d1..26f537f8 100644
--- a/src/stateless/rx/trex_stateless_rx_core.cpp
+++ b/src/stateless/rx/trex_stateless_rx_core.cpp
@@ -2,6 +2,7 @@
#include "bp_sim.h"
#include "flow_stat_parser.h"
#include "latency.h"
+#include "pal/linux/sanb_atomic.h"
#include "trex_stateless_messaging.h"
#include "trex_stateless_rx_core.h"
@@ -59,6 +60,8 @@ void CRxCoreStateless::idle_state_loop() {
if (had_msg) {
counter = 0;
continue;
+ } else {
+ flush_rx();
}
/* enter deep sleep only if enough time had passed */
@@ -72,8 +75,8 @@ void CRxCoreStateless::idle_state_loop() {
}
void CRxCoreStateless::start() {
- static int count = 0;
- static int i = 0;
+ int count = 0;
+ int i = 0;
bool do_try_rx_queue =CGlobalInfo::m_options.preview.get_vm_one_queue_enable() ? true : false;
while (true) {
@@ -91,7 +94,11 @@ void CRxCoreStateless::start() {
} else {
if (m_state == STATE_QUIT)
break;
+ count = 0;
+ i = 0;
+ set_working_msg_ack(false);
idle_state_loop();
+ set_working_msg_ack(true);
}
if (do_try_rx_queue) {
try_rx_queues();
@@ -101,7 +108,7 @@ void CRxCoreStateless::start() {
}
void CRxCoreStateless::handle_rx_pkt(CLatencyManagerPerPort *lp, rte_mbuf_t *m) {
- Cxl710Parser parser;
+ CFlowStatParser parser;
if (parser.parse(rte_pktmbuf_mtod(m, uint8_t *), m->pkt_len) == 0) {
uint16_t ip_id;
@@ -162,6 +169,30 @@ void CRxCoreStateless::try_rx_queues() {
}
}
+// Same as try_rx(), but without calling handle_rx_pkt().
+// Purpose: flush the RX queues while the core is in the idle state.
+void CRxCoreStateless::flush_rx() {
+ rte_mbuf_t * rx_pkts[64];
+ int i, total_pkts = 0;
+ for (i = 0; i < m_max_ports; i++) {
+ CLatencyManagerPerPort * lp = &m_ports[i];
+ rte_mbuf_t * m;
+ m_cpu_dp_u.start_work();
+ /* try to read 64 packets clean up the queue */
+ uint16_t cnt_p = lp->m_io->rx_burst(rx_pkts, 64);
+ total_pkts += cnt_p;
+ if (cnt_p) {
+ int j;
+ for (j = 0; j < cnt_p; j++) {
+ m = rx_pkts[j];
+ rte_pktmbuf_free(m);
+ }
+ /* commit only if there was work to do ! */
+ m_cpu_dp_u.commit();
+ }/* if work */
+ }// all ports
+}
+
int CRxCoreStateless::try_rx() {
rte_mbuf_t * rx_pkts[64];
int i, total_pkts = 0;
@@ -211,6 +242,12 @@ int CRxCoreStateless::get_rx_stats(uint8_t port_id, rx_per_flow_t *rx_stats, int
return 0;
}
+void CRxCoreStateless::set_working_msg_ack(bool val) {
+ sanb_smp_memory_barrier();
+ m_ack_start_work_msg = val;
+ sanb_smp_memory_barrier();
+}
+
double CRxCoreStateless::get_cpu_util() {
m_cpu_cp_u.Update();
return m_cpu_cp_u.GetVal();
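set_working_msg_ack() brackets the write with memory barriers so the control plane, running on another core, observes the flag change in order with the work it acknowledges. A hedged sketch of how the CP side could poll it (wait_for_rx_ack() is a hypothetical helper, not part of the diff; is_working() is declared in the header below):

/* sketch only - CP-side wait until the RX core has acked entering/leaving
   the working state via m_ack_start_work_msg */
static void wait_for_rx_ack(CRxCoreStateless *rx_core, bool expected) {
    while (rx_core->is_working() != expected) {
        /* busy-wait; a real implementation might yield or sleep briefly */
    }
}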
diff --git a/src/stateless/rx/trex_stateless_rx_core.h b/src/stateless/rx/trex_stateless_rx_core.h
index 5ab12f4e..b78256c2 100644
--- a/src/stateless/rx/trex_stateless_rx_core.h
+++ b/src/stateless/rx/trex_stateless_rx_core.h
@@ -54,6 +54,8 @@ class CRxCoreStateless {
void work() {m_state = STATE_WORKING;}
void idle() {m_state = STATE_IDLE;}
void quit() {m_state = STATE_QUIT;}
+ bool is_working() const {return (m_ack_start_work_msg == true);}
+ void set_working_msg_ack(bool val);
double get_cpu_util();
private:
@@ -62,6 +64,7 @@ class CRxCoreStateless {
void idle_state_loop();
void handle_rx_pkt(CLatencyManagerPerPort * lp, rte_mbuf_t * m);
void handle_rx_queue_msgs(uint8_t thread_id, CNodeRing * r);
+ void flush_rx();
int try_rx();
void try_rx_queues();
bool is_flow_stat_id(uint16_t id);
@@ -71,10 +74,13 @@ class CRxCoreStateless {
uint32_t m_max_ports;
bool m_has_streams;
CLatencyManagerPerPort m_ports[TREX_MAX_PORTS];
- state_e m_state; /* state of all ports */
+ state_e m_state;
CNodeRing *m_ring_from_cp;
CNodeRing *m_ring_to_cp;
CCpuUtlDp m_cpu_dp_u;
CCpuUtlCp m_cpu_cp_u;
+ // Used for acknowledging "work" (leave idle state) messages from the CP
+ volatile bool m_ack_start_work_msg __rte_cache_aligned;
+
};
#endif
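The new m_ack_start_work_msg is a single-writer (RX core) / single-reader (CP) flag, kept on its own cache line and fenced with sanb_smp_memory_barrier() rather than C++11 atomics. The same handshake, reduced to a standalone sketch using std::atomic for clarity (an illustration of the pattern, not the TRex code):

#include <atomic>
#include <thread>

/* sketch only - equivalent of the ack flag expressed with acquire/release atomics */
static std::atomic<bool> ack_working{false};

void rx_core_side() {                 /* writer: the RX core                 */
    ack_working.store(true, std::memory_order_release);   /* "working now"   */
}

void cp_side() {                      /* reader: the control plane           */
    while (!ack_working.load(std::memory_order_acquire)) {
        std::this_thread::yield();    /* wait until the RX core has acked    */
    }
}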