Diffstat (limited to 'scripts')
27 files changed, 1943 insertions, 214 deletions
diff --git a/scripts/automation/regression/cfg/client_cfg.yaml b/scripts/automation/regression/cfg/client_cfg.yaml new file mode 100644 index 00000000..5c3d19ef --- /dev/null +++ b/scripts/automation/regression/cfg/client_cfg.yaml @@ -0,0 +1,48 @@ +#vlan: true +vlan: false + +groups: + +- ip_start : 16.0.0.1 + ip_end : 16.0.1.255 + initiator : + next_hop: 1.1.1.1 + src_ip : 1.1.1.2 + responder : + next_hop: 1.1.2.1 + src_ip : 1.1.2.2 + + count : 1 + +- ip_start : 17.0.0.1 + ip_end : 17.0.1.255 + initiator : + next_hop: 1.1.3.1 + src_ip : 1.1.3.2 + responder : + next_hop: 1.1.4.1 + src_ip : 1.1.4.2 + + count : 1 + +- ip_start : 18.0.0.1 + ip_end : 18.0.1.255 + initiator : + next_hop: 1.1.5.1 + src_ip : 1.1.5.2 + responder : + next_hop: 1.1.6.1 + src_ip : 1.1.6.2 + + count : 1 + +- ip_start : 19.0.0.1 + ip_end : 19.0.1.255 + initiator : + next_hop: 1.1.7.1 + src_ip : 1.1.7.2 + responder : + next_hop: 1.1.8.1 + src_ip : 1.1.8.2 + + count : 1 diff --git a/scripts/automation/regression/setups/trex15/benchmark.yaml b/scripts/automation/regression/setups/trex03/benchmark.yaml index b366b3fb..b366b3fb 100644 --- a/scripts/automation/regression/setups/trex15/benchmark.yaml +++ b/scripts/automation/regression/setups/trex03/benchmark.yaml diff --git a/scripts/automation/regression/setups/trex15/config.yaml b/scripts/automation/regression/setups/trex03/config.yaml index c5fc3b22..17c4c91e 100644 --- a/scripts/automation/regression/setups/trex15/config.yaml +++ b/scripts/automation/regression/setups/trex03/config.yaml @@ -34,6 +34,6 @@ # expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test trex: - hostname : csi-trex-15 + hostname : csi-trex-03 cores : 1 modes : [loopback, virt_nics, VM] diff --git a/scripts/automation/regression/setups/trex17/benchmark.yaml b/scripts/automation/regression/setups/trex06/benchmark.yaml index 8bc9d29c..2f51a3fd 100644 --- a/scripts/automation/regression/setups/trex17/benchmark.yaml +++ b/scripts/automation/regression/setups/trex06/benchmark.yaml @@ -140,7 +140,7 @@ test_CPU_benchmark: bw_per_core : 1 - name : stl/pcap.py - kwargs : {ipg_usec: 2, loop_count: 0} + kwargs : {ipg_usec: 3, loop_count: 0} cpu_util : 1 bw_per_core : 1 diff --git a/scripts/automation/regression/setups/trex17/config.yaml b/scripts/automation/regression/setups/trex06/config.yaml index 7ad6a20a..da0eb2dd 100644 --- a/scripts/automation/regression/setups/trex17/config.yaml +++ b/scripts/automation/regression/setups/trex06/config.yaml @@ -34,6 +34,6 @@ # expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test trex: - hostname : csi-trex-17 + hostname : csi-trex-06 cores : 1 modes : [loopback, virt_nics, VM] diff --git a/scripts/automation/regression/setups/trex11/backup/benchmark.yaml b/scripts/automation/regression/setups/trex11/backup/benchmark.yaml new file mode 100644 index 00000000..b366b3fb --- /dev/null +++ b/scripts/automation/regression/setups/trex11/backup/benchmark.yaml @@ -0,0 +1,155 @@ +################################################################ +#### TRex benchmark configuration file #### +################################################################ + +### stateful ### + +test_jumbo: + multiplier : 2.8 + cores : 1 + bw_per_core : 106.652 + + +test_routing_imix: + multiplier : 0.5 + cores : 1 + bw_per_core : 11.577 + + +test_routing_imix_64: + multiplier : 28 + cores : 1 + bw_per_core : 2.030 + + +test_static_routing_imix_asymmetric: + multiplier : 0.8 + cores : 1 + bw_per_core : 13.742 + + + +### stateless 
### + +test_CPU_benchmark: + profiles: + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 64, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 9000, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 4, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + diff --git a/scripts/automation/regression/setups/trex11/backup/config.yaml b/scripts/automation/regression/setups/trex11/backup/config.yaml new file mode 100644 index 00000000..782b7542 --- /dev/null +++ b/scripts/automation/regression/setups/trex11/backup/config.yaml @@ -0,0 +1,38 @@ +################################################################ +#### TRex nightly test configuration file #### +################################################################ + + +### TRex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the TRex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. 
+# * virtual - virtual OS (accept low CPU utilization in tests) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. +# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-11 + cores : 1 + modes : ['loopback', 'VM', 'virt_nics'] diff --git a/scripts/automation/regression/setups/trex11/benchmark.yaml b/scripts/automation/regression/setups/trex11/benchmark.yaml index b366b3fb..a4969d2d 100644 --- a/scripts/automation/regression/setups/trex11/benchmark.yaml +++ b/scripts/automation/regression/setups/trex11/benchmark.yaml @@ -1,31 +1,44 @@ -################################################################ +############################################################### #### TRex benchmark configuration file #### -################################################################ +############################################################### -### stateful ### +#### common templates ### -test_jumbo: - multiplier : 2.8 - cores : 1 - bw_per_core : 106.652 +#test_jumbo: +# multiplier : 2.8 +# cores : 1 +# bw_per_core : 962.464 test_routing_imix: multiplier : 0.5 cores : 1 - bw_per_core : 11.577 + bw_per_core : 48.130 test_routing_imix_64: multiplier : 28 cores : 1 - bw_per_core : 2.030 + bw_per_core : 12.699 test_static_routing_imix_asymmetric: - multiplier : 0.8 + multiplier : 0.5 cores : 1 - bw_per_core : 13.742 + bw_per_core : 50.561 + + +test_ipv6_simple: + multiplier : 0.5 + cores : 1 + bw_per_core : 19.5 + + +test_rx_check_http: &rx_http + multiplier : 1000 + cores : 1 + rx_sample_rate : 128 + bw_per_core : 49.464 @@ -140,16 +153,21 @@ test_CPU_benchmark: bw_per_core : 1 - name : stl/pcap.py - kwargs : {ipg_usec: 4, loop_count: 0} + kwargs : {ipg_usec: 2, loop_count: 0} cpu_util : 1 bw_per_core : 1 - - name : stl/udp_rand_len_9k.py - cpu_util : 1 - bw_per_core : 1 + #- name : stl/udp_rand_len_9k.py + # cpu_util : 1 + # bw_per_core : 1 - - name : stl/hlt/hlt_udp_rand_len_9k.py - cpu_util : 1 - bw_per_core : 1 + #- name : stl/hlt/hlt_udp_rand_len_9k.py + # cpu_util : 1 + # bw_per_core : 1 + +test_all_profiles : + mult : "5%" + skip : ['udp_rand_len_9k.py','udp_inc_len_9k.py'] # due to VIC 9K defect trex-282 + diff --git a/scripts/automation/regression/setups/trex11/config.yaml b/scripts/automation/regression/setups/trex11/config.yaml index 782b7542..393c8749 100644 --- a/scripts/automation/regression/setups/trex11/config.yaml +++ b/scripts/automation/regression/setups/trex11/config.yaml @@ -4,15 +4,16 @@ ### TRex configuration: -# hostname - can be DNS name or IP for the TRex machine for ssh to the box -# password - root password for TRex machine -# is_dual - should the TRex inject with -p ? 
-# version_path - path to the TRex version and executable -# cores - how many cores should be used -# latency - rate of latency packets injected by the TRex -# modes - list of modes (tagging) of this setup (loopback, virtual etc.) -# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. -# * virtual - virtual OS (accept low CPU utilization in tests) +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the TRex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) ### Router configuration: # hostname - the router hostname as apears in ______# cli prefix @@ -35,4 +36,6 @@ trex: hostname : csi-trex-11 cores : 1 - modes : ['loopback', 'VM', 'virt_nics'] + modes : ['loopback'] + + diff --git a/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py b/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py new file mode 100644 index 00000000..852e745d --- /dev/null +++ b/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py @@ -0,0 +1,52 @@ +#!/router/bin/python +from .trex_general_test import CTRexGeneral_Test, CTRexScenario +from CPlatform import CStaticRouteConfig +from .tests_exceptions import * +#import sys +import time +from nose.tools import nottest + +# Testing client cfg ARP resolve. Actually, just need to check that TRex run finished with no errors. 
+# If resolve will fail, TRex will exit with exit code != 0 +class CTRexClientCfg_Test(CTRexGeneral_Test): + """This class defines the IMIX testcase of the TRex traffic generator""" + def __init__(self, *args, **kwargs): + # super(CTRexClientCfg_Test, self).__init__() + CTRexGeneral_Test.__init__(self, *args, **kwargs) + + def setUp(self): + if CTRexScenario.setup_name == 'kiwi02': + self.skip("Can't run currently on kiwi02") + super(CTRexClientCfg_Test, self).setUp() # launch super test class setUp process + pass + + def test_client_cfg(self): + # test initializtion + if self.is_loopback: + return + else: + self.router.configure_basic_interfaces() + self.router.config_pbr(mode = "config") + + ret = self.trex.start_trex( + c = 1, + m = 1, + d = 10, + f = 'cap2/dns.yaml', + v = 3, + client_cfg = 'automation/regression/cfg/client_cfg.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + print("\nLATEST RESULT OBJECT:") + print(trex_res) + + self.check_general_scenario_results(trex_res) + + def tearDown(self): + CTRexGeneral_Test.tearDown(self) + pass + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/stateless_tests/stl_client_test.py b/scripts/automation/regression/stateless_tests/stl_client_test.py index acf5dc61..73dac734 100644 --- a/scripts/automation/regression/stateless_tests/stl_client_test.py +++ b/scripts/automation/regression/stateless_tests/stl_client_test.py @@ -241,6 +241,7 @@ class STLClient_Test(CStlGeneral_Test): return default_mult = self.get_benchmark_param('mult',default="30%") + skip_tests = self.get_benchmark_param('skip',default=[]) try: print("\n"); @@ -248,6 +249,17 @@ class STLClient_Test(CStlGeneral_Test): for profile in self.profiles: + skip=False + if skip_tests: + for skip_test in skip_tests: + if skip_test in profile: + skip=True; + break; + if skip: + print("skipping testing profile due to config file {0}...\n".format(profile)) + continue; + + print("now testing profile {0}...\n".format(profile)) p1 = STLProfile.load(profile, port_id = self.tx_port) diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py index d28fca54..4dad712f 100644 --- a/scripts/automation/regression/stateless_tests/stl_rx_test.py +++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py @@ -61,6 +61,13 @@ class STLRX_Test(CStlGeneral_Test): 'latency_9k_max_latency': 250, }, + 'rte_enic_pmd': { + 'rate_percent': 1, + 'total_pkts': 50, + 'rate_latency': 1, + 'latency_9k_enable': False, + }, + } diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py index e9d2b8a0..5d992c6e 100755 --- a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py +++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py @@ -150,10 +150,8 @@ class CTRexClient(object): user = user or self.__default_user try: d = int(d) - if d < 30 and not trex_development: # test duration should be at least 30 seconds, unless trex_development flag is specified. 
- raise ValueError except ValueError: - raise ValueError('d parameter must be integer, specifying how long TRex run, and must be larger than 30 secs.') + raise ValueError('d parameter must be integer, specifying how long TRex run.') trex_cmd_options.update( {'f' : f, 'd' : d} ) if not trex_cmd_options.get('l'): diff --git a/scripts/automation/trex_control_plane/stl/console/trex_console.py b/scripts/automation/trex_control_plane/stl/console/trex_console.py index b23b5f1f..b33b0447 100755 --- a/scripts/automation/trex_control_plane/stl/console/trex_console.py +++ b/scripts/automation/trex_control_plane/stl/console/trex_console.py @@ -202,7 +202,7 @@ class TRexConsole(TRexGeneralCmd): func_name = f.__name__ if func_name.startswith("do_"): func_name = func_name[3:] - + if not inst.stateless_client.is_connected(): print(format_text("\n'{0}' cannot be executed on offline mode\n".format(func_name), 'bold')) return @@ -313,6 +313,7 @@ class TRexConsole(TRexGeneralCmd): def do_shell (self, line): self.do_history(line) + @verify_connected def do_push (self, line): '''Push a local PCAP file\n''' self.stateless_client.push_line(line) @@ -320,6 +321,7 @@ class TRexConsole(TRexGeneralCmd): def help_push (self): self.do_push("-h") + @verify_connected def do_portattr (self, line): '''Change/show port(s) attributes\n''' self.stateless_client.set_port_attr_line(line) @@ -328,6 +330,22 @@ class TRexConsole(TRexGeneralCmd): self.do_portattr("-h") @verify_connected + def do_set_rx_sniffer (self, line): + '''Sets a port sniffer on RX channel as PCAP recorder''' + self.stateless_client.set_rx_sniffer_line(line) + + def help_sniffer (self): + self.do_set_rx_sniffer("-h") + + @verify_connected + def do_resolve (self, line): + '''Resolve ARP for ports''' + self.stateless_client.resolve_line(line) + + def help_sniffer (self): + self.do_resolve("-h") + + @verify_connected def do_map (self, line): '''Maps ports topology\n''' ports = self.stateless_client.get_acquired_ports() @@ -416,6 +434,7 @@ class TRexConsole(TRexGeneralCmd): '''Release ports\n''' self.stateless_client.release_line(line) + @verify_connected def do_reacquire (self, line): '''reacquire all the ports under your logged user name''' self.stateless_client.reacquire_line(line) diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir_update.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir_update.py new file mode 100644 index 00000000..22cceb8f --- /dev/null +++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir_update.py @@ -0,0 +1,123 @@ +import stl_path +from trex_stl_lib.api import * + +import imp +import time +import json +from pprint import pprint +import argparse + +# IMIX test +# it maps the ports to sides +# then it load a predefind profile 'IMIX' +# and attach it to both sides and inject +# at a certain rate for some time +# finally it checks that all packets arrived +def imix_test (server): + + + # create client + c = STLClient(server = server) + passed = True + + + try: + + # connect to server + c.connect() + + # take all the ports + c.reset() + + dir_0 = [0] + dir_1 = [1] + + print "Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1) + + # load IMIX profile + profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'imix.py') + profile1 = STLProfile.load_py(profile_file, direction=0) + profile2 = STLProfile.load_py(profile_file, direction=1) + stream1 = profile1.get_streams() + stream2 = profile2.get_streams() + + # add both streams to ports + c.add_streams(stream1, ports = dir_0) 
+ c.add_streams(stream2, ports = dir_1) + + + # clear the stats before injecting + c.clear_stats() + + c.start(ports = (dir_0 + dir_1), mult = "100kpps", total = True) + + while True: + for rate in range(200,3100,10): + + # choose rate and start traffic for 10 seconds on 5 mpps + #mult = "30%" + my_mult = ("%dkpps"%rate) + print "Injecting {0} <--> {1} on total rate of '{2}' ".format(dir_0, dir_1, my_mult) + c.clear_stats() + + + c.update(ports = (dir_0 + dir_1), mult = my_mult) + + #time.sleep(1); + + # block until done + #c.wait_on_traffic(ports = (dir_0 + dir_1)) + + # read the stats after the test + stats = c.get_stats() + + # use this for debug info on all the stats + pprint(stats) + + # sum dir 0 + dir_0_opackets = sum([stats[i]["opackets"] for i in dir_0]) + dir_0_ipackets = sum([stats[i]["ipackets"] for i in dir_0]) + + # sum dir 1 + dir_1_opackets = sum([stats[i]["opackets"] for i in dir_1]) + dir_1_ipackets = sum([stats[i]["ipackets"] for i in dir_1]) + + + lost_0 = dir_0_opackets - dir_1_ipackets + lost_1 = dir_1_opackets - dir_0_ipackets + + print "\nPackets injected from {0}: {1:,}".format(dir_0, dir_0_opackets) + print "Packets injected from {0}: {1:,}".format(dir_1, dir_1_opackets) + + print "\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_1, lost_0) + print "packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_0, lost_1) + + if (lost_0 <= 0) and (lost_1 <= 0): # less or equal because we might have incoming arps etc. + passed = True + else: + passed = False + + + except STLError as e: + passed = False + print e + + finally: + c.disconnect() + + if passed: + print "\nTest has passed :-)\n" + else: + print "\nTest has failed :-(\n" + +parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic") +parser.add_argument('-s', '--server', + dest='server', + help='Remote trex address', + default='127.0.0.1', + type = str) +args = parser.parse_args() + +# run the tests +imix_test(args.server) + diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py index 6b53e67e..cc20e088 100755 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py @@ -12,7 +12,7 @@ from .trex_stl_types import * from .trex_stl_async_client import CTRexAsyncClient from .utils import parsing_opts, text_tables, common -from .utils.common import list_intersect, list_difference, is_sub_list, PassiveTimer +from .utils.common import * from .utils.text_opts import * from functools import wraps @@ -24,6 +24,8 @@ import re import random import json import traceback +import os.path + ############################ logger ############################# ############################ ############################# @@ -32,9 +34,10 @@ import traceback # logger API for the client class LoggerApi(object): # verbose levels - VERBOSE_QUIET = 0 - VERBOSE_REGULAR = 1 - VERBOSE_HIGH = 2 + VERBOSE_QUIET = 0 + VERBOSE_REGULAR_SYNC = 1 + VERBOSE_REGULAR = 2 + VERBOSE_HIGH = 3 def __init__(self): self.level = LoggerApi.VERBOSE_REGULAR @@ -62,7 +65,7 @@ class LoggerApi(object): # simple log message with verbose - def log (self, msg, level = VERBOSE_REGULAR, newline = True): + def log (self, msg, level = VERBOSE_REGULAR_SYNC, newline = True): if not self.check_verbose(level): return @@ -90,19 +93,20 @@ class LoggerApi(object): # supress object getter - def supress (self): + def 
supress (self, level = VERBOSE_QUIET): class Supress(object): - def __init__ (self, logger): + def __init__ (self, logger, level): self.logger = logger + self.level = level def __enter__ (self): self.saved_level = self.logger.get_verbose() - self.logger.set_verbose(LoggerApi.VERBOSE_QUIET) + self.logger.set_verbose(self.level) def __exit__ (self, type, value, traceback): self.logger.set_verbose(self.saved_level) - return Supress(self) + return Supress(self, level) @@ -322,22 +326,25 @@ class EventsHandler(object): # port attr changed elif (event_type == 8): + port_id = int(data['port_id']) - if data['attr'] == self.client.ports[port_id].attr: - return # false alarm - old_info = self.client.ports[port_id].get_info() - self.__async_event_port_attr_changed(port_id, data['attr']) - new_info = self.client.ports[port_id].get_info() + + diff = self.__async_event_port_attr_changed(port_id, data['attr']) + if not diff: + return + + ev = "port {0} attributes changed".format(port_id) - for key, old_val in old_info.items(): - new_val = new_info[key] - if old_val != new_val: - ev += '\n {key}: {old} -> {new}'.format( - key = key, - old = old_val.lower() if type(old_val) is str else old_val, - new = new_val.lower() if type(new_val) is str else new_val) + for key, (old_val, new_val) in diff.items(): + ev += '\n {key}: {old} -> {new}'.format( + key = key, + old = old_val.lower() if type(old_val) is str else old_val, + new = new_val.lower() if type(new_val) is str else new_val) + show_event = True - + + + # server stopped elif (event_type == 100): ev = "Server has stopped" @@ -394,7 +401,7 @@ class EventsHandler(object): def __async_event_port_attr_changed (self, port_id, attr): if port_id in self.client.ports: - self.client.ports[port_id].async_event_port_attr_changed(attr) + return self.client.ports[port_id].async_event_port_attr_changed(attr) # add event to log def __add_event_log (self, origin, ev_type, msg, show = False): @@ -652,7 +659,7 @@ class STLClient(object): return rc - + def __add_streams(self, stream_list, port_id_list = None): port_id_list = self.__ports(port_id_list) @@ -801,16 +808,75 @@ class STLClient(object): return rc + def __resolve (self, port_id_list = None, retries = 0): + port_id_list = self.__ports(port_id_list) + + rc = RC() + + for port_id in port_id_list: + rc.add(self.ports[port_id].arp_resolve(retries)) + + return rc + + def __set_port_attr (self, port_id_list = None, attr_dict = None): port_id_list = self.__ports(port_id_list) rc = RC() + for port_id, port_attr_dict in zip(port_id_list, attr_dict): + rc.add(self.ports[port_id].set_attr(**port_attr_dict)) + + return rc + + + def __set_rx_sniffer (self, port_id_list, base_filename, limit): + port_id_list = self.__ports(port_id_list) + rc = RC() + + for port_id in port_id_list: + head, tail = os.path.splitext(base_filename) + filename = "{0}-{1}{2}".format(head, port_id, tail) + rc.add(self.ports[port_id].set_rx_sniffer(filename, limit)) + + return rc + + + def __remove_rx_sniffer (self, port_id_list): + port_id_list = self.__ports(port_id_list) + rc = RC() + for port_id in port_id_list: - rc.add(self.ports[port_id].set_attr(attr_dict)) + rc.add(self.ports[port_id].remove_rx_sniffer()) return rc + def __set_rx_queue (self, port_id_list, size): + port_id_list = self.__ports(port_id_list) + rc = RC() + + for port_id in port_id_list: + rc.add(self.ports[port_id].set_rx_queue(size)) + + return rc + + def __remove_rx_queue (self, port_id_list): + port_id_list = self.__ports(port_id_list) + rc = RC() + + for port_id in 
port_id_list: + rc.add(self.ports[port_id].remove_rx_queue()) + + return rc + + def __get_rx_queue_pkts (self, port_id_list): + port_id_list = self.__ports(port_id_list) + rc = RC() + + for port_id in port_id_list: + rc.add(self.ports[port_id].get_rx_queue_pkts()) + + return rc # connect to server @@ -1010,7 +1076,7 @@ class STLClient(object): if not port_id in valid_ports: raise STLError("Port ID '{0}' is not a valid port ID - valid values: {1}".format(port_id, valid_ports)) - return port_id_list + return list_remove_dup(port_id_list) # transmit request on the RPC link @@ -1315,6 +1381,12 @@ class STLClient(object): if port_obj.is_active()] + def get_resolvable_ports (self): + return [port_id + for port_id, port_obj in self.ports.items() + if port_obj.is_acquired() and port_obj.get_dst_addr()['ipv4'] is not None] + + # get paused ports def get_paused_ports (self, owned = True): if owned: @@ -1340,6 +1412,7 @@ class STLClient(object): # get stats + @__api_check(True) def get_stats (self, ports = None, sync_now = True): """ Return dictionary containing statistics information gathered from the server. @@ -1587,7 +1660,7 @@ class STLClient(object): ports = ports if ports is not None else self.get_all_ports() ports = self._validate_port_list(ports) - return [self.ports[port_id].get_info() for port_id in ports] + return [self.ports[port_id].get_formatted_info() for port_id in ports] ############################ Commands ############################# @@ -1702,6 +1775,9 @@ class STLClient(object): self.__release(ports) raise STLError(rc) + for port_id in ports: + if not self.ports[port_id].is_resolved(): + self.logger.log(format_text('*** Warning - Port {0} destination is unresolved ***'.format(port_id), 'bold')) @__api_check(True) def release (self, ports = None): @@ -1727,30 +1803,80 @@ class STLClient(object): if not rc: raise STLError(rc) + + @__api_check(True) - def ping(self): + def ping_rpc_server(self): """ - Pings the server + Pings the RPC server :parameters: - None - + None :raises: + :exc:`STLError` """ - + self.logger.pre_cmd("Pinging the server on '{0}' port '{1}': ".format(self.connection_info['server'], self.connection_info['sync_port'])) rc = self._transmit("ping", api_class = None) - + self.logger.post_cmd(rc) if not rc: raise STLError(rc) + @__api_check(True) + def ping_ip (self, src_port, dst_ipv4, pkt_size = 64, count = 5): + """ + Pings an IP address through a port + + :parameters: + src_port - on which port_id to send the ICMP PING request + dst_ipv4 - which IP to ping + pkt_size - packet size to use + count - how many times to ping + :raises: + + :exc:`STLError` + + """ + # validate src port + validate_type('src_port', src_port, int) + if src_port not in self.get_all_ports(): + raise STLError("src port is not a valid port id") + + if not is_valid_ipv4(dst_ipv4): + raise STLError("dst_ipv4 is not a valid IPv4 address: '{0}'".format(dst_ipv4)) + + if (pkt_size < 64) or (pkt_size > 9216): + raise STLError("pkt_size should be a value between 64 and 9216: '{0}'".format(pkt_size)) + + validate_type('count', count, int) + + + + self.logger.pre_cmd("Pinging {0} from port {1} with {2} bytes of data:".format(dst_ipv4, + src_port, + pkt_size)) + + # no async messages + with self.logger.supress(level = LoggerApi.VERBOSE_REGULAR_SYNC): + self.logger.log('') + for i in range(count): + rc = self.ports[src_port].ping(ping_ipv4 = dst_ipv4, pkt_size = pkt_size) + if not rc: + raise STLError(rc) + + self.logger.log(rc.data()) + + if i != (count - 1): + time.sleep(1) + + + + 
@__api_check(True) def server_shutdown (self, force = False): """ Sends the server a request for total shutdown @@ -1835,7 +1961,7 @@ class STLClient(object): @__api_check(True) - def reset(self, ports = None): + def reset(self, ports = None, restart = False): """ Force acquire ports, stop the traffic, remove all streams and clear stats @@ -1843,7 +1969,9 @@ class STLClient(object): ports : list Ports on which to execute the command - + restart: bool + Restart the NICs (link down / up) + :raises: + :exc:`STLError` @@ -1853,12 +1981,34 @@ class STLClient(object): ports = ports if ports is not None else self.get_all_ports() ports = self._validate_port_list(ports) - # force take the port and ignore any streams on it - self.acquire(ports, force = True, sync_streams = False) - self.stop(ports, rx_delay_ms = 0) - self.remove_all_streams(ports) - self.clear_stats(ports) + + if restart: + self.logger.pre_cmd("Hard resetting ports {0}:".format(ports)) + else: + self.logger.pre_cmd("Resetting ports {0}:".format(ports)) + + + try: + with self.logger.supress(): + # force take the port and ignore any streams on it + self.acquire(ports, force = True, sync_streams = False) + self.stop(ports, rx_delay_ms = 0) + self.remove_all_streams(ports) + self.clear_stats(ports) + self.set_port_attr(ports, + promiscuous = False, + link_up = True if restart else None, + rxf = 'hw') + self.remove_rx_sniffer(ports) + self.remove_rx_queue(ports) + + except STLError as e: + self.logger.post_cmd(False) + raise e + + self.logger.post_cmd(RC_OK()) + @__api_check(True) def remove_all_streams (self, ports = None): @@ -1997,7 +2147,25 @@ class STLClient(object): raise STLError(rc) - + # common checks for start API + def __pre_start_check (self, ports, force): + + # verify link status + ports_link_down = [port_id for port_id in ports if not self.ports[port_id].is_up()] + if ports_link_down and not force: + raise STLError("Port(s) %s - link DOWN - check the connection or specify 'force'" % ports_link_down) + + # verify ports are stopped or force stop them + active_ports = [port_id for port_id in ports if self.ports[port_id].is_active()] + if active_ports and not force: + raise STLError("Port(s) {0} are active - please stop them or specify 'force'".format(active_ports)) + + # warn if ports are not resolved + unresolved_ports = [port_id for port_id in ports if not self.ports[port_id].is_resolved()] + if unresolved_ports and not force: + raise STLError("Port(s) {0} have unresolved destination addresses - please resolve them or specify 'force'".format(unresolved_ports)) + + @__api_check(True) def start (self, ports = None, @@ -2052,11 +2220,10 @@ class STLClient(object): validate_type('total', total, bool) validate_type('core_mask', core_mask, (int, list)) - # verify link status - ports_link_down = [port_id for port_id in ports if self.ports[port_id].attr.get('link',{}).get('up') == False] - if not force and ports_link_down: - raise STLError("Port(s) %s - link DOWN - check the connection or specify 'force'" % ports_link_down) - + + # some sanity checks before attempting start + self.__pre_start_check(ports, force) + ######################### # decode core mask argument decoded_mask = self.__decode_core_mask(ports, core_mask) @@ -2070,17 +2237,12 @@ class STLClient(object): raise STLArgumentError('mult', mult) - # verify ports are stopped or force stop them + # stop active ports if needed active_ports = list(set(self.get_active_ports()).intersection(ports)) - if active_ports: - if not force: - raise STLError("Port(s) {0} are active - 
please stop them or specify 'force'".format(active_ports)) - else: - rc = self.stop(active_ports) - if not rc: - raise STLError(rc) - + if active_ports and force: + self.stop(active_ports) + # start traffic self.logger.pre_cmd("Starting traffic on port(s) {0}:".format(ports)) rc = self.__start(mult_obj, duration, ports, force, decoded_mask) @@ -2117,7 +2279,7 @@ class STLClient(object): return ports = self._validate_port_list(ports) - + self.logger.pre_cmd("Stopping traffic on port(s) {0}:".format(ports)) rc = self.__stop(ports) self.logger.post_cmd(rc) @@ -2629,16 +2791,28 @@ class STLClient(object): @__api_check(True) - def set_port_attr (self, ports = None, promiscuous = None, link_up = None, led_on = None, flow_ctrl = None): + def set_port_attr (self, + ports = None, + promiscuous = None, + link_up = None, + led_on = None, + flow_ctrl = None, + rxf = None, + ipv4 = None, + dest = None, + resolve = True): """ Set port attributes :parameters: - promiscuous - True or False - link_up - True or False - led_on - True or False - flow_ctrl - 0: disable all, 1: enable tx side, 2: enable rx side, 3: full enable - + promiscuous - True or False + link_up - True or False + led_on - True or False + flow_ctrl - 0: disable all, 1: enable tx side, 2: enable rx side, 3: full enable + rxf - 'hw' for hardware rules matching packets only or 'all' all packets + ipv4 - configure IPv4 address for port(s). for multiple ports should be a list of IPv4 addresses in the same length of the ports array + dest - configure destination address for port(s) in either IPv4 or MAC format. for multiple ports should be a list in the same length of the ports array + resolve - if true, in case a destination address is configured as IPv4 try to resolve it :raises: + :exe:'STLError' @@ -2652,29 +2826,229 @@ class STLClient(object): validate_type('link_up', link_up, (bool, type(None))) validate_type('led_on', led_on, (bool, type(None))) validate_type('flow_ctrl', flow_ctrl, (int, type(None))) - - # build attributes - attr_dict = {} - if promiscuous is not None: - attr_dict['promiscuous'] = {'enabled': promiscuous} - if link_up is not None: - attr_dict['link_status'] = {'up': link_up} - if led_on is not None: - attr_dict['led_status'] = {'on': led_on} - if flow_ctrl is not None: - attr_dict['flow_ctrl_mode'] = {'mode': flow_ctrl} + validate_choice('rxf', rxf, ['hw', 'all']) + + # common attributes for all ports + cmn_attr_dict = {} + + cmn_attr_dict['promiscuous'] = promiscuous + cmn_attr_dict['link_status'] = link_up + cmn_attr_dict['led_status'] = led_on + cmn_attr_dict['flow_ctrl_mode'] = flow_ctrl + cmn_attr_dict['rx_filter_mode'] = rxf + + # each port starts with a set of the common attributes + attr_dict = [dict(cmn_attr_dict) for _ in ports] + + # default value for IPv4 / dest is none for all ports + if ipv4 is None: + ipv4 = [None] * len(ports) + if dest is None: + dest = [None] * len(ports) + + ipv4 = listify(ipv4) + if len(ipv4) != len(ports): + raise STLError("'ipv4' must be a list in the same length of ports - 'ports': {0}, 'ip': {1}".format(ports, ipv4)) + + dest = listify(dest) + if len(dest) != len(ports): + raise STLError("'dest' must be a list in the same length of ports - 'ports': {0}, 'dest': {1}".format(ports, dest)) + + # update each port attribute with ipv4 + for addr, port_attr in zip(ipv4, attr_dict): + port_attr['ipv4'] = addr + + # update each port attribute with dest + for addr, port_attr in zip(dest, attr_dict): + port_attr['dest'] = addr + - # no attributes to set - if not attr_dict: - return - 
self.logger.pre_cmd("Applying attributes on port(s) {0}:".format(ports)) rc = self.__set_port_attr(ports, attr_dict) self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + + # automatic resolve + if resolve: + # find any port with a dest configured as IPv4 + resolve_ports = [port_id for port_id, port_dest in zip(ports, dest) if is_valid_ipv4(port_dest)] + + if resolve_ports: + self.resolve(ports = resolve_ports) + + + + + @__api_check(True) + def resolve (self, ports = None, retries = 0): + """ + Resolves ports (ARP resolution) + + :parameters: + ports - for which ports to apply a unique sniffer (each port gets a unique file) + retires - how many times to retry on each port (intervals of 100 milliseconds) + :raises: + + :exe:'STLError' + + """ + # by default - resolve all the ports that are configured with IPv4 dest + ports = ports if ports is not None else self.get_resolvable_ports() + ports = self._validate_port_list(ports) + + active_ports = list_intersect(ports, self.get_active_ports()) + if active_ports: + raise STLError('Port(s) {0} are active, please stop them before resolving'.format(active_ports)) + + + self.logger.pre_cmd('Resolving destination on port(s) {0}:'.format(ports)) + with self.logger.supress(): + rc = self.__resolve(ports, retries) + + self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + # print the ARP transaction + self.logger.log(rc) + self.logger.log('') + + + + @__api_check(True) + def set_rx_sniffer (self, ports = None, base_filename = 'rx.pcap', limit = 1000): + """ + Sets a RX sniffer for port(s) written to a PCAP file + + :parameters: + ports - for which ports to apply a unique sniffer (each port gets a unique file) + base_filename - filename will be appended with '-<port_number>', e.g. rx.pcap --> rx-0.pcap, rx-1.pcap etc. 
+ limit - limit how many packets will be written + :raises: + + :exe:'STLError' + + """ + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + # check arguments + validate_type('base_filename', base_filename, basestring) + validate_type('limit', limit, (int)) + if limit <= 0: + raise STLError("'limit' must be a positive value") + + self.logger.pre_cmd("Setting RX sniffers on port(s) {0}:".format(ports)) + rc = self.__set_rx_sniffer(ports, base_filename, limit) + self.logger.post_cmd(rc) + + + if not rc: + raise STLError(rc) + + + + @__api_check(True) + def remove_rx_sniffer (self, ports = None): + """ + Removes RX sniffer from port(s) + + :raises: + + :exe:'STLError' + + """ + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + self.logger.pre_cmd("Removing RX sniffers on port(s) {0}:".format(ports)) + rc = self.__remove_rx_sniffer(ports) + self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + + @__api_check(True) + def set_rx_queue (self, ports = None, size = 1000): + """ + Sets RX queue for port(s) + The queue is cyclic and will hold last 'size' packets + + :parameters: + ports - for which ports to apply a queue + size - size of the queue + :raises: + + :exe:'STLError' + + """ + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + # check arguments + validate_type('size', size, (int)) + if size <= 0: + raise STLError("'size' must be a positive value") + + self.logger.pre_cmd("Setting RX queue on port(s) {0}:".format(ports)) + rc = self.__set_rx_queue(ports, size) + self.logger.post_cmd(rc) + + + if not rc: + raise STLError(rc) + + + + @__api_check(True) + def remove_rx_queue (self, ports = None): + """ + Removes RX queue from port(s) + + :parameters: + ports - for which ports to remove the RX queue + :raises: + + :exe:'STLError' + + """ + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + self.logger.pre_cmd("Removing RX queue on port(s) {0}:".format(ports)) + rc = self.__remove_rx_queue(ports) + self.logger.post_cmd(rc) if not rc: raise STLError(rc) + + + @__api_check(True) + def get_rx_queue_pkts (self, ports = None): + """ + Returns any packets queued on the RX side by the server + return value is a dictonary per port + + :parameters: + ports - for which ports to fetch + """ + + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + rc = self.__get_rx_queue_pkts(ports) + if not rc: + raise STLError(rc) + + # decode the data back to the user + result = {} + for port, r in zip(ports, rc.data()): + result[port] = r + + return result + + def clear_events (self): """ Clear all events @@ -2718,10 +3092,29 @@ class STLClient(object): @__console def ping_line (self, line): - '''pings the server''' - self.ping() - return RC_OK() + '''pings the server / specific IP''' + + # no parameters - so ping server + if not line: + self.ping_rpc_server() + return True + + parser = parsing_opts.gen_parser(self, + "ping", + self.ping_line.__doc__, + parsing_opts.SOURCE_PORT, + parsing_opts.PING_IPV4, + parsing_opts.PKT_SIZE, + parsing_opts.PING_COUNT) + + opts = parser.parse_args(line.split()) + if not opts: + return opts + + # IP ping + self.ping_ip(opts.source_port, opts.ping_ipv4, opts.pkt_size, opts.count) + @__console def shutdown_line (self, line): '''shutdown the server''' @@ -2849,13 +3242,14 @@ 
class STLClient(object): parser = parsing_opts.gen_parser(self, "reset", self.reset_line.__doc__, - parsing_opts.PORT_LIST_WITH_ALL) + parsing_opts.PORT_LIST_WITH_ALL, + parsing_opts.PORT_RESTART) opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True) if not opts: return opts - self.reset(ports = opts.ports) + self.reset(ports = opts.ports, restart = opts.restart) return RC_OK() @@ -2891,14 +3285,19 @@ class STLClient(object): # just for sanity - will be checked on the API as well self.__decode_core_mask(opts.ports, core_mask) + # for better use experience - check this first + try: + self.__pre_start_check(opts.ports, opts.force) + except STLError as e: + msg = e.brief() + self.logger.log(format_text(msg, 'bold')) + return RC_ERR(msg) + + + # stop ports if needed active_ports = list_intersect(self.get_active_ports(), opts.ports) - if active_ports: - if not opts.force: - msg = "Port(s) {0} are active - please stop them or add '--force'\n".format(active_ports) - self.logger.log(format_text(msg, 'bold')) - return RC_ERR(msg) - else: - self.stop(active_ports) + if active_ports and opts.force: + self.stop(active_ports) # process tunables @@ -3240,7 +3639,7 @@ class STLClient(object): '''Sets port attributes ''' parser = parsing_opts.gen_parser(self, - "port_attr", + "portattr", self.set_port_attr_line.__doc__, parsing_opts.PORT_LIST_WITH_ALL, parsing_opts.PROMISCUOUS, @@ -3248,24 +3647,27 @@ class STLClient(object): parsing_opts.LED_STATUS, parsing_opts.FLOW_CTRL, parsing_opts.SUPPORTED, + parsing_opts.RX_FILTER_MODE, + parsing_opts.IPV4, + parsing_opts.DEST ) opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True) if not opts: return opts - opts.prom = parsing_opts.ON_OFF_DICT.get(opts.prom) - opts.link = parsing_opts.UP_DOWN_DICT.get(opts.link) - opts.led = parsing_opts.ON_OFF_DICT.get(opts.led) - opts.flow_ctrl = parsing_opts.FLOW_CTRL_DICT.get(opts.flow_ctrl) + opts.prom = parsing_opts.ON_OFF_DICT.get(opts.prom) + opts.link = parsing_opts.UP_DOWN_DICT.get(opts.link) + opts.led = parsing_opts.ON_OFF_DICT.get(opts.led) + opts.flow_ctrl = parsing_opts.FLOW_CTRL_DICT.get(opts.flow_ctrl) # if no attributes - fall back to printing the status - if not filter(lambda x:x is not None, [opts.prom, opts.link, opts.led, opts.flow_ctrl, opts.supp]): + if not list(filter(lambda x:x is not None, [opts.prom, opts.link, opts.led, opts.flow_ctrl, opts.supp, opts.rx_filter_mode, opts.ipv4, opts.dest])): self.show_stats_line("--ps --port {0}".format(' '.join(str(port) for port in opts.ports))) return if opts.supp: - info = self.ports[0].get_info() # assume for now all ports are same + info = self.ports[0].get_formatted_info() # assume for now all ports are same print('') print('Supported attributes for current NICs:') print(' Promiscuous: yes') @@ -3274,15 +3676,79 @@ class STLClient(object): print(' Flow control: %s' % info['fc_supported']) print('') else: - return self.set_port_attr(opts.ports, opts.prom, opts.link, opts.led, opts.flow_ctrl) + self.set_port_attr(opts.ports, + opts.prom, + opts.link, + opts.led, + opts.flow_ctrl, + opts.rx_filter_mode, + opts.ipv4, + opts.dest) + + + + + @__console + def set_rx_sniffer_line (self, line): + '''Sets a port sniffer on RX channel in form of a PCAP file''' + + parser = parsing_opts.gen_parser(self, + "set_rx_sniffer", + self.set_rx_sniffer_line.__doc__, + parsing_opts.PORT_LIST_WITH_ALL, + parsing_opts.OUTPUT_FILENAME, + parsing_opts.LIMIT, + parsing_opts.ALL_FILES) 
+ + opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True) + if not opts: + return opts + + rxf = 'all' if opts.all else None + + if rxf: + self.set_port_attr(opts.ports, rxf = rxf) + + self.set_rx_sniffer(opts.ports, opts.output_filename, opts.limit) + return RC_OK() + @__console + def resolve_line (self, line): + '''Performs a port ARP resolution''' + + parser = parsing_opts.gen_parser(self, + "resolve", + self.resolve_line.__doc__, + parsing_opts.PORT_LIST_WITH_ALL, + parsing_opts.RETRIES) + + opts = parser.parse_args(line.split(), default_ports = self.get_resolvable_ports(), verify_acquired = True) + if not opts: + return opts + + ports = list_intersect(opts.ports, self.get_resolvable_ports()) + if not ports: + if not opts.ports: + msg = 'resolve - no ports with IPv4 destination' + else: + msg = 'pause - none of ports {0} are configured with IPv4 destination'.format(opts.ports) + + self.logger.log(msg) + return RC_ERR(msg) + + self.resolve(ports = ports, retries = opts.retries) + + return RC_OK() + + + @__console def show_profile_line (self, line): '''Shows profile information''' parser = parsing_opts.gen_parser(self, - "port", + "profile", self.show_profile_line.__doc__, parsing_opts.FILE_PATH) @@ -3378,3 +3844,6 @@ class STLClient(object): else: return "{0} {1}>".format(prefix, self.get_acquired_ports()) + + + diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py index 1461fcec..93a930e4 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py @@ -172,6 +172,10 @@ class JsonRpcClient(object): self.disconnect() return RC_ERR("*** [RPC] - Failed to send message to server") + except KeyboardInterrupt as e: + # must restore the socket to a sane state + self.reconnect() + raise e tries = 0 while True: @@ -184,6 +188,10 @@ class JsonRpcClient(object): self.disconnect() return RC_ERR("*** [RPC] - Failed to get server response from {0}".format(self.transport)) + except KeyboardInterrupt as e: + # must restore the socket to a sane state + self.reconnect() + raise e return response @@ -255,11 +263,6 @@ class JsonRpcClient(object): self.connected = True - rc = self.invoke_rpc_method('ping', api_class = None) - if not rc: - self.connected = False - return rc - return RC_OK() @@ -267,12 +270,6 @@ class JsonRpcClient(object): # connect using current values return self.connect() - if not self.connected: - return RC_ERR("Not connected to server") - - # reconnect - return self.connect(self.server, self.port) - def is_connected(self): return self.connected diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py index cec3761f..ef74a85e 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py @@ -4,12 +4,14 @@ from collections import namedtuple, OrderedDict from .trex_stl_packet_builder_scapy import STLPktBuilder from .trex_stl_streams import STLStream from .trex_stl_types import * +from .trex_stl_rx_features import ARPResolver, PingResolver from . 
import trex_stl_stats from .utils.constants import FLOW_CTRL_DICT_REVERSED import base64 import copy from datetime import datetime, timedelta +import threading StreamOnPort = namedtuple('StreamOnPort', ['compiled_stream', 'metadata']) @@ -50,7 +52,9 @@ class Port(object): def __init__ (self, port_id, user, comm_link, session_id, info): self.port_id = port_id + self.state = self.STATE_IDLE + self.handler = None self.comm_link = comm_link self.transmit = comm_link.transmit @@ -62,7 +66,7 @@ class Port(object): self.streams = {} self.profile = None self.session_id = session_id - self.attr = {} + self.status = {} self.port_stats = trex_stl_stats.CPortStats(self) @@ -72,31 +76,31 @@ class Port(object): self.owner = '' self.last_factor_type = None - + + self.__attr = {} + self.attr_lock = threading.Lock() + # decorator to verify port is up def up(func): - def func_wrapper(*args): + def func_wrapper(*args, **kwargs): port = args[0] if not port.is_up(): return port.err("{0} - port is down".format(func.__name__)) - return func(*args) + return func(*args, **kwargs) return func_wrapper # owned def owned(func): - def func_wrapper(*args): + def func_wrapper(*args, **kwargs): port = args[0] - if not port.is_up(): - return port.err("{0} - port is down".format(func.__name__)) - if not port.is_acquired(): return port.err("{0} - port is not owned".format(func.__name__)) - return func(*args) + return func(*args, **kwargs) return func_wrapper @@ -106,9 +110,6 @@ class Port(object): def func_wrapper(*args, **kwargs): port = args[0] - if not port.is_up(): - return port.err("{0} - port is down".format(func.__name__)) - if not port.is_acquired(): return port.err("{0} - port is not owned".format(func.__name__)) @@ -128,16 +129,16 @@ class Port(object): return RC_OK(data) def get_speed_bps (self): - return (self.info['speed'] * 1000 * 1000 * 1000) + return (self.get_speed_gbps() * 1000 * 1000 * 1000) - def get_formatted_speed (self): - return "{0} Gbps".format(self.info['speed']) + def get_speed_gbps (self): + return self.__attr['speed'] def is_acquired(self): return (self.handler != None) def is_up (self): - return (self.state != self.STATE_DOWN) + return self.__attr['link']['up'] def is_active(self): return (self.state == self.STATE_TX ) or (self.state == self.STATE_PAUSE) or (self.state == self.STATE_PCAP_TX) @@ -165,7 +166,6 @@ class Port(object): # take the port - @up def acquire(self, force = False, sync_streams = True): params = {"port_id": self.port_id, "user": self.user, @@ -185,7 +185,6 @@ class Port(object): # sync all the streams with the server - @up def sync_streams (self): params = {"port_id": self.port_id} @@ -201,7 +200,6 @@ class Port(object): return self.ok() # release the port - @up def release(self): params = {"port_id": self.port_id, "handler": self.handler} @@ -219,7 +217,6 @@ class Port(object): - @up def sync(self): params = {"port_id": self.port_id} @@ -250,10 +247,10 @@ class Port(object): self.next_available_id = int(rc.data()['max_stream_id']) + 1 - # attributes - self.attr = rc.data()['attr'] - if 'speed' in rc.data(): - self.info['speed'] = rc.data()['speed'] // 1000 + self.status = rc.data() + + # replace the attributes in a thread safe manner + self.set_ts_attr(rc.data()['attr']) return self.ok() @@ -424,8 +421,8 @@ class Port(object): # save this for TUI self.last_factor_type = mul['type'] - - return self.ok() + + return rc # stop traffic @@ -445,8 +442,9 @@ class Port(object): return self.err(rc.err()) self.state = self.STATE_STREAMS + self.last_factor_type = None - + # timestamp 
for last tx self.tx_stopped_ts = datetime.now() @@ -487,6 +485,101 @@ class Port(object): return self.ok() + + @owned + def set_rx_sniffer (self, pcap_filename, limit): + + params = {"handler": self.handler, + "port_id": self.port_id, + "type": "capture", + "enabled": True, + "pcap_filename": pcap_filename, + "limit": limit} + + rc = self.transmit("set_rx_feature", params) + if rc.bad(): + return self.err(rc.err()) + + return self.ok() + + + @owned + def remove_rx_sniffer (self): + params = {"handler": self.handler, + "port_id": self.port_id, + "type": "capture", + "enabled": False} + + rc = self.transmit("set_rx_feature", params) + if rc.bad(): + return self.err(rc.err()) + + return self.ok() + + + @owned + def set_arp_resolution (self, ipv4, mac): + + params = {"handler": self.handler, + "port_id": self.port_id, + "ipv4": ipv4, + "mac": mac} + + rc = self.transmit("set_arp_resolution", params) + if rc.bad(): + return self.err(rc.err()) + + return self.ok() + + + + + @owned + def set_rx_queue (self, size): + + params = {"handler": self.handler, + "port_id": self.port_id, + "type": "queue", + "enabled": True, + "size": size} + + rc = self.transmit("set_rx_feature", params) + if rc.bad(): + return self.err(rc.err()) + + return self.ok() + + @owned + def remove_rx_queue (self): + params = {"handler": self.handler, + "port_id": self.port_id, + "type": "queue", + "enabled": False} + + rc = self.transmit("set_rx_feature", params) + if rc.bad(): + return self.err(rc.err()) + + return self.ok() + + @owned + def get_rx_queue_pkts (self): + params = {"handler": self.handler, + "port_id": self.port_id} + + rc = self.transmit("get_rx_queue_pkts", params) + if rc.bad(): + return self.err(rc.err()) + + pkts = rc.data()['pkts'] + + # decode the packets from base64 to binary + for i in range(len(pkts)): + pkts[i]['binary'] = base64.b64decode(pkts[i]['binary']) + + return RC_OK(pkts) + + @owned def pause (self): @@ -568,21 +661,44 @@ class Port(object): @owned - def set_attr (self, attr_dict): + def set_attr (self, **kwargs): + + json_attr = {} + + if kwargs.get('promiscuous') is not None: + json_attr['promiscuous'] = {'enabled': kwargs.get('promiscuous')} + + if kwargs.get('link_status') is not None: + json_attr['link_status'] = {'up': kwargs.get('link_status')} + + if kwargs.get('led_status') is not None: + json_attr['led_status'] = {'on': kwargs.get('led_status')} + + if kwargs.get('flow_ctrl_mode') is not None: + json_attr['flow_ctrl_mode'] = {'on': kwargs.get('flow_ctrl_mode')} + + if kwargs.get('rx_filter_mode') is not None: + json_attr['rx_filter_mode'] = {'mode': kwargs.get('rx_filter_mode')} + + if kwargs.get('ipv4') is not None: + json_attr['ipv4'] = {'addr': kwargs.get('ipv4')} + + if kwargs.get('dest') is not None: + json_attr['dest'] = {'addr': kwargs.get('dest')} + params = {"handler": self.handler, "port_id": self.port_id, - "attr": attr_dict} + "attr": json_attr} rc = self.transmit("set_port_attr", params) if rc.bad(): return self.err(rc.err()) + # update the dictionary from the server explicitly + return self.sync() - #self.attr.update(attr_dict) - - return self.ok() - + @writeable def push_remote (self, pcap_filename, ipg_usec, speedup, count, duration, is_dual, slave_handler): @@ -607,7 +723,16 @@ class Port(object): def get_profile (self): return self.profile - + # invalidates the current ARP + def invalidate_arp (self): + dest = self.__attr['dest'] + + if dest['type'] != 'mac': + return self.set_attr(dest = dest['ipv4']) + else: + return self.ok() + + def print_profile (self, mult, 
duration): if not self.get_profile(): return @@ -648,24 +773,32 @@ class Port(object): format_time(exp_time_factor_sec))) print("\n") - # generate port info - def get_info (self): + # generate formatted (console friendly) port info + def get_formatted_info (self, sync = True): + + # sync the status + if sync: + self.sync() + + # get a copy of the current attribute set (safe against manipulation) + attr = self.get_ts_attr() + info = dict(self.info) info['status'] = self.get_port_state_name() - if 'link' in self.attr: - info['link'] = 'UP' if self.attr['link']['up'] else 'DOWN' + if 'link' in attr: + info['link'] = 'UP' if attr['link']['up'] else 'DOWN' else: info['link'] = 'N/A' - if 'fc' in self.attr: - info['fc'] = FLOW_CTRL_DICT_REVERSED.get(self.attr['fc']['mode'], 'N/A') + if 'fc' in attr: + info['fc'] = FLOW_CTRL_DICT_REVERSED.get(attr['fc']['mode'], 'N/A') else: info['fc'] = 'N/A' - if 'promiscuous' in self.attr: - info['prom'] = "on" if self.attr['promiscuous']['enabled'] else "off" + if 'promiscuous' in attr: + info['prom'] = "on" if attr['promiscuous']['enabled'] else "off" else: info['prom'] = "N/A" @@ -692,34 +825,122 @@ class Port(object): else: info['is_virtual'] = 'N/A' + # speed + info['speed'] = self.get_speed_gbps() + + # RX filter mode + info['rx_filter_mode'] = 'hardware match' if attr['rx_filter_mode'] == 'hw' else 'fetch all' + + # src MAC and IPv4 + info['src_mac'] = attr['src_mac'] + info['src_ipv4'] = attr['src_ipv4'] + + if info['src_ipv4'] is None: + info['src_ipv4'] = 'Not Configured' + + # dest + dest = attr['dest'] + if dest['type'] == 'mac': + info['dest'] = dest['mac'] + info['arp'] = '-' + + elif dest['type'] == 'ipv4': + info['dest'] = dest['ipv4'] + info['arp'] = dest['arp'] + + elif dest['type'] == 'ipv4_u': + info['dest'] = dest['ipv4'] + info['arp'] = 'unresolved' + + + # RX info + rx_info = self.status['rx_info'] + + # RX sniffer + sniffer = rx_info['sniffer'] + info['rx_sniffer'] = '{0}\n[{1} / {2}]'.format(sniffer['pcap_filename'], sniffer['count'], sniffer['limit']) if sniffer['is_active'] else 'off' + + + # RX queue + queue = rx_info['queue'] + info['rx_queue'] = '[{0} / {1}]'.format(queue['count'], queue['size']) if queue['is_active'] else 'off' + return info def get_port_state_name(self): return self.STATES_MAP.get(self.state, "Unknown") + def get_src_addr (self): + src_mac = self.__attr['src_mac'] + src_ipv4 = self.__attr['src_ipv4'] + + return {'mac': src_mac, 'ipv4': src_ipv4} + + + def get_dst_addr (self): + dest = self.__attr['dest'] + + if dest['type'] == 'mac': + return {'ipv4': None, 'mac': dest['mac']} + + elif dest['type'] == 'ipv4': + return {'ipv4': dest['ipv4'], 'mac': dest['arp']} + + elif dest['type'] == 'ipv4_u': + return {'ipv4': dest['ipv4'], 'mac': None} + + else: + assert(0) + + + # port is considered resolved if it's dest is either MAC or resolved IPv4 + def is_resolved (self): + return (self.get_dst_addr()['mac'] is not None) + + # return True if the port is valid for resolve (has an IPv4 address as dest) + def is_resolvable (self): + return (self.get_dst_addr()['ipv4'] is not None) + + @writeable + def arp_resolve (self, retries): + return ARPResolver(self).resolve(retries) + + @writeable + def ping (self, ping_ipv4, pkt_size): + return PingResolver(self, ping_ipv4, pkt_size).resolve() + + ################# stats handler ###################### def generate_port_stats(self): return self.port_stats.generate_stats() def generate_port_status(self): - info = self.get_info() + info = self.get_formatted_info() - return {"driver": 
info['driver'], - "description": info.get('description', 'N/A')[:18], - "HW src mac": info['hw_macaddr'], - "SW src mac": info['src_macaddr'], - "SW dst mac": info['dst_macaddr'], - "PCI Address": info['pci_addr'], - "NUMA Node": info['numa'], + return {"driver": info['driver'], + "description": info.get('description', 'N/A')[:18], + "src MAC": info['src_mac'], + "src IPv4": info['src_ipv4'], + "Destination": info['dest'], + "ARP Resolution": info['arp'], + "PCI Address": info['pci_addr'], + "NUMA Node": info['numa'], "--": "", "---": "", - "link speed": "{speed} Gb/s".format(speed=info['speed']), + "----": "", + "-----": "", + "link speed": info['speed'], "port status": info['status'], "link status": info['link'], "promiscuous" : info['prom'], "flow ctrl" : info['fc'], + + "RX Filter Mode": info['rx_filter_mode'], + "RX Queueing": info['rx_queue'], + "RX sniffer": info['rx_sniffer'], + } def clear_stats(self): @@ -756,17 +977,54 @@ class Port(object): return {"streams" : OrderedDict(sorted(data.items())) } - + ######## attributes are a complex type (dict) that might be manipulated through the async thread ############# + + # get in a thread safe manner a duplication of attributes + def get_ts_attr (self): + with self.attr_lock: + return dict(self.__attr) + + # set in a thread safe manner a new dict of attributes + def set_ts_attr (self, new_attr): + with self.attr_lock: + self.__attr = new_attr + + ################# events handler ###################### def async_event_port_job_done (self): # until thread is locked - order is important self.tx_stopped_ts = datetime.now() self.state = self.STATE_STREAMS + self.last_factor_type = None - def async_event_port_attr_changed (self, attr): - self.info['speed'] = attr['speed'] // 1000 - self.attr = attr + def async_event_port_attr_changed (self, new_attr): + + # get a thread safe duplicate + cur_attr = self.get_ts_attr() + + # check if anything changed + if new_attr == cur_attr: + return None + + # generate before + before = self.get_formatted_info(sync = False) + + # update + self.set_ts_attr(new_attr) + + # generate after + after = self.get_formatted_info(sync = False) + + # return diff + diff = {} + for key, new_value in after.items(): + old_value = before.get(key, 'N/A') + if new_value != old_value: + diff[key] = (old_value, new_value) + + return diff + # rest of the events are used for TUI / read only sessions def async_event_port_stopped (self): @@ -792,3 +1050,4 @@ class Port(object): def async_event_released (self): self.owner = '' + diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_rx_features.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_rx_features.py new file mode 100644 index 00000000..3754e608 --- /dev/null +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_rx_features.py @@ -0,0 +1,255 @@ + +from .trex_stl_streams import STLStream, STLTXSingleBurst +from .trex_stl_packet_builder_scapy import STLPktBuilder + +from scapy.layers.l2 import Ether, ARP +from scapy.layers.inet import IP, ICMP + +import time + +# a generic abstract class for resolving using the server +class Resolver(object): + def __init__ (self, port, queue_size = 100): + self.port = port + + # code to execute before sending any request - return RC object + def pre_send (self): + raise NotImplementedError() + + # return a list of streams for request + def generate_request (self): + raise NotImplementedError() + + # return None for more packets otherwise RC object + def on_pkt_rx (self, pkt): + raise 
NotImplementedError() + + # return value in case of timeout + def on_timeout_err (self, retries): + raise NotImplementedError() + + ##################### API ###################### + def resolve (self, retries = 0): + + # first cleanup + rc = self.port.remove_all_streams() + if not rc: + return rc + + # call the specific class implementation + rc = self.pre_send() + if not rc: + return rc + + # start the iteration + try: + + # add the stream(s) + self.port.add_streams(self.generate_request()) + + rc = self.port.set_attr(rx_filter_mode = 'all') + if not rc: + return rc + + rc = self.port.set_rx_queue(size = 100) + if not rc: + return rc + + return self.resolve_wrapper(retries) + + finally: + # best effort restore + self.port.set_attr(rx_filter_mode = 'hw') + self.port.remove_rx_queue() + self.port.remove_all_streams() + + + # main resolve function + def resolve_wrapper (self, retries): + + # retry for 'retries' + index = 0 + while True: + rc = self.resolve_iteration() + if rc is not None: + return rc + + if index >= retries: + return self.on_timeout_err(retries) + + index += 1 + time.sleep(0.1) + + + + def resolve_iteration (self): + + mult = {'op': 'abs', 'type' : 'percentage', 'value': 100} + rc = self.port.start(mul = mult, force = False, duration = -1, mask = 0xffffffff) + if not rc: + return rc + + # save the start timestamp + self.start_ts = rc.data()['ts'] + + # block until traffic finishes + while self.port.is_active(): + time.sleep(0.01) + + return self.wait_for_rx_response() + + + def wait_for_rx_response (self): + + # we try to fetch response for 5 times + polling = 5 + + while polling > 0: + + # fetch the queue + rx_pkts = self.port.get_rx_queue_pkts() + + # might be an error + if not rx_pkts: + return rx_pkts + + # for each packet - examine it + for pkt in rx_pkts.data(): + rc = self.on_pkt_rx(pkt) + if rc is not None: + return rc + + if polling == 0: + return None + + polling -= 1 + time.sleep(0.1) + + + + + +class ARPResolver(Resolver): + def __init__ (self, port): + super(ARPResolver, self).__init__(port) + + # before resolve + def pre_send (self): + self.dst = self.port.get_dst_addr() + self.src = self.port.get_src_addr() + + if self.dst['ipv4'] is None: + return self.port.err("Port has a non-IPv4 destination: '{0}'".format(self.dst['mac'])) + + if self.src['ipv4'] is None: + return self.port.err('Port must have an IPv4 source address configured') + + # invalidate the current ARP resolution (if exists) + return self.port.invalidate_arp() + + + # return a list of streams for request + def generate_request (self): + + base_pkt = Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(psrc = self.src['ipv4'], pdst = self.dst['ipv4'], hwsrc = self.src['mac']) + s1 = STLStream( packet = STLPktBuilder(pkt = base_pkt), mode = STLTXSingleBurst(total_pkts = 1) ) + + return [s1] + + + # return None in case more packets are needed else the status rc + def on_pkt_rx (self, pkt): + scapy_pkt = Ether(pkt['binary']) + if not 'ARP' in scapy_pkt: + return None + + arp = scapy_pkt['ARP'] + + # check this is the right ARP (ARP reply with the address) + if (arp.op != 2) or (arp.psrc != self.dst['ipv4']): + return None + + + rc = self.port.set_arp_resolution(arp.psrc, arp.hwsrc) + if not rc: + return rc + + return self.port.ok('Port {0} - Received ARP reply from: {1}, hw: {2}'.format(self.port.port_id, arp.psrc, arp.hwsrc)) + + + def on_timeout_err (self, retries): + return self.port.err('failed to receive ARP response ({0} retries)'.format(retries)) + + + + + #################### ping resolver
#################### + +class PingResolver(Resolver): + def __init__ (self, port, ping_ip, pkt_size): + super(PingResolver, self).__init__(port) + self.ping_ip = ping_ip + self.pkt_size = pkt_size + + def pre_send (self): + + self.src = self.port.get_src_addr() + self.dst = self.port.get_dst_addr() + + if self.src['ipv4'] is None: + return self.port.err('Ping - port does not have an IPv4 address configured') + + if self.dst['mac'] is None: + return self.port.err('Ping - port has an unresolved destination, cannot determine next hop MAC address') + + if self.ping_ip == self.src['ipv4']: + return self.port.err('Ping - cannot ping own IP') + + return self.port.ok() + + + # return a list of streams for request + def generate_request (self): + + src = self.port.get_src_addr() + dst = self.port.get_dst_addr() + + base_pkt = Ether(dst = dst['mac'])/IP(src = src['ipv4'], dst = self.ping_ip)/ICMP(type = 8) + pad = max(0, self.pkt_size - len(base_pkt)) + + base_pkt = base_pkt / (pad * 'x') + + #base_pkt.show2() + s1 = STLStream( packet = STLPktBuilder(pkt = base_pkt), mode = STLTXSingleBurst(total_pkts = 1) ) + + return [s1] + + # return None for more packets otherwise RC object + def on_pkt_rx (self, pkt): + scapy_pkt = Ether(pkt['binary']) + if not 'ICMP' in scapy_pkt: + return None + + #scapy_pkt.show2() + ip = scapy_pkt['IP'] + + icmp = scapy_pkt['ICMP'] + + dt = pkt['ts'] - self.start_ts + + if icmp.type == 0: + # echo reply + return self.port.ok('Reply from {0}: bytes={1}, time={2:.2f}ms, TTL={3}'.format(ip.src, len(pkt['binary']), dt * 1000, ip.ttl)) + + # unreachable + elif icmp.type == 3: + return self.port.ok('Reply from {0}: Destination host unreachable'.format(ip.src)) + else: + scapy_pkt.show2() + return self.port.err('unknown ICMP reply') + + + + # return value in case of timeout + def on_timeout_err (self, retries): + return self.port.ok('Request timed out.') diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py index 9f601484..6a59126f 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py @@ -670,12 +670,18 @@ class CTRexInfoGenerator(object): ("promiscuous", []), ("flow ctrl", []), ("--", []), - ("HW src mac", []), - ("SW src mac", []), - ("SW dst mac", []), + ("src IPv4", []), + ("src MAC", []), ("---", []), + ("Destination", []), + ("ARP Resolution", []), + ("----", []), ("PCI Address", []), ("NUMA Node", []), + ("-----", []), + ("RX Filter Mode", []), + ("RX Queueing", []), + ("RX sniffer", []), ] ) @@ -1103,13 +1109,7 @@ class CPortStats(CTRexStats): port_state = format_text(port_state, 'bold') if self._port_obj: - if 'link' in self._port_obj.attr: - if self._port_obj.attr.get('link', {}).get('up') == False: - link_state = format_text('DOWN', 'red', 'bold') - else: - link_state = 'UP' - else: - link_state = 'N/A' + link_state = 'UP' if self._port_obj.is_up() else format_text('DOWN', 'red', 'bold') else: link_state = '' @@ -1130,7 +1130,7 @@ class CPortStats(CTRexStats): return {"owner": owner, "state": "{0}".format(port_state), 'link': link_state, - "speed": self._port_obj.get_formatted_speed() if self._port_obj else '', + "speed": "%g Gb/s" % self._port_obj.get_speed_gbps() if self._port_obj else '', "CPU util.": "{0} {1}%".format(self.get_trend_gui("m_cpu_util", use_raw = True), format_threshold(round_float(self.get("m_cpu_util")), [85, 100], [0, 85])) if
self._port_obj else '' , "--": " ", diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py index aa6c4218..81015ddc 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py @@ -135,6 +135,12 @@ def validate_type(arg_name, arg, valid_types): else: raise STLError('validate_type: valid_types should be type or list or tuple of types') + +def validate_choice (arg_name, arg, choices): + if arg is not None and not arg in choices: + raise STLError("validate_choice: argument '{0}' can only be one of '{1}'".format(arg_name, choices)) + + # throws STLError if not exactly one argument is present def verify_exclusive_arg (args_list): if not (len(list(filter(lambda x: x is not None, args_list))) == 1): diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py index 72ee8972..cbbacb27 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py @@ -3,6 +3,8 @@ import sys import string import random import time +import socket +import re try: import pwd @@ -86,3 +88,23 @@ class PassiveTimer(object): return (time.time() > self.expr_sec) +def is_valid_ipv4 (addr): + try: + socket.inet_pton(socket.AF_INET, addr) + return True + except (socket.error, TypeError): + return False + +def is_valid_mac (mac): + return bool(re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", mac.lower())) + +def list_remove_dup (l): + tmp = list() + + for x in l: + if not x in tmp: + tmp.append(x) + + return tmp + + diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py index 34cafd79..7ae22e89 100755 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py @@ -1,6 +1,6 @@ import argparse from collections import namedtuple, OrderedDict -from .common import list_intersect, list_difference +from .common import list_intersect, list_difference, is_valid_ipv4, is_valid_mac, list_remove_dup from .text_opts import format_text from ..trex_stl_types import * from .constants import ON_OFF_DICT, UP_DOWN_DICT, FLOW_CTRL_DICT @@ -45,6 +45,21 @@ FLOW_CTRL = 28 SUPPORTED = 29 FILE_PATH_NO_CHECK = 30 +OUTPUT_FILENAME = 31 +ALL_FILES = 32 +LIMIT = 33 +PORT_RESTART = 34 + +IPV4 = 35 +DEST = 36 +RETRIES = 37 + +RX_FILTER_MODE = 38 +SOURCE_PORT = 39 +PING_IPV4 = 40 +PING_COUNT = 41 +PKT_SIZE = 42 + GLOBAL_STATS = 50 PORT_STATS = 51 PORT_STATUS = 52 @@ -218,8 +233,30 @@ def is_valid_file(filename): return filename +def check_ipv4_addr (ipv4_str): + if not is_valid_ipv4(ipv4_str): + raise argparse.ArgumentTypeError("invalid IPv4 address: '{0}'".format(ipv4_str)) + + return ipv4_str +def check_pkt_size (pkt_size): + try: + pkt_size = int(pkt_size) + except ValueError: + raise argparse.ArgumentTypeError("invalid packet size type: '{0}'".format(pkt_size)) + + if (pkt_size < 64) or (pkt_size > 9216): + raise argparse.ArgumentTypeError("invalid packet size: '{0}' - valid range is 64 to 9216".format(pkt_size)) + + return pkt_size + +def check_dest_addr (addr): + if not (is_valid_ipv4(addr) or is_valid_mac(addr)): + raise 
argparse.ArgumentTypeError("not a valid IPv4 or MAC address: '{0}'".format(addr)) + + return addr + def decode_tunables (tunable_str): tunables = {} @@ -304,6 +341,62 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'], 'dest': 'flow_ctrl', 'choices': FLOW_CTRL_DICT}), + RX_FILTER_MODE: ArgumentPack(['--rxf'], + {'help': 'Set RX filtering mode', + 'dest': 'rx_filter_mode', + 'choices': ['hw', 'all']}), + + + IPV4: ArgumentPack(['--ipv4'], + {'help': 'IPv4 address(es) for the port(s)', + 'dest': 'ipv4', + 'nargs': '+', + 'default': None, + 'type': check_ipv4_addr}), + + DEST: ArgumentPack(['--dest'], + {'help': 'Destination address(es) for the port(s) in either IPv4 or MAC format', + 'dest': 'dest', + 'nargs': '+', + 'default': None, + 'type': check_dest_addr}), + + RETRIES: ArgumentPack(['-r', '--retries'], + {'help': 'retry count [default is zero]', + 'dest': 'retries', + 'default': 0, + 'type': int}), + + + OUTPUT_FILENAME: ArgumentPack(['-o', '--output'], + {'help': 'Output PCAP filename', + 'dest': 'output_filename', + 'default': None, + 'required': True, + 'type': str}), + + + PORT_RESTART: ArgumentPack(['-r', '--restart'], + {'help': 'hard restart port(s)', + 'dest': 'restart', + 'default': False, + 'action': 'store_true'}), + + + ALL_FILES: ArgumentPack(['--all'], + {'help': 'change RX port filter to fetch all packets', + 'dest': 'all', + 'default': False, + 'action': "store_true"}), + + + LIMIT: ArgumentPack(['-l', '--limit'], + {'help': 'Limit the packet count to be written to the file', + 'dest': 'limit', + 'default': 1000, + 'type': int}), + + SUPPORTED: ArgumentPack(['--supp'], {'help': 'Show which attributes are supported by current NICs', 'default': None, @@ -326,6 +419,32 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'], 'help': "A list of ports on which to apply the command", 'default': []}), + + SOURCE_PORT: ArgumentPack(['--port', '-p'], + {'dest':'source_port', + 'type': int, + 'help': 'source port for the action', + 'required': True}), + + PING_IPV4: ArgumentPack(['-d'], + {'help': 'IPv4 address to ping', + 'dest': 'ping_ipv4', + 'required': True, + 'type': check_ipv4_addr}), + + PING_COUNT: ArgumentPack(['-n', '--count'], + {'help': 'How many times to ping [default is 5]', + 'dest': 'count', + 'default': 5, + 'type': int}), + + PKT_SIZE: ArgumentPack(['-s'], + {'dest':'pkt_size', + 'help': 'packet size to use', + 'default': 64, + 'type': check_pkt_size}), + + ALL_PORTS: ArgumentPack(['-a'], {"action": "store_true", "dest": "all_ports", @@ -461,6 +580,7 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'], ALL_PORTS], {'required': False}), + STREAM_FROM_PATH_OR_FILE: ArgumentGroup(MUTEX, [FILE_PATH, FILE_FROM_DB], {'required': True}), @@ -531,6 +651,8 @@ class CCmdArgParser(argparse.ArgumentParser): else: opts.ports = default_ports + opts.ports = list_remove_dup(opts.ports) + # so maybe we have ports configured invalid_ports = list_difference(opts.ports, self.stateless_client.get_all_ports()) if invalid_ports: diff --git a/scripts/t-rex-64 b/scripts/t-rex-64 index 5515ba03..fc8318a5 100755 --- a/scripts/t-rex-64 +++ b/scripts/t-rex-64 @@ -26,7 +26,7 @@ cd $(dirname $0) export LD_LIBRARY_PATH=$PWD #Add dummy lib in case we don't find it, e.g.
there is no OFED installed -if ldd _t-rex-64 | grep "libibverbs.so" | grep -q "not found"; then +if ldd _$(basename $0) | grep "libibverbs.so" | grep -q "not found"; then export LD_LIBRARY_PATH=$PWD:$PWD/dumy_libs fi diff --git a/scripts/t-rex-64-valgrind b/scripts/t-rex-64-valgrind new file mode 100755 index 00000000..d11491ba --- /dev/null +++ b/scripts/t-rex-64-valgrind @@ -0,0 +1,69 @@ +#! /bin/bash +if [ "$(id -u)" != 0 ]; then + echo 'Error: Please run as root (sudo etc.)' + exit -1 +fi + +INPUT_ARGS=${@//[]/-} # replace bizarre minuses with normal one + +./trex-cfg $INPUT_ARGS +RESULT=$? +if [ $RESULT -ne 0 ]; then + exit $RESULT +fi + +pci_desc_re='^(\S+) - (.+)$' +source find_python.sh +while read line +do + if [[ "$line" =~ $pci_desc_re ]]; then + pci_name="pci$(echo ${BASH_REMATCH[1]} | tr ':' '_' | tr '.' '_')" # make alphanumeric name + export $pci_name="${BASH_REMATCH[2]}" + fi +done <<< "$($PYTHON dpdk_setup_ports.py --dump-pci-description)" + +cd $(dirname $0) +export LD_LIBRARY_PATH=$PWD + +#Add dummy lib in case we don't find it, e.g. there is no OFED installed +if ldd ./_t-rex-64 | grep "libibverbs.so" | grep -q "not found"; then +export LD_LIBRARY_PATH=$PWD:$PWD/dumy_libs +fi + +export VALGRIND_LIB=/auto/proj-pcube-b/apps/PL-b/tools/valgrind-dpdk/lib/valgrind +export VALGRIND_BIN="/auto/proj-pcube-b/apps/PL-b/tools/valgrind-dpdk/bin/valgrind --leak-check=full --error-exitcode=1 --suppressions=valgrind.sup" +export GLIBCXX_FORCE_NEW=1 + +if [ -t 0 ] && [ -t 1 ]; then + export is_tty=true + saveterm="$(stty -g)" +else + export is_tty=false +fi + +# if we have a new core run optimized trex +if grep -q avx /proc/cpuinfo ; then + $VALGRIND_BIN ./_t-rex-64 $INPUT_ARGS + RESULT=$? + if [ $RESULT -eq 132 ]; then + echo " WARNING this program is optimized for the new Intel processors. " + echo " try the ./t-rex-64-o application that should work for any Intel processor but might be slower. " + echo " try to run t-rex-64-o .. " + + $VALGRIND_BIN ./_t-rex-64 $INPUT_ARGS + RESULT=$? + fi +else + $VALGRIND_BIN ./_t-rex-64 $INPUT_ARGS + RESULT=$? 
+fi + +if $is_tty; then + stty $saveterm +fi + +if [ $RESULT -ne 0 ]; then + exit $RESULT +fi + + diff --git a/scripts/trex_show_threads.py b/scripts/trex_show_threads.py index fabe6d68..1824d073 100755 --- a/scripts/trex_show_threads.py +++ b/scripts/trex_show_threads.py @@ -58,8 +58,8 @@ def isnum (x): def find_trex_pid (): procs = [x for x in os.listdir('/proc/') if isnum(x)] for proc in procs: - cmd = open('/proc/{0}/{1}'.format(proc, 'cmdline')).readline() - if '_t-rex' in cmd: + cmd = open('/proc/{0}/{1}'.format(proc, 'comm')).readline() + if cmd.startswith('_t-rex-64'): return proc return None diff --git a/scripts/valgrind.sup b/scripts/valgrind.sup new file mode 100644 index 00000000..b6bcc883 --- /dev/null +++ b/scripts/valgrind.sup @@ -0,0 +1,57 @@ +{ + DL issue + Memcheck:Cond + fun:index + fun:expand_dynamic_string_token + fun:fillin_rpath + fun:_dl_init_paths + fun:dl_main + fun:_dl_sysdep_start + fun:_dl_start_final + fun:_dl_start + obj:/lib/x86_64-linux-gnu/ld-2.19.so + obj:* + obj:* + obj:* + obj:* +} + +{ + DPDK threads + Memcheck:Leak + match-leak-kinds: possible + fun:calloc + fun:allocate_dtv + fun:_dl_allocate_tls + fun:allocate_stack + fun:pthread_create@@GLIBC_2.2.5 + fun:rte_eal_init + fun:_Z9main_testiPPc + fun:(below main) +} + +{ + DPDK interrupt thread + Memcheck:Leak + match-leak-kinds: possible + fun:calloc + fun:allocate_dtv + fun:_dl_allocate_tls + fun:allocate_stack + fun:pthread_create@@GLIBC_2.2.5 + fun:rte_eal_intr_init + fun:rte_eal_init + fun:_Z9main_testiPPc + fun:(below main) +} + +{ + DPDK epoll ctl + Memcheck:Param + epoll_ctl(event) + fun:epoll_ctl + fun:eal_intr_thread_main + fun:start_thread + fun:clone +} + |
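Usage sketch (not part of the patch above): a minimal illustration of how the new per-port helpers introduced in this change - Port.arp_resolve() and Port.ping(), backed by ARPResolver/PingResolver - might be driven from a client session. The client-side pieces (STLClient, connect/acquire, the ports dictionary, the server and gateway addresses) are assumptions based on the surrounding trex_stl_lib API and may differ per TRex version; the port is assumed to already have a source IPv4 and an IPv4 destination configured, as pre_send() requires.

# hypothetical driver script - not part of this commit
from trex_stl_lib.api import STLClient

c = STLClient(server = 'localhost')      # assumed local TRex server
c.connect()
c.acquire(ports = [0], force = True)     # arp_resolve/ping are @writeable, so the port must be owned

try:
    port = c.ports[0]                    # Port object extended by this patch (assumed accessor)

    # resolve the port's configured IPv4 destination into a next-hop MAC, retrying twice
    rc = port.arp_resolve(retries = 2)
    if not rc:
        print(rc.err())

    # ping must come after resolution - PingResolver.pre_send() rejects an unresolved destination;
    # pkt_size is bounded to 64..9216 by check_pkt_size(), 1.1.1.1 is a placeholder gateway
    rc = port.ping(ping_ipv4 = '1.1.1.1', pkt_size = 64)
    if rc:
        print(rc.data())

finally:
    c.disconnect()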