Diffstat (limited to 'scripts')
81 files changed, 4268 insertions, 751 deletions
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py index 0017e7db..606235a6 100755 --- a/scripts/automation/regression/CPlatform.py +++ b/scripts/automation/regression/CPlatform.py @@ -499,6 +499,8 @@ class CPlatform(object): client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' ) server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' ) + client_net_next_hop_v4 = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv4_addr() ) + server_net_next_hop_v4 = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv4_addr() ) client_if_command_set.append ('{mode}ipv6 enable'.format(mode = unconfig_str)) @@ -526,12 +528,23 @@ class CPlatform(object): next_hop = server_net_next_hop, intf = dual_if.client_if.get_name(), dest_mac = dual_if.client_if.get_ipv6_dest_mac())) + # For latency packets (which are IPv4), we need to configure also static ARP + conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + next_hop = server_net_next_hop_v4, + dest_mac = dual_if.client_if.get_ipv6_dest_mac())) + if dual_if.server_if.get_ipv6_dest_mac(): conf_t_command_set.append('{mode}ipv6 neighbor {next_hop} {intf} {dest_mac}'.format( mode = unconfig_str, - next_hop = client_net_next_hop, + next_hop = client_net_next_hop, intf = dual_if.server_if.get_name(), dest_mac = dual_if.server_if.get_ipv6_dest_mac())) + # For latency packets (which are IPv4), we need to configure also static ARP + conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format( + mode = unconfig_str, + next_hop = client_net_next_hop_v4, + dest_mac = dual_if.server_if.get_ipv6_dest_mac())) conf_t_command_set.append('{mode}route-map {pre}_{p1}_to_{p2} permit 10'.format( mode = unconfig_str, diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py index c7c61ea6..130e0545 100755 --- a/scripts/automation/regression/aggregate_results.py +++ b/scripts/automation/regression/aggregate_results.py @@ -8,10 +8,8 @@ import sys, os from collections import OrderedDict import copy import datetime, time -try: - import cPickle as pickle -except: - import pickle +import traceback +import yaml import subprocess, shlex from ansi2html import Ansi2HTMLConverter @@ -25,6 +23,15 @@ FUNCTIONAL_CATEGORY = 'Functional' # how to display those categories ERROR_CATEGORY = 'Error' +def try_write(file, text): + try: + file.write(text) + except: + try: + file.write(text.encode('utf-8')) + except: + file.write(text.decode('utf-8')) + def pad_tag(text, tag): return '<%s>%s</%s>' % (tag, text, tag) @@ -256,6 +263,7 @@ if __name__ == '__main__': build_url = os.environ.get('BUILD_URL') build_id = os.environ.get('BUILD_ID') trex_repo = os.environ.get('TREX_CORE_REPO') + last_commit_info_file = os.environ.get('LAST_COMMIT_INFO') python_ver = os.environ.get('PYTHON_VER') if not scenario: print('Warning: no environment variable SCENARIO, using default') @@ -283,19 +291,25 @@ if __name__ == '__main__': trex_last_commit_info = '' trex_last_commit_hash = trex_info_dict.get('Git SHA') - if trex_last_commit_hash and trex_repo: + if last_commit_info_file and os.path.exists(last_commit_info_file): + with open(last_commit_info_file) as f: + trex_last_commit_info = f.read().strip().replace('\n', '<br>\n') + elif trex_last_commit_hash and trex_repo: try: - print('Getting TRex commit 
with hash %s' % trex_last_commit_hash) - command = 'git --git-dir %s show %s --quiet' % (trex_repo, trex_last_commit_hash) + command = 'git show %s -s' % trex_last_commit_hash print('Executing: %s' % command) - proc = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (trex_last_commit_info, stderr) = proc.communicate() - print('Stdout:\n\t' + trex_last_commit_info.replace('\n', '\n\t')) - print('Stderr:', stderr) - print('Return code:', proc.returncode) - trex_last_commit_info = trex_last_commit_info.replace('\n', '<br>') + proc = subprocess.Popen(shlex.split(command), stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd = trex_repo) + (stdout, stderr) = proc.communicate() + stdout = stdout.decode('utf-8', errors = 'replace') + print('Stdout:\n\t' + stdout.replace('\n', '\n\t')) + if stderr or proc.returncode: + print('Return code: %s' % proc.returncode) + trex_last_commit_info = stdout.replace('\n', '<br>\n') except Exception as e: + traceback.print_exc() print('Error getting last commit: %s' % e) + else: + print('Could not find info about commit!') ##### get xmls: report_<setup name>.xml @@ -520,7 +534,7 @@ if __name__ == '__main__': # save html with open(args.output_htmlfile, 'w') as f: print('Writing output file: %s' % args.output_htmlfile) - f.write(html_output) + try_write(f, html_output) html_output = None # mail report (only error tests, expanded) @@ -596,7 +610,7 @@ if __name__ == '__main__': else: mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, expanded=True) else: - mail_output += '<table><tr style="font-size:120;color:green;font-family:arial"><td>☺</td><td style="font-size:20">All passed.</td></tr></table>\n' + mail_output += u'<table><tr style="font-size:120;color:green;font-family:arial"><td>☺</td><td style="font-size:20">All passed.</td></tr></table>\n' mail_output += '\n</body>\n</html>' ##### save outputs @@ -605,17 +619,17 @@ if __name__ == '__main__': # mail content with open(args.output_mailfile, 'w') as f: print('Writing output file: %s' % args.output_mailfile) - f.write(mail_output) + try_write(f, mail_output) # build status category_dict_status = {} if os.path.exists(args.build_status_file): print('Reading: %s' % args.build_status_file) - with open(args.build_status_file, 'rb') as f: + with open(args.build_status_file, 'r') as f: try: - category_dict_status = pickle.load(f) + category_dict_status = yaml.safe_load(f.read()) except Exception as e: - print('Error during pickle load: %s' % e) + print('Error during YAML load: %s' % e) if type(category_dict_status) is not dict: print('%s is corrupt, truncating' % args.build_status_file) category_dict_status = {} @@ -635,15 +649,15 @@ if __name__ == '__main__': current_status = 'Fixed' category_dict_status[scenario] = current_status - with open(args.build_status_file, 'wb') as f: + with open(args.build_status_file, 'w') as f: print('Writing output file: %s' % args.build_status_file) - pickle.dump(category_dict_status, f) + yaml.dump(category_dict_status, f) # last successful commit - if (current_status in ('Successful', 'Fixed')) and trex_last_commit_hash and jobs_list > 0 and scenario == 'nightly': + if (current_status in ('Successful', 'Fixed')) and trex_last_commit_hash and len(jobs_list) > 0 and scenario == 'nightly': with open(args.last_passed_commit, 'w') as f: print('Writing output file: %s' % args.last_passed_commit) - f.write(trex_last_commit_hash) + try_write(f, trex_last_commit_hash) # mail title mailtitle_output = scenario.capitalize() @@ -653,7 
+667,8 @@ if __name__ == '__main__': with open(args.output_titlefile, 'w') as f: print('Writing output file: %s' % args.output_titlefile) - f.write(mailtitle_output) + try_write(f, mailtitle_output) # exit + print('Status: %s' % current_status) sys.exit(exit_status) diff --git a/scripts/automation/regression/cfg/client_cfg.yaml b/scripts/automation/regression/cfg/client_cfg.yaml new file mode 100644 index 00000000..5c3d19ef --- /dev/null +++ b/scripts/automation/regression/cfg/client_cfg.yaml @@ -0,0 +1,48 @@ +#vlan: true +vlan: false + +groups: + +- ip_start : 16.0.0.1 + ip_end : 16.0.1.255 + initiator : + next_hop: 1.1.1.1 + src_ip : 1.1.1.2 + responder : + next_hop: 1.1.2.1 + src_ip : 1.1.2.2 + + count : 1 + +- ip_start : 17.0.0.1 + ip_end : 17.0.1.255 + initiator : + next_hop: 1.1.3.1 + src_ip : 1.1.3.2 + responder : + next_hop: 1.1.4.1 + src_ip : 1.1.4.2 + + count : 1 + +- ip_start : 18.0.0.1 + ip_end : 18.0.1.255 + initiator : + next_hop: 1.1.5.1 + src_ip : 1.1.5.2 + responder : + next_hop: 1.1.6.1 + src_ip : 1.1.6.2 + + count : 1 + +- ip_start : 19.0.0.1 + ip_end : 19.0.1.255 + initiator : + next_hop: 1.1.7.1 + src_ip : 1.1.7.2 + responder : + next_hop: 1.1.8.1 + src_ip : 1.1.8.2 + + count : 1 diff --git a/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py index 5ff6b318..66cb666c 100755 --- a/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py +++ b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py @@ -25,7 +25,7 @@ def compare_lines(golden, output): raise CompareLinesNumDiff('Number of lines on golden is: %s, in output: %s\nGolden:\n%s\nGenerated:\n%s\n' % (len(golden_lines), len(output_lines), golden, output)) for line_num, (golden_line, output_line) in enumerate(zip(golden_lines, output_lines)): if golden_line != output_line: - raise CompareLinesDiff('Produced YAML differs from golden at line %s.Golden: %s <-> Output: %s' % (line_num + 1, golden_line, output_line)) + raise CompareLinesDiff('Produced YAML differs from golden at line %s.\nGolden: %s <-> Output: %s' % (line_num + 1, golden_line, output_line)) def create_config(cpu_topology, interfaces, *args, **kwargs): config = ConfigCreator(cpu_topology, interfaces, *args, **kwargs) @@ -112,7 +112,7 @@ class TRexCfgCreator_Test: latency_thread_id: 1 dual_if: - socket: 0 - threads: [2] + threads: [2,3,4] ''' output = create_config(cpu_topology, interfaces) verify_master_core0(output) @@ -308,16 +308,16 @@ class TRexCfgCreator_Test: platform: master_thread_id: 0 - latency_thread_id: 16 + latency_thread_id: 12 dual_if: - socket: 0 - threads: [1,17,2,18,3,19,4] + threads: [1,2,3,16,17,18,19] - socket: 1 - threads: [8,24,9,25,10,26,11] + threads: [8,9,10,11,24,25,26] - socket: 0 - threads: [20,5,21,6,22,7,23] + threads: [4,5,6,7,20,21,22] ''' output = create_config(cpu_topology, interfaces) verify_master_core0(output) @@ -446,10 +446,10 @@ class TRexCfgCreator_Test: latency_thread_id: 31 dual_if: - socket: 0 - threads: [1,17,2,18,3,19,4,20,5,21,6,22,7,23,16] + threads: [1,2,3,4,5,6,7,16,17,18,19,20,21,22,23] - socket: 1 - threads: [8,24,9,25,10,26,11,27,12,28,13,29,14,30,15] + threads: [8,9,10,11,12,13,14,15,24,25,26,27,28,29,30] ''' output = create_config(cpu_topology, interfaces) verify_master_core0(output) @@ -575,13 +575,13 @@ class TRexCfgCreator_Test: platform: master_thread_id: 0 - latency_thread_id: 16 + latency_thread_id: 8 dual_if: - socket: 0 - threads: [1,17,2,18,3,19,4] + threads: 
[1,2,3,16,17,18,19] - socket: 0 - threads: [20,5,21,6,22,7,23] + threads: [4,5,6,7,20,21,22] ''' output = create_config(cpu_topology, interfaces) verify_master_core0(output) @@ -694,5 +694,6 @@ class TRexCfgCreator_Test: @classmethod def tearDownClass(cls): - sys.path.remove(CTRexScenario.scripts_path) + if CTRexScenario.scripts_path in sys.path: + sys.path.remove(CTRexScenario.scripts_path) del sys.modules['dpdk_setup_ports'] diff --git a/scripts/automation/regression/setups/trex15/benchmark.yaml b/scripts/automation/regression/setups/trex03/benchmark.yaml index b366b3fb..b366b3fb 100644 --- a/scripts/automation/regression/setups/trex15/benchmark.yaml +++ b/scripts/automation/regression/setups/trex03/benchmark.yaml diff --git a/scripts/automation/regression/setups/trex15/config.yaml b/scripts/automation/regression/setups/trex03/config.yaml index c5fc3b22..17c4c91e 100644 --- a/scripts/automation/regression/setups/trex15/config.yaml +++ b/scripts/automation/regression/setups/trex03/config.yaml @@ -34,6 +34,6 @@ # expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test trex: - hostname : csi-trex-15 + hostname : csi-trex-03 cores : 1 modes : [loopback, virt_nics, VM] diff --git a/scripts/automation/regression/setups/trex17/benchmark.yaml b/scripts/automation/regression/setups/trex06/benchmark.yaml index 8bc9d29c..2f51a3fd 100644 --- a/scripts/automation/regression/setups/trex17/benchmark.yaml +++ b/scripts/automation/regression/setups/trex06/benchmark.yaml @@ -140,7 +140,7 @@ test_CPU_benchmark: bw_per_core : 1 - name : stl/pcap.py - kwargs : {ipg_usec: 2, loop_count: 0} + kwargs : {ipg_usec: 3, loop_count: 0} cpu_util : 1 bw_per_core : 1 diff --git a/scripts/automation/regression/setups/trex17/config.yaml b/scripts/automation/regression/setups/trex06/config.yaml index 7ad6a20a..da0eb2dd 100644 --- a/scripts/automation/regression/setups/trex17/config.yaml +++ b/scripts/automation/regression/setups/trex06/config.yaml @@ -34,6 +34,6 @@ # expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test trex: - hostname : csi-trex-17 + hostname : csi-trex-06 cores : 1 modes : [loopback, virt_nics, VM] diff --git a/scripts/automation/regression/setups/trex07/backup/benchmark.yaml b/scripts/automation/regression/setups/trex07/backup/benchmark.yaml new file mode 100644 index 00000000..0dc340b0 --- /dev/null +++ b/scripts/automation/regression/setups/trex07/backup/benchmark.yaml @@ -0,0 +1,244 @@ +############################################################### +#### TRex benchmark configuration file #### +############################################################### + +#### common templates ### + +stat_route_dict: &stat_route_dict + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + +nat_dict: &nat_dict + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + + +### stateful ### + +test_jumbo: + multiplier : 17 + cores : 1 + bw_per_core : 543.232 + + +test_routing_imix: + multiplier : 10 + cores : 1 + bw_per_core : 34.128 + + +test_routing_imix_64: + multiplier : 430 + cores : 1 + bw_per_core : 5.893 + + +test_static_routing_imix: &test_static_routing_imix + stat_route_dict : *stat_route_dict + multiplier : 8 + cores : 1 + bw_per_core : 34.339 + +test_static_routing_imix_asymmetric: *test_static_routing_imix + + +test_ipv6_simple: + 
multiplier : 9 + cores : 2 + bw_per_core : 19.064 + + +test_nat_simple_mode1: &test_nat_simple + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + multiplier : 6000 + cores : 1 + nat_opened : 500000 + allow_timeout_dev : True + bw_per_core : 44.445 + +test_nat_simple_mode2: *test_nat_simple + +test_nat_simple_mode3: *test_nat_simple + +test_nat_learning: *test_nat_simple + + +test_nbar_simple: + multiplier : 7.5 + cores : 2 + bw_per_core : 17.174 + nbar_classification: + rtp : 32.57 + http : 30.25 + oracle_sqlnet : 11.23 + exchange : 10.80 + citrix : 5.62 + rtsp : 2.84 + dns : 1.95 + smtp : 0.57 + pop3 : 0.36 + ssl : 0.17 + sctp : 0.13 + sip : 0.09 + unknown : 3.41 + + +test_rx_check_http: &rx_http + multiplier : 15000 + cores : 1 + rx_sample_rate : 16 + bw_per_core : 39.560 + +test_rx_check_http_ipv6: + << : *rx_http + bw_per_core : 49.237 + +test_rx_check_http_negative_disabled: + << : *rx_http + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + + +test_rx_check_sfr: &rx_sfr + multiplier : 10 + cores : 3 + rx_sample_rate : 16 + bw_per_core : 16.082 + +test_rx_check_sfr_ipv6: + << : *rx_sfr + bw_per_core : 19.198 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 64, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 9000, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + 
cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + diff --git a/scripts/automation/regression/setups/trex07/backup/config.yaml b/scripts/automation/regression/setups/trex07/backup/config.yaml new file mode 100644 index 00000000..db6e9bf8 --- /dev/null +++ b/scripts/automation/regression/setups/trex07/backup/config.yaml @@ -0,0 +1,66 @@ +################################################################ +#### TRex nightly test configuration file #### +################################################################ + + +### TRex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the TRex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. 
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-07 + cores : 4 + +router: + model : ASR1001x + hostname : csi-asr-01 + ip_address : 10.56.216.120 + image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin + line_password : cisco + en_password : cisco + mgmt_interface : GigabitEthernet0 + clean_config : clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : Te0/0/0 + src_mac_addr : 0000.0001.0002 + dest_mac_addr : 0000.0001.0001 + server : + name : Te0/0/1 + src_mac_addr : 0000.0002.0002 + dest_mac_addr : 0000.0002.0001 + vrf_name : null + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.217.7 + root_dir : /scratch/tftp/ + images_path : /asr1001x/ diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml index 0dc340b0..6e861836 100644 --- a/scripts/automation/regression/setups/trex07/benchmark.yaml +++ b/scripts/automation/regression/setups/trex07/benchmark.yaml @@ -4,120 +4,57 @@ #### common templates ### -stat_route_dict: &stat_route_dict - clients_start : 16.0.0.1 - servers_start : 48.0.0.1 - dual_port_mask : 1.0.0.0 - client_destination_mask : 255.0.0.0 - server_destination_mask : 255.0.0.0 - -nat_dict: &nat_dict - clients_net_start : 16.0.0.0 - client_acl_wildcard_mask : 0.0.0.255 - dual_port_mask : 1.0.0.0 - pool_start : 200.0.0.0 - pool_netmask : 255.255.255.0 - - -### stateful ### - test_jumbo: - multiplier : 17 - cores : 1 - bw_per_core : 543.232 + multiplier : 120 + cores : 2 + bw_per_core : 962.464 test_routing_imix: - multiplier : 10 - cores : 1 - bw_per_core : 34.128 + multiplier : 60 + cores : 4 + bw_per_core : 48.130 test_routing_imix_64: - multiplier : 430 - cores : 1 - bw_per_core : 5.893 - + multiplier : 4000 + cores : 7 + bw_per_core : 12.699 -test_static_routing_imix: &test_static_routing_imix - stat_route_dict : *stat_route_dict - multiplier : 8 - cores : 1 - bw_per_core : 34.339 -test_static_routing_imix_asymmetric: *test_static_routing_imix +test_static_routing_imix_asymmetric: + multiplier : 50 + cores : 3 + bw_per_core : 50.561 test_ipv6_simple: - multiplier : 9 - cores : 2 - bw_per_core : 19.064 - - -test_nat_simple_mode1: &test_nat_simple - stat_route_dict : *stat_route_dict - nat_dict : *nat_dict - multiplier : 6000 - cores : 1 - nat_opened : 500000 - allow_timeout_dev : True - bw_per_core : 44.445 - -test_nat_simple_mode2: *test_nat_simple - -test_nat_simple_mode3: *test_nat_simple - -test_nat_learning: *test_nat_simple - - -test_nbar_simple: - multiplier : 7.5 - cores : 2 - bw_per_core : 17.174 - nbar_classification: - rtp : 32.57 - http : 30.25 - oracle_sqlnet : 11.23 - exchange : 10.80 - citrix : 5.62 - rtsp : 2.84 - dns : 1.95 - smtp : 0.57 - pop3 : 0.36 - ssl : 0.17 - sctp : 0.13 - sip : 0.09 - unknown : 3.41 + multiplier : 50 + cores : 7 + bw_per_core : 19.5 test_rx_check_http: &rx_http - multiplier : 15000 - cores : 1 - rx_sample_rate : 16 - bw_per_core : 39.560 + multiplier : 99000 + cores : 7 + rx_sample_rate : 128 + bw_per_core : 49.464 test_rx_check_http_ipv6: << : *rx_http bw_per_core : 49.237 
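The benchmark files in this commit lean heavily on YAML anchors and merge keys: 'test_rx_check_http: &rx_http' anchors a mapping, and variants such as 'test_rx_check_http_ipv6' inherit it via '<< : *rx_http', overriding only the fields that differ. A minimal PyYAML sketch of how such a merge resolves, with values taken from the trex07 hunk above:

import yaml

doc = """
test_rx_check_http: &rx_http
    multiplier     : 99000
    cores          : 7
    rx_sample_rate : 128
    bw_per_core    : 49.464

test_rx_check_http_ipv6:
    << : *rx_http          # inherit every key of the anchored mapping
    bw_per_core : 49.237   # override just this one field
"""

data = yaml.safe_load(doc)
ipv6 = data['test_rx_check_http_ipv6']
assert ipv6['multiplier'] == 99000    # inherited from &rx_http
assert ipv6['bw_per_core'] == 49.237  # locally overridden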
-test_rx_check_http_negative_disabled: - << : *rx_http - stat_route_dict : *stat_route_dict - nat_dict : *nat_dict - - test_rx_check_sfr: &rx_sfr - multiplier : 10 - cores : 3 - rx_sample_rate : 16 - bw_per_core : 16.082 + multiplier : 35 + cores : 7 + rx_sample_rate : 128 + bw_per_core : 20.871 test_rx_check_sfr_ipv6: << : *rx_sfr bw_per_core : 19.198 - ### stateless ### test_CPU_benchmark: @@ -178,10 +115,10 @@ test_CPU_benchmark: cpu_util : 1 bw_per_core : 1 - - name : stl/udp_for_benchmarks.py - kwargs : {packet_len: 9000, stream_count: 100} - cpu_util : 1 - bw_per_core : 1 + #- name : stl/udp_for_benchmarks.py + # kwargs : {packet_len: 9000, stream_count: 100} + # cpu_util : 1 + # bw_per_core : 1 # not enough memory + queue full if memory increase # - name : stl/udp_for_benchmarks.py @@ -241,4 +178,56 @@ test_CPU_benchmark: cpu_util : 1 bw_per_core : 1 - +test_performance_vm_single_cpu: + cfg: + mult : "90%" + mpps_per_core_golden : + min: 9.6 + max: 13.3 + + +test_performance_vm_single_cpu_cached: + cfg: + mult : "10%" + mpps_per_core_golden : + min: 16.0 + max: 25.0 + + + +test_performance_syn_attack_single_cpu: + cfg: + mult : "90%" + mpps_per_core_golden : + min: 9.0 + max: 14.0 + +test_performance_vm_multi_cpus: + cfg: + core_count : 7 + mult : "90%" + mpps_per_core_golden : + min: 8.5 + max: 12.0 + + +test_performance_vm_multi_cpus_cached: + cfg: + core_count : 7 + mult : "35%" + mpps_per_core_golden : + min: 9.0 + max: 15.0 + +test_performance_syn_attack_multi_cpus: + cfg: + core_count : 7 + mult : "90%" + mpps_per_core_golden : + min: 8.0 + max: 16.0 + + +test_all_profiles : + mult : "5%" + diff --git a/scripts/automation/regression/setups/trex07/config.yaml b/scripts/automation/regression/setups/trex07/config.yaml index db6e9bf8..10472c4f 100644 --- a/scripts/automation/regression/setups/trex07/config.yaml +++ b/scripts/automation/regression/setups/trex07/config.yaml @@ -35,32 +35,7 @@ trex: hostname : csi-trex-07 - cores : 4 + cores : 8 + modes : ['loopback'] -router: - model : ASR1001x - hostname : csi-asr-01 - ip_address : 10.56.216.120 - image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin - line_password : cisco - en_password : cisco - mgmt_interface : GigabitEthernet0 - clean_config : clean_config.cfg - intf_masking : 255.255.255.0 - ipv6_mask : 64 - interfaces : - - client : - name : Te0/0/0 - src_mac_addr : 0000.0001.0002 - dest_mac_addr : 0000.0001.0001 - server : - name : Te0/0/1 - src_mac_addr : 0000.0002.0002 - dest_mac_addr : 0000.0002.0001 - vrf_name : null -tftp: - hostname : ats-asr-srv-1 - ip_address : 10.56.217.7 - root_dir : /scratch/tftp/ - images_path : /asr1001x/ diff --git a/scripts/automation/regression/setups/trex08/benchmark.yaml b/scripts/automation/regression/setups/trex08/benchmark.yaml index 8f83e8f9..935b3e55 100644 --- a/scripts/automation/regression/setups/trex08/benchmark.yaml +++ b/scripts/automation/regression/setups/trex08/benchmark.yaml @@ -179,3 +179,53 @@ test_CPU_benchmark: bw_per_core : 1 +test_performance_vm_single_cpu: + cfg: + mult : "90%" + mpps_per_core_golden : + min: 15.1 + max: 20.3 + + +test_performance_vm_single_cpu_cached: + cfg: + mult : "10%" + mpps_per_core_golden : + min: 29.1 + max: 32.0 + + + +test_performance_syn_attack_single_cpu: + cfg: + mult : "90%" + mpps_per_core_golden : + min: 13.2 + max: 15.0 + +test_performance_vm_multi_cpus: + cfg: + core_count : 7 + mult : "40%" + mpps_per_core_golden : + min: 15.0 + max: 20.0 + + +test_performance_vm_multi_cpus_cached: + cfg: + core_count : 7 + mult : "40%" + 
mpps_per_core_golden : + min: 29.0 + max: 34.0 + +test_performance_syn_attack_multi_cpus: + cfg: + core_count : 7 + mult : "40%" + mpps_per_core_golden : + min: 13.0 + max: 17.0 + + diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml index d1f5f56c..50f08351 100644 --- a/scripts/automation/regression/setups/trex09/benchmark.yaml +++ b/scripts/automation/regression/setups/trex09/benchmark.yaml @@ -187,7 +187,7 @@ test_performance_vm_single_cpu: cfg: mult : "90%" mpps_per_core_golden : - min: 16.2 + min: 13.0 max: 17.3 @@ -195,7 +195,7 @@ test_performance_vm_single_cpu_cached: cfg: mult : "90%" mpps_per_core_golden : - min: 29.5 + min: 28.5 max: 31.2 @@ -221,7 +221,7 @@ test_performance_vm_multi_cpus_cached: core_count : 2 mult : "90%" mpps_per_core_golden : - min: 28.8 + min: 26.8 max: 29.5 test_performance_syn_attack_multi_cpus: diff --git a/scripts/automation/regression/setups/trex11/backup/benchmark.yaml b/scripts/automation/regression/setups/trex11/backup/benchmark.yaml new file mode 100644 index 00000000..b366b3fb --- /dev/null +++ b/scripts/automation/regression/setups/trex11/backup/benchmark.yaml @@ -0,0 +1,155 @@ +################################################################ +#### TRex benchmark configuration file #### +################################################################ + +### stateful ### + +test_jumbo: + multiplier : 2.8 + cores : 1 + bw_per_core : 106.652 + + +test_routing_imix: + multiplier : 0.5 + cores : 1 + bw_per_core : 11.577 + + +test_routing_imix_64: + multiplier : 28 + cores : 1 + bw_per_core : 2.030 + + +test_static_routing_imix_asymmetric: + multiplier : 0.8 + cores : 1 + bw_per_core : 13.742 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 64, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 9000, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + 
bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 4, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + diff --git a/scripts/automation/regression/setups/trex11/backup/config.yaml b/scripts/automation/regression/setups/trex11/backup/config.yaml new file mode 100644 index 00000000..782b7542 --- /dev/null +++ b/scripts/automation/regression/setups/trex11/backup/config.yaml @@ -0,0 +1,38 @@ +################################################################ +#### TRex nightly test configuration file #### +################################################################ + + +### TRex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the TRex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback, virtual etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * virtual - virtual OS (accept low CPU utilization in tests) + +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. 
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-11 + cores : 1 + modes : ['loopback', 'VM', 'virt_nics'] diff --git a/scripts/automation/regression/setups/trex11/benchmark.yaml b/scripts/automation/regression/setups/trex11/benchmark.yaml index b366b3fb..5ebcdd55 100644 --- a/scripts/automation/regression/setups/trex11/benchmark.yaml +++ b/scripts/automation/regression/setups/trex11/benchmark.yaml @@ -1,32 +1,58 @@ -################################################################ +############################################################### #### TRex benchmark configuration file #### -################################################################ +############################################################### -### stateful ### +#### common templates ### -test_jumbo: - multiplier : 2.8 - cores : 1 - bw_per_core : 106.652 +#test_jumbo: +# multiplier : 2.8 +# cores : 1 +# bw_per_core : 962.464 test_routing_imix: multiplier : 0.5 cores : 1 - bw_per_core : 11.577 + bw_per_core : 48.130 test_routing_imix_64: multiplier : 28 cores : 1 - bw_per_core : 2.030 + bw_per_core : 12.699 test_static_routing_imix_asymmetric: - multiplier : 0.8 + multiplier : 0.5 + cores : 1 + bw_per_core : 50.561 + + +test_ipv6_simple: + multiplier : 0.5 + cores : 1 + bw_per_core : 19.5 + + +test_rx_check_http: &rx_http + multiplier : 1000 + cores : 1 + rx_sample_rate : 128 + bw_per_core : 49.464 + +test_rx_check_http_ipv6: + << : *rx_http + bw_per_core : 49.237 + +test_rx_check_sfr: &rx_sfr + multiplier : 8 cores : 1 - bw_per_core : 13.742 + rx_sample_rate : 128 + bw_per_core : 20.9 +test_rx_check_sfr_ipv6: + << : *rx_sfr + bw_per_core : 23.9 ### stateless ### @@ -140,16 +166,21 @@ test_CPU_benchmark: bw_per_core : 1 - name : stl/pcap.py - kwargs : {ipg_usec: 4, loop_count: 0} + kwargs : {ipg_usec: 2, loop_count: 0} cpu_util : 1 bw_per_core : 1 - - name : stl/udp_rand_len_9k.py - cpu_util : 1 - bw_per_core : 1 + #- name : stl/udp_rand_len_9k.py + # cpu_util : 1 + # bw_per_core : 1 - - name : stl/hlt/hlt_udp_rand_len_9k.py - cpu_util : 1 - bw_per_core : 1 + #- name : stl/hlt/hlt_udp_rand_len_9k.py + # cpu_util : 1 + # bw_per_core : 1 + +test_all_profiles : + mult : "5%" + skip : ['udp_rand_len_9k.py','udp_inc_len_9k.py'] # due to VIC 9K defect trex-282 + diff --git a/scripts/automation/regression/setups/trex11/config.yaml b/scripts/automation/regression/setups/trex11/config.yaml index 782b7542..393c8749 100644 --- a/scripts/automation/regression/setups/trex11/config.yaml +++ b/scripts/automation/regression/setups/trex11/config.yaml @@ -4,15 +4,16 @@ ### TRex configuration: -# hostname - can be DNS name or IP for the TRex machine for ssh to the box -# password - root password for TRex machine -# is_dual - should the TRex inject with -p ? -# version_path - path to the TRex version and executable -# cores - how many cores should be used -# latency - rate of latency packets injected by the TRex -# modes - list of modes (tagging) of this setup (loopback, virtual etc.) -# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. 
-# * virtual - virtual OS (accept low CPU utilization in tests) +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the TRex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) ### Router configuration: # hostname - the router hostname as apears in ______# cli prefix @@ -35,4 +36,6 @@ trex: hostname : csi-trex-11 cores : 1 - modes : ['loopback', 'VM', 'virt_nics'] + modes : ['loopback'] + + diff --git a/scripts/automation/regression/setups/trex14/BU/benchmark.yaml b/scripts/automation/regression/setups/trex14/BU/benchmark.yaml new file mode 100644 index 00000000..04f13e79 --- /dev/null +++ b/scripts/automation/regression/setups/trex14/BU/benchmark.yaml @@ -0,0 +1,245 @@ +############################################################### +#### TRex benchmark configuration file #### +############################################################### + +#### common templates ### + +stat_route_dict: &stat_route_dict + clients_start : 16.0.0.1 + servers_start : 48.0.0.1 + dual_port_mask : 1.0.0.0 + client_destination_mask : 255.0.0.0 + server_destination_mask : 255.0.0.0 + +nat_dict: &nat_dict + clients_net_start : 16.0.0.0 + client_acl_wildcard_mask : 0.0.0.255 + dual_port_mask : 1.0.0.0 + pool_start : 200.0.0.0 + pool_netmask : 255.255.255.0 + + +### stateful ### + +test_jumbo: + multiplier : 17 + cores : 1 + bw_per_core : 543.232 + + +test_routing_imix: + multiplier : 10 + cores : 1 + bw_per_core : 34.128 + + +test_routing_imix_64: + multiplier : 430 + cores : 1 + bw_per_core : 5.893 + + +test_static_routing_imix: &test_static_routing_imix + stat_route_dict : *stat_route_dict + multiplier : 8 + cores : 1 + bw_per_core : 34.339 + +test_static_routing_imix_asymmetric: *test_static_routing_imix + + +test_ipv6_simple: + multiplier : 9 + cores : 2 + bw_per_core : 19.064 + + +test_nat_simple_mode1: &test_nat_simple + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + multiplier : 6000 + cores : 1 + nat_opened : 500000 + allow_timeout_dev : True + bw_per_core : 44.445 + +test_nat_simple_mode2: *test_nat_simple + +test_nat_simple_mode3: *test_nat_simple + +test_nat_learning: *test_nat_simple + + +test_nbar_simple: + multiplier : 7.5 + cores : 2 + bw_per_core : 17.174 + nbar_classification: + http : 32.58 + rtp-audio : 21.21 + oracle_sqlnet : 11.41 + exchange : 11.22 + rtp : 11.2 + citrix : 5.65 + rtsp : 2.87 + dns : 1.96 + smtp : 0.57 + pop3 : 0.37 + ssl : 0.28 + sctp : 0.13 + sip : 0.09 + unknown : 0.45 + + +test_rx_check_http: &rx_http + multiplier : 15000 + cores : 1 + rx_sample_rate : 16 + bw_per_core : 39.560 + +test_rx_check_http_ipv6: + << : *rx_http + bw_per_core : 49.237 + +test_rx_check_http_negative_disabled: + << : *rx_http + stat_route_dict : *stat_route_dict + nat_dict : *nat_dict + + +test_rx_check_sfr: &rx_sfr + multiplier : 10 + cores : 3 + rx_sample_rate : 16 + bw_per_core : 16.082 + +test_rx_check_sfr_ipv6: + << : *rx_sfr + bw_per_core : 19.198 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64} + cpu_util : 1 + 
bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 64, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 9000, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 2, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + diff --git a/scripts/automation/regression/setups/trex14/BU/config.yaml b/scripts/automation/regression/setups/trex14/BU/config.yaml new file mode 100644 index 00000000..0fd6b70e --- /dev/null +++ b/scripts/automation/regression/setups/trex14/BU/config.yaml @@ -0,0 +1,67 @@ +################################################################ +#### TRex nightly test configuration file #### +################################################################ + + +### TRex configuration: +# hostname - can be DNS name or IP for the TRex machine for ssh to the box +# password - root password for TRex machine +# is_dual - should the TRex inject with -p ? +# version_path - path to the TRex version and executable +# cores - how many cores should be used +# latency - rate of latency packets injected by the TRex +# modes - list of modes (tagging) of this setup (loopback etc.) +# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped. +# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes) +# * virt_nics - NICs are virtual (VMXNET3 etc.) 
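These per-setup config.yaml files drive the regression launcher; the 'modes' tags listed above decide which test groups apply, and loopback setups may omit the router and TFTP sections entirely. A minimal sketch of consuming the fields documented in this header; this is an illustration only, not the framework's actual loader, and load_setup_config is a hypothetical helper:

import yaml

def load_setup_config(path):
    with open(path) as f:
        cfg = yaml.safe_load(f)
    trex  = cfg['trex']
    modes = trex.get('modes', [])
    return {
        'hostname'    : trex['hostname'],
        'cores'       : trex.get('cores', 1),
        'is_loopback' : 'loopback' in modes,  # router/tftp may be skipped
        'is_VM'       : 'VM' in modes,
        'virt_nics'   : 'virt_nics' in modes,
        'router'      : cfg.get('router'),    # None on loopback setups
        'tftp'        : cfg.get('tftp'),
    }

# e.g. load_setup_config('setups/trex14/config.yaml')['is_loopback']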
+ +### Router configuration: +# hostname - the router hostname as apears in ______# cli prefix +# ip_address - the router's ip that can be used to communicate with +# image - the desired imaged wished to be loaded as the router's running config +# line_password - router password when access via Telent +# en_password - router password when changing to "enable" mode +# interfaces - an array of client-server pairs, representing the interfaces configurations of the router +# configurations - an array of configurations that could possibly loaded into the router during the test. +# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench + +### TFTP configuration: +# hostname - the tftp hostname +# ip_address - the tftp's ip address +# images_path - the tftp's relative path in which the router's images are located + +### Test_misc configuration: +# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test + +trex: + hostname : csi-trex-14 + cores : 4 + modes : [] + +router: + model : ASR1001x + hostname : csi-asr-01 + ip_address : 10.56.216.103 + image : asr1001x-universalk9.03.17.00.S.156-1.S-std.SPA.bin + line_password : cisco + en_password : cisco + mgmt_interface : GigabitEthernet0 + clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg + intf_masking : 255.255.255.0 + ipv6_mask : 64 + interfaces : + - client : + name : Te0/0/0 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + server : + name : Te0/0/1 + src_mac_addr : 0000.0001.0000 + dest_mac_addr : 0000.0001.0000 + vrf_name : null + +tftp: + hostname : ats-asr-srv-1 + ip_address : 10.56.217.7 + root_dir : /scratch/tftp/ + images_path : /asr1001x/ diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml index 04f13e79..0dc340b0 100644 --- a/scripts/automation/regression/setups/trex14/benchmark.yaml +++ b/scripts/automation/regression/setups/trex14/benchmark.yaml @@ -75,20 +75,19 @@ test_nbar_simple: cores : 2 bw_per_core : 17.174 nbar_classification: - http : 32.58 - rtp-audio : 21.21 - oracle_sqlnet : 11.41 - exchange : 11.22 - rtp : 11.2 - citrix : 5.65 - rtsp : 2.87 - dns : 1.96 + rtp : 32.57 + http : 30.25 + oracle_sqlnet : 11.23 + exchange : 10.80 + citrix : 5.62 + rtsp : 2.84 + dns : 1.95 smtp : 0.57 - pop3 : 0.37 - ssl : 0.28 + pop3 : 0.36 + ssl : 0.17 sctp : 0.13 sip : 0.09 - unknown : 0.45 + unknown : 3.41 test_rx_check_http: &rx_http diff --git a/scripts/automation/regression/setups/trex14/config.yaml b/scripts/automation/regression/setups/trex14/config.yaml index 0fd6b70e..ffb61763 100644 --- a/scripts/automation/regression/setups/trex14/config.yaml +++ b/scripts/automation/regression/setups/trex14/config.yaml @@ -36,28 +36,27 @@ trex: hostname : csi-trex-14 cores : 4 - modes : [] router: model : ASR1001x hostname : csi-asr-01 - ip_address : 10.56.216.103 - image : asr1001x-universalk9.03.17.00.S.156-1.S-std.SPA.bin + ip_address : 10.56.216.120 + image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin line_password : cisco en_password : cisco mgmt_interface : GigabitEthernet0 - clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg + clean_config : clean_config.cfg intf_masking : 255.255.255.0 ipv6_mask : 64 interfaces : - client : name : Te0/0/0 - src_mac_addr : 0000.0001.0000 - dest_mac_addr : 0000.0001.0000 + src_mac_addr : 0000.0001.0002 + dest_mac_addr : 0000.0001.0001 server : name : Te0/0/1 - src_mac_addr : 
0000.0001.0000 - dest_mac_addr : 0000.0001.0000 + src_mac_addr : 0000.0002.0002 + dest_mac_addr : 0000.0002.0001 vrf_name : null tftp: diff --git a/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py b/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py new file mode 100644 index 00000000..852e745d --- /dev/null +++ b/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py @@ -0,0 +1,52 @@ +#!/router/bin/python +from .trex_general_test import CTRexGeneral_Test, CTRexScenario +from CPlatform import CStaticRouteConfig +from .tests_exceptions import * +#import sys +import time +from nose.tools import nottest + +# Testing client cfg ARP resolve. Actually, just need to check that TRex run finished with no errors. +# If resolve will fail, TRex will exit with exit code != 0 +class CTRexClientCfg_Test(CTRexGeneral_Test): + """This class defines the IMIX testcase of the TRex traffic generator""" + def __init__(self, *args, **kwargs): + # super(CTRexClientCfg_Test, self).__init__() + CTRexGeneral_Test.__init__(self, *args, **kwargs) + + def setUp(self): + if CTRexScenario.setup_name == 'kiwi02': + self.skip("Can't run currently on kiwi02") + super(CTRexClientCfg_Test, self).setUp() # launch super test class setUp process + pass + + def test_client_cfg(self): + # test initializtion + if self.is_loopback: + return + else: + self.router.configure_basic_interfaces() + self.router.config_pbr(mode = "config") + + ret = self.trex.start_trex( + c = 1, + m = 1, + d = 10, + f = 'cap2/dns.yaml', + v = 3, + client_cfg = 'automation/regression/cfg/client_cfg.yaml', + l = 1000) + + trex_res = self.trex.sample_to_run_finish() + + print("\nLATEST RESULT OBJECT:") + print(trex_res) + + self.check_general_scenario_results(trex_res) + + def tearDown(self): + CTRexGeneral_Test.tearDown(self) + pass + +if __name__ == "__main__": + pass diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py index e968d380..fe38ed34 100755 --- a/scripts/automation/regression/stateful_tests/trex_general_test.py +++ b/scripts/automation/regression/stateful_tests/trex_general_test.py @@ -198,11 +198,14 @@ class CTRexGeneral_Test(unittest.TestCase): def check_for_trex_crash(self): pass - def get_benchmark_param (self, param, sub_param = None, test_name = None): + def get_benchmark_param (self, param, sub_param = None, test_name = None,default=None): if not test_name: test_name = self.get_name() if test_name not in self.benchmark: - self.skip('No data in benchmark.yaml for test: %s, param: %s. Skipping.' % (test_name, param)) + if default ==None: + self.skip('No data in benchmark.yaml for test: %s, param: %s. Skipping.' 
% (test_name, param)) + else: + return default if sub_param: return self.benchmark[test_name][param].get(sub_param) else: @@ -254,7 +257,7 @@ class CTRexGeneral_Test(unittest.TestCase): allowed_latency = 1000 if max(trex_res.get_max_latency().values()) > allowed_latency: self.fail('LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency) - + # check that avg latency does not exceed 1 msec if self.is_VM: allowed_latency = 9999999 @@ -263,6 +266,15 @@ class CTRexGeneral_Test(unittest.TestCase): if max(trex_res.get_avg_latency().values()) > allowed_latency: self.fail('LatencyError: Average latency exceeds %s (usec)' % allowed_latency) + ports_names = trex_res.get_last_value('trex-latecny-v2.data', 'port\-\d+') + if not ports_names: + raise AbnormalResultError('Could not find ports info in TRex results, path: trex-latecny-v2.data.port-*') + for port_name in ports_names: + path = 'trex-latecny-v2.data.%s.hist.cnt' % port_name + lat_count = trex_res.get_last_value(path) + if lat_count == 0: + self.fail('LatencyError: Number of latency packets received on %s is 0' % port_name) + if not self.is_loopback: # check router number of drops --> deliberately masked- need to be figured out!!!!! pkt_drop_stats = self.router.get_drop_stats() @@ -356,7 +368,7 @@ class CTRexGeneral_Test(unittest.TestCase): print("Can't get TRex log:", e) if len(self.fail_reasons): sys.stdout.flush() - raise Exception('The test is failed, reasons:\n%s' % '\n'.join(self.fail_reasons)) + raise Exception('Test failed. Reasons:\n%s' % '\n'.join(self.fail_reasons)) sys.stdout.flush() def check_for_trex_crash(self): diff --git a/scripts/automation/regression/stateless_tests/stl_client_test.py b/scripts/automation/regression/stateless_tests/stl_client_test.py index 36ac0ee1..73dac734 100644 --- a/scripts/automation/regression/stateless_tests/stl_client_test.py +++ b/scripts/automation/regression/stateless_tests/stl_client_test.py @@ -240,10 +240,26 @@ class STLClient_Test(CStlGeneral_Test): self.skip('skipping profile tests for virtual / non loopback') return + default_mult = self.get_benchmark_param('mult',default="30%") + skip_tests = self.get_benchmark_param('skip',default=[]) + try: - + print("\n"); + + for profile in self.profiles: + skip=False + if skip_tests: + for skip_test in skip_tests: + if skip_test in profile: + skip=True; + break; + if skip: + print("skipping testing profile due to config file {0}...\n".format(profile)) + continue; + + print("now testing profile {0}...\n".format(profile)) p1 = STLProfile.load(profile, port_id = self.tx_port) @@ -269,7 +285,7 @@ class STLClient_Test(CStlGeneral_Test): self.c.clear_stats() - self.c.start(ports = [self.tx_port, self.rx_port], mult = "30%") + self.c.start(ports = [self.tx_port, self.rx_port], mult = default_mult) time.sleep(100 / 1000.0) if p1.is_pauseable() and p2.is_pauseable(): diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py index a556daf3..641f0a33 100644 --- a/scripts/automation/regression/stateless_tests/stl_performance_test.py +++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py @@ -296,6 +296,10 @@ class STLPerformance_Test(CStlGeneral_Test): # sample bps/pps for _ in range(0, 20): stats = self.c.get_stats(ports = 0) + if stats['global'][ 'queue_full']>10000: + assert 0, "Queue is full need to tune the multiplier" + + # CPU results are not valid cannot use them samples['bps'].append(stats[0]['tx_bps']) 
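# The guard added above aborts CPU sampling once the global 'queue_full'
# counter passes 10000: CPU% readings taken while the NIC queues overflow
# are not valid, so the multiplier has to be tuned down instead. A
# standalone sketch of that sampling pattern (sample_tx is a hypothetical
# helper; the get_stats() keys and the 10000 threshold come from the hunk
# above):
import time

def sample_tx(c, seconds = 20, queue_full_limit = 10000):
    samples = {'bps': [], 'pps': []}
    for _ in range(seconds):
        stats = c.get_stats(ports = 0)
        if stats['global']['queue_full'] > queue_full_limit:
            raise RuntimeError('Queue full, need to tune the multiplier')
        samples['bps'].append(stats[0]['tx_bps'])
        samples['pps'].append(stats[0]['tx_pps'])
        time.sleep(1)
    return samples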
samples['pps'].append(stats[0]['tx_pps']) time.sleep(1) diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py index 524ad4bf..4dad712f 100644 --- a/scripts/automation/regression/stateless_tests/stl_rx_test.py +++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py @@ -51,6 +51,24 @@ class STLRX_Test(CStlGeneral_Test): 'latency_9k_enable': False, 'allow_packets_drop_num': 1, # allow 1 pkt drop }, + + 'librte_pmd_mlx5': { + 'rate_percent': 80, + 'total_pkts': 1000, + 'rate_latency': 1, + 'latency_9k_enable': True, + 'latency_9k_max_average': 100, + 'latency_9k_max_latency': 250, + }, + + 'rte_enic_pmd': { + 'rate_percent': 1, + 'total_pkts': 50, + 'rate_latency': 1, + 'latency_9k_enable': False, + }, + + } CStlGeneral_Test.setUp(self) @@ -63,7 +81,6 @@ class STLRX_Test(CStlGeneral_Test): port_info = self.c.get_port_info(ports = self.rx_port)[0] self.speed = port_info['speed'] - cap = port_info['rx']['caps'] if "flow_stats" not in cap or "latency" not in cap: self.skip('port {0} does not support RX'.format(self.rx_port)) @@ -400,12 +417,14 @@ class STLRX_Test(CStlGeneral_Test): s_port=random.sample(all_ports, random.randint(1, len(all_ports)) ) s_port=sorted(s_port) - if self.speed == 40 : + + if ((self.speed == 40) or (self.speed == 100)): # the NIC does not support all full rate in case both port works let's filter odd ports s_port=list(filter(lambda x: x % 2==0, s_port)) if len(s_port)==0: s_port=[0]; + error=1; for j in range(0,5): print(" {4} - duration {0} pgid {1} pkt_size {2} s_port {3} ".format(duration,pgid,pkt_size,s_port,j)); diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py index 7b96f2f8..416a6e3b 100644 --- a/scripts/automation/regression/trex.py +++ b/scripts/automation/regression/trex.py @@ -35,7 +35,7 @@ class CTRexScenario: report_dir = 'reports' # logger = None test_types = {'functional_tests': [], 'stateful_tests': [], 'stateless_tests': []} - is_copied = False + pkg_updated = False GAManager = None no_daemon = False debug_image = False diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py index daa1abaf..09614770 100755 --- a/scripts/automation/regression/trex_unit_test.py +++ b/scripts/automation/regression/trex_unit_test.py @@ -137,35 +137,29 @@ class CTRexTestConfiguringPlugin(Plugin): parser.add_option('--stf', '--stateful', action="store_true", default = False, dest="stateful", help="Run stateful tests.") - parser.add_option('--pkg', action="store", - dest="pkg", - help="Run with given TRex package. Make sure the path available at server machine.") + parser.add_option('--pkg', type = str, + help="Run with given TRex package. Make sure the path available at server machine. Implies --restart-daemon.") + parser.add_option('--restart-daemon', action="store_true", default = False, + help="Flag that specifies to restart daemon. 
Implied by --pkg.") parser.add_option('--collect', action="store_true", default = False, - dest="collect", help="Alias to --collect-only.") parser.add_option('--warmup', action="store_true", default = False, - dest="warmup", help="Warm up the system for stateful: run 30 seconds 9k imix test without check of results.") parser.add_option('--test-client-package', '--test_client_package', action="store_true", default = False, - dest="test_client_package", help="Includes tests of client package.") parser.add_option('--long', action="store_true", default = False, - dest="long", help="Flag of long tests (stability).") parser.add_option('--ga', action="store_true", default = False, - dest="ga", help="Flag to send benchmarks to GA.") parser.add_option('--no-daemon', action="store_true", default = False, dest="no_daemon", help="Flag that specifies to use running stl server, no need daemons.") parser.add_option('--debug-image', action="store_true", default = False, - dest="debug_image", help="Flag that specifies to use t-rex-64-debug as TRex executable.") - parser.add_option('--trex-args', action='store', default = '', - dest="trex_args", + parser.add_option('--trex-args', default = '', help="Additional TRex arguments (--no-watchdog etc.).") - parser.add_option('-t', '--test', action='store', default = '', dest='test', - help='Test name to run (without file, class etc.)') + parser.add_option('-t', '--test', type = str, + help = 'Test name to run (without file, class etc.)') def configure(self, options, conf): @@ -174,10 +168,13 @@ class CTRexTestConfiguringPlugin(Plugin): self.stateless = options.stateless self.stateful = options.stateful self.pkg = options.pkg + self.restart_daemon = options.restart_daemon self.json_verbose = options.json_verbose self.telnet_verbose = options.telnet_verbose self.no_daemon = options.no_daemon CTRexScenario.test = options.test + if self.no_daemon and (self.pkg or self.restart_daemon): + raise Exception('You have specified both --no-daemon and either --pkg or --restart-daemon at same time.') if self.collect_only or self.functional: return if CTRexScenario.setup_dir and options.config_path: @@ -212,6 +209,7 @@ class CTRexTestConfiguringPlugin(Plugin): verbose = self.json_verbose, debug_image = options.debug_image, trex_args = options.trex_args) + if self.pkg or self.restart_daemon: if not CTRexScenario.trex.check_master_connectivity(): print('Could not connect to master daemon') sys.exit(-1) @@ -228,20 +226,20 @@ class CTRexTestConfiguringPlugin(Plugin): def begin (self): client = CTRexScenario.trex - if self.pkg and not CTRexScenario.is_copied: + if self.pkg and not CTRexScenario.pkg_updated: if client.master_daemon.is_trex_daemon_running() and client.get_trex_cmds() and not self.kill_running: - print("Can't update TRex, it's running") + print("Can't update TRex, it's running. 
Consider adding --kill-running flag.") sys.exit(-1) print('Updating TRex to %s' % self.pkg) if not client.master_daemon.update_trex(self.pkg): - print('Failed updating TRex') + print('Failed updating TRex.') sys.exit(-1) else: - print('Updated') - CTRexScenario.is_copied = True + print('Updated.') + CTRexScenario.pkg_updated = True if self.functional or self.collect_only: return - if not self.no_daemon: + if self.pkg or self.restart_daemon: print('Restarting TRex daemon server') res = client.restart_trex_daemon() if not res: diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py index e9d2b8a0..5d992c6e 100755 --- a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py +++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py @@ -150,10 +150,8 @@ class CTRexClient(object): user = user or self.__default_user try: d = int(d) - if d < 30 and not trex_development: # test duration should be at least 30 seconds, unless trex_development flag is specified. - raise ValueError except ValueError: - raise ValueError('d parameter must be integer, specifying how long TRex run, and must be larger than 30 secs.') + raise ValueError('d parameter must be integer, specifying how long TRex run.') trex_cmd_options.update( {'f' : f, 'd' : d} ) if not trex_cmd_options.get('l'): diff --git a/scripts/automation/trex_control_plane/stl/console/trex_console.py b/scripts/automation/trex_control_plane/stl/console/trex_console.py index b23b5f1f..627761ff 100755 --- a/scripts/automation/trex_control_plane/stl/console/trex_console.py +++ b/scripts/automation/trex_control_plane/stl/console/trex_console.py @@ -202,7 +202,7 @@ class TRexConsole(TRexGeneralCmd): func_name = f.__name__ if func_name.startswith("do_"): func_name = func_name[3:] - + if not inst.stateless_client.is_connected(): print(format_text("\n'{0}' cannot be executed on offline mode\n".format(func_name), 'bold')) return @@ -313,6 +313,7 @@ class TRexConsole(TRexGeneralCmd): def do_shell (self, line): self.do_history(line) + @verify_connected def do_push (self, line): '''Push a local PCAP file\n''' self.stateless_client.push_line(line) @@ -320,6 +321,7 @@ class TRexConsole(TRexGeneralCmd): def help_push (self): self.do_push("-h") + @verify_connected def do_portattr (self, line): '''Change/show port(s) attributes\n''' self.stateless_client.set_port_attr_line(line) @@ -328,6 +330,42 @@ class TRexConsole(TRexGeneralCmd): self.do_portattr("-h") @verify_connected + def do_l2 (self, line): + '''Configures a port in L2 mode''' + self.stateless_client.set_l2_mode_line(line) + + def help_l2 (self): + self.do_l2("-h") + + @verify_connected + def do_l3 (self, line): + '''Configures a port in L3 mode''' + self.stateless_client.set_l3_mode_line(line) + + def help_l3 (self): + self.do_l3("-h") + + + @verify_connected + def do_set_rx_sniffer (self, line): + '''Sets a port sniffer on RX channel as PCAP recorder''' + self.stateless_client.set_rx_sniffer_line(line) + + def help_sniffer (self): + self.do_set_rx_sniffer("-h") + + @verify_connected + def do_resolve (self, line): + '''Resolve ARP for ports''' + self.stateless_client.resolve_line(line) + + def help_resolve (self): + self.do_resolve("-h") + + do_arp = do_resolve + help_arp = help_resolve + + @verify_connected def do_map (self, line): '''Maps ports topology\n''' ports = self.stateless_client.get_acquired_ports() @@ -416,6 +454,7 @@ class TRexConsole(TRexGeneralCmd): '''Release 
ports\n''' self.stateless_client.release_line(line) + @verify_connected def do_reacquire (self, line): '''reacquire all the ports under your logged-in user name''' self.stateless_client.reacquire_line(line) @@ -469,7 +508,7 @@ class TRexConsole(TRexGeneralCmd): ############# update @verify_connected def do_update(self, line): - '''update speed of port(s)currently transmitting traffic\n''' + '''update speed of port(s) currently transmitting traffic\n''' self.stateless_client.update_line(line) @@ -530,6 +569,13 @@ class TRexConsole(TRexGeneralCmd): '''Clear cached local statistics\n''' self.stateless_client.clear_stats_line(line) + @verify_connected + def do_service (self, line): + '''Sets port(s) service mode state''' + self.stateless_client.service_line(line) + + def help_service (self, line): + self.do_service("-h") def help_clear(self): self.do_clear("-h") diff --git a/scripts/automation/trex_control_plane/stl/console/trex_tui.py b/scripts/automation/trex_control_plane/stl/console/trex_tui.py index d7db6d30..37ef8000 100644 --- a/scripts/automation/trex_control_plane/stl/console/trex_tui.py +++ b/scripts/automation/trex_control_plane/stl/console/trex_tui.py @@ -645,14 +645,14 @@ class TrexTUI(): # regular state if self.state == self.STATE_ACTIVE: # if no connectivity - move to lost connecitivty - if not self.stateless_client.async_client.is_alive(): + if not self.stateless_client.async_client.is_active(): self.stateless_client._invalidate_stats(self.pm.ports) self.state = self.STATE_LOST_CONT # lost connectivity elif self.state == self.STATE_LOST_CONT: - # got it back + # if the async is alive (might be a zombie, but alive) try to reconnect if self.stateless_client.async_client.is_alive(): # move to state reconnect self.state = self.STATE_RECONNECT @@ -1153,7 +1153,7 @@ class AsyncKeysEngineConsole: # errors else: err_msgs = ascii_split(str(func_rc)) - self.last_status = format_text(err_msgs[0], 'red') + self.last_status = format_text(clear_formatting(err_msgs[0]), 'red') if len(err_msgs) > 1: self.last_status += " [{0} more errors messages]".format(len(err_msgs) - 1) color = 'red' diff --git a/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir_update.py b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir_update.py new file mode 100644 index 00000000..22cceb8f --- /dev/null +++ b/scripts/automation/trex_control_plane/stl/examples/stl_imix_bidir_update.py @@ -0,0 +1,123 @@ +import stl_path +from trex_stl_lib.api import * + +import imp +import time +import json +from pprint import pprint +import argparse + +# IMIX test +# it maps the ports to sides +# then it loads a predefined profile 'IMIX' +# and attaches it to both sides and injects +# at a certain rate for some time +# finally it checks that all packets arrived +def imix_test (server): + + + # create client + c = STLClient(server = server) + passed = True + + + try: + + # connect to server + c.connect() + + # take all the ports + c.reset() + + dir_0 = [0] + dir_1 = [1] + + print "Mapped ports to sides {0} <--> {1}".format(dir_0, dir_1) + + # load IMIX profile + profile_file = os.path.join(stl_path.STL_PROFILES_PATH, 'imix.py') + profile1 = STLProfile.load_py(profile_file, direction=0) + profile2 = STLProfile.load_py(profile_file, direction=1) + stream1 = profile1.get_streams() + stream2 = profile2.get_streams() + + # add both streams to ports + c.add_streams(stream1, ports = dir_0) + c.add_streams(stream2, ports = dir_1) + + + # clear the stats before injecting + c.clear_stats() + + c.start(ports = (dir_0 + 
dir_1), mult = "100kpps", total = True) + + while True: + for rate in range(200,3100,10): + + # choose rate and start traffic for 10 seconds on 5 mpps + #mult = "30%" + my_mult = ("%dkpps"%rate) + print "Injecting {0} <--> {1} on total rate of '{2}' ".format(dir_0, dir_1, my_mult) + c.clear_stats() + + + c.update(ports = (dir_0 + dir_1), mult = my_mult) + + #time.sleep(1); + + # block until done + #c.wait_on_traffic(ports = (dir_0 + dir_1)) + + # read the stats after the test + stats = c.get_stats() + + # use this for debug info on all the stats + pprint(stats) + + # sum dir 0 + dir_0_opackets = sum([stats[i]["opackets"] for i in dir_0]) + dir_0_ipackets = sum([stats[i]["ipackets"] for i in dir_0]) + + # sum dir 1 + dir_1_opackets = sum([stats[i]["opackets"] for i in dir_1]) + dir_1_ipackets = sum([stats[i]["ipackets"] for i in dir_1]) + + + lost_0 = dir_0_opackets - dir_1_ipackets + lost_1 = dir_1_opackets - dir_0_ipackets + + print "\nPackets injected from {0}: {1:,}".format(dir_0, dir_0_opackets) + print "Packets injected from {0}: {1:,}".format(dir_1, dir_1_opackets) + + print "\npackets lost from {0} --> {1}: {2:,} pkts".format(dir_0, dir_1, lost_0) + print "packets lost from {0} --> {1}: {2:,} pkts".format(dir_1, dir_0, lost_1) + + if (lost_0 <= 0) and (lost_1 <= 0): # less or equal because we might have incoming arps etc. + passed = True + else: + passed = False + + + except STLError as e: + passed = False + print e + + finally: + c.disconnect() + + if passed: + print "\nTest has passed :-)\n" + else: + print "\nTest has failed :-(\n" + +parser = argparse.ArgumentParser(description="Example for TRex Stateless, sending IMIX traffic") +parser.add_argument('-s', '--server', + dest='server', + help='Remote trex address', + default='127.0.0.1', + type = str) +args = parser.parse_args() + +# run the tests +imix_test(args.server) + diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/field_engine.json b/scripts/automation/trex_control_plane/stl/services/scapy_server/field_engine.json new file mode 100644 index 00000000..85d10e65 --- /dev/null +++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/field_engine.json @@ -0,0 +1,269 @@ +{ + "instructions": [ + { + "id": "STLVmFlowVar", + "parameters": ["name", "init_value", "max_value","min_value","step", "size","op"] + }, + { + "id": "STLVmWrFlowVar", + "parameters": ["fv_name", "pkt_offset","offset_fixup","add_val","is_big"] + }, + { + "id": "STLVmWrMaskFlowVar", + "parameters": ["fv_name", "pkt_offset", "pkt_cast_size","mask", "shift","add_value","is_big"] + }, + { + "id": "STLVmFixIpv4", + "parameters": ["offset"] + }, + { + "id": "STLVmTrimPktSize", + "parameters": ["fv_name"] + }, + { + "id": "STLVmTupleGen", + "parameters": ["name", "ip_min", "ip_max", "port_min", "port_max", "limit_flows", "flags"] + }, + { + "id": "STLVmFlowVarRepetableRandom", + "parameters": ["name", "size", "limit", "seed", "min_value", "max_value"] + }, + { + "id": "STLVmFixChecksumHw", + "parameters": ["l3_offset","l4_offset","l4_type"] + } + ], + + "instruction_params_meta": [ + { + "id": "name", + "name": "Name", + "type": "ENUM", + "editable": true, + "required": true, + "defaultValue": "Not defined" + }, + { + "id": "init_value", + "name": "Initial value", + "type": "STRING", + "defaultValue": "0" + }, + { + "id": "max_value", + "name": "Maximum value", + "type": "STRING", + "required": true, + "defaultValue": "0" + }, + { + "id": "min_value", + "name": "Minimum value", + "type": "STRING", + "required": true, + 
"defaultValue": "0" + }, + { + "id": "step", + "name": "Step", + "type": "NUMBER", + "required": true, + "defaultValue": "1" + }, + { + "id": "op", + "name": "Operation", + "type": "ENUM", + "defaultValue": "inc", + "dict": { + "dec": "Decrement", + "inc": "Increment", + "random": "Random" + }, + "required": true + }, + { + "id": "size", + "name": "Size", + "type": "ENUM", + "defaultValue": "4", + "dict": { + "1": "1", + "2": "2", + "4": "4", + "8": "8" + } + }, + { + "id": "fv_name", + "name": "Variable name", + "type": "ENUM", + "required": true, + "editable": true + }, + { + "id": "pkt_offset", + "name": "Offset", + "type": "ENUM", + "required": true, + "editable": true, + "defaultValue": 0 + }, + { + "id": "pkt_cast_size", + "name": "Packet cast size", + "type": "ENUM", + "defaultValue": 1, + "dict":{ + "1":1, + "2":2, + "4":4 + } + }, + { + "id": "shift", + "name": "Shift", + "type": "NUMBER", + "defaultValue": 0 + }, + { + "id": "mask", + "name": "Mask", + "type": "STRING", + "defaultValue": "0xff" + }, + { + "id": "offset_fixup", + "name": "offset_fixup", + "type": "NUMBER", + "defaultValue": 0 + }, + { + "id": "add_val", + "name": "add_val", + "type": "NUMBER", + "defaultValue": 0 + }, + { + "id": "add_value", + "name": "add_value", + "type": "NUMBER", + "defaultValue": 0 + }, + { + "id": "is_big", + "name": "is_big", + "type": "ENUM", + "defaultValue": "true", + "dict": { + "true": "true", + "false": "false" + } + }, + { + "id": "offset", + "name": "Offset", + "type": "ENUM", + "required": true, + "editable": true, + "defaultValue": 0 + }, + { + "id": "l3_offset", + "name": "L3 offset", + "type": "STRING", + "required": true, + "autocomplete": true, + "defaultValue": "IP" + }, + { + "id": "l4_offset", + "name": "L4 offset", + "type": "STRING", + "required": true, + "defaultValue": "TCP" + }, + { + "id": "ip_min", + "name": "Min IP", + "type": "STRING", + "defaultValue": "0.0.0.1" + }, + { + "id": "ip_max", + "name": "Max IP", + "type": "STRING", + "defaultValue": "0.0.0.10" + }, + { + "id": "port_max", + "name": "Max Port number", + "type": "NUMBER", + "defaultValue": 65535 + }, + { + "id": "port_min", + "name": "Min Port number", + "type": "NUMBER", + "defaultValue": 1025 + }, + { + "id": "limit_flows", + "name": "FLows limit", + "type": "NUMBER", + "defaultValue": 100000 + }, + { + "id": "limit", + "name": "Limit", + "type": "NUMBER", + "defaultValue": 100 + }, + { + "id": "seed", + "name": "Seed", + "type": "String", + "defaultValue": "None" + }, + { + "id": "flags", + "name": "Flags", + "type": "NUMBER", + "defaultValue": 0 + }, + { + "id": "l4_type", + "name": "L4 type", + "type": "ENUM", + "required": true, + "editable": false, + "defaultValue": "13", + "dict": { + "11": "L4_TYPE_UDP", + "13": "L4_TYPE_TCP" + } + } + ], + "supported_protocols": ["IP","TCP","UDP"], + "templates":[ + { + "id": "simple_flow_var", + "name": "Simple variable", + "instructionIds": ["STLVmFlowVar", "STLVmWrFlowVar"] + }, + { + "id": "rep_rand_var", + "name": "Repeatable random", + "instructionIds": ["STLVmFlowVarRepetableRandom", "STLVmWrFlowVar"] + } + ], + "global_params_meta":[ + { + "id": "cache_size", + "name": "Cache size", + "type": "NUMBER", + "defaultValue": "1000" + } + ] +} + diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py index 8d99fe92..e5f1b20c 100755 --- a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py +++ 
b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py @@ -1,17 +1,20 @@ import os import sys + stl_pathname = os.path.abspath(os.path.join(os.pardir, os.pardir)) sys.path.append(stl_pathname) from trex_stl_lib.api import * +import trex_stl_lib.trex_stl_packet_builder_scapy import tempfile import hashlib import base64 import numbers import random import inspect +from inspect import getdoc import json +import re from pprint import pprint # add some layers as an example @@ -121,6 +124,45 @@ class Scapy_service_api(): """ pass + def build_pkt_ex(self, client_v_handler, pkt_model_descriptor, extra_options): + """ build_pkt_ex(self,client_v_handler,pkt_model_descriptor, extra_options) -> Dictionary (of Offsets,Show2 and Buffer) + Performs calculations on the given packet and returns results for that packet. + + Parameters + ---------- + pkt_descriptor - An array of dictionaries describing a network packet + extra_options - A dictionary of extra options required for building the packet + + Returns + ------- + - The packet's offsets: each field in every layer is mapped inside the Offsets Dictionary + - The Show2: A description of each field and its value in every layer of the packet + - The Buffer: The Hexdump of the packet, encoded in base64 + + Raises + ------ + will raise an exception when the Scapy string format is illegal, contains a syntax error, contains a non-supported + protocol, etc. + """ + pass + + def load_instruction_parameter_values(self, client_v_handler, pkt_model_descriptor, vm_instructions_model, parameter_id): + """ load_instruction_parameter_values(self,client_v_handler,pkt_model_descriptor, vm_instructions_model, parameter_id) -> Dictionary (of possible parameter values) + Returns possible values for a given parameter id, depending on the current pkt structure and vm_instructions + model. + + Parameters + ---------- + pkt_descriptor - An array of dictionaries describing a network packet + vm_instructions_model - A dictionary of extra options required for building the packet + parameter_id - A string of parameter id + + Returns + ------- + Possible parameter values map. 
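The two entry points above are easier to grasp with a short client-side sketch. This is illustrative only and not part of the commit; the descriptor shapes follow the docstrings, and the version-handler negotiation mirrors the unit tests further down:

    # hedged sketch - assumes a local Scapy_service instance
    service = Scapy_service()
    v_handler = service.get_version_handler('1', '01')

    # a packet model is a list of layer descriptors
    pkt_model = [{'id': 'Ether'}, {'id': 'IP'}, {'id': 'TCP'}]
    vm_model = {'field_engine': {'instructions': []}}

    # ask which packet offsets a write instruction could target
    res = service.load_instruction_parameter_values(v_handler, pkt_model, vm_model, 'pkt_offset')
    print(res['map'])   # e.g. {'IP.src': 'IP.src', 'TCP.sport': 'TCP.sport'}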
+ + """ + pass def get_tree(self,client_v_handler): """ get_tree(self) -> Dictionary describing an example of hierarchy in layers @@ -372,7 +414,15 @@ class Scapy_service(Scapy_service_api): self.version_minor = '01' self.server_v_hashed = self._generate_version_hash(self.version_major,self.version_minor) self.protocol_definitions = {} # protocolId -> prococol definition overrides data + self.field_engine_supported_protocols = {} + self.instruction_parameter_meta_definitions = [] + self.field_engine_parameter_meta_definitions = [] + self.field_engine_templates_definitions = [] + self.field_engine_instructions_meta = [] + self.field_engine_instruction_expressions = [] self._load_definitions_from_json() + self._load_field_engine_meta_from_json() + self._vm_instructions = dict([m for m in inspect.getmembers(trex_stl_lib.trex_stl_packet_builder_scapy, inspect.isclass) if m[1].__module__ == 'trex_stl_lib.trex_stl_packet_builder_scapy']) def _load_definitions_from_json(self): # load protocol definitions from a json file @@ -382,6 +432,27 @@ class Scapy_service(Scapy_service_api): for protocol in protocols: self.protocol_definitions[ protocol['id'] ] = protocol + def _load_field_engine_meta_from_json(self): + # load protocol definitions from a json file + self.instruction_parameter_meta_definitions = [] + self.field_engine_supported_protocols = {} + self.field_engine_parameter_meta_definitions = [] + self.field_engine_templates_definitions = [] + with open('field_engine.json', 'r') as f: + metas = json.load(f) + self.instruction_parameter_meta_definitions = metas["instruction_params_meta"] + self.field_engine_instructions_meta = metas["instructions"] + self._append_intructions_help() + self.field_engine_supported_protocols = metas["supported_protocols"] + self.field_engine_parameter_meta_definitions = metas["global_params_meta"] + self.field_engine_templates_definitions = metas["templates"] + + + def _append_intructions_help(self): + for instruction_meta in self.field_engine_instructions_meta: + clazz = eval(instruction_meta['id']) + instruction_meta['help'] = base64.b64encode(getdoc(clazz.__init__)).decode('ascii') + def _all_protocol_structs(self): old_stdout = sys.stdout sys.stdout = mystdout = StringIO() @@ -708,6 +779,134 @@ class Scapy_service(Scapy_service_api): pkt = self._packet_model_to_scapy_packet(pkt_model_descriptor) return self._pkt_data(pkt) + + def build_pkt_ex(self, client_v_handler, pkt_model_descriptor, extra_options): + res = self.build_pkt(client_v_handler, pkt_model_descriptor) + pkt = self._packet_model_to_scapy_packet(pkt_model_descriptor) + + field_engine = {} + field_engine['instructions'] = [] + field_engine['error'] = None + try: + field_engine['instructions'] = self._generate_vm_instructions(pkt, extra_options['field_engine']) + except AssertionError as e: + field_engine['error'] = e.message + except CTRexPacketBuildException as e: + field_engine['error'] = e.message + + field_engine['vm_instructions_expressions'] = self.field_engine_instruction_expressions + res['field_engine'] = field_engine + return res + + def load_instruction_parameter_values(self, client_v_handler, pkt_model_descriptor, vm_instructions_model, parameter_id): + + given_protocol_ids = [str(proto['id']) for proto in pkt_model_descriptor] + + values = {} + if parameter_id == "name": + values = self._curent_pkt_protocol_fields(given_protocol_ids, "_") + + if parameter_id == "fv_name": + values = self._existed_flow_var_names(vm_instructions_model['field_engine']['instructions']) + + if parameter_id == 
"pkt_offset": + values = self._curent_pkt_protocol_fields(given_protocol_ids, ".") + + if parameter_id == "offset": + for ip_idx in range(given_protocol_ids.count("IP")): + value = "IP:{0}".format(ip_idx) + values[value] = value + + return {"map": values} + + def _existed_flow_var_names(self, instructions): + return dict((instruction['parameters']['name'], instruction['parameters']['name']) for instruction in instructions if self._nameParamterExist(instruction)) + + def _nameParamterExist(self, instruction): + try: + instruction['parameters']['name'] + return True + except KeyError: + return False + + def _curent_pkt_protocol_fields(self, given_protocol_ids, delimiter): + given_protocol_classes = [c for c in Packet.__subclasses__() if c.__name__ in given_protocol_ids] + protocol_fields = {} + for protocol_class in given_protocol_classes: + protocol_name = protocol_class.__name__ + protocol_count = given_protocol_ids.count(protocol_name) + for field_desc in protocol_class.fields_desc: + if delimiter == '.' and protocol_count > 1: + for idx in range(protocol_count): + formatted_name = "{0}:{1}{2}{3}".format(protocol_name, idx, delimiter, field_desc.name) + protocol_fields[formatted_name] = formatted_name + else: + formatted_name = "{0}{1}{2}".format(protocol_name, delimiter, field_desc.name) + protocol_fields[formatted_name] = formatted_name + + return protocol_fields + + def _generate_vm_instructions(self, pkt, field_engine_model_descriptor): + self.field_engine_instruction_expressions = [] + instructions = [] + instructions_def = field_engine_model_descriptor['instructions'] + for instruction_def in instructions_def: + instruction_id = instruction_def['id'] + instruction_class = self._vm_instructions[instruction_id] + parameters = {k: self._sanitize_value(k, v) for (k, v) in instruction_def['parameters'].iteritems()} + instructions.append(instruction_class(**parameters)) + + fe_parameters = field_engine_model_descriptor['global_parameters'] + + cache_size = None + if "cache_size" in fe_parameters: + assert self._is_int(fe_parameters['cache_size']), 'Cache size must be a number' + cache_size = int(fe_parameters['cache_size']) + + + pkt_builder = STLPktBuilder(pkt=pkt, vm=STLScVmRaw(instructions, cache_size=cache_size)) + pkt_builder.compile() + return pkt_builder.get_vm_data() + + def _sanitize_value(self, param_id, val): + if param_id == "pkt_offset": + if self._is_int(val): + return int(val) + elif val == "Ether.src": + return 0 + elif val == "Ether.dst": + return 6 + elif val == "Ether.type": + return 12 + else: + if val == "None" or val == "none": + return None + if val == "true": + return True + elif val == "false": + return False + elif re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", str(val.lower())): + return int(str(val).replace(":", ""), 16) + + if self._is_int(val): + return int(val) + + str_val = str(val) + return int(str_val, 16) if str_val.startswith("0x") else str_val + + def _get_instruction_parameter_meta(self, param_id): + for meta in self.instruction_parameter_meta_definitions: + if meta['id'] == param_id: + return meta + raise Scapy_Exception("Unable to get meta for {0}" % param_id) + + def _is_int(self, val): + try: + int(val) + return True + except ValueError: + return False + # @deprecated. 
to be removed def get_all(self,client_v_handler): if not (self._verify_version_handler(client_v_handler)): @@ -796,7 +995,11 @@ class Scapy_service(Scapy_service_api): "name": protoDef.get('name') or pkt_class.name, "fields": self._get_fields_definition(pkt_class, protoDef.get('fields') or []) }) - res = {"protocols": protocols} + res = {"protocols": protocols, + "feInstructionParameters": self.instruction_parameter_meta_definitions, + "feInstructions": self.field_engine_instructions_meta, + "feParameters": self.field_engine_parameter_meta_definitions, + "feTemplates": self.field_engine_templates_definitions} return res def get_payload_classes(self,client_v_handler, pkt_model_descriptor): @@ -889,8 +1092,6 @@ class Scapy_service(Scapy_service_api): pcap_bin = tmpPcap.read() return bytes_to_b64(pcap_bin) - - #--------------------------------------------------------------------------- diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py index 1db2c62b..e48880e8 100644 --- a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py +++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py @@ -53,6 +53,9 @@ def get_version_handler(): def build_pkt(model_def): return pass_result(service.build_pkt(v_handler, model_def)) +def build_pkt_ex(model_def, instructions_def): + return pass_result(service.build_pkt_ex(v_handler, model_def, instructions_def)) + def build_pkt_get_scapy(model_def): return build_pkt_to_scapy(build_pkt(model_def)) diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py index 91a457dc..e1094a79 100644 --- a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py +++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py @@ -113,15 +113,23 @@ def test_get_all(): def test_get_definitions_all(): get_definitions(None) - def_classnames = [pdef['id'] for pdef in get_definitions(None)['protocols']] + defs = get_definitions(None) + def_classnames = [pdef['id'] for pdef in defs['protocols']] assert("IP" in def_classnames) assert("Dot1Q" in def_classnames) assert("TCP" in def_classnames) + # All instructions should have a help description. 
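Since _append_intructions_help() stores each constructor docstring base64-encoded, a consumer has to decode it before display; a minimal sketch (an assumption on the consumer side, matching the encoding shown above):

    import base64

    def instruction_help_text(instruction):
        # 'help' was filled by _append_intructions_help() as a base64-encoded getdoc() string
        return base64.b64decode(instruction['help']).decode('ascii')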
+ fe_instructions = defs['feInstructions'] + for instruction in fe_instructions: + print(instruction['help']) + assert("help" in instruction) + def test_get_definitions_ether(): res = get_definitions(["Ether"]) - assert(len(res) == 1) - assert(res['protocols'][0]['id'] == "Ether") + protocols = res['protocols'] + assert(len(protocols) == 1) + assert(protocols[0]['id'] == "Ether") def test_get_payload_classes(): eth_payloads = get_payload_classes([{"id":"Ether"}]) @@ -250,3 +258,34 @@ def test_ip_definitions(): assert(fields[9]['id'] == 'chksum') assert(fields[9]['auto'] == True) +def test_generate_vm_instructions(): + ip_pkt_model = [ + layer_def("Ether"), + layer_def("IP", src="16.0.0.1", dst="48.0.0.1") + ] + ip_instructions_model = {"field_engine": {"instructions": [{"id": "STLVmFlowVar", + "parameters": {"op": "inc", "min_value": "192.168.0.10", + "size": "1", "name": "ip_src", + "step": "1", + "max_value": "192.168.0.100"}}, + {"id": "STLVmWrFlowVar", + "parameters": {"pkt_offset": "IP.src", "is_big": "true", + "add_val": "0", "offset_fixup": "0", + "fv_name": "ip_src"}}, + {"id": "STLVmFlowVar", + "parameters": {"op": "dec", "min_value": "32", + "size": "1", "name": "ip_ttl", + "step": "4", "max_value": "64"}}, + {"id": "STLVmWrFlowVar", + "parameters": {"pkt_offset": "IP.ttl", "is_big": "true", + "add_val": "0", "offset_fixup": "0", + "fv_name": "ip_ttl"}}], + "global_parameters": {}}} + res = build_pkt_ex(ip_pkt_model, ip_instructions_model) + src_instruction = res['field_engine']['instructions']['instructions'][0] + assert(src_instruction['min_value'] == 3232235530) + assert(src_instruction['max_value'] == 3232235620) + + ttl_instruction = res['field_engine']['instructions']['instructions'][2] + assert(ttl_instruction['min_value'] == 32) + assert(ttl_instruction['max_value'] == 64) diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py index 2c95844b..11e87592 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_async_client.py @@ -137,6 +137,10 @@ class CTRexAsyncStatsManager(): class CTRexAsyncClient(): + THREAD_STATE_ACTIVE = 1 + THREAD_STATE_ZOMBIE = 2 + THREAD_STATE_DEAD = 3 + def __init__ (self, server, port, stateless_client): self.port = port @@ -159,7 +163,10 @@ class CTRexAsyncClient(): self.connected = False self.zipped = ZippedMsg() - + + self.t_state = self.THREAD_STATE_DEAD + + # connects the async channel def connect (self): @@ -173,8 +180,8 @@ class CTRexAsyncClient(): self.socket = self.context.socket(zmq.SUB) - # before running the thread - mark as active - self.active = True + # before running the thread - mark as active + self.t_state = self.THREAD_STATE_ACTIVE self.t = threading.Thread(target = self._run) # kill this thread on exit and don't add it to the join list @@ -198,26 +205,26 @@ class CTRexAsyncClient(): return RC_OK() - - # disconnect def disconnect (self): if not self.connected: return # mark for join - self.active = False - - # signal that the context was destroyed (exit the thread loop) + self.t_state = self.THREAD_STATE_DEAD self.context.term() - - # join self.t.join() + # done self.connected = False + # set the thread as a zombie (in case of server death) + def set_as_zombie (self): + self.last_data_recv_ts = None + self.t_state = self.THREAD_STATE_ZOMBIE + # thread function def _run (self): @@ -231,12 +238,19 @@ class 
CTRexAsyncClient(): self.monitor.reset() - while self.active: + while self.t_state != self.THREAD_STATE_DEAD: try: with self.monitor: line = self.socket.recv() + # last data recv. + self.last_data_recv_ts = time.time() + + # if the thread was marked as a zombie - it does nothing besides fetching messages + if self.t_state == self.THREAD_STATE_ZOMBIE: + continue + self.monitor.on_recv_msg(line) # try to decompress @@ -246,7 +260,6 @@ class CTRexAsyncClient(): line = line.decode() - self.last_data_recv_ts = time.time() # signal once if not got_data: @@ -259,13 +272,14 @@ # signal once if got_data: self.event_handler.on_async_dead() + self.set_as_zombie() got_data = False continue except zmq.ContextTerminated: # outside thread signaled us to exit - assert(not self.active) + assert(self.t_state != self.THREAD_STATE_ACTIVE) break msg = json.loads(line) @@ -283,16 +297,29 @@ # closing of socket must be from the same thread self.socket.close(linger = 0) - def is_thread_alive (self): - return self.t.is_alive() - - # did we get info for the last 3 seconds ? + + # return True if the subscriber got data in the last 3 seconds + # even if a zombie - will return True if it got data + def is_alive (self): + + # maybe the thread has exited with exception + if not self.t.is_alive(): + return False + + # simply no data if self.last_data_recv_ts == None: return False + # timeout of data return ( (time.time() - self.last_data_recv_ts) < 3 ) + + # more granular than is_alive - returns True only when the thread state is active and data is coming in + # zombies will return False + def is_active (self): + return self.is_alive() and self.t_state == self.THREAD_STATE_ACTIVE + + def get_stats (self): return self.stats diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py index 80a4c4dc..a42247e7 100755 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py @@ -12,9 +12,10 @@ from .trex_stl_types import * from .trex_stl_async_client import CTRexAsyncClient from .utils import parsing_opts, text_tables, common -from .utils.common import list_intersect, list_difference, is_sub_list, PassiveTimer +from .utils.common import * from .utils.text_opts import * from functools import wraps +from texttable import ansi_len from collections import namedtuple from yaml import YAMLError @@ -24,6 +25,8 @@ import re import random import json import traceback +import os.path + ############################ logger ############################# ############################ ############################# @@ -32,9 +35,10 @@ # logger API for the client class LoggerApi(object): # verbose levels - VERBOSE_QUIET = 0 - VERBOSE_REGULAR = 1 - VERBOSE_HIGH = 2 + VERBOSE_QUIET = 0 + VERBOSE_REGULAR_SYNC = 1 + VERBOSE_REGULAR = 2 + VERBOSE_HIGH = 3 def __init__(self): self.level = LoggerApi.VERBOSE_REGULAR @@ -62,7 +66,7 @@ # simple log message with verbose - def log (self, msg, level = VERBOSE_REGULAR, newline = True): + def log (self, msg, level = VERBOSE_REGULAR_SYNC, newline = True): if not self.check_verbose(level): return @@ -90,19 +94,21 @@ # supress object getter - def supress (self): + def supress (self, level = VERBOSE_QUIET): class Supress(object): - def __init__ (self, logger): + def __init__ (self, logger, level): self.logger = logger + self.level = 
level def __enter__ (self): self.saved_level = self.logger.get_verbose() - self.logger.set_verbose(LoggerApi.VERBOSE_QUIET) + if self.level < self.saved_level: + self.logger.set_verbose(self.level) def __exit__ (self, type, value, traceback): self.logger.set_verbose(self.saved_level) - return Supress(self) + return Supress(self, level) @@ -175,8 +181,8 @@ class EventsHandler(object): def on_async_dead (self): if self.client.connected: msg = 'Lost connection to server' - self.__add_event_log('local', 'info', msg, True) self.client.connected = False + self.__add_event_log('local', 'info', msg, True) def on_async_alive (self): @@ -322,25 +328,30 @@ class EventsHandler(object): # port attr changed elif (event_type == 8): + port_id = int(data['port_id']) - if data['attr'] == self.client.ports[port_id].attr: - return # false alarm - old_info = self.client.ports[port_id].get_info() - self.__async_event_port_attr_changed(port_id, data['attr']) - new_info = self.client.ports[port_id].get_info() + + diff = self.__async_event_port_attr_changed(port_id, data['attr']) + if not diff: + return + + ev = "port {0} attributes changed".format(port_id) - for key, old_val in old_info.items(): - new_val = new_info[key] - if old_val != new_val: - ev += '\n {key}: {old} -> {new}'.format( - key = key, - old = old_val.lower() if type(old_val) is str else old_val, - new = new_val.lower() if type(new_val) is str else new_val) + for key, (old_val, new_val) in diff.items(): + ev += '\n {key}: {old} -> {new}'.format( + key = key, + old = old_val.lower() if type(old_val) is str else old_val, + new = new_val.lower() if type(new_val) is str else new_val) + show_event = True - + + + # server stopped elif (event_type == 100): ev = "Server has stopped" + # to avoid any new messages on async + self.client.async_client.set_as_zombie() self.__async_event_server_stopped() show_event = True @@ -392,7 +403,7 @@ class EventsHandler(object): def __async_event_port_attr_changed (self, port_id, attr): if port_id in self.client.ports: - self.client.ports[port_id].async_event_port_attr_changed(attr) + return self.client.ports[port_id].async_event_port_attr_changed(attr) # add event to log def __add_event_log (self, origin, ev_type, msg, show = False): @@ -650,7 +661,7 @@ class STLClient(object): return rc - + def __add_streams(self, stream_list, port_id_list = None): port_id_list = self.__ports(port_id_list) @@ -767,7 +778,7 @@ class STLClient(object): return rc - def __push_remote (self, pcap_filename, port_id_list, ipg_usec, speedup, count, duration, is_dual): + def __push_remote (self, pcap_filename, port_id_list, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec): port_id_list = self.__ports(port_id_list) rc = RC() @@ -783,7 +794,8 @@ class STLClient(object): count, duration, is_dual, - slave_handler)) + slave_handler, + min_ipg_usec)) return rc @@ -799,17 +811,85 @@ class STLClient(object): return rc + def __resolve (self, port_id_list = None, retries = 0): + port_id_list = self.__ports(port_id_list) + + rc = RC() + + for port_id in port_id_list: + rc.add(self.ports[port_id].arp_resolve(retries)) + + return rc + + def __set_port_attr (self, port_id_list = None, attr_dict = None): port_id_list = self.__ports(port_id_list) rc = RC() + for port_id, port_attr_dict in zip(port_id_list, attr_dict): + rc.add(self.ports[port_id].set_attr(**port_attr_dict)) + + return rc + + + def __set_rx_sniffer (self, port_id_list, base_filename, limit): + port_id_list = self.__ports(port_id_list) + rc = RC() + + for port_id in port_id_list: + 
head, tail = os.path.splitext(base_filename) + filename = "{0}-{1}{2}".format(head, port_id, tail) + rc.add(self.ports[port_id].set_rx_sniffer(filename, limit)) + + return rc + + + def __remove_rx_sniffer (self, port_id_list): + port_id_list = self.__ports(port_id_list) + rc = RC() + for port_id in port_id_list: - rc.add(self.ports[port_id].set_attr(attr_dict)) + rc.add(self.ports[port_id].remove_rx_sniffer()) return rc + def __set_rx_queue (self, port_id_list, size): + port_id_list = self.__ports(port_id_list) + rc = RC() + for port_id in port_id_list: + rc.add(self.ports[port_id].set_rx_queue(size)) + + return rc + + def __remove_rx_queue (self, port_id_list): + port_id_list = self.__ports(port_id_list) + rc = RC() + + for port_id in port_id_list: + rc.add(self.ports[port_id].remove_rx_queue()) + + return rc + + def __get_rx_queue_pkts (self, port_id_list): + port_id_list = self.__ports(port_id_list) + rc = RC() + + for port_id in port_id_list: + rc.add(self.ports[port_id].get_rx_queue_pkts()) + + return rc + + def __set_service_mode (self, port_id_list, enabled): + port_id_list = self.__ports(port_id_list) + rc = RC() + + for port_id in port_id_list: + rc.add(self.ports[port_id].set_service_mode(enabled)) + + return rc + # connect to server def __connect(self): @@ -1008,7 +1088,7 @@ class STLClient(object): if not port_id in valid_ports: raise STLError("Port ID '{0}' is not a valid port ID - valid values: {1}".format(port_id, valid_ports)) - return port_id_list + return list_remove_dup(port_id_list) # transmit request on the RPC link @@ -1313,6 +1393,18 @@ class STLClient(object): if port_obj.is_active()] + def get_resolvable_ports (self): + return [port_id + for port_id, port_obj in self.ports.items() + if port_obj.is_acquired() and port_obj.get_dst_addr()['ipv4'] is not None] + + + def get_service_enabled_ports(self): + return [port_id + for port_id, port_obj in self.ports.items() + if port_obj.is_acquired() and port_obj.is_service_mode_on()] + + # get paused ports def get_paused_ports (self, owned = True): if owned: @@ -1338,6 +1430,7 @@ class STLClient(object): # get stats + @__api_check(True) def get_stats (self, ports = None, sync_now = True): """ Return dictionary containing statistics information gathered from the server. 
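One detail of __set_rx_sniffer above deserves spelling out: every port gets its own capture file derived from base_filename. A tiny self-contained sketch of the same splitext logic (illustrative only):

    import os.path

    def per_port_capture_name(base_filename, port_id):
        # mirrors __set_rx_sniffer: 'rx.pcap' becomes 'rx-0.pcap', 'rx-1.pcap' and so on
        head, tail = os.path.splitext(base_filename)
        return "{0}-{1}{2}".format(head, port_id, tail)

    print(per_port_capture_name('rx.pcap', 0))   # rx-0.pcap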
@@ -1585,7 +1678,7 @@ class STLClient(object): ports = ports if ports is not None else self.get_all_ports() ports = self._validate_port_list(ports) - return [self.ports[port_id].get_info() for port_id in ports] + return [self.ports[port_id].get_formatted_info() for port_id in ports] ############################ Commands ############################# @@ -1700,6 +1793,9 @@ class STLClient(object): self.__release(ports) raise STLError(rc) + for port_id in ports: + if not self.ports[port_id].is_resolved(): + self.logger.log(format_text('*** Warning - Port {0} destination is unresolved ***'.format(port_id), 'bold')) @__api_check(True) def release (self, ports = None): @@ -1725,30 +1821,144 @@ class STLClient(object): if not rc: raise STLError(rc) + + @__api_check(True) - def ping(self): + def ping_rpc_server(self): """ - Pings the server + Pings the RPC server :parameters: - None - + None :raises: + :exc:`STLError` """ - + self.logger.pre_cmd("Pinging the server on '{0}' port '{1}': ".format(self.connection_info['server'], self.connection_info['sync_port'])) rc = self._transmit("ping", api_class = None) + + self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + + @__api_check(True) + def set_l2_mode (self, port, dst_mac): + """ + Sets the port mode to L2 + + :parameters: + port - the port to set the source address + dst_mac - destination MAC + :raises: + + :exc:`STLError` + """ + validate_type('port', port, int) + if port not in self.get_all_ports(): + raise STLError("port {0} is not a valid port id".format(port)) + + if not is_valid_mac(dst_mac): + raise STLError("dest_mac is not a valid MAC address: '{0}'".format(dst_mac)) + + self.logger.pre_cmd("Setting port {0} in L2 mode: ".format(port)) + rc = self.ports[port].set_l2_mode(dst_mac) self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + + @__api_check(True) + def set_l3_mode (self, port, src_ipv4, dst_ipv4): + """ + Sets the port mode to L3 + :parameters: + port - the port to set the source address + src_ipv4 - IPv4 source address for the port + dst_ipv4 - IPv4 destination address + :raises: + + :exc:`STLError` + """ + + validate_type('port', port, int) + if port not in self.get_all_ports(): + raise STLError("port {0} is not a valid port id".format(port)) + + if not is_valid_ipv4(src_ipv4): + raise STLError("src_ipv4 is not a valid IPv4 address: '{0}'".format(src_ipv4)) + + if not is_valid_ipv4(dst_ipv4): + raise STLError("dst_ipv4 is not a valid IPv4 address: '{0}'".format(dst_ipv4)) + + self.logger.pre_cmd("Setting port {0} in L3 mode: ".format(port)) + rc = self.ports[port].set_l3_mode(src_ipv4, dst_ipv4) + self.logger.post_cmd(rc) + if not rc: raise STLError(rc) + + # try to resolve + with self.logger.supress(level = LoggerApi.VERBOSE_REGULAR_SYNC): + self.logger.pre_cmd("ARP resolving address '{0}': ".format(dst_ipv4)) + rc = self.ports[port].arp_resolve(0) + self.logger.post_cmd(rc) + if not rc: + raise STLError(rc) + @__api_check(True) + def ping_ip (self, src_port, dst_ipv4, pkt_size = 64, count = 5): + """ + Pings an IP address through a port + + :parameters: + src_port - on which port_id to send the ICMP PING request + dst_ipv4 - which IP to ping + pkt_size - packet size to use + count - how many times to ping + :raises: + + :exc:`STLError` + + """ + # validate src port + validate_type('src_port', src_port, int) + if src_port not in self.get_all_ports(): + raise STLError("src port is not a valid port id") + + if not is_valid_ipv4(dst_ipv4): + raise STLError("dst_ipv4 is not a valid IPv4 address: 
'{0}'".format(dst_ipv4)) + + if (pkt_size < 64) or (pkt_size > 9216): + raise STLError("pkt_size should be a value between 64 and 9216: '{0}'".format(pkt_size)) + + validate_type('count', count, int) + + self.logger.pre_cmd("Pinging {0} from port {1} with {2} bytes of data:".format(dst_ipv4, + src_port, + pkt_size)) + + # no async messages + with self.logger.supress(level = LoggerApi.VERBOSE_REGULAR_SYNC): + self.logger.log('') + for i in range(count): + rc = self.ports[src_port].ping(ping_ipv4 = dst_ipv4, pkt_size = pkt_size) + if not rc: + raise STLError(rc) + + self.logger.log(rc.data()) + + if i != (count - 1): + time.sleep(1) + + + + @__api_check(True) def server_shutdown (self, force = False): """ Sends the server a request for total shutdown @@ -1795,6 +2005,8 @@ class STLClient(object): if not rc: raise STLError(rc) + return rc.data() + @__api_check(True) def get_util_stats(self): """ @@ -1814,7 +2026,6 @@ class STLClient(object): @__api_check(True) def get_xstats(self, port_id): - print(port_id) """ Get extended stats of port: all the counters as dict. @@ -1833,7 +2044,7 @@ class STLClient(object): @__api_check(True) - def reset(self, ports = None): + def reset(self, ports = None, restart = False): """ Force acquire ports, stop the traffic, remove all streams and clear stats @@ -1841,7 +2052,9 @@ class STLClient(object): ports : list Ports on which to execute the command - + restart: bool + Restart the NICs (link down / up) + :raises: + :exc:`STLError` @@ -1851,12 +2064,34 @@ class STLClient(object): ports = ports if ports is not None else self.get_all_ports() ports = self._validate_port_list(ports) - # force take the port and ignore any streams on it - self.acquire(ports, force = True, sync_streams = False) - self.stop(ports, rx_delay_ms = 0) - self.remove_all_streams(ports) - self.clear_stats(ports) + + if restart: + self.logger.pre_cmd("Hard resetting ports {0}:".format(ports)) + else: + self.logger.pre_cmd("Resetting ports {0}:".format(ports)) + + + try: + with self.logger.supress(): + # force take the port and ignore any streams on it + self.acquire(ports, force = True, sync_streams = False) + self.stop(ports, rx_delay_ms = 0) + self.remove_all_streams(ports) + self.clear_stats(ports) + self.set_port_attr(ports, + promiscuous = False, + link_up = True if restart else None) + self.set_service_mode(ports, False) + self.remove_rx_sniffer(ports) + self.remove_rx_queue(ports) + + except STLError as e: + self.logger.post_cmd(False) + raise e + + self.logger.post_cmd(RC_OK()) + @__api_check(True) def remove_all_streams (self, ports = None): @@ -1995,7 +2230,28 @@ class STLClient(object): raise STLError(rc) - + # common checks for start API + def __pre_start_check (self, ports, force): + + # verify link status + ports_link_down = [port_id for port_id in ports if not self.ports[port_id].is_up()] + if ports_link_down and not force: + raise STLError("Port(s) %s - link DOWN - check the connection or specify 'force'" % ports_link_down) + + # verify ports are stopped or force stop them + active_ports = [port_id for port_id in ports if self.ports[port_id].is_active()] + if active_ports and not force: + raise STLError("Port(s) {0} are active - please stop them or specify 'force'".format(active_ports)) + + # warn if ports are not resolved + unresolved_ports = [port_id for port_id in ports if not self.ports[port_id].is_resolved()] + if unresolved_ports and not force: + raise STLError("Port(s) {0} have unresolved destination addresses - please resolve them or specify 
'force'".format(unresolved_ports)) + + if self.get_service_enabled_ports() and not force: + raise STLError("Port(s) {0} are under service mode - please disable service mode or specify 'force'".format(self.get_service_enabled_ports())) + + @__api_check(True) def start (self, ports = None, @@ -2050,11 +2306,10 @@ class STLClient(object): validate_type('total', total, bool) validate_type('core_mask', core_mask, (int, list)) - # verify link status - ports_link_down = [port_id for port_id in ports if self.ports[port_id].attr.get('link',{}).get('up') == False] - if not force and ports_link_down: - raise STLError("Port(s) %s - link DOWN - check the connection or specify 'force'" % ports_link_down) - + + # some sanity checks before attempting start + self.__pre_start_check(ports, force) + ######################### # decode core mask argument decoded_mask = self.__decode_core_mask(ports, core_mask) @@ -2068,17 +2323,12 @@ class STLClient(object): raise STLArgumentError('mult', mult) - # verify ports are stopped or force stop them + # stop active ports if needed active_ports = list(set(self.get_active_ports()).intersection(ports)) - if active_ports: - if not force: - raise STLError("Port(s) {0} are active - please stop them or specify 'force'".format(active_ports)) - else: - rc = self.stop(active_ports) - if not rc: - raise STLError(rc) - + if active_ports and force: + self.stop(active_ports) + # start traffic self.logger.pre_cmd("Starting traffic on port(s) {0}:".format(ports)) rc = self.__start(mult_obj, duration, ports, force, decoded_mask) @@ -2115,7 +2365,7 @@ class STLClient(object): return ports = self._validate_port_list(ports) - + self.logger.pre_cmd("Stopping traffic on port(s) {0}:".format(ports)) rc = self.__stop(ports) self.logger.post_cmd(rc) @@ -2245,7 +2495,8 @@ class STLClient(object): speedup = 1.0, count = 1, duration = -1, - is_dual = False): + is_dual = False, + min_ipg_usec = None): """ Push a remote server-reachable PCAP file the path must be fullpath accessible to the server @@ -2258,7 +2509,8 @@ class STLClient(object): Ports on which to execute the command ipg_usec : float - Inter-packet gap in microseconds + Inter-packet gap in microseconds. + Exclusive with min_ipg_usec speedup : float A factor to adjust IPG. effectively IPG = IPG / speedup @@ -2275,6 +2527,10 @@ class STLClient(object): also requires that all the ports will be in master mode with their adjacent ports as slaves + min_ipg_usec : float + Minimum inter-packet gap in microseconds to guard from too small ipg. 
+ Exclusive with ipg_usec + :raises: + :exc:`STLError` @@ -2288,6 +2544,7 @@ class STLClient(object): validate_type('count', count, int) validate_type('duration', duration, (float, int)) validate_type('is_dual', is_dual, bool) + validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None))) # for dual mode check that all are masters if is_dual: @@ -2306,7 +2563,7 @@ class STLClient(object): self.logger.pre_cmd("Pushing remote PCAP on port(s) {0}:".format(ports)) - rc = self.__push_remote(pcap_filename, ports, ipg_usec, speedup, count, duration, is_dual) + rc = self.__push_remote(pcap_filename, ports, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec) self.logger.post_cmd(rc) if not rc: @@ -2324,7 +2581,8 @@ class STLClient(object): force = False, vm = None, packet_hook = None, - is_dual = False): + is_dual = False, + min_ipg_usec = None): """ Push a local PCAP to the server This is equivalent to loading a PCAP file to a profile @@ -2340,7 +2598,8 @@ class STLClient(object): Ports on which to execute the command ipg_usec : float - Inter-packet gap in microseconds + Inter-packet gap in microseconds. + Exclusive with min_ipg_usec speedup : float A factor to adjust IPG. effectively IPG = IPG / speedup @@ -2366,6 +2625,10 @@ class STLClient(object): also requires that all the ports will be in master mode with their adjacent ports as slaves + min_ipg_usec : float + Minimum inter-packet gap in microseconds to guard from too small ipg. + Exclusive with ipg_usec + :raises: + :exc:`STLError` @@ -2380,6 +2643,9 @@ class STLClient(object): validate_type('duration', duration, (float, int)) validate_type('vm', vm, (list, type(None))) validate_type('is_dual', is_dual, bool) + validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None))) + if all([ipg_usec, min_ipg_usec]): + raise STLError('Please specify either ipg or minimal ipg, not both.') # no support for > 1MB PCAP - use push remote @@ -2392,7 +2658,7 @@ class STLClient(object): slave = port ^ 0x1 if slave in ports: - raise STLError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave)) + raise STLError("dual mode: please specify only one of adjacent ports ({0}, {1}) in a batch".format(master, slave)) if not slave in self.get_acquired_ports(): raise STLError("dual mode: adjacent port {0} must be owned during dual mode".format(slave)) @@ -2408,7 +2674,8 @@ class STLClient(object): speedup, count, vm = vm, - packet_hook = packet_hook) + packet_hook = packet_hook, + min_ipg_usec = min_ipg_usec) self.logger.post_cmd(RC_OK) except STLError as e: self.logger.post_cmd(RC_ERR(e)) @@ -2433,7 +2700,8 @@ class STLClient(object): count, vm = vm, packet_hook = packet_hook, - split_mode = split_mode) + split_mode = split_mode, + min_ipg_usec = min_ipg_usec) self.logger.post_cmd(RC_OK()) @@ -2441,7 +2709,7 @@ class STLClient(object): self.logger.post_cmd(RC_ERR(e)) raise - all_ports = ports + [p ^ 0x1 for p in ports] + all_ports = ports + [p ^ 0x1 for p in ports if profile_b] self.remove_all_streams(ports = all_ports) @@ -2450,7 +2718,8 @@ class STLClient(object): slave = port ^ 0x1 self.add_streams(profile_a.get_streams(), master) - self.add_streams(profile_b.get_streams(), slave) + if profile_b: + self.add_streams(profile_b.get_streams(), slave) return self.start(ports = all_ports, duration = duration) @@ -2612,7 +2881,7 @@ class STLClient(object): while set(self.get_active_ports()).intersection(ports): # make sure ASYNC thread is still alive - otherwise we will be stuck forever - if not 
self.async_client.is_active(): raise STLError("subscriber thread is dead") time.sleep(0.01) @@ -2626,16 +2895,22 @@ class STLClient(object): @__api_check(True) - def set_port_attr (self, ports = None, promiscuous = None, link_up = None, led_on = None, flow_ctrl = None): + def set_port_attr (self, + ports = None, + promiscuous = None, + link_up = None, + led_on = None, + flow_ctrl = None, + resolve = True): """ Set port attributes :parameters: - promiscuous - True or False - link_up - True or False - led_on - True or False - flow_ctrl - 0: disable all, 1: enable tx side, 2: enable rx side, 3: full enable - + promiscuous - True or False + link_up - True or False + led_on - True or False + flow_ctrl - 0: disable all, 1: enable tx side, 2: enable rx side, 3: full enable + resolve - if True, try to resolve a destination address configured as IPv4 :raises: + :exe:'STLError' @@ -2649,29 +2924,221 @@ validate_type('link_up', link_up, (bool, type(None))) validate_type('led_on', led_on, (bool, type(None))) validate_type('flow_ctrl', flow_ctrl, (int, type(None))) + + # common attributes for all ports + cmn_attr_dict = {} - # build attributes - attr_dict = {} - if promiscuous is not None: - attr_dict['promiscuous'] = {'enabled': promiscuous} - if link_up is not None: - attr_dict['link_status'] = {'up': link_up} - if led_on is not None: - attr_dict['led_status'] = {'on': led_on} - if flow_ctrl is not None: - attr_dict['flow_ctrl_mode'] = {'mode': flow_ctrl} + cmn_attr_dict['promiscuous'] = promiscuous + cmn_attr_dict['link_status'] = link_up + cmn_attr_dict['led_status'] = led_on + cmn_attr_dict['flow_ctrl_mode'] = flow_ctrl - # no attributes to set - if not attr_dict: - return - + # each port starts with a set of the common attributes + attr_dict = [dict(cmn_attr_dict) for _ in ports] + self.logger.pre_cmd("Applying attributes on port(s) {0}:".format(ports)) rc = self.__set_port_attr(ports, attr_dict) self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + + + 
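With this rework every port receives its own copy of the common attribute dict, which leaves room for per-port values later while keeping the call site simple. A hedged usage sketch, assuming a connected STLClient 'c' with acquired ports:

    # one common value per attribute, copied into a per-port dict internally
    c.set_port_attr(ports = [0, 1],
                    promiscuous = True,
                    led_on = True)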
+ @__api_check(True) + def set_service_mode (self, ports = None, enabled = True): + """ + Set service mode for port(s) + In service mode ports will respond to ARP, ping, etc. + + :parameters: + ports - for which ports to configure service mode on/off + enabled - True for activating service mode, False for disabling + :raises: + + :exe:'STLError' + + """ + # by default take all acquired ports + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + if enabled: + self.logger.pre_cmd('Enabling service mode on port(s) {0}:'.format(ports)) + else: + self.logger.pre_cmd('Disabling service mode on port(s) {0}:'.format(ports)) + + rc = self.__set_service_mode(ports, enabled) + self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + + @__api_check(True) + def resolve (self, ports = None, retries = 0, verbose = True): + """ + Resolves ports (ARP resolution) + + :parameters: + ports - which ports to resolve + retries - how many times to retry on each port (intervals of 100 milliseconds) + verbose - log the response for each request + :raises: + + :exe:'STLError' + + """ + # by default - resolve all the ports that are configured with IPv4 dest + ports = ports if ports is not None else self.get_resolvable_ports() + ports = self._validate_port_list(ports) + + self.logger.pre_cmd('Resolving destination on port(s) {0}:'.format(ports)) + with self.logger.supress(): + rc = self.__resolve(ports, retries) + + self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + # print the ARP transaction + if verbose: + self.logger.log(rc) + self.logger.log('') + + 
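Combined with set_l3_mode() and ping_ip() above, a plausible end-to-end flow looks like this (a sketch only; the addresses and a connected STLClient 'c' are assumptions):

    c.set_service_mode(ports = [0], enabled = True)               # port now answers ARP/ICMP
    c.set_l3_mode(0, src_ipv4 = '1.1.1.1', dst_ipv4 = '1.1.1.2')  # also triggers an ARP resolve
    c.ping_ip(src_port = 0, dst_ipv4 = '1.1.1.2', pkt_size = 64, count = 3)
    c.resolve(ports = [0], retries = 5)                           # re-resolve on demand
    c.set_service_mode(ports = [0], enabled = False)              # required before start(), see __pre_start_check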
+ @__api_check(True) + def set_rx_sniffer (self, ports = None, base_filename = 'rx.pcap', limit = 1000): + """ + Sets an RX sniffer for port(s), written to a PCAP file + + :parameters: + ports - for which ports to apply a unique sniffer (each port gets a unique file) + base_filename - filename will be appended with '-<port_number>', e.g. rx.pcap --> rx-0.pcap, rx-1.pcap etc. + limit - limit how many packets will be written + :raises: + + :exe:'STLError' + + """ + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + # check arguments + validate_type('base_filename', base_filename, basestring) + validate_type('limit', limit, (int)) + if limit <= 0: + raise STLError("'limit' must be a positive value") + + self.logger.pre_cmd("Setting RX sniffers on port(s) {0}:".format(ports)) + rc = self.__set_rx_sniffer(ports, base_filename, limit) + self.logger.post_cmd(rc) + if not rc: raise STLError(rc) + + + @__api_check(True) + def remove_rx_sniffer (self, ports = None): + """ + Removes RX sniffer from port(s) + + :raises: + + :exe:'STLError' + + """ + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + self.logger.pre_cmd("Removing RX sniffers on port(s) {0}:".format(ports)) + rc = self.__remove_rx_sniffer(ports) + self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + + @__api_check(True) + def set_rx_queue (self, ports = None, size = 1000): + """ + Sets RX queue for port(s) + The queue is cyclic and will hold the last 'size' packets + + :parameters: + ports - for which ports to apply a queue + size - size of the queue + :raises: + + :exe:'STLError' + + """ + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + # check arguments + validate_type('size', size, (int)) + if size <= 0: + raise STLError("'size' must be a positive value") + + self.logger.pre_cmd("Setting RX queue on port(s) {0}:".format(ports)) + rc = self.__set_rx_queue(ports, size) + self.logger.post_cmd(rc) + + + if not rc: + raise STLError(rc) + + + + @__api_check(True) + def remove_rx_queue (self, ports = None): + """ + Removes RX queue from port(s) + + :parameters: + ports - for which ports to remove the RX queue + :raises: + + :exe:'STLError' + + """ + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + self.logger.pre_cmd("Removing RX queue on port(s) {0}:".format(ports)) + rc = self.__remove_rx_queue(ports) + self.logger.post_cmd(rc) + + if not rc: + raise STLError(rc) + + + + @__api_check(True) + def get_rx_queue_pkts (self, ports = None): + """ + Returns any packets queued on the RX side by the server; + the return value is a dictionary per port + + :parameters: + ports - for which ports to fetch + """ + + ports = ports if ports is not None else self.get_acquired_ports() + ports = self._validate_port_list(ports) + + rc = self.__get_rx_queue_pkts(ports) + if not rc: + raise STLError(rc) + + # decode the data back to the user + result = {} + for port, r in zip(ports, rc.data()): + result[port] = r + + return result + 
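A short sketch tying the RX-side helpers together (illustrative; assumes a connected STLClient 'c' and an acquired port 0):

    c.set_rx_sniffer(ports = [0], base_filename = 'rx.pcap', limit = 500)   # writes rx-0.pcap
    c.set_rx_queue(ports = [0], size = 1000)    # cyclic queue holding the last 1000 packets
    pkts = c.get_rx_queue_pkts(ports = [0])     # dict: port id -> list of queued packets
    c.remove_rx_queue(ports = [0])
    c.remove_rx_sniffer(ports = [0])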
parsing_opts.PING_IPV4, + parsing_opts.PKT_SIZE, + parsing_opts.PING_COUNT) + opts = parser.parse_args(line.split()) + if not opts: + return opts + + # IP ping + # source ports maps to ports as a single port + self.ping_ip(opts.ports[0], opts.ping_ipv4, opts.pkt_size, opts.count) + + @__console def shutdown_line (self, line): '''shutdown the server''' @@ -2846,13 +3333,14 @@ class STLClient(object): parser = parsing_opts.gen_parser(self, "reset", self.reset_line.__doc__, - parsing_opts.PORT_LIST_WITH_ALL) + parsing_opts.PORT_LIST_WITH_ALL, + parsing_opts.PORT_RESTART) opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True) if not opts: return opts - self.reset(ports = opts.ports) + self.reset(ports = opts.ports, restart = opts.restart) return RC_OK() @@ -2888,14 +3376,19 @@ class STLClient(object): # just for sanity - will be checked on the API as well self.__decode_core_mask(opts.ports, core_mask) + # for better use experience - check this first + try: + self.__pre_start_check(opts.ports, opts.force) + except STLError as e: + msg = e.brief() + self.logger.log(format_text(msg, 'bold')) + return RC_ERR(msg) + + + # stop ports if needed active_ports = list_intersect(self.get_active_ports(), opts.ports) - if active_ports: - if not opts.force: - msg = "Port(s) {0} are active - please stop them or add '--force'\n".format(active_ports) - self.logger.log(format_text(msg, 'bold')) - return RC_ERR(msg) - else: - self.stop(active_ports) + if active_ports and opts.force: + self.stop(active_ports) # process tunables @@ -2920,9 +3413,8 @@ class STLClient(object): self.add_streams(profile.get_streams(), ports = port) except STLError as e: - error = 'Unknown error.' - for line in e.brief().split('\n'): - if line: + for line in e.brief().splitlines(): + if ansi_len(line.strip()): error = line msg = format_text("\nError loading profile '{0}'".format(opts.file[0]), 'bold') self.logger.log(msg + '\n') @@ -3171,21 +3663,29 @@ class STLClient(object): @__console def push_line (self, line): '''Push a pcap file ''' + args = [self, + "push", + self.push_line.__doc__, + parsing_opts.REMOTE_FILE, + parsing_opts.PORT_LIST_WITH_ALL, + parsing_opts.COUNT, + parsing_opts.DURATION, + parsing_opts.IPG, + parsing_opts.MIN_IPG, + parsing_opts.SPEEDUP, + parsing_opts.FORCE, + parsing_opts.DUAL] + + parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH_NO_CHECK])) + opts = parser.parse_args(line.split(), verify_acquired = True) - parser = parsing_opts.gen_parser(self, - "push", - self.push_line.__doc__, - parsing_opts.FILE_PATH, - parsing_opts.REMOTE_FILE, - parsing_opts.PORT_LIST_WITH_ALL, - parsing_opts.COUNT, - parsing_opts.DURATION, - parsing_opts.IPG, - parsing_opts.SPEEDUP, - parsing_opts.FORCE, - parsing_opts.DUAL) + if not opts: + return opts + + if not opts.remote: + parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH])) + opts = parser.parse_args(line.split(), verify_acquired = True) - opts = parser.parse_args(line.split(), verify_acquired = True) if not opts: return opts @@ -3202,22 +3702,24 @@ class STLClient(object): if opts.remote: self.push_remote(opts.file[0], - ports = opts.ports, - ipg_usec = opts.ipg_usec, - speedup = opts.speedup, - count = opts.count, - duration = opts.duration, - is_dual = opts.dual) + ports = opts.ports, + ipg_usec = opts.ipg_usec, + min_ipg_usec = opts.min_ipg_usec, + speedup = opts.speedup, + count = opts.count, + duration = opts.duration, + is_dual = opts.dual) else: self.push_pcap(opts.file[0], - ports = 
opts.ports,
-                           ipg_usec = opts.ipg_usec,
-                           speedup = opts.speedup,
-                           count = opts.count,
-                           duration = opts.duration,
-                           force = opts.force,
-                           is_dual = opts.dual)
+                           ports        = opts.ports,
+                           ipg_usec     = opts.ipg_usec,
+                           min_ipg_usec = opts.min_ipg_usec,
+                           speedup      = opts.speedup,
+                           count        = opts.count,
+                           duration     = opts.duration,
+                           force        = opts.force,
+                           is_dual      = opts.dual)
 
@@ -3230,7 +3732,7 @@ class STLClient(object):
         '''Sets port attributes '''
 
         parser = parsing_opts.gen_parser(self,
-                                         "port_attr",
+                                         "portattr",
                                          self.set_port_attr_line.__doc__,
                                          parsing_opts.PORT_LIST_WITH_ALL,
                                          parsing_opts.PROMISCUOUS,
@@ -3244,18 +3746,18 @@
         if not opts:
             return opts
 
-        opts.prom = parsing_opts.ON_OFF_DICT.get(opts.prom)
-        opts.link = parsing_opts.UP_DOWN_DICT.get(opts.link)
-        opts.led  = parsing_opts.ON_OFF_DICT.get(opts.led)
-        opts.flow_ctrl = parsing_opts.FLOW_CTRL_DICT.get(opts.flow_ctrl)
+        opts.prom      = parsing_opts.ON_OFF_DICT.get(opts.prom)
+        opts.link      = parsing_opts.UP_DOWN_DICT.get(opts.link)
+        opts.led       = parsing_opts.ON_OFF_DICT.get(opts.led)
+        opts.flow_ctrl = parsing_opts.FLOW_CTRL_DICT.get(opts.flow_ctrl)
 
         # if no attributes - fall back to printing the status
-        if not filter(lambda x:x is not None, [opts.prom, opts.link, opts.led, opts.flow_ctrl, opts.supp]):
+        if not list(filter(lambda x:x is not None, [opts.prom, opts.link, opts.led, opts.flow_ctrl, opts.supp])):
             self.show_stats_line("--ps --port {0}".format(' '.join(str(port) for port in opts.ports)))
             return
 
         if opts.supp:
-            info = self.ports[0].get_info() # assume for now all ports are same
+            info = self.ports[0].get_formatted_info() # assume for now all ports are same
             print('')
             print('Supported attributes for current NICs:')
             print('  Promiscuous:   yes')
@@ -3264,15 +3766,115 @@
             print('  Flow control:  %s' % info['fc_supported'])
             print('')
         else:
-            return self.set_port_attr(opts.ports, opts.prom, opts.link, opts.led, opts.flow_ctrl)
+            self.set_port_attr(opts.ports,
+                               opts.prom,
+                               opts.link,
+                               opts.led,
+                               opts.flow_ctrl)
+
+
+
+
+    @__console
+    def set_rx_sniffer_line (self, line):
+        '''Sets a port sniffer on the RX channel, in the form of a PCAP file'''
+
+        parser = parsing_opts.gen_parser(self,
+                                         "set_rx_sniffer",
+                                         self.set_rx_sniffer_line.__doc__,
+                                         parsing_opts.PORT_LIST_WITH_ALL,
+                                         parsing_opts.OUTPUT_FILENAME,
+                                         parsing_opts.LIMIT)
+
+        opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
+        if not opts:
+            return opts
+
+        self.set_rx_sniffer(opts.ports, opts.output_filename, opts.limit)
+
+        return RC_OK()
+
+
+    @__console
+    def resolve_line (self, line):
+        '''Performs a port ARP resolution'''
+
+        parser = parsing_opts.gen_parser(self,
+                                         "resolve",
+                                         self.resolve_line.__doc__,
+                                         parsing_opts.PORT_LIST_WITH_ALL,
+                                         parsing_opts.RETRIES)
+
+        opts = parser.parse_args(line.split(), default_ports = self.get_resolvable_ports(), verify_acquired = True)
+        if not opts:
+            return opts
+
+        ports = list_intersect(opts.ports, self.get_resolvable_ports())
+        if not ports:
+            if not opts.ports:
+                msg = 'resolve - no ports with IPv4 destination'
+            else:
+                msg = 'resolve - none of ports {0} are configured with IPv4 destination'.format(opts.ports)
+
+            self.logger.log(msg)
+            return RC_ERR(msg)
+
+        self.resolve(ports = ports, retries = opts.retries)
+
+        return RC_OK()
+
+
+    @__console
+    def set_l2_mode_line (self, line):
+        '''Configures a port in L2 mode'''
+
+        parser = parsing_opts.gen_parser(self,
+                                         "port",
+                                         self.set_l2_mode_line.__doc__,
+                                         parsing_opts.SINGLE_PORT,
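+                                         # here '--dst' (DST_MAC) validates a MAC address; the L3
+                                         # variant below reuses '--src'/'--dst' with IPv4 validation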
parsing_opts.DST_MAC, + ) + opts = parser.parse_args(line.split()) + if not opts: + return opts + + + # source ports maps to ports as a single port + self.set_l2_mode(opts.ports[0], dst_mac = opts.dst_mac) + + return RC_OK() + + + @__console + def set_l3_mode_line (self, line): + '''Configures a port in L3 mode''' + + parser = parsing_opts.gen_parser(self, + "port", + self.set_l3_mode_line.__doc__, + parsing_opts.SINGLE_PORT, + parsing_opts.SRC_IPV4, + parsing_opts.DST_IPV4, + ) + + opts = parser.parse_args(line.split()) + if not opts: + return opts + + + # source ports maps to ports as a single port + self.set_l3_mode(opts.ports[0], src_ipv4 = opts.src_ipv4, dst_ipv4 = opts.dst_ipv4) + + return RC_OK() + + @__console def show_profile_line (self, line): '''Shows profile information''' parser = parsing_opts.gen_parser(self, - "port", + "profile", self.show_profile_line.__doc__, parsing_opts.FILE_PATH) @@ -3364,7 +3966,38 @@ class STLClient(object): return "{0}(read-only)>".format(prefix) elif self.is_all_ports_acquired(): - return "{0}>".format(prefix) + + p = prefix + + if self.get_service_enabled_ports(): + if self.get_service_enabled_ports() == self.get_acquired_ports(): + p += '(service)' + else: + p += '(service: {0})'.format(', '.join(map(str, self.get_service_enabled_ports()))) + + return "{0}>".format(p) else: - return "{0} {1}>".format(prefix, self.get_acquired_ports()) + return "{0} (ports: {1})>".format(prefix, ', '.join(map(str, self.get_acquired_ports()))) + + + + @__console + def service_line (self, line): + '''Configures port for service mode. + In service mode ports will reply to ARP, PING + and etc. + ''' + + parser = parsing_opts.gen_parser(self, + "service", + self.service_line.__doc__, + parsing_opts.PORT_LIST_WITH_ALL, + parsing_opts.SERVICE_OFF) + + opts = parser.parse_args(line.split()) + if not opts: + return opts + + self.set_service_mode(ports = opts.ports, enabled = opts.enabled) + diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py index 1461fcec..72c9317a 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py @@ -32,13 +32,29 @@ class BatchMessage(object): id, msg = self.rpc_client.create_jsonrpc_v2(method_name, params, api_class, encode = False) self.batch_list.append(msg) - def invoke(self, block = False): + def invoke(self, block = False, chunk_size = 500000): if not self.rpc_client.connected: return RC_ERR("Not connected to server") - msg = json.dumps(self.batch_list) - - return self.rpc_client.send_msg(msg) + if chunk_size: + response_batch = RC() + size = 0 + new_batch = [] + for msg in self.batch_list: + size += len(json.dumps(msg)) + new_batch.append(msg) + if size > chunk_size: + batch_json = json.dumps(new_batch) + response_batch.add(self.rpc_client.send_msg(batch_json)) + size = 0 + new_batch = [] + if new_batch: + batch_json = json.dumps(new_batch) + response_batch.add(self.rpc_client.send_msg(batch_json)) + return response_batch + else: + batch_json = json.dumps(self.batch_list) + return self.rpc_client.send_msg(batch_json) # JSON RPC v2.0 client @@ -130,13 +146,13 @@ class JsonRpcClient(object): if self.zipper.check_threshold(buffer): response = self.send_raw_msg(self.zipper.compress(buffer)) - if response: - response = self.zipper.decompress(response) else: response = self.send_raw_msg(buffer) if 
not response: return response + elif self.zipper.is_compressed(response): + response = self.zipper.decompress(response) # return to string response = response.decode() @@ -172,6 +188,10 @@ class JsonRpcClient(object): self.disconnect() return RC_ERR("*** [RPC] - Failed to send message to server") + except KeyboardInterrupt as e: + # must restore the socket to a sane state + self.reconnect() + raise e tries = 0 while True: @@ -184,6 +204,10 @@ class JsonRpcClient(object): self.disconnect() return RC_ERR("*** [RPC] - Failed to get server response from {0}".format(self.transport)) + except KeyboardInterrupt as e: + # must restore the socket to a sane state + self.reconnect() + raise e return response @@ -255,11 +279,6 @@ class JsonRpcClient(object): self.connected = True - rc = self.invoke_rpc_method('ping', api_class = None) - if not rc: - self.connected = False - return rc - return RC_OK() @@ -267,12 +286,6 @@ class JsonRpcClient(object): # connect using current values return self.connect() - if not self.connected: - return RC_ERR("Not connected to server") - - # reconnect - return self.connect(self.server, self.port) - def is_connected(self): return self.connected diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py index dc06f9fb..6431b74a 100755 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_packet_builder_scapy.py @@ -486,7 +486,7 @@ class CTRexScapyPktUtl(object): if f.name == field_name: return (l_offset+f.offset,f.get_size_bytes ()); - raise CTRexPacketBuildException(-11, "No layer %s-%d." % (name, save_cnt, field_name)); + raise CTRexPacketBuildException(-11, "No layer %s-%d." % (field_name, layer_cnt)) def get_layer_offet_by_str(self, layer_des): """ @@ -827,6 +827,7 @@ class STLVmFixChecksumHw(CTRexVmDescBase): self.l3_offset = l3_offset; # could be a name of offset self.l4_offset = l4_offset; # could be a name of offset self.l4_type = l4_type + self.l2_len = 0 def get_obj (self): @@ -838,8 +839,8 @@ class STLVmFixChecksumHw(CTRexVmDescBase): if type(self.l4_offset)==str: self.l4_offset = parent._pkt_layer_offset(self.l4_offset); - assert self.l4_offset >= self.l2_len+8, 'l4_offset should be higher than l3_offset offset' - self.l3_len = self.l4_offset - self.l2_len; + assert self.l4_offset >= self.l2_len+8, 'l4_offset should be higher than l3_offset offset' + self.l3_len = self.l4_offset - self.l2_len; class STLVmFixIpv4(CTRexVmDescBase): @@ -1084,58 +1085,58 @@ class STLVmWrMaskFlowVar(CTRexVmDescBase): class STLVmTrimPktSize(CTRexVmDescBase): - """ - Trim the packet size by the stream variable size. This instruction only changes the total packet size, and does not repair the fields to match the new size. + def __init__(self,fv_name): + """ + Trim the packet size by the stream variable size. This instruction only changes the total packet size, and does not repair the fields to match the new size. - :parameters: - fv_name : string - Stream variable name. The value of this variable is the new total packet size. + :parameters: + fv_name : string + Stream variable name. The value of this variable is the new total packet size. 
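+                As the example below shows, it is usually paired with STLVmWrFlowVar writes to 'IP.len' / 'UDP.len' and an STLVmFixIpv4 instruction so the header fields match the trimmed size.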
- For Example:: + For Example:: - def create_stream (self): - # pkt - p_l2 = Ether(); - p_l3 = IP(src="16.0.0.1",dst="48.0.0.1") - p_l4 = UDP(dport=12,sport=1025) - pyld_size = max(0, self.max_pkt_size_l3 - len(p_l3/p_l4)); - base_pkt = p_l2/p_l3/p_l4/('\x55'*(pyld_size)) - - l3_len_fix =-(len(p_l2)); - l4_len_fix =-(len(p_l2/p_l3)); - - - # vm - vm = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", min_value=64, - max_value=len(base_pkt), - size=2, op="inc"), + def create_stream (self): + # pkt + p_l2 = Ether(); + p_l3 = IP(src="16.0.0.1",dst="48.0.0.1") + p_l4 = UDP(dport=12,sport=1025) + pyld_size = max(0, self.max_pkt_size_l3 - len(p_l3/p_l4)); + base_pkt = p_l2/p_l3/p_l4/('\x55'*(pyld_size)) - STLVmTrimPktSize("fv_rand"), # change total packet size <<< + l3_len_fix =-(len(p_l2)); + l4_len_fix =-(len(p_l2/p_l3)); - STLVmWrFlowVar(fv_name="fv_rand", - pkt_offset= "IP.len", - add_val=l3_len_fix), # fix ip len - STLVmFixIpv4(offset = "IP"), # fix checksum + # vm + vm = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", min_value=64, + max_value=len(base_pkt), + size=2, op="inc"), - STLVmWrFlowVar(fv_name="fv_rand", - pkt_offset= "UDP.len", - add_val=l4_len_fix) # fix udp len - ] - ) - - pkt = STLPktBuilder(pkt = base_pkt, - vm = vm) - - return STLStream(packet = pkt, - mode = STLTXCont()) + STLVmTrimPktSize("fv_rand"), # change total packet size <<< + STLVmWrFlowVar(fv_name="fv_rand", + pkt_offset= "IP.len", + add_val=l3_len_fix), # fix ip len - """ + STLVmFixIpv4(offset = "IP"), # fix checksum + + STLVmWrFlowVar(fv_name="fv_rand", + pkt_offset= "UDP.len", + add_val=l4_len_fix) # fix udp len + ] + ) + + pkt = STLPktBuilder(pkt = base_pkt, + vm = vm) + + return STLStream(packet = pkt, + mode = STLTXCont()) + + + """ - def __init__(self,fv_name): super(STLVmTrimPktSize, self).__init__() self.name = fv_name validate_type('fv_name', fv_name, str) diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py index cec3761f..9eefc177 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py @@ -4,12 +4,14 @@ from collections import namedtuple, OrderedDict from .trex_stl_packet_builder_scapy import STLPktBuilder from .trex_stl_streams import STLStream from .trex_stl_types import * +from .trex_stl_rx_features import ARPResolver, PingResolver from . 
import trex_stl_stats from .utils.constants import FLOW_CTRL_DICT_REVERSED import base64 import copy from datetime import datetime, timedelta +import threading StreamOnPort = namedtuple('StreamOnPort', ['compiled_stream', 'metadata']) @@ -50,7 +52,9 @@ class Port(object): def __init__ (self, port_id, user, comm_link, session_id, info): self.port_id = port_id + self.state = self.STATE_IDLE + self.handler = None self.comm_link = comm_link self.transmit = comm_link.transmit @@ -62,7 +66,7 @@ class Port(object): self.streams = {} self.profile = None self.session_id = session_id - self.attr = {} + self.status = {} self.port_stats = trex_stl_stats.CPortStats(self) @@ -72,31 +76,31 @@ class Port(object): self.owner = '' self.last_factor_type = None - + + self.__attr = {} + self.attr_lock = threading.Lock() + # decorator to verify port is up def up(func): - def func_wrapper(*args): + def func_wrapper(*args, **kwargs): port = args[0] if not port.is_up(): return port.err("{0} - port is down".format(func.__name__)) - return func(*args) + return func(*args, **kwargs) return func_wrapper # owned def owned(func): - def func_wrapper(*args): + def func_wrapper(*args, **kwargs): port = args[0] - if not port.is_up(): - return port.err("{0} - port is down".format(func.__name__)) - if not port.is_acquired(): return port.err("{0} - port is not owned".format(func.__name__)) - return func(*args) + return func(*args, **kwargs) return func_wrapper @@ -106,14 +110,11 @@ class Port(object): def func_wrapper(*args, **kwargs): port = args[0] - if not port.is_up(): - return port.err("{0} - port is down".format(func.__name__)) - if not port.is_acquired(): return port.err("{0} - port is not owned".format(func.__name__)) if not port.is_writeable(): - return port.err("{0} - port is not in a writeable state".format(func.__name__)) + return port.err("{0} - port is active, please stop the port before executing command".format(func.__name__)) return func(*args, **kwargs) @@ -122,22 +123,22 @@ class Port(object): def err(self, msg): - return RC_ERR("port {0} : {1}\n".format(self.port_id, msg)) + return RC_ERR("port {0} : *** {1}".format(self.port_id, msg)) def ok(self, data = ""): return RC_OK(data) def get_speed_bps (self): - return (self.info['speed'] * 1000 * 1000 * 1000) + return (self.get_speed_gbps() * 1000 * 1000 * 1000) - def get_formatted_speed (self): - return "{0} Gbps".format(self.info['speed']) + def get_speed_gbps (self): + return self.__attr['speed'] def is_acquired(self): return (self.handler != None) def is_up (self): - return (self.state != self.STATE_DOWN) + return self.__attr['link']['up'] def is_active(self): return (self.state == self.STATE_TX ) or (self.state == self.STATE_PAUSE) or (self.state == self.STATE_PCAP_TX) @@ -165,7 +166,6 @@ class Port(object): # take the port - @up def acquire(self, force = False, sync_streams = True): params = {"port_id": self.port_id, "user": self.user, @@ -185,7 +185,6 @@ class Port(object): # sync all the streams with the server - @up def sync_streams (self): params = {"port_id": self.port_id} @@ -201,7 +200,6 @@ class Port(object): return self.ok() # release the port - @up def release(self): params = {"port_id": self.port_id, "handler": self.handler} @@ -219,7 +217,6 @@ class Port(object): - @up def sync(self): params = {"port_id": self.port_id} @@ -250,10 +247,10 @@ class Port(object): self.next_available_id = int(rc.data()['max_stream_id']) + 1 - # attributes - self.attr = rc.data()['attr'] - if 'speed' in rc.data(): - self.info['speed'] = rc.data()['speed'] // 1000 + 
self.status = rc.data() + + # replace the attributes in a thread safe manner + self.set_ts_attr(rc.data()['attr']) return self.ok() @@ -424,8 +421,8 @@ class Port(object): # save this for TUI self.last_factor_type = mul['type'] - - return self.ok() + + return rc # stop traffic @@ -445,8 +442,9 @@ class Port(object): return self.err(rc.err()) self.state = self.STATE_STREAMS + self.last_factor_type = None - + # timestamp for last tx self.tx_stopped_ts = datetime.now() @@ -487,6 +485,122 @@ class Port(object): return self.ok() + + @owned + def set_rx_sniffer (self, pcap_filename, limit): + + if not self.is_service_mode_on(): + return self.err('port service mode must be enabled for performing RX capturing. Please enable service mode') + + params = {"handler": self.handler, + "port_id": self.port_id, + "type": "capture", + "enabled": True, + "pcap_filename": pcap_filename, + "limit": limit} + + rc = self.transmit("set_rx_feature", params) + if rc.bad(): + return self.err(rc.err()) + + return self.ok() + + + @owned + def remove_rx_sniffer (self): + params = {"handler": self.handler, + "port_id": self.port_id, + "type": "capture", + "enabled": False} + + rc = self.transmit("set_rx_feature", params) + if rc.bad(): + return self.err(rc.err()) + + return self.ok() + + @writeable + def set_l2_mode (self, dst_mac): + if not self.is_service_mode_on(): + return self.err('port service mode must be enabled for configuring L2 mode. Please enable service mode') + + params = {"handler": self.handler, + "port_id": self.port_id, + "dst_mac": dst_mac} + + rc = self.transmit("set_l2", params) + if rc.bad(): + return self.err(rc.err()) + + return self.sync() + + + @writeable + def set_l3_mode (self, src_addr, dest_addr, resolved_mac = None): + if not self.is_service_mode_on(): + return self.err('port service mode must be enabled for configuring L3 mode. 
Please enable service mode') + + params = {"handler": self.handler, + "port_id": self.port_id, + "src_addr": src_addr, + "dst_addr": dest_addr} + + if resolved_mac: + params["resolved_mac"] = resolved_mac + + rc = self.transmit("set_l3", params) + if rc.bad(): + return self.err(rc.err()) + + return self.sync() + + + @owned + def set_rx_queue (self, size): + + params = {"handler": self.handler, + "port_id": self.port_id, + "type": "queue", + "enabled": True, + "size": size} + + rc = self.transmit("set_rx_feature", params) + if rc.bad(): + return self.err(rc.err()) + + return self.ok() + + @owned + def remove_rx_queue (self): + params = {"handler": self.handler, + "port_id": self.port_id, + "type": "queue", + "enabled": False} + + rc = self.transmit("set_rx_feature", params) + if rc.bad(): + return self.err(rc.err()) + + return self.ok() + + @owned + def get_rx_queue_pkts (self): + params = {"handler": self.handler, + "port_id": self.port_id} + + rc = self.transmit("get_rx_queue_pkts", params) + if rc.bad(): + return self.err(rc.err()) + + pkts = rc.data()['pkts'] + + # decode the packets from base64 to binary + for i in range(len(pkts)): + pkts[i]['binary'] = base64.b64decode(pkts[i]['binary']) + + return RC_OK(pkts) + + @owned def pause (self): @@ -568,23 +682,60 @@ class Port(object): @owned - def set_attr (self, attr_dict): + def set_attr (self, **kwargs): + + json_attr = {} + + if kwargs.get('promiscuous') is not None: + json_attr['promiscuous'] = {'enabled': kwargs.get('promiscuous')} + + if kwargs.get('link_status') is not None: + json_attr['link_status'] = {'up': kwargs.get('link_status')} + + if kwargs.get('led_status') is not None: + json_attr['led_status'] = {'on': kwargs.get('led_status')} + + if kwargs.get('flow_ctrl_mode') is not None: + json_attr['flow_ctrl_mode'] = {'mode': kwargs.get('flow_ctrl_mode')} + + if kwargs.get('rx_filter_mode') is not None: + json_attr['rx_filter_mode'] = {'mode': kwargs.get('rx_filter_mode')} + params = {"handler": self.handler, "port_id": self.port_id, - "attr": attr_dict} + "attr": json_attr} rc = self.transmit("set_port_attr", params) if rc.bad(): return self.err(rc.err()) + # update the dictionary from the server explicitly + return self.sync() - #self.attr.update(attr_dict) - + + @owned + def set_service_mode (self, enabled): + rc = self.set_attr(rx_filter_mode = 'all' if enabled else 'hw') + if not rc: + return rc + + if not enabled: + rc = self.remove_rx_queue() + if not rc: + return rc + + rc = self.remove_rx_sniffer() + if not rc: + return rc + return self.ok() + def is_service_mode_on (self): + return self.get_rx_filter_mode() == 'all' + @writeable - def push_remote (self, pcap_filename, ipg_usec, speedup, count, duration, is_dual, slave_handler): + def push_remote (self, pcap_filename, ipg_usec, speedup, count, duration, is_dual, slave_handler, min_ipg_usec): params = {"handler": self.handler, "port_id": self.port_id, @@ -594,7 +745,8 @@ class Port(object): "count": count, "duration": duration, "is_dual": is_dual, - "slave_handler": slave_handler} + "slave_handler": slave_handler, + "min_ipg_usec": min_ipg_usec if min_ipg_usec else 0} rc = self.transmit("push_remote", params) if rc.bad(): @@ -607,7 +759,16 @@ class Port(object): def get_profile (self): return self.profile - + # invalidates the current ARP + def invalidate_arp (self): + dest = self.__attr['dest'] + + if dest['type'] != 'mac': + return self.set_attr(dest = dest['ipv4']) + else: + return self.ok() + + def print_profile (self, mult, duration): if not self.get_profile(): 
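            # no profile was pushed to this port - nothing to print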
return @@ -648,24 +809,32 @@ class Port(object): format_time(exp_time_factor_sec))) print("\n") - # generate port info - def get_info (self): + # generate formatted (console friendly) port info + def get_formatted_info (self, sync = True): + + # sync the status + if sync: + self.sync() + + # get a copy of the current attribute set (safe against manipulation) + attr = self.get_ts_attr() + info = dict(self.info) info['status'] = self.get_port_state_name() - if 'link' in self.attr: - info['link'] = 'UP' if self.attr['link']['up'] else 'DOWN' + if 'link' in attr: + info['link'] = 'UP' if attr['link']['up'] else 'DOWN' else: info['link'] = 'N/A' - if 'fc' in self.attr: - info['fc'] = FLOW_CTRL_DICT_REVERSED.get(self.attr['fc']['mode'], 'N/A') + if 'fc' in attr: + info['fc'] = FLOW_CTRL_DICT_REVERSED.get(attr['fc']['mode'], 'N/A') else: info['fc'] = 'N/A' - if 'promiscuous' in self.attr: - info['prom'] = "on" if self.attr['promiscuous']['enabled'] else "off" + if 'promiscuous' in attr: + info['prom'] = "on" if attr['promiscuous']['enabled'] else "off" else: info['prom'] = "N/A" @@ -692,34 +861,139 @@ class Port(object): else: info['is_virtual'] = 'N/A' + # speed + info['speed'] = self.get_speed_gbps() + + # RX filter mode + info['rx_filter_mode'] = 'hardware match' if attr['rx_filter_mode'] == 'hw' else 'fetch all' + + # src MAC and IPv4 + info['src_mac'] = attr['src_mac'] + info['src_ipv4'] = attr['src_ipv4'] + + if info['src_ipv4'] is None: + info['src_ipv4'] = '-' + + # dest + dest = attr['dest'] + if dest['type'] == 'mac': + info['dest'] = dest['mac'] + info['arp'] = '-' + + elif dest['type'] == 'ipv4': + info['dest'] = dest['ipv4'] + info['arp'] = dest['arp'] + + elif dest['type'] == 'ipv4_u': + info['dest'] = dest['ipv4'] + info['arp'] = 'unresolved' + + + # RX info + rx_info = self.status['rx_info'] + + # RX sniffer + sniffer = rx_info['sniffer'] + info['rx_sniffer'] = '{0}\n[{1} / {2}]'.format(sniffer['pcap_filename'], sniffer['count'], sniffer['limit']) if sniffer['is_active'] else 'off' + + + # RX queue + queue = rx_info['queue'] + info['rx_queue'] = '[{0} / {1}]'.format(queue['count'], queue['size']) if queue['is_active'] else 'off' + + # Grat ARP + grat_arp = rx_info['grat_arp'] + if grat_arp['is_active']: + info['grat_arp'] = "every {0} seconds".format(grat_arp['interval_sec']) + else: + info['grat_arp'] = "off" + + return info def get_port_state_name(self): return self.STATES_MAP.get(self.state, "Unknown") + def get_src_addr (self): + src_mac = self.__attr['src_mac'] + src_ipv4 = self.__attr['src_ipv4'] + + return {'mac': src_mac, 'ipv4': src_ipv4} + + def get_rx_filter_mode (self): + return self.__attr['rx_filter_mode'] + + def get_dst_addr (self): + dest = self.__attr['dest'] + + if dest['type'] == 'mac': + return {'ipv4': None, 'mac': dest['mac']} + + elif dest['type'] == 'ipv4': + return {'ipv4': dest['ipv4'], 'mac': dest['arp']} + + elif dest['type'] == 'ipv4_u': + return {'ipv4': dest['ipv4'], 'mac': None} + + else: + assert(0) + + + # port is considered resolved if it's dest is either MAC or resolved IPv4 + def is_resolved (self): + return (self.get_dst_addr()['mac'] is not None) + + # return True if the port is valid for resolve (has an IPv4 address as dest) + def is_resolvable (self): + return (self.get_dst_addr()['ipv4'] is not None) + + @writeable + def arp_resolve (self, retries): + if not self.is_service_mode_on(): + return self.err('port service mode must be enabled for performing ARP resolution. 
Please enable service mode') + + return ARPResolver(self).resolve(retries) + + @writeable + def ping (self, ping_ipv4, pkt_size): + if not self.is_service_mode_on(): + return self.err('port service mode must be enabled for performing ping. Please enable service mode') + + return PingResolver(self, ping_ipv4, pkt_size).resolve() + + ################# stats handler ###################### def generate_port_stats(self): return self.port_stats.generate_stats() def generate_port_status(self): - info = self.get_info() + info = self.get_formatted_info() - return {"driver": info['driver'], - "description": info.get('description', 'N/A')[:18], - "HW src mac": info['hw_macaddr'], - "SW src mac": info['src_macaddr'], - "SW dst mac": info['dst_macaddr'], - "PCI Address": info['pci_addr'], - "NUMA Node": info['numa'], + return {"driver": info['driver'], + "description": info.get('description', 'N/A')[:18], + "src MAC": info['src_mac'], + "src IPv4": info['src_ipv4'], + "Destination": info['dest'], + "ARP Resolution": format_text("{0}".format(info['arp']), 'bold', 'red') if info['arp'] == 'unresolved' else info['arp'], + "PCI Address": info['pci_addr'], + "NUMA Node": info['numa'], "--": "", "---": "", - "link speed": "{speed} Gb/s".format(speed=info['speed']), + "----": "", + "-----": "", + "link speed": "%g Gb/s" % info['speed'], "port status": info['status'], "link status": info['link'], "promiscuous" : info['prom'], "flow ctrl" : info['fc'], + + "RX Filter Mode": info['rx_filter_mode'], + "RX Queueing": info['rx_queue'], + "RX sniffer": info['rx_sniffer'], + "Grat ARP": info['grat_arp'], + } def clear_stats(self): @@ -756,17 +1030,54 @@ class Port(object): return {"streams" : OrderedDict(sorted(data.items())) } - + ######## attributes are a complex type (dict) that might be manipulated through the async thread ############# + + # get in a thread safe manner a duplication of attributes + def get_ts_attr (self): + with self.attr_lock: + return dict(self.__attr) + + # set in a thread safe manner a new dict of attributes + def set_ts_attr (self, new_attr): + with self.attr_lock: + self.__attr = new_attr + + ################# events handler ###################### def async_event_port_job_done (self): # until thread is locked - order is important self.tx_stopped_ts = datetime.now() self.state = self.STATE_STREAMS + self.last_factor_type = None - def async_event_port_attr_changed (self, attr): - self.info['speed'] = attr['speed'] // 1000 - self.attr = attr + def async_event_port_attr_changed (self, new_attr): + + # get a thread safe duplicate + cur_attr = self.get_ts_attr() + + # check if anything changed + if new_attr == cur_attr: + return None + + # generate before + before = self.get_formatted_info(sync = False) + + # update + self.set_ts_attr(new_attr) + + # generate after + after = self.get_formatted_info(sync = False) + + # return diff + diff = {} + for key, new_value in after.items(): + old_value = before.get(key, 'N/A') + if new_value != old_value: + diff[key] = (old_value, new_value) + + return diff + # rest of the events are used for TUI / read only sessions def async_event_port_stopped (self): @@ -792,3 +1103,4 @@ class Port(object): def async_event_released (self): self.owner = '' + diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_rx_features.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_rx_features.py new file mode 100644 index 00000000..ec83de5d --- /dev/null +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_rx_features.py 
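The module added below implements the client-side resolution helpers used by the API above. A rough usage sketch of the whole flow (the port ID and IPv4 addresses are made up for illustration, and the surrounding connect/reset calls are the usual STL client boilerplate):

    c = STLClient()
    c.connect()
    c.reset(ports = [0])

    c.set_service_mode(ports = [0])                                # RX features require service mode
    c.set_l3_mode(0, src_ipv4 = '1.1.1.1', dst_ipv4 = '1.1.1.2')   # IPv4 dest - needs ARP resolution
    c.resolve(ports = [0], retries = 5)                            # drives ARPResolver below
    c.ping_ip(0, '1.1.1.2', 64, 5)                                 # drives PingResolver below

    c.set_service_mode(ports = [0], enabled = False)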
@@ -0,0 +1,255 @@ + +from .trex_stl_streams import STLStream, STLTXSingleBurst +from .trex_stl_packet_builder_scapy import STLPktBuilder + +from scapy.layers.l2 import Ether, ARP +from scapy.layers.inet import IP, ICMP + +import time + +# a generic abstract class for resolving using the server +class Resolver(object): + def __init__ (self, port, queue_size = 100): + self.port = port + + # code to execute before sending any request - return RC object + def pre_send (self): + raise NotImplementedError() + + # return a list of streams for request + def generate_request (self): + raise NotImplementedError() + + # return None for more packets otherwise RC object + def on_pkt_rx (self, pkt): + raise NotImplementedError() + + # return value in case of timeout + def on_timeout_err (self, retries): + raise NotImplementedError() + + ##################### API ###################### + def resolve (self, retries = 0): + + # first cleanup + rc = self.port.remove_all_streams() + if not rc: + return rc + + # call the specific class implementation + rc = self.pre_send() + if not rc: + return rc + + # start the iteration + try: + + # add the stream(s) + self.port.add_streams(self.generate_request()) + rc = self.port.set_rx_queue(size = 100) + if not rc: + return rc + + return self.resolve_wrapper(retries) + + finally: + # best effort restore + self.port.remove_rx_queue() + self.port.remove_all_streams() + + + # main resolve function + def resolve_wrapper (self, retries): + + # retry for 'retries' + index = 0 + while True: + rc = self.resolve_iteration() + if rc is not None: + return rc + + if index >= retries: + return self.on_timeout_err(retries) + + index += 1 + time.sleep(0.1) + + + + def resolve_iteration (self): + + mult = {'op': 'abs', 'type' : 'percentage', 'value': 100} + rc = self.port.start(mul = mult, force = False, duration = -1, mask = 0xffffffff) + if not rc: + return rc + + # save the start timestamp + self.start_ts = rc.data()['ts'] + + # block until traffic finishes + while self.port.is_active(): + time.sleep(0.01) + + return self.wait_for_rx_response() + + + def wait_for_rx_response (self): + + # we try to fetch response for 5 times + polling = 5 + + while polling > 0: + + # fetch the queue + rx_pkts = self.port.get_rx_queue_pkts() + + # might be an error + if not rx_pkts: + return rx_pkts + + # for each packet - examine it + for pkt in rx_pkts.data(): + rc = self.on_pkt_rx(pkt) + if rc is not None: + return rc + + if polling == 0: + return None + + polling -= 1 + time.sleep(0.1) + + + + + +class ARPResolver(Resolver): + def __init__ (self, port_id): + super(ARPResolver, self).__init__(port_id) + + # before resolve + def pre_send (self): + self.dst = self.port.get_dst_addr() + self.src = self.port.get_src_addr() + + if self.dst['ipv4'] is None: + return self.port.err("Port has a non-IPv4 destination: '{0}'".format(self.dst['mac'])) + + if self.src['ipv4'] is None: + return self.port.err('Port must have an IPv4 source address configured') + + # invalidate the current ARP resolution (if exists) + return self.port.invalidate_arp() + + + # return a list of streams for request + def generate_request (self): + + base_pkt = Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(psrc = self.src['ipv4'], pdst = self.dst['ipv4'], hwsrc = self.src['mac']) + s1 = STLStream( packet = STLPktBuilder(pkt = base_pkt), mode = STLTXSingleBurst(total_pkts = 1) ) + + return [s1] + + + # return None in case more packets are needed else the status rc + def on_pkt_rx (self, pkt): + scapy_pkt = Ether(pkt['binary']) + if not 'ARP' in 
scapy_pkt:
+            return None
+
+        arp = scapy_pkt['ARP']
+
+        # check this is the right ARP (ARP reply with the address)
+        if (arp.op != 2) or (arp.psrc != self.dst['ipv4']):
+            return None
+
+
+        # update the port with L3 full configuration
+        rc = self.port.set_l3_mode(self.src['ipv4'], self.dst['ipv4'], arp.hwsrc)
+        if not rc:
+            return rc
+
+        return self.port.ok('Port {0} - Received ARP reply from: {1}, hw: {2}'.format(self.port.port_id, arp.psrc, arp.hwsrc))
+
+
+    def on_timeout_err (self, retries):
+        return self.port.err('failed to receive ARP response ({0} retries)'.format(retries))
+
+
+
+
+    #################### ping resolver ####################
+
+class PingResolver(Resolver):
+    def __init__ (self, port, ping_ip, pkt_size):
+        super(PingResolver, self).__init__(port)
+        self.ping_ip = ping_ip
+        self.pkt_size = pkt_size
+
+    def pre_send (self):
+
+        self.src = self.port.get_src_addr()
+        self.dst = self.port.get_dst_addr()
+
+        if self.src['ipv4'] is None:
+            return self.port.err('Ping - port does not have an IPv4 address configured')
+
+        if self.dst['mac'] is None:
+            return self.port.err('Ping - port has an unresolved destination, cannot determine next hop MAC address')
+
+        return self.port.ok()
+
+
+    # return a list of streams for request
+    def generate_request (self):
+
+        base_pkt = Ether(dst = self.dst['mac'])/IP(src = self.src['ipv4'], dst = self.ping_ip)/ICMP(type = 8)
+        pad = max(0, self.pkt_size - len(base_pkt))
+
+        base_pkt = base_pkt / (pad * 'x')
+
+        #base_pkt.show2()
+        s1 = STLStream( packet = STLPktBuilder(pkt = base_pkt), mode = STLTXSingleBurst(total_pkts = 1) )
+
+        self.base_pkt = base_pkt
+
+        return [s1]
+
+    # return None for more packets otherwise RC object
+    def on_pkt_rx (self, pkt):
+        scapy_pkt = Ether(pkt['binary'])
+        if not 'ICMP' in scapy_pkt:
+            return None
+
+        ip = scapy_pkt['IP']
+        if ip.dst != self.src['ipv4']:
+            return None
+
+        icmp = scapy_pkt['ICMP']
+
+        dt = pkt['ts'] - self.start_ts
+
+        # echo reply
+        if icmp.type == 0:
+            # check seq
+            if icmp.seq != self.base_pkt['ICMP'].seq:
+                return None
+            return self.port.ok('Reply from {0}: bytes={1}, time={2:.2f}ms, TTL={3}'.format(ip.src, len(pkt['binary']), dt * 1000, ip.ttl))
+
+        # unreachable
+        elif icmp.type == 3:
+            # check seq
+            if icmp.payload.seq != self.base_pkt['ICMP'].seq:
+                return None
+            return self.port.ok('Reply from {0}: Destination host unreachable'.format(ip.src))
+
+        else:
+            # skip any other types
+            #scapy_pkt.show2()
+            return None
+
+
+
+    # on timeout - return OK with a timeout message (a lost ping is not an API error)
+    def on_timeout_err (self, retries):
+        return self.port.ok('Request timed out.')
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
index 9f601484..c08a0af8 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_stats.py
@@ -670,12 +670,19 @@ class CTRexInfoGenerator(object):
                                              ("promiscuous", []),
                                              ("flow ctrl", []),
                                              ("--", []),
-                                             ("HW src mac", []),
-                                             ("SW src mac", []),
-                                             ("SW dst mac", []),
+                                             ("src IPv4", []),
+                                             ("src MAC", []),
                                              ("---", []),
+                                             ("Destination", []),
+                                             ("ARP Resolution", []),
+                                             ("----", []),
                                              ("PCI Address", []),
                                              ("NUMA Node", []),
+                                             ("-----", []),
+                                             ("RX Filter Mode", []),
+                                             ("RX Queueing", []),
+                                             ("RX sniffer", []),
+                                             ("Grat ARP", []),
                                              ]
                                             )
 
@@ -1103,13 +1110,7 @@ class CPortStats(CTRexStats):
             port_state = format_text(port_state, 'bold')
 
         if self._port_obj:
-            if 
self._port_obj.attr.get('link', {}).get('up') == False:
-                link_state = format_text('DOWN', 'red', 'bold')
-            else:
-                link_state = 'UP'
-        else:
-            link_state = 'N/A'
+            link_state = 'UP' if self._port_obj.is_up() else format_text('DOWN', 'red', 'bold')
         else:
             link_state = ''
 
@@ -1130,7 +1131,7 @@ class CPortStats(CTRexStats):
         return {"owner": owner,
                 "state": "{0}".format(port_state),
                 'link': link_state,
-                "speed": self._port_obj.get_formatted_speed() if self._port_obj else '',
+                "speed": "%g Gb/s" % self._port_obj.get_speed_gbps() if self._port_obj else '',
                 "CPU util.": "{0} {1}%".format(self.get_trend_gui("m_cpu_util", use_raw = True),
                                                format_threshold(round_float(self.get("m_cpu_util")), [85, 100], [0, 85])) if self._port_obj else '' ,
                 "--": " ",
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
index e63f9125..26613e56 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_streams.py
@@ -987,7 +987,8 @@ class STLProfile(object):
                   loop_count = 1,
                   vm = None,
                   packet_hook = None,
-                  split_mode = None):
+                  split_mode = None,
+                  min_ipg_usec = None):
        """ Convert a pcap file with a number of packets to a list of connected streams.

       packet1->packet2->packet3 etc
@@ -1017,6 +1018,9 @@
                      used for dual mode
                      can be 'MAC' or 'IP'

+             min_ipg_usec   : float
+                      Minimum inter-packet gap in usec. Used to guard from too small IPGs.
+
        :return: STLProfile

        """

@@ -1024,9 +1028,15 @@
        # check filename
        if not os.path.isfile(pcap_file):
            raise STLError("file '{0}' does not exist".format(pcap_file))

+        if speedup <= 0:
+            raise STLError('Speedup should be a positive value.')
+        if min_ipg_usec and min_ipg_usec < 0:
+            raise STLError('min_ipg_usec should not be negative.')
+
-        # make sure IPG is not less than 1 usec
-        if ipg_usec is not None and ipg_usec < 0.001:
+        # make sure IPG is not less than 0.001 usec
+        if (ipg_usec is not None and (ipg_usec < 0.001 * speedup) and
+                (min_ipg_usec is None or min_ipg_usec < 0.001)):
            raise STLError("ipg_usec cannot be less than 0.001 usec: '{0}'".format(ipg_usec))

        if loop_count < 0:
@@ -1039,6 +1049,7 @@
            pkts = PCAPReader(pcap_file).read_all()
            return STLProfile.__pkts_to_streams(pkts,
                                                ipg_usec,
+                                                min_ipg_usec,
                                                speedup,
                                                loop_count,
                                                vm,
@@ -1046,8 +1057,20 @@
        else:
            pkts_a, pkts_b = PCAPReader(pcap_file).read_all(split_mode = split_mode)

+            # swap the packets if a is empty, or the ts of first packet in b is earlier
+            if not pkts_a:
+                pkts_a, pkts_b = pkts_b, pkts_a
+            elif (ipg_usec is None) and pkts_b:
+                meta = pkts_a[0][1]
+                start_time_a = meta[0] * 1e6 + meta[1]
+                meta = pkts_b[0][1]
+                start_time_b = meta[0] * 1e6 + meta[1]
+                if start_time_b < start_time_a:
+                    pkts_a, pkts_b = pkts_b, pkts_a
+
            profile_a = STLProfile.__pkts_to_streams(pkts_a,
                                                     ipg_usec,
+                                                     min_ipg_usec,
                                                     speedup,
                                                     loop_count,
                                                     vm,
@@ -1056,6 +1079,7 @@
            profile_b = STLProfile.__pkts_to_streams(pkts_b,
                                                     ipg_usec,
+                                                     min_ipg_usec,
                                                     speedup,
                                                     loop_count,
                                                     vm,
@@ -1070,23 +1094,27 @@

    @staticmethod
-    def __pkts_to_streams (pkts, ipg_usec, speedup, loop_count, vm, packet_hook, start_delay_usec = 0):
+    def __pkts_to_streams (pkts, ipg_usec, min_ipg_usec, speedup, loop_count, vm, packet_hook, start_delay_usec = 0):

        streams = []
-
-        # 10 ms delay before starting the PCAP
-        last_ts_usec = 
-(start_delay_usec) - if packet_hook: pkts = [(packet_hook(cap), meta) for (cap, meta) in pkts] - for i, (cap, meta) in enumerate(pkts, start = 1): # IPG - if not provided, take from cap - if ipg_usec == None: - ts_usec = (meta[0] * 1e6 + meta[1]) / float(speedup) - else: - ts_usec = (ipg_usec * i) / float(speedup) + if ipg_usec is None: + packet_time = meta[0] * 1e6 + meta[1] + if i == 1: + prev_time = packet_time + isg = (packet_time - prev_time) / float(speedup) + if min_ipg_usec and isg < min_ipg_usec: + isg = min_ipg_usec + prev_time = packet_time + else: # user specified ipg + if min_ipg_usec: + isg = min_ipg_usec + else: + isg = ipg_usec / float(speedup) # handle last packet if i == len(pkts): @@ -1100,13 +1128,11 @@ class STLProfile(object): packet = STLPktBuilder(pkt_buffer = cap, vm = vm), mode = STLTXSingleBurst(total_pkts = 1, percentage = 100), self_start = True if (i == 1) else False, - isg = (ts_usec - last_ts_usec), # seconds to usec + isg = isg, # usec action_count = action_count, next = next)) - - last_ts_usec = ts_usec - + profile = STLProfile(streams) profile.meta = {'type': 'pcap'} diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py index aa6c4218..a60a7ede 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_types.py @@ -50,11 +50,25 @@ class RC(): return (e if len(e) != 1 else e[0]) def __str__ (self): - s = "" - for x in self.rc_list: - if x.data: - s += format_text("\n{0}".format(x.data), 'bold') - return s + if self.good(): + s = "" + for x in self.rc_list: + if x.data: + s += format_text("\n{0}".format(x.data), 'bold') + return s + else: + show_count = 10 + err_list = [] + err_count = 0 + for x in filter(len, listify(self.err())): + err_count += 1 + if len(err_list) < show_count: + err_list.append(format_text(x, 'bold')) + s = '\n' if len(err_list) > 1 else '' + if err_count > show_count: + s += format_text('Occurred %s errors, showing first %s:\n' % (err_count, show_count), 'bold') + s += '\n'.join(err_list) + return s def __iter__(self): return self.rc_list.__iter__() @@ -135,6 +149,12 @@ def validate_type(arg_name, arg, valid_types): else: raise STLError('validate_type: valid_types should be type or list or tuple of types') + +def validate_choice (arg_name, arg, choices): + if arg is not None and not arg in choices: + raise STLError("validate_choice: argument '{0}' can only be one of '{1}'".format(arg_name, choices)) + + # throws STLError if not exactly one argument is present def verify_exclusive_arg (args_list): if not (len(list(filter(lambda x: x is not None, args_list))) == 1): diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py index 72ee8972..cbbacb27 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/common.py @@ -3,6 +3,8 @@ import sys import string import random import time +import socket +import re try: import pwd @@ -86,3 +88,23 @@ class PassiveTimer(object): return (time.time() > self.expr_sec) +def is_valid_ipv4 (addr): + try: + socket.inet_pton(socket.AF_INET, addr) + return True + except (socket.error, TypeError): + return False + +def is_valid_mac (mac): + return bool(re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", 
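+                     # six hex-digit pairs; the ([-:]) group captures the first separator and
+                     # the \1 backreference forces every later separator to match it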
mac.lower())) + +def list_remove_dup (l): + tmp = list() + + for x in l: + if not x in tmp: + tmp.append(x) + + return tmp + + diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py index 7eda8635..f5dab30c 100755 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/parsing_opts.py @@ -1,6 +1,6 @@ import argparse from collections import namedtuple, OrderedDict -from .common import list_intersect, list_difference +from .common import list_intersect, list_difference, is_valid_ipv4, is_valid_mac, list_remove_dup from .text_opts import format_text from ..trex_stl_types import * from .constants import ON_OFF_DICT, UP_DOWN_DICT, FLOW_CTRL_DICT @@ -14,56 +14,85 @@ ArgumentGroup = namedtuple('ArgumentGroup', ['type', 'args', 'options']) # list of available parsing options -MULTIPLIER = 1 -MULTIPLIER_STRICT = 2 -PORT_LIST = 3 -ALL_PORTS = 4 -PORT_LIST_WITH_ALL = 5 -FILE_PATH = 6 -FILE_FROM_DB = 7 -SERVER_IP = 8 -STREAM_FROM_PATH_OR_FILE = 9 -DURATION = 10 -FORCE = 11 -DRY_RUN = 12 -XTERM = 13 -TOTAL = 14 -FULL_OUTPUT = 15 -IPG = 16 -SPEEDUP = 17 -COUNT = 18 -PROMISCUOUS = 19 -LINK_STATUS = 20 -LED_STATUS = 21 -TUNABLES = 22 -REMOTE_FILE = 23 -LOCKED = 24 -PIN_CORES = 25 -CORE_MASK = 26 -DUAL = 27 -FLOW_CTRL = 28 -SUPPORTED = 29 - -GLOBAL_STATS = 50 -PORT_STATS = 51 -PORT_STATUS = 52 -STREAMS_STATS = 53 -STATS_MASK = 54 -CPU_STATS = 55 -MBUF_STATS = 56 -EXTENDED_STATS = 57 -EXTENDED_INC_ZERO_STATS = 58 - -STREAMS_MASK = 60 -CORE_MASK_GROUP = 61 - -# ALL_STREAMS = 61 -# STREAM_LIST_WITH_ALL = 62 +_constants = ''' + +MULTIPLIER +MULTIPLIER_STRICT +PORT_LIST +ALL_PORTS +PORT_LIST_WITH_ALL +FILE_PATH +FILE_FROM_DB +SERVER_IP +STREAM_FROM_PATH_OR_FILE +DURATION +FORCE +DRY_RUN +XTERM +TOTAL +FULL_OUTPUT +IPG +MIN_IPG +SPEEDUP +COUNT +PROMISCUOUS +LINK_STATUS +LED_STATUS +TUNABLES +REMOTE_FILE +LOCKED +PIN_CORES +CORE_MASK +DUAL +FLOW_CTRL +SUPPORTED +FILE_PATH_NO_CHECK + +OUTPUT_FILENAME +LIMIT +PORT_RESTART + +RETRIES + +SINGLE_PORT +DST_MAC + +PING_IPV4 +PING_COUNT +PKT_SIZE + +SERVICE_OFF + +SRC_IPV4 +DST_IPV4 + +GLOBAL_STATS +PORT_STATS +PORT_STATUS +STREAMS_STATS +STATS_MASK +CPU_STATS +MBUF_STATS +EXTENDED_STATS +EXTENDED_INC_ZERO_STATS + +STREAMS_MASK +CORE_MASK_GROUP + +# ALL_STREAMS +# STREAM_LIST_WITH_ALL +# list of ArgumentGroup types +MUTEX +''' + +for index, line in enumerate(_constants.splitlines()): + var = line.strip().split() + if not var or '#' in var[0]: + continue + exec('%s = %s' % (var[0], index)) -# list of ArgumentGroup types -MUTEX = 1 def check_negative(value): ivalue = int(value) @@ -217,8 +246,30 @@ def is_valid_file(filename): return filename +def check_ipv4_addr (ipv4_str): + if not is_valid_ipv4(ipv4_str): + raise argparse.ArgumentTypeError("invalid IPv4 address: '{0}'".format(ipv4_str)) + return ipv4_str + +def check_pkt_size (pkt_size): + try: + pkt_size = int(pkt_size) + except ValueError: + raise argparse.ArgumentTypeError("invalid packet size type: '{0}'".format(pkt_size)) + + if (pkt_size < 64) or (pkt_size > 9216): + raise argparse.ArgumentTypeError("invalid packet size: '{0}' - valid range is 64 to 9216".format(pkt_size)) + + return pkt_size + +def check_mac_addr (addr): + if not is_valid_mac(addr): + raise argparse.ArgumentTypeError("not a valid MAC address: '{0}'".format(addr)) + + return addr + def decode_tunables (tunable_str): tunables = {} @@ -273,6 
+324,11 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'], 'default': None, 'type': float}), + MIN_IPG: ArgumentPack(['--min-ipg'], + {'help': "Minimal IPG value in usec between packets. Used to guard from too small IPGs.", + 'dest': "min_ipg_usec", + 'default': None, + 'type': float}), SPEEDUP: ArgumentPack(['-s', '--speedup'], {'help': "Factor to accelerate the injection. effectively means IPG = IPG / SPEEDUP", @@ -303,6 +359,53 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'], 'dest': 'flow_ctrl', 'choices': FLOW_CTRL_DICT}), + SRC_IPV4: ArgumentPack(['--src'], + {'help': 'Configure source IPv4 address', + 'dest': 'src_ipv4', + 'required': True, + 'type': check_ipv4_addr}), + + DST_IPV4: ArgumentPack(['--dst'], + {'help': 'Configure destination IPv4 address', + 'dest': 'dst_ipv4', + 'required': True, + 'type': check_ipv4_addr}), + + + DST_MAC: ArgumentPack(['--dst'], + {'help': 'Configure destination MAC address', + 'dest': 'dst_mac', + 'required': True, + 'type': check_mac_addr}), + + RETRIES: ArgumentPack(['-r', '--retries'], + {'help': 'retries count [default is zero]', + 'dest': 'retries', + 'default': 0, + 'type': int}), + + + OUTPUT_FILENAME: ArgumentPack(['-o', '--output'], + {'help': 'Output PCAP filename', + 'dest': 'output_filename', + 'default': None, + 'required': True, + 'type': str}), + + + PORT_RESTART: ArgumentPack(['-r', '--restart'], + {'help': 'hard restart port(s)', + 'dest': 'restart', + 'default': False, + 'action': 'store_true'}), + + LIMIT: ArgumentPack(['-l', '--limit'], + {'help': 'Limit the packet count to be written to the file', + 'dest': 'limit', + 'default': 1000, + 'type': int}), + + SUPPORTED: ArgumentPack(['--supp'], {'help': 'Show which attributes are supported by current NICs', 'default': None, @@ -325,6 +428,33 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'], 'help': "A list of ports on which to apply the command", 'default': []}), + + SINGLE_PORT: ArgumentPack(['--port', '-p'], + {'dest':'ports', + 'type': int, + 'metavar': 'PORT', + 'help': 'source port for the action', + 'required': True}), + + PING_IPV4: ArgumentPack(['-d'], + {'help': 'which IPv4 to ping', + 'dest': 'ping_ipv4', + 'required': True, + 'type': check_ipv4_addr}), + + PING_COUNT: ArgumentPack(['-n', '--count'], + {'help': 'How many times to ping [default is 5]', + 'dest': 'count', + 'default': 5, + 'type': int}), + + PKT_SIZE: ArgumentPack(['-s'], + {'dest':'pkt_size', + 'help': 'packet size to use', + 'default': 64, + 'type': check_pkt_size}), + + ALL_PORTS: ArgumentPack(['-a'], {"action": "store_true", "dest": "all_ports", @@ -362,6 +492,14 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'], 'type': is_valid_file, 'help': "File path to load"}), + FILE_PATH_NO_CHECK: ArgumentPack(['-f'], + {'metavar': 'FILE', + 'dest': 'file', + 'nargs': 1, + 'required': True, + 'type': str, + 'help': "File path to load"}), + FILE_FROM_DB: ArgumentPack(['--db'], {'metavar': 'LOADED_STREAM_PACK', 'help': "A stream pack which already loaded into console cache."}), @@ -447,11 +585,18 @@ OPTIONS_DB = {MULTIPLIER: ArgumentPack(['-m', '--multiplier'], 'default': None, 'help': "Core mask - only cores responding to the bit mask will be active"}), + SERVICE_OFF: ArgumentPack(['--off'], + {'action': 'store_false', + 'dest': 'enabled', + 'default': True, + 'help': 'Deactivates services on port(s)'}), + # advanced options PORT_LIST_WITH_ALL: ArgumentGroup(MUTEX, [PORT_LIST, ALL_PORTS], {'required': False}), + STREAM_FROM_PATH_OR_FILE: 
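                                  # a MUTEX ArgumentGroup presumably maps to an argparse
                                  # mutually-exclusive group: exactly one packed option is accepted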
ArgumentGroup(MUTEX, [FILE_PATH, FILE_FROM_DB], {'required': True}), @@ -515,6 +660,8 @@ class CCmdArgParser(argparse.ArgumentParser): if not self.has_ports_cfg(opts): return opts + opts.ports = listify(opts.ports) + # if all ports are marked or if (getattr(opts, "all_ports", None) == True) or (getattr(opts, "ports", None) == []): if default_ports is None: @@ -522,10 +669,17 @@ class CCmdArgParser(argparse.ArgumentParser): else: opts.ports = default_ports + opts.ports = list_remove_dup(opts.ports) + # so maybe we have ports configured invalid_ports = list_difference(opts.ports, self.stateless_client.get_all_ports()) if invalid_ports: - msg = "{0}: port(s) {1} are not valid port IDs".format(self.cmd_name, invalid_ports) + + if len(invalid_ports) > 1: + msg = "{0}: port(s) {1} are not valid port IDs".format(self.cmd_name, invalid_ports) + else: + msg = "{0}: port {1} is not a valid port ID".format(self.cmd_name, invalid_ports[0]) + self.stateless_client.logger.log(format_text(msg, 'bold')) return RC_ERR(msg) diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py index bfb96950..63b05bf4 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/text_opts.py @@ -27,6 +27,9 @@ class TextCodesStripper: def strip (s): return re.sub(TextCodesStripper.pattern, '', s) +def clear_formatting(s): + return TextCodesStripper.strip(s) + def format_num (size, suffix = "", compact = True, opts = None): if opts is None: opts = () @@ -128,11 +131,13 @@ def yellow(text): def underline(text): return text_attribute(text, 'underline') - +# apply attribute on each non-empty line def text_attribute(text, attribute): - return "{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'], - txt=text, - stop=TEXT_CODES[attribute]['end']) + return '\n'.join(['{start}{txt}{end}'.format( + start = TEXT_CODES[attribute]['start'], + txt = line, + end = TEXT_CODES[attribute]['end']) + if line else '' for line in ('%s' % text).split('\n')]) FUNC_DICT = {'blue': blue, diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py index 397ada16..a2a47927 100644 --- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py +++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py @@ -6,7 +6,7 @@ class ZippedMsg: MSG_COMPRESS_THRESHOLD = 256 MSG_COMPRESS_HEADER_MAGIC = 0xABE85CEA - def check_threshold (self, msg): + def check_threshold(self, msg): return len(msg) >= self.MSG_COMPRESS_THRESHOLD def compress (self, msg): @@ -16,7 +16,7 @@ class ZippedMsg: return new_msg - def decompress (self, msg): + def decompress(self, msg): if len(msg) < 8: return None @@ -30,3 +30,15 @@ class ZippedMsg: return x + + def is_compressed(self, msg): + if len(msg) < 8: + return False + + t = struct.unpack(">II", msg[:8]) + if (t[0] != self.MSG_COMPRESS_HEADER_MAGIC): + return False + + return True + + diff --git a/scripts/dpdk_nic_bind.py b/scripts/dpdk_nic_bind.py index 36d123f1..e797666b 100755 --- a/scripts/dpdk_nic_bind.py +++ b/scripts/dpdk_nic_bind.py @@ -54,7 +54,11 @@ if needed_path not in PATH: # Each device within this is itself a dictionary of device properties devices = {} # list of supported DPDK drivers -dpdk_drivers = [ "igb_uio", "vfio-pci", "uio_pci_generic" ] +# , + +dpdk_and_kernel=[ "mlx5_core", 
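+                  # Mellanox mlx5 is a bifurcated driver - the port stays bound to the
+                  # kernel driver while DPDK drives it, hence it is tracked in its own list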
"mlx5_ib" ] + +dpdk_drivers = ["igb_uio", "vfio-pci", "uio_pci_generic" ] # command-line arg flags b_flag = None @@ -568,10 +572,15 @@ def show_status(): if not has_driver(d): no_drv.append(devices[d]) continue - if devices[d]["Driver_str"] in dpdk_drivers: + + if devices[d]["Driver_str"] in dpdk_and_kernel: dpdk_drv.append(devices[d]) - else: kernel_drv.append(devices[d]) + else: + if devices[d]["Driver_str"] in dpdk_drivers: + dpdk_drv.append(devices[d]) + else: + kernel_drv.append(devices[d]) # print each category separately, so we can clearly see what's used by DPDK display_devices("Network devices using DPDK-compatible driver", dpdk_drv, \ @@ -617,7 +626,7 @@ def show_table(get_macs = True): get_nic_details() dpdk_drv = [] for d in devices.keys(): - if devices[d].get("Driver_str") in dpdk_drivers: + if devices[d].get("Driver_str") in (dpdk_drivers+dpdk_and_kernel): dpdk_drv.append(d) if get_macs: diff --git a/scripts/dpdk_setup_ports.py b/scripts/dpdk_setup_ports.py index f85dae5d..ce6d2b2f 100755 --- a/scripts/dpdk_setup_ports.py +++ b/scripts/dpdk_setup_ports.py @@ -14,6 +14,7 @@ import traceback from collections import defaultdict, OrderedDict from distutils.util import strtobool import getpass +import subprocess class ConfigCreator(object): mandatory_interface_fields = ['Slot_str', 'Device_str', 'NUMA'] @@ -41,51 +42,52 @@ class ConfigCreator(object): cores[core] = cores[core][:1] include_lcores = [int(x) for x in include_lcores] exclude_lcores = [int(x) for x in exclude_lcores] + self.has_zero_lcore = False + self.lcores_per_numa = {} + total_lcores = 0 for numa, cores in self.cpu_topology.items(): + self.lcores_per_numa[numa] = {'main': [], 'siblings': [], 'all': []} for core, lcores in cores.items(): - for lcore in copy.copy(lcores): + total_lcores += len(lcores) + for lcore in list(lcores): if include_lcores and lcore not in include_lcores: cores[core].remove(lcore) if exclude_lcores and lcore in exclude_lcores: cores[core].remove(lcore) if 0 in lcores: self.has_zero_lcore = True - cores[core].remove(0) - zero_lcore_numa = numa - zero_lcore_core = core - zero_lcore_siblings = cores[core] - if self.has_zero_lcore: - del self.cpu_topology[zero_lcore_numa][zero_lcore_core] - self.cpu_topology[zero_lcore_numa][zero_lcore_core] = zero_lcore_siblings + lcores.remove(0) + self.lcores_per_numa[numa]['siblings'].extend(lcores) + else: + self.lcores_per_numa[numa]['main'].extend(lcores[:1]) + self.lcores_per_numa[numa]['siblings'].extend(lcores[1:]) + self.lcores_per_numa[numa]['all'].extend(lcores) + for interface in self.interfaces: for mandatory_interface_field in ConfigCreator.mandatory_interface_fields: if mandatory_interface_field not in interface: raise DpdkSetup("Expected '%s' field in interface dictionary, got: %s" % (mandatory_interface_field, interface)) + Device_str = self._verify_devices_same_type(self.interfaces) if '40Gb' in Device_str: self.speed = 40 else: self.speed = 10 - lcores_per_numa = OrderedDict() - system_lcores = int(self.has_zero_lcore) - for numa, core in self.cpu_topology.items(): - for lcores in core.values(): - if numa not in lcores_per_numa: - lcores_per_numa[numa] = [] - lcores_per_numa[numa].extend(lcores) - system_lcores += len(lcores) - minimum_required_lcores = len(self.interfaces) / 2 + 2 - if system_lcores < minimum_required_lcores: + + minimum_required_lcores = len(self.interfaces) // 2 + 2 + if total_lcores < minimum_required_lcores: raise DpdkSetup('Your system should have at least %s cores for %s interfaces, and it has: %s.' 
% - (minimum_required_lcores, len(self.interfaces), system_lcores + (0 if self.has_zero_lcore else 1))) + (minimum_required_lcores, len(self.interfaces), total_lcores)) interfaces_per_numa = defaultdict(int) + for i in range(0, len(self.interfaces), 2): - if self.interfaces[i]['NUMA'] != self.interfaces[i+1]['NUMA'] and not ignore_numa: + numa = self.interfaces[i]['NUMA'] + if numa != self.interfaces[i+1]['NUMA'] and not ignore_numa: raise DpdkSetup('NUMA of each pair of interfaces should be the same. Got NUMA %s for client interface %s, NUMA %s for server interface %s' % - (self.interfaces[i]['NUMA'], self.interfaces[i]['Slot_str'], self.interfaces[i+1]['NUMA'], self.interfaces[i+1]['Slot_str'])) - interfaces_per_numa[self.interfaces[i]['NUMA']] += 2 - self.lcores_per_numa = lcores_per_numa + (numa, self.interfaces[i]['Slot_str'], self.interfaces[i+1]['NUMA'], self.interfaces[i+1]['Slot_str'])) + interfaces_per_numa[numa] += 2 + self.interfaces_per_numa = interfaces_per_numa self.prefix = prefix self.zmq_pub_port = zmq_pub_port @@ -153,16 +155,20 @@ class ConfigCreator(object): config_str += ' '*8 + 'src_mac: %s\n' % self.verify_mac(interface['src_mac']) if index % 2: config_str += '\n' # dual if barrier + if not self.ignore_numa: config_str += ' platform:\n' - if len(self.interfaces_per_numa.keys()) == 1 and -1 in self.interfaces_per_numa: # VM, use any cores, 1 core per dual_if - lcores_pool = sorted([lcore for lcores in self.lcores_per_numa.values() for lcore in lcores]) - config_str += ' '*6 + 'master_thread_id: %s\n' % (0 if self.has_zero_lcore else lcores_pool.pop()) + if len(self.interfaces_per_numa.keys()) == 1 and -1 in self.interfaces_per_numa: # VM, use any cores + lcores_pool = sorted([lcore for lcores in self.lcores_per_numa.values() for lcore in lcores['all']]) + config_str += ' '*6 + 'master_thread_id: %s\n' % (0 if self.has_zero_lcore else lcores_pool.pop(0)) config_str += ' '*6 + 'latency_thread_id: %s\n' % lcores_pool.pop(0) - lcores_per_dual_if = int(len(lcores_pool) / len(self.interfaces)) + lcores_per_dual_if = int(len(lcores_pool) * 2 / len(self.interfaces)) config_str += ' '*6 + 'dual_if:\n' for i in range(0, len(self.interfaces), 2): - lcores_for_this_dual_if = [str(lcores_pool.pop(0)) for _ in range(lcores_per_dual_if)] + lcores_for_this_dual_if = list(map(str, sorted(lcores_pool[:lcores_per_dual_if]))) + lcores_pool = lcores_pool[lcores_per_dual_if:] + if not lcores_for_this_dual_if: + raise DpdkSetup('lcores_for_this_dual_if is empty (internal bug, please report with details of setup)') config_str += ' '*8 + '- socket: 0\n' config_str += ' '*10 + 'threads: [%s]\n\n' % ','.join(lcores_for_this_dual_if) else: @@ -170,26 +176,46 @@ class ConfigCreator(object): lcores_per_dual_if = 99 extra_lcores = 1 if self.has_zero_lcore else 2 # worst case 3 iterations, to ensure master and "rx" have cores left - while (lcores_per_dual_if * sum(self.interfaces_per_numa.values()) / 2) + extra_lcores > sum([len(lcores) for lcores in self.lcores_per_numa.values()]): + while (lcores_per_dual_if * sum(self.interfaces_per_numa.values()) / 2) + extra_lcores > sum([len(lcores['all']) for lcores in self.lcores_per_numa.values()]): lcores_per_dual_if -= 1 - for numa, cores in self.lcores_per_numa.items(): + for numa, lcores_dict in self.lcores_per_numa.items(): if not self.interfaces_per_numa[numa]: continue - lcores_per_dual_if = min(lcores_per_dual_if, int(2 * len(cores) / self.interfaces_per_numa[numa])) + lcores_per_dual_if = min(lcores_per_dual_if, int(2 * len(lcores_dict['all']) / 
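The reworked ConfigCreator bookkeeping records, per NUMA node, which logical cores are the first ("main") thread of each physical core and which are hyperthread siblings, and it enforces len(interfaces) // 2 + 2 as the floor: one core per interface pair plus the master and latency threads. A toy run of that accounting with a made-up two-node topology in the {numa: {physical_core: [lcores]}} shape the class consumes; include/exclude filtering is omitted and the 'all' handling is slightly simplified relative to the patch:

cpu_topology = {0: {0: [0, 8], 1: [1, 9]},      # NUMA 0: 2 cores, HT enabled
                1: {2: [2, 10], 3: [3, 11]}}    # NUMA 1: 2 cores, HT enabled

lcores_per_numa = {}
total_lcores = 0
has_zero_lcore = False
for numa, cores in cpu_topology.items():
    lcores_per_numa[numa] = {'main': [], 'siblings': [], 'all': []}
    for core, lcores in cores.items():
        total_lcores += len(lcores)
        if 0 in lcores:
            has_zero_lcore = True               # lcore 0 is reserved for the master thread
            lcores.remove(0)
            lcores_per_numa[numa]['siblings'].extend(lcores)
        else:
            lcores_per_numa[numa]['main'].extend(lcores[:1])
            lcores_per_numa[numa]['siblings'].extend(lcores[1:])
        lcores_per_numa[numa]['all'].extend(lcores)

interfaces = ['03:00.0', '03:00.1']             # one dual-interface pair (placeholder)
minimum_required = len(interfaces) // 2 + 2     # 1 pair + master + latency = 3
assert total_lcores >= minimum_required         # 8 >= 3 here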
self.interfaces_per_numa[numa])) lcores_pool = copy.deepcopy(self.lcores_per_numa) # first, allocate lcores for dual_if section dual_if_section = ' '*6 + 'dual_if:\n' for i in range(0, len(self.interfaces), 2): numa = self.interfaces[i]['NUMA'] dual_if_section += ' '*8 + '- socket: %s\n' % numa - lcores_for_this_dual_if = [str(lcores_pool[numa].pop(0)) for _ in range(lcores_per_dual_if)] + lcores_for_this_dual_if = lcores_pool[numa]['all'][:lcores_per_dual_if] + lcores_pool[numa]['all'] = lcores_pool[numa]['all'][lcores_per_dual_if:] + for lcore in lcores_for_this_dual_if: + if lcore in lcores_pool[numa]['main']: + lcores_pool[numa]['main'].remove(lcore) + elif lcore in lcores_pool[numa]['siblings']: + lcores_pool[numa]['siblings'].remove(lcore) + else: + raise DpdkSetup('lcore not in main nor in siblings list (internal bug, please report with details of setup)') if not lcores_for_this_dual_if: raise DpdkSetup('Not enough cores at NUMA %s. This NUMA has %s processing units and %s interfaces.' % (numa, len(self.lcores_per_numa[numa]), self.interfaces_per_numa[numa])) - dual_if_section += ' '*10 + 'threads: [%s]\n\n' % ','.join(lcores_for_this_dual_if) + dual_if_section += ' '*10 + 'threads: [%s]\n\n' % ','.join(list(map(str, sorted(lcores_for_this_dual_if)))) + # take the cores left to master and rx - lcores_pool_left = [lcore for lcores in lcores_pool.values() for lcore in lcores] - config_str += ' '*6 + 'master_thread_id: %s\n' % (0 if self.has_zero_lcore else lcores_pool_left.pop(0)) - config_str += ' '*6 + 'latency_thread_id: %s\n' % lcores_pool_left.pop(0) + mains_left = [lcore for lcores in lcores_pool.values() for lcore in lcores['main']] + siblings_left = [lcore for lcores in lcores_pool.values() for lcore in lcores['siblings']] + if mains_left: + rx_core = mains_left.pop(0) + else: + rx_core = siblings_left.pop(0) + if self.has_zero_lcore: + master_core = 0 + elif mains_left: + master_core = mains_left.pop(0) + else: + master_core = siblings_left.pop(0) + config_str += ' '*6 + 'master_thread_id: %s\n' % master_core + config_str += ' '*6 + 'latency_thread_id: %s\n' % rx_core # add the dual_if section config_str += dual_if_section @@ -227,6 +253,7 @@ class CIfMap: self.m_cfg_file =cfg_file; self.m_cfg_dict={}; self.m_devices={}; + self.m_is_mellanox_mode=False; def dump_error (self,err): s="""%s @@ -264,6 +291,94 @@ Other network devices s= self.dump_error (err) raise DpdkSetup(s) + def set_only_mellanox_nics(self): + self.m_is_mellanox_mode=True; + + def get_only_mellanox_nics(self): + return self.m_is_mellanox_mode + + + def read_pci (self,pci_id,reg_id): + out=subprocess.check_output(['setpci', '-s',pci_id, '%s.w' %(reg_id)]) + out=out.decode(errors='replace'); + return (out.strip()); + + def write_pci (self,pci_id,reg_id,val): + out=subprocess.check_output(['setpci','-s',pci_id, '%s.w=%s' %(reg_id,val)]) + out=out.decode(errors='replace'); + return (out.strip()); + + def tune_mlx5_device (self,pci_id): + # set PCIe Read to 1024 and not 512 ... 
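Once the dual_if sections have taken their cores, the leftovers go to the latency ("rx") and master threads, preferring "main" hyperthreads over siblings so the latency thread is disturbed as little as possible. The selection order distilled into a function, assuming the lcores_pool dict shape built above:

def pick_master_and_rx(lcores_pool, has_zero_lcore):
    # flatten the leftovers, keeping the main/sibling distinction
    mains    = [lc for v in lcores_pool.values() for lc in v['main']]
    siblings = [lc for v in lcores_pool.values() for lc in v['siblings']]

    rx_core = mains.pop(0) if mains else siblings.pop(0)   # latency thread gets first pick
    if has_zero_lcore:
        master_core = 0                                    # lcore 0 was reserved earlier
    elif mains:
        master_core = mains.pop(0)
    else:
        master_core = siblings.pop(0)
    return master_core, rx_core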
need to add it to startup s + val=self.read_pci (pci_id,68) + if val[0]!='3': + val='3'+val[1:] + self.write_pci (pci_id,68,val) + assert(self.read_pci (pci_id,68)==val); + + def get_mtu_mlx5 (self,dev_id): + if len(dev_id)>0: + out=subprocess.check_output(['ifconfig', dev_id]) + out=out.decode(errors='replace'); + obj=re.search(r'MTU:(\d+)',out,flags=re.MULTILINE|re.DOTALL); + if obj: + return int(obj.group(1)); + else: + obj=re.search(r'mtu (\d+)',out,flags=re.MULTILINE|re.DOTALL); + if obj: + return int(obj.group(1)); + else: + return -1 + + def set_mtu_mlx5 (self,dev_id,new_mtu): + if len(dev_id)>0: + out=subprocess.check_output(['ifconfig', dev_id,'mtu',str(new_mtu)]) + out=out.decode(errors='replace'); + + + def set_max_mtu_mlx5_device(self,dev_id): + mtu=9*1024+22 + dev_mtu=self.get_mtu_mlx5 (dev_id); + if (dev_mtu>0) and (dev_mtu!=mtu): + self.set_mtu_mlx5(dev_id,mtu); + if self.get_mtu_mlx5(dev_id) != mtu: + print("Could not set MTU to %d" % mtu) + exit(-1); + + + def disable_flow_control_mlx5_device (self,dev_id): + + if len(dev_id)>0: + my_stderr = open("/dev/null","wb") + cmd ='ethtool -A '+dev_id + ' rx off tx off ' + subprocess.call(cmd, stdout=my_stderr,stderr=my_stderr, shell=True) + my_stderr.close(); + + def check_ofe_version (self): + ofed_info='/usr/bin/ofed_info' + ofed_ver= '-3.4-' + ofed_ver_show= '3.4-1' + + + if not os.path.isfile(ofed_info): + print("OFED %s is not installed on this setup" % ofed_info) + exit(-1); + + try: + out = subprocess.check_output([ofed_info]) + except Exception as e: + print("OFED %s can't run " % (ofed_info)) + exit(-1); + + lines=out.splitlines(); + + if len(lines)>1: + if not (ofed_ver in str(lines[0])): + print("installed OFED version is '%s' should be at least '%s' and up" % (lines[0],ofed_ver_show)) + exit(-1); + + + def load_config_file (self): fcfg=self.m_cfg_file @@ -299,15 +414,19 @@ Other network devices self.raise_error ('Error: port_limit should not be higher than number of interfaces in config file: %s\n' % fcfg) - def do_bind_one (self,key): - cmd='%s dpdk_nic_bind.py --bind=igb_uio %s ' % (sys.executable, key) + def do_bind_one (self,key,mellanox): + if mellanox: + drv="mlx5_core" + else: + drv="igb_uio" + + cmd='%s dpdk_nic_bind.py --bind=%s %s ' % (sys.executable, drv,key) print(cmd) res=os.system(cmd); if res!=0: raise DpdkSetup('') - def pci_name_to_full_name (self,pci_name): c='[0-9A-Fa-f]'; sp='[:]' @@ -330,7 +449,7 @@ Other network devices dpdk_nic_bind.get_nic_details() self.m_devices= dpdk_nic_bind.devices - def do_run (self): + def do_run (self,only_check_all_mlx=False): self.run_dpdk_lspci () if map_driver.dump_interfaces is None or (map_driver.dump_interfaces == [] and map_driver.parent_cfg): self.load_config_file() @@ -343,27 +462,74 @@ Other network devices if_list.append(dev['Slot']) if_list = list(map(self.pci_name_to_full_name, if_list)) + + + # check how many mellanox cards we have + Mellanox_cnt=0; for key in if_list: if key not in self.m_devices: err=" %s does not exist " %key; raise DpdkSetup(err) + if 'Vendor_str' not in self.m_devices[key]: + err=" %s does not have Vendor_str " %key; + raise DpdkSetup(err) - if 'Driver_str' in self.m_devices[key]: - if self.m_devices[key]['Driver_str'] not in dpdk_nic_bind.dpdk_drivers : - self.do_bind_one (key) + if self.m_devices[key]['Vendor_str'].find("Mellanox")>-1 : + Mellanox_cnt=Mellanox_cnt+1 + + + if not map_driver.dump_interfaces : + if ((Mellanox_cnt>0) and (Mellanox_cnt!= len(if_list))): + err=" All driver should be from one vendor. 
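tune_mlx5_device() above does a read-modify-write of PCI config word 0x68 through setpci, forcing the leading hex digit to 3; per the patch's own comment this raises the PCIe maximum read request size to 1024 bytes instead of 512 (that field interpretation is taken from the patch, not verified here). A standalone sketch with a placeholder PCI address; it needs root and a real device to run:

import subprocess

def read_pci_word(pci_id, reg):
    # setpci -s <pci_id> 68.w  ->  e.g. '2936'
    out = subprocess.check_output(['setpci', '-s', pci_id, '%s.w' % reg])
    return out.decode(errors='replace').strip()

def set_max_read_request_1k(pci_id):
    val = read_pci_word(pci_id, 68)
    if val[0] != '3':                    # leading hex digit != 3 -> not 1024B yet
        new_val = '3' + val[1:]
        subprocess.check_output(['setpci', '-s', pci_id, '68.w=%s' % new_val])
        assert read_pci_word(pci_id, 68) == new_val

# set_max_read_request_1k('03:00.0')    # placeholder address; requires root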
you have at least one driver from Mellanox but not all "; + raise DpdkSetup(err) + + + if not map_driver.dump_interfaces : + if Mellanox_cnt>0 : + self.set_only_mellanox_nics() + + if self.get_only_mellanox_nics(): + self.check_ofe_version () + for key in if_list: + pci_id=self.m_devices[key]['Slot_str'] + self.tune_mlx5_device (pci_id) + if 'Interface' in self.m_devices[key]: + dev_id=self.m_devices[key]['Interface'] + self.disable_flow_control_mlx5_device (dev_id) + self.set_max_mtu_mlx5_device(dev_id) + + + if only_check_all_mlx: + if Mellanox_cnt >0: + exit(1); else: - self.do_bind_one (key) + exit(0); - if if_list and map_driver.args.parent and dpdk_nic_bind.get_igb_uio_usage(): - pid = dpdk_nic_bind.get_pid_using_pci(if_list) - if pid: - cmdline = dpdk_nic_bind.read_pid_cmdline(pid) - print('Some or all of given interfaces are in use by following process:\npid: %s, cmd: %s' % (pid, cmdline)) - if not dpdk_nic_bind.confirm('Ignore and proceed (y/N):'): - sys.exit(1) + for key in if_list: + if key not in self.m_devices: + err=" %s does not exist " %key; + raise DpdkSetup(err) + + if 'Driver_str' in self.m_devices[key]: + if self.m_devices[key]['Driver_str'] not in (dpdk_nic_bind.dpdk_drivers+dpdk_nic_bind.dpdk_and_kernel) : + self.do_bind_one (key,(Mellanox_cnt>0)) + pass; else: - print('WARNING: Some other program is using DPDK driver.\nIf it is TRex and you did not configure it for dual run, current command will fail.') + self.do_bind_one (key,(Mellanox_cnt>0)) + pass; + + if (Mellanox_cnt==0): + # We are not in Mellanox case, we can do this check only in case of Intel (another process is running) + if if_list and map_driver.args.parent and (dpdk_nic_bind.get_igb_uio_usage()): + pid = dpdk_nic_bind.get_pid_using_pci(if_list) + if pid: + cmdline = dpdk_nic_bind.read_pid_cmdline(pid) + print('Some or all of given interfaces are in use by following process:\npid: %s, cmd: %s' % (pid, cmdline)) + if not dpdk_nic_bind.confirm('Ignore and proceed (y/N):'): + sys.exit(1) + else: + print('WARNING: Some other program is using DPDK driver.\nIf it is TRex and you did not configure it for dual run, current command will fail.') def do_return_to_linux(self): if not self.m_devices: @@ -684,7 +850,7 @@ To return to Linux the DPDK bound interfaces (for ifconfig etc.) 
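do_run() now counts Mellanox ports up front and rejects mixed-vendor setups, because the rest of the flow forks completely: Mellanox ports keep (or are re-bound to) the kernel mlx5_core driver and get PCIe/MTU/flow-control tuning, while everything else goes down the classic igb_uio path. The decision reduced to its essentials; the devices dict shape mirrors the Vendor_str check in the patch:

def choose_bind_driver(devices, if_list):
    mlx_cnt = sum('Mellanox' in devices[k].get('Vendor_str', '') for k in if_list)
    if mlx_cnt and mlx_cnt != len(if_list):
        raise RuntimeError('All ports should be from one vendor: '
                           '%s of %s ports are Mellanox' % (mlx_cnt, len(if_list)))
    # mlx5 is bifurcated: DPDK talks to the NIC through the kernel driver,
    # so those setups (re)bind to mlx5_core instead of igb_uio
    return 'mlx5_core' if mlx_cnt else 'igb_uio'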
sudo ./dpdk_set_ports.py -l To create TRex config file using interactive mode - sudo ./dpdk_set_ports.py -l + sudo ./dpdk_set_ports.py -i To create a default config file (example1) sudo ./dpdk_setup_ports.py -c 02:00.0 02:00.1 -o /etc/trex_cfg.yaml @@ -829,6 +995,8 @@ def main (): print(e) exit(-1) + + if __name__ == '__main__': main() diff --git a/scripts/dumy_libs/libibverbs.so.1 b/scripts/dumy_libs/libibverbs.so.1 Binary files differnew file mode 100644 index 00000000..bd93569d --- /dev/null +++ b/scripts/dumy_libs/libibverbs.so.1 diff --git a/scripts/dumy_libs/libnl-3.so.200 b/scripts/dumy_libs/libnl-3.so.200 Binary files differnew file mode 100644 index 00000000..9eaaac9d --- /dev/null +++ b/scripts/dumy_libs/libnl-3.so.200 diff --git a/scripts/dumy_libs/libnl-route-3.so.200 b/scripts/dumy_libs/libnl-route-3.so.200 Binary files differnew file mode 100644 index 00000000..9e1835f1 --- /dev/null +++ b/scripts/dumy_libs/libnl-route-3.so.200 diff --git a/scripts/exp/dns_ipv6_rxcheck-ex.erf b/scripts/exp/dns_ipv6_rxcheck-ex.erf Binary files differindex ebf95197..3984c0ef 100755 --- a/scripts/exp/dns_ipv6_rxcheck-ex.erf +++ b/scripts/exp/dns_ipv6_rxcheck-ex.erf diff --git a/scripts/exp/dns_ipv6_rxcheck.erf b/scripts/exp/dns_ipv6_rxcheck.erf Binary files differindex ebf95197..3984c0ef 100644 --- a/scripts/exp/dns_ipv6_rxcheck.erf +++ b/scripts/exp/dns_ipv6_rxcheck.erf diff --git a/scripts/exp/dns_rxcheck-ex.erf b/scripts/exp/dns_rxcheck-ex.erf Binary files differindex 21a610a6..a6135f34 100755 --- a/scripts/exp/dns_rxcheck-ex.erf +++ b/scripts/exp/dns_rxcheck-ex.erf diff --git a/scripts/exp/dns_rxcheck.erf b/scripts/exp/dns_rxcheck.erf Binary files differindex 21a610a6..a6135f34 100644 --- a/scripts/exp/dns_rxcheck.erf +++ b/scripts/exp/dns_rxcheck.erf diff --git a/scripts/exp/ipv6-0-ex.erf b/scripts/exp/ipv6-0-ex.erf Binary files differindex 21e0eabb..1e102856 100755 --- a/scripts/exp/ipv6-0-ex.erf +++ b/scripts/exp/ipv6-0-ex.erf diff --git a/scripts/exp/ipv6-0.erf b/scripts/exp/ipv6-0.erf Binary files differindex 21e0eabb..1e102856 100644 --- a/scripts/exp/ipv6-0.erf +++ b/scripts/exp/ipv6-0.erf diff --git a/scripts/exp/ipv6_vlan-0-ex.erf b/scripts/exp/ipv6_vlan-0-ex.erf Binary files differindex b97d760b..f7c82833 100755 --- a/scripts/exp/ipv6_vlan-0-ex.erf +++ b/scripts/exp/ipv6_vlan-0-ex.erf diff --git a/scripts/exp/ipv6_vlan-0.erf b/scripts/exp/ipv6_vlan-0.erf Binary files differindex b97d760b..f7c82833 100644 --- a/scripts/exp/ipv6_vlan-0.erf +++ b/scripts/exp/ipv6_vlan-0.erf diff --git a/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf b/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf Binary files differindex 06ac6484..a35e9f47 100755 --- a/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf +++ b/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf diff --git a/scripts/exp/rtsp_short1_ipv6_rxcheck.erf b/scripts/exp/rtsp_short1_ipv6_rxcheck.erf Binary files differindex 06ac6484..a35e9f47 100644 --- a/scripts/exp/rtsp_short1_ipv6_rxcheck.erf +++ b/scripts/exp/rtsp_short1_ipv6_rxcheck.erf diff --git a/scripts/exp/rtsp_short1_rxcheck-ex.erf b/scripts/exp/rtsp_short1_rxcheck-ex.erf Binary files differindex cd120df9..cfc3726a 100755 --- a/scripts/exp/rtsp_short1_rxcheck-ex.erf +++ b/scripts/exp/rtsp_short1_rxcheck-ex.erf diff --git a/scripts/exp/rtsp_short1_rxcheck.erf b/scripts/exp/rtsp_short1_rxcheck.erf Binary files differindex cd120df9..cfc3726a 100644 --- a/scripts/exp/rtsp_short1_rxcheck.erf +++ b/scripts/exp/rtsp_short1_rxcheck.erf diff --git 
a/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/context.py index 86a9c5dc..692e0256 100644 --- a/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/context.py +++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/32bit/zmq/sugar/context.py @@ -5,7 +5,6 @@ # Distributed under the terms of the Modified BSD License. import atexit -import weakref from zmq.backend import Context as ContextBase from . import constants @@ -14,6 +13,14 @@ from .constants import ENOTSUP, ctx_opt_names from .socket import Socket from zmq.error import ZMQError +# notice when exiting, to avoid triggering term on exit +_exiting = False +def _notice_atexit(): + global _exiting + _exiting = True +atexit.register(_notice_atexit) + + from zmq.utils.interop import cast_int_addr @@ -25,7 +32,6 @@ class Context(ContextBase, AttributeSetter): sockopts = None _instance = None _shadow = False - _exiting = False def __init__(self, io_threads=1, **kwargs): super(Context, self).__init__(io_threads=io_threads, **kwargs) @@ -35,18 +41,10 @@ class Context(ContextBase, AttributeSetter): self._shadow = False self.sockopts = {} - self._exiting = False - if not self._shadow: - ctx_ref = weakref.ref(self) - def _notify_atexit(): - ctx = ctx_ref() - if ctx is not None: - ctx._exiting = True - atexit.register(_notify_atexit) def __del__(self): """deleting a Context should terminate it, without trying non-threadsafe destroy""" - if not self._shadow and not self._exiting: + if not self._shadow and not _exiting: self.term() def __enter__(self): diff --git a/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/context.py index 86a9c5dc..692e0256 100644 --- a/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/context.py +++ b/scripts/external_libs/pyzmq-14.5.0/python2/cel59/64bit/zmq/sugar/context.py @@ -5,7 +5,6 @@ # Distributed under the terms of the Modified BSD License. import atexit -import weakref from zmq.backend import Context as ContextBase from . 
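This pyzmq patch, applied identically to all six bundled copies of zmq/sugar/context.py, replaces the per-instance weakref + _exiting attribute with a single module-level flag: __del__ must not call term() while the interpreter is shutting down, and one atexit hook writing a module global is cheaper and immune to instance attributes being torn down before the destructor runs. The pattern in isolation:

import atexit

_exiting = False

def _notice_atexit():
    # flipped exactly once, when interpreter shutdown begins
    global _exiting
    _exiting = True

atexit.register(_notice_atexit)

class Context(object):
    _shadow = False

    def term(self):
        print('terminating context')

    def __del__(self):
        # module globals are still readable during __del__, so this check
        # is safe even late in shutdown; no per-instance bookkeeping needed
        if not self._shadow and not _exiting:
            self.term()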
import constants @@ -14,6 +13,14 @@ from .constants import ENOTSUP, ctx_opt_names from .socket import Socket from zmq.error import ZMQError +# notice when exiting, to avoid triggering term on exit +_exiting = False +def _notice_atexit(): + global _exiting + _exiting = True +atexit.register(_notice_atexit) + + from zmq.utils.interop import cast_int_addr @@ -25,7 +32,6 @@ class Context(ContextBase, AttributeSetter): sockopts = None _instance = None _shadow = False - _exiting = False def __init__(self, io_threads=1, **kwargs): super(Context, self).__init__(io_threads=io_threads, **kwargs) @@ -35,18 +41,10 @@ class Context(ContextBase, AttributeSetter): self._shadow = False self.sockopts = {} - self._exiting = False - if not self._shadow: - ctx_ref = weakref.ref(self) - def _notify_atexit(): - ctx = ctx_ref() - if ctx is not None: - ctx._exiting = True - atexit.register(_notify_atexit) def __del__(self): """deleting a Context should terminate it, without trying non-threadsafe destroy""" - if not self._shadow and not self._exiting: + if not self._shadow and not _exiting: self.term() def __enter__(self): diff --git a/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/context.py index 86a9c5dc..692e0256 100644 --- a/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/context.py +++ b/scripts/external_libs/pyzmq-14.5.0/python2/fedora18/64bit/zmq/sugar/context.py @@ -5,7 +5,6 @@ # Distributed under the terms of the Modified BSD License. import atexit -import weakref from zmq.backend import Context as ContextBase from . import constants @@ -14,6 +13,14 @@ from .constants import ENOTSUP, ctx_opt_names from .socket import Socket from zmq.error import ZMQError +# notice when exiting, to avoid triggering term on exit +_exiting = False +def _notice_atexit(): + global _exiting + _exiting = True +atexit.register(_notice_atexit) + + from zmq.utils.interop import cast_int_addr @@ -25,7 +32,6 @@ class Context(ContextBase, AttributeSetter): sockopts = None _instance = None _shadow = False - _exiting = False def __init__(self, io_threads=1, **kwargs): super(Context, self).__init__(io_threads=io_threads, **kwargs) @@ -35,18 +41,10 @@ class Context(ContextBase, AttributeSetter): self._shadow = False self.sockopts = {} - self._exiting = False - if not self._shadow: - ctx_ref = weakref.ref(self) - def _notify_atexit(): - ctx = ctx_ref() - if ctx is not None: - ctx._exiting = True - atexit.register(_notify_atexit) def __del__(self): """deleting a Context should terminate it, without trying non-threadsafe destroy""" - if not self._shadow and not self._exiting: + if not self._shadow and not _exiting: self.term() def __enter__(self): diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/context.py index 86a9c5dc..692e0256 100644 --- a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/context.py +++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/32bit/zmq/sugar/context.py @@ -5,7 +5,6 @@ # Distributed under the terms of the Modified BSD License. import atexit -import weakref from zmq.backend import Context as ContextBase from . 
import constants @@ -14,6 +13,14 @@ from .constants import ENOTSUP, ctx_opt_names from .socket import Socket from zmq.error import ZMQError +# notice when exiting, to avoid triggering term on exit +_exiting = False +def _notice_atexit(): + global _exiting + _exiting = True +atexit.register(_notice_atexit) + + from zmq.utils.interop import cast_int_addr @@ -25,7 +32,6 @@ class Context(ContextBase, AttributeSetter): sockopts = None _instance = None _shadow = False - _exiting = False def __init__(self, io_threads=1, **kwargs): super(Context, self).__init__(io_threads=io_threads, **kwargs) @@ -35,18 +41,10 @@ class Context(ContextBase, AttributeSetter): self._shadow = False self.sockopts = {} - self._exiting = False - if not self._shadow: - ctx_ref = weakref.ref(self) - def _notify_atexit(): - ctx = ctx_ref() - if ctx is not None: - ctx._exiting = True - atexit.register(_notify_atexit) def __del__(self): """deleting a Context should terminate it, without trying non-threadsafe destroy""" - if not self._shadow and not self._exiting: + if not self._shadow and not _exiting: self.term() def __enter__(self): diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/context.py index 86a9c5dc..692e0256 100644 --- a/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/context.py +++ b/scripts/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/sugar/context.py @@ -5,7 +5,6 @@ # Distributed under the terms of the Modified BSD License. import atexit -import weakref from zmq.backend import Context as ContextBase from . import constants @@ -14,6 +13,14 @@ from .constants import ENOTSUP, ctx_opt_names from .socket import Socket from zmq.error import ZMQError +# notice when exiting, to avoid triggering term on exit +_exiting = False +def _notice_atexit(): + global _exiting + _exiting = True +atexit.register(_notice_atexit) + + from zmq.utils.interop import cast_int_addr @@ -25,7 +32,6 @@ class Context(ContextBase, AttributeSetter): sockopts = None _instance = None _shadow = False - _exiting = False def __init__(self, io_threads=1, **kwargs): super(Context, self).__init__(io_threads=io_threads, **kwargs) @@ -35,18 +41,10 @@ class Context(ContextBase, AttributeSetter): self._shadow = False self.sockopts = {} - self._exiting = False - if not self._shadow: - ctx_ref = weakref.ref(self) - def _notify_atexit(): - ctx = ctx_ref() - if ctx is not None: - ctx._exiting = True - atexit.register(_notify_atexit) def __del__(self): """deleting a Context should terminate it, without trying non-threadsafe destroy""" - if not self._shadow and not self._exiting: + if not self._shadow and not _exiting: self.term() def __enter__(self): diff --git a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/context.py b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/context.py index 86a9c5dc..692e0256 100644 --- a/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/context.py +++ b/scripts/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/context.py @@ -5,7 +5,6 @@ # Distributed under the terms of the Modified BSD License. import atexit -import weakref from zmq.backend import Context as ContextBase from . 
import constants @@ -14,6 +13,14 @@ from .constants import ENOTSUP, ctx_opt_names from .socket import Socket from zmq.error import ZMQError +# notice when exiting, to avoid triggering term on exit +_exiting = False +def _notice_atexit(): + global _exiting + _exiting = True +atexit.register(_notice_atexit) + + from zmq.utils.interop import cast_int_addr @@ -25,7 +32,6 @@ class Context(ContextBase, AttributeSetter): sockopts = None _instance = None _shadow = False - _exiting = False def __init__(self, io_threads=1, **kwargs): super(Context, self).__init__(io_threads=io_threads, **kwargs) @@ -35,18 +41,10 @@ class Context(ContextBase, AttributeSetter): self._shadow = False self.sockopts = {} - self._exiting = False - if not self._shadow: - ctx_ref = weakref.ref(self) - def _notify_atexit(): - ctx = ctx_ref() - if ctx is not None: - ctx._exiting = True - atexit.register(_notify_atexit) def __del__(self): """deleting a Context should terminate it, without trying non-threadsafe destroy""" - if not self._shadow and not self._exiting: + if not self._shadow and not _exiting: self.term() def __enter__(self): diff --git a/scripts/ko/3.10.0-327.el7.x86_64/igb_uio.ko b/scripts/ko/3.10.0-327.el7.x86_64/igb_uio.ko Binary files differnew file mode 100644 index 00000000..a85b9add --- /dev/null +++ b/scripts/ko/3.10.0-327.el7.x86_64/igb_uio.ko diff --git a/scripts/master_daemon.py b/scripts/master_daemon.py index e4a2fd1c..855358ff 100755 --- a/scripts/master_daemon.py +++ b/scripts/master_daemon.py @@ -33,12 +33,11 @@ def get_trex_path(): def update_trex(package_path = 'http://trex-tgn.cisco.com/trex/release/latest'): if not args.allow_update: raise Exception('Updating server not allowed') + file_name = 'trex_package.tar.gz' # getting new package if package_path.startswith('http'): - file_name = package_path.split('/')[-1] ret_code, stdout, stderr = run_command('wget %s -O %s' % (package_path, os.path.join(tmp_dir, file_name)), timeout = 600) else: - file_name = os.path.basename(package_path) ret_code, stdout, stderr = run_command('rsync -Lc %s %s' % (package_path, os.path.join(tmp_dir, file_name)), timeout = 300) if ret_code: raise Exception('Could not get requested package. Result: %s' % [ret_code, stdout, stderr]) @@ -163,6 +162,11 @@ def _check_path_under_current_or_temp(path): if getpass.getuser() != 'root': fail('Please run this program as root/with sudo') +pid = os.getpid() +ret = os.system('taskset -pc 0 %s' % pid) +if ret: + fail('Could not set self affinity to core zero.') + daemon_actions = OrderedDict([('start', 'start the daemon'), ('stop', 'exit the daemon process'), ('show', 'prompt the status of daemon process (running / not running)'), diff --git a/scripts/t-rex-64 b/scripts/t-rex-64 index d2388cfd..fc8318a5 100755 --- a/scripts/t-rex-64 +++ b/scripts/t-rex-64 @@ -25,6 +25,12 @@ done <<< "$($PYTHON dpdk_setup_ports.py --dump-pci-description)" cd $(dirname $0) export LD_LIBRARY_PATH=$PWD +#Add dummy lib in case we don't find it, e.g. there is no OFED installed +if ldd _$(basename $0) | grep "libibverbs.so" | grep -q "not found"; then +export LD_LIBRARY_PATH=$PWD:$PWD/dumy_libs +fi + + if [ -t 0 ] && [ -t 1 ]; then export is_tty=true saveterm="$(stty -g)" diff --git a/scripts/t-rex-64-debug-gdb b/scripts/t-rex-64-debug-gdb index 1e019407..83ab82e0 100755 --- a/scripts/t-rex-64-debug-gdb +++ b/scripts/t-rex-64-debug-gdb @@ -1,4 +1,11 @@ #! /bin/bash export LD_LIBRARY_PATH=`pwd` + +#Add dummy lib in case we don't find it, e.g. 
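master_daemon.py now pins itself to core 0 by shelling out to taskset with its own pid. On Linux with Python 3 the standard library can achieve the same without the external binary; a sketch of that alternative (os.sched_setaffinity is Linux-only and Python 3.3+, so it is not a drop-in replacement where Python 2 must keep working):

import os

# equivalent of: taskset -pc 0 <pid>
try:
    os.sched_setaffinity(0, {0})   # pid 0 means "this process"; CPU set {0}
except (AttributeError, OSError) as e:
    raise SystemExit('Could not set self affinity to core zero: %s' % e)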
there is no OFED installed +if ldd _t-rex-64 | grep "libibverbs.so" | grep -q "not found"; then +export LD_LIBRARY_PATH=$PWD:$PWD/dumy_libs +fi + + /usr/bin/gdb --args ./_t-rex-64-debug $@ diff --git a/scripts/t-rex-64-valgrind b/scripts/t-rex-64-valgrind new file mode 100755 index 00000000..d11491ba --- /dev/null +++ b/scripts/t-rex-64-valgrind @@ -0,0 +1,69 @@ +#! /bin/bash +if [ "$(id -u)" != 0 ]; then + echo 'Error: Please run as root (sudo etc.)' + exit -1 +fi + +INPUT_ARGS=${@//[]/-} # replace bizarre minuses with normal one + +./trex-cfg $INPUT_ARGS +RESULT=$? +if [ $RESULT -ne 0 ]; then + exit $RESULT +fi + +pci_desc_re='^(\S+) - (.+)$' +source find_python.sh +while read line +do + if [[ "$line" =~ $pci_desc_re ]]; then + pci_name="pci$(echo ${BASH_REMATCH[1]} | tr ':' '_' | tr '.' '_')" # make alphanumeric name + export $pci_name="${BASH_REMATCH[2]}" + fi +done <<< "$($PYTHON dpdk_setup_ports.py --dump-pci-description)" + +cd $(dirname $0) +export LD_LIBRARY_PATH=$PWD + +#Add dummy lib in case we don't find it, e.g. there is no OFED installed +if ldd ./_t-rex-64 | grep "libibverbs.so" | grep -q "not found"; then +export LD_LIBRARY_PATH=$PWD:$PWD/dumy_libs +fi + +export VALGRIND_LIB=/auto/proj-pcube-b/apps/PL-b/tools/valgrind-dpdk/lib/valgrind +export VALGRIND_BIN="/auto/proj-pcube-b/apps/PL-b/tools/valgrind-dpdk/bin/valgrind --leak-check=full --error-exitcode=1 --suppressions=valgrind.sup" +export GLIBCXX_FORCE_NEW=1 + +if [ -t 0 ] && [ -t 1 ]; then + export is_tty=true + saveterm="$(stty -g)" +else + export is_tty=false +fi + +# if we have a new core run optimized trex +if grep -q avx /proc/cpuinfo ; then + $VALGRIND_BIN ./_t-rex-64 $INPUT_ARGS + RESULT=$? + if [ $RESULT -eq 132 ]; then + echo " WARNING this program is optimized for the new Intel processors. " + echo " try the ./t-rex-64-o application that should work for any Intel processor but might be slower. " + echo " try to run t-rex-64-o .. " + + $VALGRIND_BIN ./_t-rex-64 $INPUT_ARGS + RESULT=$? + fi +else + $VALGRIND_BIN ./_t-rex-64 $INPUT_ARGS + RESULT=$? +fi + +if $is_tty; then + stty $saveterm +fi + +if [ $RESULT -ne 0 ]; then + exit $RESULT +fi + + diff --git a/scripts/trex-cfg b/scripts/trex-cfg index 714aea6c..c6f12a7e 100755 --- a/scripts/trex-cfg +++ b/scripts/trex-cfg @@ -55,13 +55,23 @@ if ! lsmod | grep -q igb_uio ; then fi else echo "ERROR: We don't have precompiled igb_uio.ko module for your kernel version" - echo "You can try compiling yourself, using the following commands:" - echo "\$cd ko/src " - echo "\$make " - echo "\$make install " - echo "\$cd - " - echo "Then try to run Trex again" - exit 1 + echo Will try compiling automatically. + { + cd ko/src && + make && + make install && + cd - + } &> /dev/null || { + echo Automatic compilation failed. + echo "You can try compiling yourself, using the following commands:" + echo "\$cd ko/src " + echo "\$make " + echo "\$make install " + echo "\$cd - " + echo "Then try to run TRex again" + exit 1 + } + echo Success. 
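Both wrapper scripts probe the TRex binary with ldd and, when libibverbs.so comes back "not found" (no OFED installed), extend LD_LIBRARY_PATH with the bundled dumy_libs directory (the directory really is spelled that way) so the loader is satisfied by the stub libraries. The same probe written out in Python for clarity, guarded so it is a no-op when the binary is absent:

import os
import subprocess

def needs_dummy_ibverbs(binary):
    out = subprocess.check_output(['ldd', binary]).decode(errors='replace')
    return any('libibverbs.so' in line and 'not found' in line
               for line in out.splitlines())

binary = './_t-rex-64'
if os.path.exists(binary):
    ld_path = os.getcwd()
    if needs_dummy_ibverbs(binary):
        ld_path += ':' + os.path.join(os.getcwd(), 'dumy_libs')
    os.environ['LD_LIBRARY_PATH'] = ld_path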
fi fi diff --git a/scripts/trex_show_threads.py b/scripts/trex_show_threads.py index fabe6d68..1824d073 100755 --- a/scripts/trex_show_threads.py +++ b/scripts/trex_show_threads.py @@ -58,8 +58,8 @@ def isnum (x): def find_trex_pid (): procs = [x for x in os.listdir('/proc/') if isnum(x)] for proc in procs: - cmd = open('/proc/{0}/{1}'.format(proc, 'cmdline')).readline() - if '_t-rex' in cmd: + cmd = open('/proc/{0}/{1}'.format(proc, 'comm')).readline() + if cmd.startswith('_t-rex-64'): return proc return None diff --git a/scripts/valgrind.sup b/scripts/valgrind.sup new file mode 100644 index 00000000..b6bcc883 --- /dev/null +++ b/scripts/valgrind.sup @@ -0,0 +1,57 @@ +{ + DL issue + Memcheck:Cond + fun:index + fun:expand_dynamic_string_token + fun:fillin_rpath + fun:_dl_init_paths + fun:dl_main + fun:_dl_sysdep_start + fun:_dl_start_final + fun:_dl_start + obj:/lib/x86_64-linux-gnu/ld-2.19.so + obj:* + obj:* + obj:* + obj:* +} + +{ + DPDK threads + Memcheck:Leak + match-leak-kinds: possible + fun:calloc + fun:allocate_dtv + fun:_dl_allocate_tls + fun:allocate_stack + fun:pthread_create@@GLIBC_2.2.5 + fun:rte_eal_init + fun:_Z9main_testiPPc + fun:(below main) +} + +{ + DPDK interrupt thread + Memcheck:Leak + match-leak-kinds: possible + fun:calloc + fun:allocate_dtv + fun:_dl_allocate_tls + fun:allocate_stack + fun:pthread_create@@GLIBC_2.2.5 + fun:rte_eal_intr_init + fun:rte_eal_init + fun:_Z9main_testiPPc + fun:(below main) +} + +{ + DPDK epoll ctl + Memcheck:Param + epoll_ctl(event) + fun:epoll_ctl + fun:eal_intr_thread_main + fun:start_thread + fun:clone +}
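The find_trex_pid() fix above switches from /proc/<pid>/cmdline to /proc/<pid>/comm: cmdline matches any process whose argument list merely mentions '_t-rex' (a grep, an editor), while comm holds only the task's own executable name, truncated by the kernel to 15 characters, which '_t-rex-64' fits within. A quick probe showing the difference on the current process:

import os

def read_proc(pid, entry):
    with open('/proc/%s/%s' % (pid, entry)) as f:
        return f.read()

pid = os.getpid()
print(repr(read_proc(pid, 'comm')))      # executable name only, e.g. 'python3\n'
print(repr(read_proc(pid, 'cmdline')))   # full NUL-separated argv of the process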