summaryrefslogtreecommitdiffstats
path: root/scripts/automation/regression
diff options
context:
space:
mode:
Diffstat (limited to 'scripts/automation/regression')
-rwxr-xr-xscripts/automation/regression/CPlatform.py945
-rwxr-xr-xscripts/automation/regression/CProgressDisp.py87
-rwxr-xr-xscripts/automation/regression/CShowParser.py228
-rwxr-xr-xscripts/automation/regression/CustomLogger.py36
-rwxr-xr-xscripts/automation/regression/aggregate_results.py659
-rw-r--r--scripts/automation/regression/functional_tests/config.yaml74
-rw-r--r--scripts/automation/regression/functional_tests/cpp_gtests_test.py46
-rw-r--r--scripts/automation/regression/functional_tests/filters_test.py100
-rwxr-xr-xscripts/automation/regression/functional_tests/functional_general_test.py22
-rw-r--r--scripts/automation/regression/functional_tests/golden/basic_imix_golden.capbin0 -> 198474 bytes
-rw-r--r--scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.capbin0 -> 316552 bytes
-rw-r--r--scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.capbin0 -> 38024 bytes
-rw-r--r--scripts/automation/regression/functional_tests/golden/udp_590.capbin0 -> 630 bytes
-rwxr-xr-xscripts/automation/regression/functional_tests/hltapi_stream_builder_test.py629
-rwxr-xr-xscripts/automation/regression/functional_tests/misc_methods_test.py61
-rwxr-xr-xscripts/automation/regression/functional_tests/pkt_bld_general_test.py28
-rwxr-xr-xscripts/automation/regression/functional_tests/platform_cmd_cache_test.py60
-rwxr-xr-xscripts/automation/regression/functional_tests/platform_cmd_link_test.py62
-rwxr-xr-xscripts/automation/regression/functional_tests/platform_device_cfg_test.py20
-rwxr-xr-xscripts/automation/regression/functional_tests/platform_dual_if_obj_test.py31
-rwxr-xr-xscripts/automation/regression/functional_tests/platform_if_manager_test.py40
-rwxr-xr-xscripts/automation/regression/functional_tests/platform_if_obj_test.py49
-rw-r--r--scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py369
-rw-r--r--scripts/automation/regression/functional_tests/stl_basic_tests.py367
-rwxr-xr-xscripts/automation/regression/functional_tests/trex_cfg_creator_test.py698
-rwxr-xr-xscripts/automation/regression/hltapi_playground.py193
-rwxr-xr-xscripts/automation/regression/interactive_platform4
-rwxr-xr-xscripts/automation/regression/interactive_platform.py338
-rwxr-xr-xscripts/automation/regression/interfaces_e.py8
-rwxr-xr-xscripts/automation/regression/misc_methods.py284
-rwxr-xr-xscripts/automation/regression/outer_packages.py71
-rwxr-xr-xscripts/automation/regression/platform_cmd_link.py488
-rw-r--r--scripts/automation/regression/reports/.keep0
-rwxr-xr-xscripts/automation/regression/setups/dave/benchmark.yaml118
-rwxr-xr-xscripts/automation/regression/setups/dave/config.yaml94
-rw-r--r--scripts/automation/regression/setups/dummy/config.yaml11
-rw-r--r--scripts/automation/regression/setups/kiwi02/benchmark.yaml298
-rw-r--r--scripts/automation/regression/setups/kiwi02/config.yaml95
-rw-r--r--scripts/automation/regression/setups/trex-dan/benchmark.yaml253
-rw-r--r--scripts/automation/regression/setups/trex-dan/config.yaml68
-rw-r--r--scripts/automation/regression/setups/trex04/benchmark.yaml155
-rw-r--r--scripts/automation/regression/setups/trex04/config.yaml39
-rw-r--r--scripts/automation/regression/setups/trex07/benchmark.yaml244
-rw-r--r--scripts/automation/regression/setups/trex07/config.yaml66
-rw-r--r--scripts/automation/regression/setups/trex08/benchmark.yaml181
-rw-r--r--scripts/automation/regression/setups/trex08/config.yaml40
-rw-r--r--scripts/automation/regression/setups/trex09/benchmark.yaml234
-rw-r--r--scripts/automation/regression/setups/trex09/config.yaml38
-rw-r--r--scripts/automation/regression/setups/trex10/benchmark.yaml60
-rw-r--r--scripts/automation/regression/setups/trex10/config.yaml38
-rw-r--r--scripts/automation/regression/setups/trex11/benchmark.yaml155
-rw-r--r--scripts/automation/regression/setups/trex11/config.yaml38
-rw-r--r--scripts/automation/regression/setups/trex12/benchmark.yaml182
-rw-r--r--scripts/automation/regression/setups/trex12/config.yaml40
-rw-r--r--scripts/automation/regression/setups/trex14/benchmark.yaml245
-rw-r--r--scripts/automation/regression/setups/trex14/config.yaml67
-rw-r--r--scripts/automation/regression/setups/trex15/benchmark.yaml155
-rw-r--r--scripts/automation/regression/setups/trex15/config.yaml39
-rw-r--r--scripts/automation/regression/setups/trex17/benchmark.yaml155
-rw-r--r--scripts/automation/regression/setups/trex17/config.yaml39
-rw-r--r--scripts/automation/regression/setups/trex24/benchmark.yaml155
-rw-r--r--scripts/automation/regression/setups/trex24/config.yaml39
-rw-r--r--scripts/automation/regression/setups/trex25/benchmark.yaml254
-rw-r--r--scripts/automation/regression/setups/trex25/config.yaml93
-rwxr-xr-xscripts/automation/regression/sshpass.exp17
-rw-r--r--scripts/automation/regression/stateful_tests/__init__.py0
-rwxr-xr-xscripts/automation/regression/stateful_tests/tests_exceptions.py37
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_client_pkg_test.py34
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_general_test.py363
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_imix_test.py213
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_ipv6_test.py103
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_nat_test.py169
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_nbar_test.py123
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_rx_test.py280
-rwxr-xr-xscripts/automation/regression/stateless_tests/__init__.py0
-rwxr-xr-xscripts/automation/regression/stateless_tests/stl_benchmark_test.py75
-rw-r--r--scripts/automation/regression/stateless_tests/stl_client_test.py350
-rwxr-xr-xscripts/automation/regression/stateless_tests/stl_examples_test.py31
-rw-r--r--scripts/automation/regression/stateless_tests/stl_general_test.py113
-rw-r--r--scripts/automation/regression/stateless_tests/stl_performance_test.py351
-rw-r--r--scripts/automation/regression/stateless_tests/stl_rx_test.py568
-rwxr-xr-xscripts/automation/regression/stateless_tests/trex_client_pkg_test.py39
-rw-r--r--scripts/automation/regression/test_pcaps/pcap_dual_test.erfbin0 -> 101488 bytes
-rw-r--r--scripts/automation/regression/trex.py457
-rwxr-xr-xscripts/automation/regression/trex_unit_test.py437
85 files changed, 13445 insertions, 0 deletions
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py
new file mode 100755
index 00000000..0017e7db
--- /dev/null
+++ b/scripts/automation/regression/CPlatform.py
@@ -0,0 +1,945 @@
+#!/router/bin/python
+
+from interfaces_e import IFType
+from platform_cmd_link import *
+import CustomLogger
+import misc_methods
+import re
+import time
+import CProgressDisp
+from CShowParser import CShowParser
+
class CPlatform(object):
    """Driver for a router platform: interface setup, PBR, NAT, ZBF and stats."""

    def __init__(self, silent_mode):
        """Create the platform handle and its command link.

        silent_mode controls the verbosity of the underlying CCommandLink.
        """
        # managers for interfaces and for pushing CLI command batches
        self.if_mngr = CIfManager()
        self.cmd_link = CCommandLink(silent_mode)
        # last-applied configuration objects, kept so they can be removed later
        self.nat_config = None
        self.stat_route_config = None
        # platform image handling state
        self.running_image = None
        self.needed_image_path = None
        self.tftp_cfg = None
        # tracks which one-time configuration steps already ran
        self.config_history = {'basic_if_config': False,
                               'tftp_server_config': False}
+
+ def configure_basic_interfaces(self, mtu = 9050):
+
+ cache = CCommandCache()
+ for dual_if in self.if_mngr.get_dual_if_list():
+ client_if_command_set = []
+ server_if_command_set = []
+
+ client_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.client_if.get_src_mac_addr()) )
+ client_if_command_set.append ('mtu %s' % mtu)
+ client_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.client_if.get_ipv4_addr() ))
+ client_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.client_if.get_ipv6_addr() ))
+
+ cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+
+ server_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.server_if.get_src_mac_addr()) )
+ server_if_command_set.append ('mtu %s' % mtu)
+ server_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.server_if.get_ipv4_addr() ))
+ server_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.server_if.get_ipv6_addr() ))
+
+ cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
+
+ self.cmd_link.run_single_command(cache)
+ self.config_history['basic_if_config'] = True
+
+
+
+ def configure_basic_filtered_interfaces(self, intf_list, mtu = 9050):
+
+ cache = CCommandCache()
+ for intf in intf_list:
+ if_command_set = []
+
+ if_command_set.append ('mac-address {mac}'.format( mac = intf.get_src_mac_addr()) )
+ if_command_set.append ('mtu %s' % mtu)
+ if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = intf.get_ipv4_addr() ))
+ if_command_set.append ('ipv6 address {ip}/64'.format( ip = intf.get_ipv6_addr() ))
+
+ cache.add('IF', if_command_set, intf.get_name())
+
+ self.cmd_link.run_single_command(cache)
+
+
+ def load_clean_config (self, config_filename = "clean_config.cfg", cfg_drive = "bootflash"):
+ for i in range(5):
+ self.clear_nat_translations()
+ cache = CCommandCache()
+ cache.add('EXEC', "configure replace {drive}:{file} force".format(drive = cfg_drive, file = config_filename))
+ res = self.cmd_link.run_single_command(cache)
+ if 'Rollback Done' not in res:
+ print('Failed to load clean config, trying again')
+ time.sleep(2)
+ if i < 4:
+ continue
+ raise Exception('Could not load clean config, response: %s' % res)
+
    def config_pbr (self, mode = 'config'):
        """Apply (mode='config') or remove (mode='unconfig') policy-based routing.

        For every dual-interface pair a pair of route-maps named
        p<i>_to_p<i+1> / p<i+1>_to_p<i> steers traffic to the opposite side's
        next-hop address. Duplicated pairs are isolated inside their own VRF
        (dual_if.get_vrf_name()). Static ARP entries are installed when the
        destination MAC of an interface is known. If basic interface config
        ran earlier, the duplicated interfaces are re-configured at the end,
        since assigning a VRF wipes their IP addresses.

        Parameters
        ----------
        mode : str
            'config' to install, anything else prefixes each command with
            'no ' to remove the same configuration.
        """
        idx = 1
        unconfig_str = '' if mode=='config' else 'no '

        cache = CCommandCache()
        pre_commit_cache = CCommandCache()
        pre_commit_set = set([])

        for dual_if in self.if_mngr.get_dual_if_list():
            client_if_command_set = []
            server_if_command_set = []
            conf_t_command_set = []
            # next hop of each side is derived from the peer interface's address
            client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv4_addr() )
            server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv4_addr() )

            if dual_if.is_duplicated():
                # define the relevant VRF name
                pre_commit_set.add('{mode}ip vrf {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )

                # assign VRF to interfaces, config interfaces with relevant route-map
                client_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
                client_if_command_set.append ('{mode}ip policy route-map {dup}_{p1}_to_{p2}'.format(
                    mode = unconfig_str,
                    dup = dual_if.get_vrf_name(),
                    p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
                server_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
                server_if_command_set.append ('{mode}ip policy route-map {dup}_{p2}_to_{p1}'.format(
                    mode = unconfig_str,
                    dup = dual_if.get_vrf_name(),
                    p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )

                # config route-map routing
                conf_t_command_set.append('{mode}route-map {dup}_{p1}_to_{p2} permit 10'.format(
                    mode = unconfig_str,
                    dup = dual_if.get_vrf_name(),
                    p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
                if mode == 'config':
                    # 'set' sub-commands only exist while configuring
                    conf_t_command_set.append('set ip next-hop {next_hop}'.format(
                        next_hop = client_net_next_hop) )
                conf_t_command_set.append('{mode}route-map {dup}_{p2}_to_{p1} permit 10'.format(
                    mode = unconfig_str,
                    dup = dual_if.get_vrf_name(),
                    p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
                if mode == 'config':
                    conf_t_command_set.append('set ip next-hop {next_hop}'.format(
                        next_hop = server_net_next_hop) )
                conf_t_command_set.append('exit')

                # config global arp to interfaces net address and vrf
                if dual_if.client_if.get_dest_mac():
                    conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
                        mode = unconfig_str,
                        dup = dual_if.get_vrf_name(),
                        next_hop = server_net_next_hop,
                        dest_mac = dual_if.client_if.get_dest_mac()))
                if dual_if.server_if.get_dest_mac():
                    conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
                        mode = unconfig_str,
                        dup = dual_if.get_vrf_name(),
                        next_hop = client_net_next_hop,
                        dest_mac = dual_if.server_if.get_dest_mac()))
            else:
                # config interfaces with relevant route-map
                client_if_command_set.append ('{mode}ip policy route-map {p1}_to_{p2}'.format(
                    mode = unconfig_str,
                    p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
                server_if_command_set.append ('{mode}ip policy route-map {p2}_to_{p1}'.format(
                    mode = unconfig_str,
                    p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )

                # config route-map routing
                conf_t_command_set.append('{mode}route-map {p1}_to_{p2} permit 10'.format(
                    mode = unconfig_str,
                    p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
                if mode == 'config':
                    conf_t_command_set.append('set ip next-hop {next_hop}'.format(
                        next_hop = client_net_next_hop) )
                conf_t_command_set.append('{mode}route-map {p2}_to_{p1} permit 10'.format(
                    mode = unconfig_str,
                    p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
                if mode == 'config':
                    conf_t_command_set.append('set ip next-hop {next_hop}'.format(
                        next_hop = server_net_next_hop) )
                conf_t_command_set.append('exit')

                # config global arp to interfaces net address
                if dual_if.client_if.get_dest_mac():
                    conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
                        mode = unconfig_str,
                        next_hop = server_net_next_hop,
                        dest_mac = dual_if.client_if.get_dest_mac()))
                if dual_if.server_if.get_dest_mac():
                    conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
                        mode = unconfig_str,
                        next_hop = client_net_next_hop,
                        dest_mac = dual_if.server_if.get_dest_mac()))

            # assign generated config list to cache
            cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
            cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
            cache.add('CONF', conf_t_command_set)
            # each pair consumes two port indices (p<idx>, p<idx+1>)
            idx += 2

        # finish handling pre-config cache: VRF definitions must be committed
        # before any interface references them
        pre_commit_set = list(pre_commit_set)
        if len(pre_commit_set):
            pre_commit_set.append('exit')
        pre_commit_cache.add('CONF', pre_commit_set )
        # deploy the configs (order is important!)
        self.cmd_link.run_command( [pre_commit_cache, cache] )
        if self.config_history['basic_if_config']:
            # in this case, duplicated interfaces will lose its ip address.
            # re-config IPv4 addresses
            self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() )
+
+ def config_no_pbr (self):
+ self.config_pbr(mode = 'unconfig')
+
    def config_static_routing (self, stat_route_obj, mode = 'config'):
        """Apply (mode='config') or remove (mode='unconfig') static routing.

        stat_route_obj supplies client_net_start / server_net_start, the
        matching masks, and net_increment used to advance the networks per
        interface pair. Duplicated pairs get VRF-scoped routes (and restart
        the address progression for each new VRF); plain pairs get global
        routes. Static ARP entries are added where destination MACs are
        known. On mode='config' the object is remembered in
        self.stat_route_config so config_no_static_routing() can undo it.
        """

        if mode == 'config':
            self.stat_route_config = stat_route_obj # save the latest static route config for future removal purposes

        unconfig_str = '' if mode=='config' else 'no '
        cache = CCommandCache()
        pre_commit_cache = CCommandCache()
        pre_commit_set = set([])
        current_dup_intf = None
        client_net = stat_route_obj.client_net_start
        server_net = stat_route_obj.server_net_start
        conf_t_command_set = []

        for dual_if in self.if_mngr.get_dual_if_list():

            # handle duplicated addressing generation
            if dual_if.is_duplicated():
                if dual_if.get_vrf_name() != current_dup_intf:
                    # if this is a dual interfaces, and it is different from the one we proccessed so far, reset static route addressing
                    current_dup_intf = dual_if.get_vrf_name()
                    client_net = stat_route_obj.client_net_start
                    server_net = stat_route_obj.server_net_start
            else:
                if current_dup_intf is not None:
                    # left a run of duplicated pairs -- restart the progression
                    current_dup_intf = None
                    client_net = stat_route_obj.client_net_start
                    server_net = stat_route_obj.server_net_start

            # next hop of each side is derived from the peer interface's address
            client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv4_addr() )
            server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv4_addr() )

            # handle static route configuration for the interfaces
            if dual_if.is_duplicated():
                client_if_command_set = []
                server_if_command_set = []

                # define the relevant VRF name
                pre_commit_set.add('{mode}ip vrf {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )

                # assign VRF to interfaces, config interfaces with relevant route-map
                client_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
                server_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )

                conf_t_command_set.append( "{mode}ip route vrf {dup} {next_net} {dest_mask} {next_hop}".format(
                    mode = unconfig_str,
                    dup = dual_if.get_vrf_name(),
                    next_net = client_net,
                    dest_mask = stat_route_obj.client_mask,
                    next_hop = client_net_next_hop))
                conf_t_command_set.append( "{mode}ip route vrf {dup} {next_net} {dest_mask} {next_hop}".format(
                    mode = unconfig_str,
                    dup = dual_if.get_vrf_name(),
                    next_net = server_net,
                    dest_mask = stat_route_obj.server_mask,
                    next_hop = server_net_next_hop))

                # config global arp to interfaces net address and vrf
                if dual_if.client_if.get_dest_mac():
                    conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
                        mode = unconfig_str,
                        dup = dual_if.get_vrf_name(),
                        next_hop = server_net_next_hop,
                        dest_mac = dual_if.client_if.get_dest_mac()))
                if dual_if.server_if.get_dest_mac():
                    conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
                        mode = unconfig_str,
                        dup = dual_if.get_vrf_name(),
                        next_hop = client_net_next_hop,
                        dest_mac = dual_if.server_if.get_dest_mac()))

                # assign generated interfaces config list to cache
                cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
                cache.add('IF', client_if_command_set, dual_if.client_if.get_name())

            else:
                conf_t_command_set.append( "{mode}ip route {next_net} {dest_mask} {next_hop}".format(
                    mode = unconfig_str,
                    next_net = client_net,
                    dest_mask = stat_route_obj.client_mask,
                    next_hop = server_net_next_hop))
                conf_t_command_set.append( "{mode}ip route {next_net} {dest_mask} {next_hop}".format(
                    mode = unconfig_str,
                    next_net = server_net,
                    dest_mask = stat_route_obj.server_mask,
                    next_hop = client_net_next_hop))

                # config global arp to interfaces net address
                if dual_if.client_if.get_dest_mac():
                    conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
                        mode = unconfig_str,
                        next_hop = server_net_next_hop,
                        dest_mac = dual_if.client_if.get_dest_mac()))
                if dual_if.server_if.get_dest_mac():
                    conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
                        mode = unconfig_str,
                        next_hop = client_net_next_hop,
                        dest_mac = dual_if.server_if.get_dest_mac()))

            # bump up to the next client network address
            client_net = misc_methods.get_single_net_client_addr(client_net, stat_route_obj.net_increment)
            server_net = misc_methods.get_single_net_client_addr(server_net, stat_route_obj.net_increment)


        # finish handling pre-config cache: VRF definitions must be committed
        # before any interface references them
        pre_commit_set = list(pre_commit_set)
        if len(pre_commit_set):
            pre_commit_set.append('exit')
        pre_commit_cache.add('CONF', pre_commit_set )
        # assign generated config list to cache
        cache.add('CONF', conf_t_command_set)
        # deploy the configs (order is important!)
        self.cmd_link.run_command( [pre_commit_cache, cache] )
        if self.config_history['basic_if_config']:
            # in this case, duplicated interfaces will lose its ip address.
            # re-config IPv4 addresses
            self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() )
+
+
+ def config_no_static_routing (self, stat_route_obj = None):
+
+ if stat_route_obj is None and self.stat_route_config is not None:
+ self.config_static_routing(self.stat_route_config, mode = 'unconfig')
+ self.stat_route_config = None # reverse current static route config back to None (no nat config is known to run).
+ elif stat_route_obj is not None:
+ self.config_static_routing(stat_route_obj, mode = 'unconfig')
+ else:
+ raise UserWarning('No static route configuration is available for removal.')
+
+ def config_nbar_pd (self, mode = 'config'):
+ unconfig_str = '' if mode=='config' else 'no '
+ cache = CCommandCache()
+
+ for intf in self.if_mngr.get_if_list(if_type = IFType.Client):
+ cache.add('IF', "{mode}ip nbar protocol-discovery".format( mode = unconfig_str ), intf.get_name())
+
+ self.cmd_link.run_single_command( cache )
+
+ def config_no_nbar_pd (self):
+ self.config_nbar_pd (mode = 'unconfig')
+
+
+ def config_nat_verify (self, mode = 'config'):
+
+ # toggle all duplicate interfaces
+ # dup_ifs = self.if_mngr.get_duplicated_if()
+ if mode=='config':
+ self.toggle_duplicated_intf(action = 'down')
+ # self.__toggle_interfaces(dup_ifs, action = 'down' )
+ else:
+ # if we're in 'unconfig', toggle duplicated interfaces back up
+ self.toggle_duplicated_intf(action = 'up')
+ # self.__toggle_interfaces(dup_ifs)
+
+ def config_no_nat_verify (self):
+ self.config_nat_verify(mode = 'unconfig')
+
    def config_nat (self, nat_obj, mode = 'config'):
        """Apply (mode='config') or remove (mode='unconfig') NAT overload config.

        For each non-duplicated dual-if pair: 'ip nat inside' on the client
        side, 'ip nat outside' on the server side, a NAT pool, an overload
        rule and the matching access-list. Duplicated interfaces are taken
        down first (brought back up on unconfig). On mode='config' nat_obj is
        remembered in self.nat_config so config_no_nat() can undo it.
        Returns the command-link response of the deployed batch.
        """

        if mode == 'config':
            self.nat_config = nat_obj   # save the latest nat config for future removal purposes

        cache = CCommandCache()
        conf_t_command_set = []
        client_net = nat_obj.clients_net_start
        pool_net = nat_obj.nat_pool_start
        unconfig_str = '' if mode=='config' else 'no '

        # toggle all duplicate interfaces
        if mode=='config':
            self.toggle_duplicated_intf(action = 'down')
        else:
            # if we're in 'unconfig', toggle duplicated interfaces back up
            self.toggle_duplicated_intf(action = 'up')

        for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False):
            cache.add('IF', "{mode}ip nat inside".format( mode = unconfig_str ), dual_if.client_if.get_name())
            cache.add('IF', "{mode}ip nat outside".format( mode = unconfig_str ), dual_if.server_if.get_name())
            # pool/ACL numbers are 1-based per interface pair
            pool_id = dual_if.get_id() + 1

            conf_t_command_set.append("{mode}ip nat pool pool{pool_num} {start_addr} {end_addr} netmask {mask}".format(
                mode = unconfig_str,
                pool_num = pool_id,
                start_addr = pool_net,
                end_addr = CNatConfig.calc_pool_end(pool_net, nat_obj.nat_netmask),
                mask = nat_obj.nat_netmask))

            conf_t_command_set.append("{mode}ip nat inside source list {num} pool pool{pool_num} overload".format(
                mode = unconfig_str,
                num = pool_id,
                pool_num = pool_id ))
            conf_t_command_set.append("{mode}access-list {num} permit {net_addr} {net_wildcard}".format(
                mode = unconfig_str,
                num = pool_id,
                net_addr = client_net,
                net_wildcard = nat_obj.client_acl_wildcard))

            # bump up to the next client address
            client_net = misc_methods.get_single_net_client_addr(client_net, nat_obj.net_increment)
            pool_net = misc_methods.get_single_net_client_addr(pool_net, nat_obj.net_increment)


        # assign generated config list to cache
        cache.add('CONF', conf_t_command_set)

        # deploy the configs (order is important!)
        return self.cmd_link.run_single_command( cache )
+
+
+ def config_no_nat (self, nat_obj = None):
+ # first, clear all nat translations
+ self.clear_nat_translations()
+
+ # then, decompose the known config
+ if nat_obj is None and self.nat_config is not None:
+ self.config_nat(self.nat_config, mode = 'unconfig')
+ self.nat_config = None # reverse current NAT config back to None (no nat config is known to run).
+ elif nat_obj is not None:
+ self.config_nat(nat_obj, mode = 'unconfig')
+ else:
+ raise UserWarning('No NAT configuration is available for removal.')
+
+
+ def config_zbf (self, mode = 'config'):
+ cache = CCommandCache()
+ pre_commit_cache = CCommandCache()
+ conf_t_command_set = []
+
+ # toggle all duplicate interfaces down
+ self.toggle_duplicated_intf(action = 'down')
+ # dup_ifs = self.if_mngr.get_duplicated_if()
+ # self.__toggle_interfaces(dup_ifs, action = 'down' )
+
+ # define security zones and security service policy to be applied on the interfaces
+ conf_t_command_set.append('class-map type inspect match-any c1')
+ conf_t_command_set.append('match protocol tcp')
+ conf_t_command_set.append('match protocol udp')
+ conf_t_command_set.append('policy-map type inspect p1')
+ conf_t_command_set.append('class type inspect c1')
+ conf_t_command_set.append('inspect')
+ conf_t_command_set.append('class class-default')
+ conf_t_command_set.append('pass')
+
+ conf_t_command_set.append('zone security z_in')
+ conf_t_command_set.append('zone security z_out')
+
+ conf_t_command_set.append('zone-pair security in2out source z_in destination z_out')
+ conf_t_command_set.append('service-policy type inspect p1')
+ conf_t_command_set.append('zone-pair security out2in source z_out destination z_in')
+ conf_t_command_set.append('service-policy type inspect p1')
+ conf_t_command_set.append('exit')
+
+ pre_commit_cache.add('CONF', conf_t_command_set)
+
+ for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False):
+ cache.add('IF', "zone-member security z_in", dual_if.client_if.get_name() )
+ cache.add('IF', "zone-member security z_out", dual_if.server_if.get_name() )
+
+ self.cmd_link.run_command( [pre_commit_cache, cache] )
+
+ def config_no_zbf (self):
+ cache = CCommandCache()
+ conf_t_command_set = []
+
+ # define security zones and security service policy to be applied on the interfaces
+ conf_t_command_set.append('no zone-pair security in2out source z_in destination z_out')
+ conf_t_command_set.append('no zone-pair security out2in source z_out destination z_in')
+
+ conf_t_command_set.append('no policy-map type inspect p1')
+ conf_t_command_set.append('no class-map type inspect match-any c1')
+
+ conf_t_command_set.append('no zone security z_in')
+ conf_t_command_set.append('no zone security z_out')
+
+ cache.add('CONF', conf_t_command_set)
+
+ for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False):
+ cache.add('IF', "no zone-member security z_in", dual_if.client_if.get_name() )
+ cache.add('IF', "no zone-member security z_out", dual_if.server_if.get_name() )
+
+ self.cmd_link.run_command( [cache] )
+ # toggle all duplicate interfaces back up
+ self.toggle_duplicated_intf(action = 'up')
+ # dup_ifs = self.if_mngr.get_duplicated_if()
+ # self.__toggle_interfaces(dup_ifs)
+
+
    def config_ipv6_pbr (self, mode = 'config'):
        """Apply (mode='config') or remove (mode='unconfig') IPv6 policy routing.

        Enables IPv6 unicast routing globally and 'ipv6 enable' per interface,
        then installs route-maps (prefixed 'ipv6' or 'ipv6_<vrf>' for
        duplicated pairs) that set the IPv6 next-hop to the peer side. Static
        neighbor entries are installed where IPv6 destination MACs are known.
        """
        idx = 1
        unconfig_str = '' if mode=='config' else 'no '
        cache = CCommandCache()
        conf_t_command_set = []

        conf_t_command_set.append('{mode}ipv6 unicast-routing'.format(mode = unconfig_str) )

        for dual_if in self.if_mngr.get_dual_if_list():
            client_if_command_set = []
            server_if_command_set = []

            # next hop of each side is derived from the peer interface's IPv6 address
            client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' )
            server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' )


            client_if_command_set.append ('{mode}ipv6 enable'.format(mode = unconfig_str))
            server_if_command_set.append ('{mode}ipv6 enable'.format(mode = unconfig_str))

            # duplicated pairs get a VRF-qualified route-map name prefix
            if dual_if.is_duplicated():
                prefix = 'ipv6_' + dual_if.get_vrf_name()
            else:
                prefix = 'ipv6'

            # config interfaces with relevant route-map
            client_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p1}_to_{p2}'.format(
                mode = unconfig_str,
                pre = prefix,
                p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
            server_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p2}_to_{p1}'.format(
                mode = unconfig_str,
                pre = prefix,
                p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )

            # config global arp to interfaces net address and vrf
            if dual_if.client_if.get_ipv6_dest_mac():
                conf_t_command_set.append('{mode}ipv6 neighbor {next_hop} {intf} {dest_mac}'.format(
                    mode = unconfig_str,
                    next_hop = server_net_next_hop,
                    intf = dual_if.client_if.get_name(),
                    dest_mac = dual_if.client_if.get_ipv6_dest_mac()))
            if dual_if.server_if.get_ipv6_dest_mac():
                conf_t_command_set.append('{mode}ipv6 neighbor {next_hop} {intf} {dest_mac}'.format(
                    mode = unconfig_str,
                    next_hop = client_net_next_hop,
                    intf = dual_if.server_if.get_name(),
                    dest_mac = dual_if.server_if.get_ipv6_dest_mac()))

            conf_t_command_set.append('{mode}route-map {pre}_{p1}_to_{p2} permit 10'.format(
                mode = unconfig_str,
                pre = prefix,
                p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
            if (mode == 'config'):
                # 'set' sub-commands only exist while configuring
                conf_t_command_set.append('set ipv6 next-hop {next_hop}'.format(next_hop = client_net_next_hop ) )
            conf_t_command_set.append('{mode}route-map {pre}_{p2}_to_{p1} permit 10'.format(
                mode = unconfig_str,
                pre = prefix,
                p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
            if (mode == 'config'):
                conf_t_command_set.append('set ipv6 next-hop {next_hop}'.format(next_hop = server_net_next_hop ) )
            conf_t_command_set.append('exit')

            # assign generated config list to cache
            cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
            cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
            # each pair consumes two port indices (p<idx>, p<idx+1>)
            idx += 2

        cache.add('CONF', conf_t_command_set)

        # deploy the configs (order is important!)
        self.cmd_link.run_command( [cache] )
+
+ def config_no_ipv6_pbr (self):
+ self.config_ipv6_pbr(mode = 'unconfig')
+
+ # show methods
+ def get_cpu_util (self):
+ response = self.cmd_link.run_single_command('show platform hardware qfp active datapath utilization | inc Load')
+ return CShowParser.parse_cpu_util_stats(response)
+
+ def get_cft_stats (self):
+ response = self.cmd_link.run_single_command('test platform hardware qfp active infrastructure cft datapath function cft-cpp-show-all-instances')
+ return CShowParser.parse_cft_stats(response)
+
+ def get_nbar_stats (self):
+ per_intf_stats = {}
+ for intf in self.if_mngr.get_if_list(if_type = IFType.Client):
+ response = self.cmd_link.run_single_command("show ip nbar protocol-discovery interface {interface} stats packet-count protocol".format( interface = intf.get_name() ), flush_first = True)
+ per_intf_stats[intf.get_name()] = CShowParser.parse_nbar_stats(response)
+ return per_intf_stats
+
+ def get_nbar_profiling_stats (self):
+ response = self.cmd_link.run_single_command("show platform hardware qfp active feature nbar profiling")
+ return CShowParser.parse_nbar_profiling_stats(response)
+
+ def get_drop_stats (self):
+
+ response = self.cmd_link.run_single_command('show platform hardware qfp active interface all statistics', flush_first = True)
+ # print response
+ # response = self.cmd_link.run_single_command('show platform hardware qfp active interface all statistics')
+ # print response
+ if_list_by_name = [x.get_name() for x in self.if_mngr.get_if_list()]
+ return CShowParser.parse_drop_stats(response, if_list_by_name )
+
+ def get_nat_stats (self):
+ response = self.cmd_link.run_single_command('show ip nat statistics')
+ return CShowParser.parse_nat_stats(response)
+
+ def get_nat_trans (self):
+ return self.cmd_link.run_single_command('show ip nat translation')
+
+ def get_cvla_memory_usage(self):
+ response = self.cmd_link.run_single_command('show platform hardware qfp active infrastructure cvla client handles')
+ # (res, res2) = CShowParser.parse_cvla_memory_usage(response)
+ return CShowParser.parse_cvla_memory_usage(response)
+
+
+ # clear methods
+ def clear_nat_translations(self):
+ pre_commit_cache = CCommandCache()
+ # prevent new NAT entries
+ # http://www.cisco.com/c/en/us/support/docs/ip/network-address-translation-nat/13779-clear-nat-comments.html
+ for dual_if in self.if_mngr.get_dual_if_list(is_duplicated = False):
+ pre_commit_cache.add('IF', "no ip nat inside", dual_if.client_if.get_name())
+ pre_commit_cache.add('IF', "no ip nat outside", dual_if.server_if.get_name())
+ self.cmd_link.run_single_command(pre_commit_cache)
+ time.sleep(0.5)
+ pre_commit_cache = CCommandCache()
+ # clear the translation
+ pre_commit_cache.add('EXEC', 'clear ip nat translation *')
+ self.cmd_link.run_single_command(pre_commit_cache)
+ time.sleep(0.5)
+
+ def clear_cft_counters (self):
+ """ clear_cft_counters(self) -> None
+
+ Clears the CFT counters on the platform
+ """
+ self.cmd_link.run_single_command('test platform hardware qfp active infrastructure cft datapath function cft-cpp-clear-instance-stats')
+
+ def clear_counters(self):
+ """ clear_counters(self) -> None
+
+ Clears the platform counters
+ """
+
+ pre_commit_cache = CCommandCache()
+ pre_commit_cache.add('EXEC', ['clear counters', '\r'] )
+ self.cmd_link.run_single_command( pre_commit_cache , read_until = ['#', '\[confirm\]'])
+
    def clear_nbar_stats(self):
        """ clear_nbar_stats(self) -> None

        Clears the NBAR-PD classification stats
        """
        pre_commit_cache = CCommandCache()
        # '\r' answers the command's confirmation prompt
        pre_commit_cache.add('EXEC', ['clear ip nbar protocol-discovery','\r'] )
        self.cmd_link.run_single_command( pre_commit_cache )
+
    def clear_packet_drop_stats(self):
        """ clear_packet_drop_stats(self) -> None

        Clears packet-drop stats
        """
#        command = "show platform hardware qfp active statistics drop clear"
        self.cmd_link.run_single_command('show platform hardware qfp active interface all statistics clear_drop')
+
+ ###########################################
+ # file transfer and image loading methods #
+ ###########################################
    def get_running_image_details (self):
        """ get_running_image_details() -> dict

        Check for the currently running image file on the platform.
        Returns a dictionary, where 'drive' key is the drive in which the image is installed,
        and 'image' key is the actual image file used.

        The result is also cached on self.running_image for later reuse.
        """
        response = self.cmd_link.run_single_command('show version | include System image')
        parsed_info = CShowParser.parse_show_image_version(response)
        self.running_image = parsed_info
        return parsed_info
+
+
+ def check_image_existence (self, img_name):
+ """ check_image_existence(self, img_name) -> boolean
+
+ Parameters
+ ----------
+ img_name : str
+ a string represents the image name.
+
+ Check if the image file defined in the platform_config already loaded into the platform.
+ """
+ search_drives = ['bootflash', 'harddisk', self.running_image['drive']]
+ for search_drive in search_drives:
+ command = "dir {drive}: | include {image}".format(drive = search_drive, image = img_name)
+ response = self.cmd_link.run_single_command(command, timeout = 10)
+ if CShowParser.parse_image_existence(response, img_name):
+ self.needed_image_path = '%s:/%s' % (search_drive, img_name)
+ print('Found image in platform:', self.needed_image_path)
+ return True
+ return False
+
    def config_tftp_server(self, device_cfg_obj, external_tftp_config = None, applyToPlatform = False):
        """ config_tftp_server(self, device_cfg_obj, external_tftp_config, applyToPlatform) -> None

        Parameters
        ----------
        device_cfg_obj : CDeviceCfg-like object
            provides the tftp server info and the platform management interface.
        external_tftp_config : dict (Not is use)
            A path to external tftp config file other than using the one defined in the instance.
        applyToPlatform : boolean
            set to True in order to apply the config into the platform

        Configures the tftp server on an interface of the platform.
        Records the action in self.config_history for later validation.
        """
#        tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_server_config
        self.tftp_cfg = device_cfg_obj.get_tftp_info()
        cache = CCommandCache()

        command = "ip tftp source-interface {intf}".format( intf = device_cfg_obj.get_mgmt_interface() )
        cache.add('CONF', command )
        self.cmd_link.run_single_command(cache)
        self.config_history['tftp_server_config'] = True
+
    def load_platform_image(self, img_filename, external_tftp_config = None):
        """ load_platform_image(self, img_filename, external_tftp_config) -> None

        Parameters
        ----------
        external_tftp_config : dict
            A path to external tftp config file other than using the one defined in the instance.
        img_filename : str
            image name to be saved into the platforms drive.

        This method loads the configured image into the platform's harddisk (unless it is already loaded),
        and sets that image to be the boot_image of the platform.

        Raises UserWarning when the copy fails or when no TFTP configuration
        was applied beforehand (see config_tftp_server).
        """
        if not self.check_image_existence(img_filename): # check if this image isn't already saved in platform
            #tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_cfg

            if self.config_history['tftp_server_config']: # make sure a TFTP configuration has been loaded
                cache = CCommandCache()
                if self.running_image is None:
                    self.get_running_image_details()

                command = "copy tftp://{tftp_ip}/{img_path}/{image} bootflash:".format(
                    tftp_ip = self.tftp_cfg['ip_address'],
                    img_path = self.tftp_cfg['images_path'],
                    image = img_filename)
                # the extra '\r' entries answer the copy command's prompts
                cache.add('EXEC', [command, '\r', '\r'])

                progress_thread = CProgressDisp.ProgressThread(notifyMessage = "Copying image via tftp, this may take a while...\n")
                progress_thread.start()

                response = self.cmd_link.run_single_command(cache, timeout = 900, read_until = ['\?', '#'])
                print("RESPONSE:")
                print(response)
                progress_thread.join()
                copy_ok = CShowParser.parse_file_copy(response)

                if not copy_ok:
                    raise UserWarning('Image file loading failed. Please make sure the accessed image exists and has read privileges')
            else:
                raise UserWarning('TFTP configuration is not available. Please make sure a valid TFTP configuration has been provided')
+
    def set_boot_image(self, boot_image):
        """ set_boot_image(self, boot_image) -> None

        Parameters
        ----------
        boot_image : str
            An image file to be set as boot_image

        Configures boot_image as the boot image of the platform into the running-config + config-register

        Raises Exception when the image is not present on any drive.
        """
        cache = CCommandCache()
        if self.needed_image_path is None:
            # side effect: check_image_existence sets self.needed_image_path
            if not self.check_image_existence(boot_image):
                raise Exception("Trying to set boot image that's not found in router, please copy it first.")

        boot_img_cmd = "boot system flash %s" % self.needed_image_path
        config_register_cmd = "config-register 0x2021"
        # wipe previous boot entries before adding the new one
        cache.add('CONF', ["no boot system", boot_img_cmd, config_register_cmd, '\r'])
        response = self.cmd_link.run_single_command( cache )
        print("RESPONSE:")
        print(response)
        self.save_config_to_startup_config()
+
+ def is_image_matches(self, needed_image):
+ """ set_boot_image(self, needed_image) -> boolean
+
+ Parameters
+ ----------
+ needed_image : str
+ An image file to compare router running image
+
+ Compares image name to router running image, returns match result.
+
+ """
+ if self.running_image is None:
+ self.get_running_image_details()
+ needed_image = needed_image.lower()
+ current_image = self.running_image['image'].lower()
+ if needed_image.find(current_image) != -1:
+ return True
+ if current_image.find(needed_image) != -1:
+ return True
+ return False
+
+ # misc class related methods
+
    def load_platform_data_from_file (self, device_cfg_obj):
        # delegate interface layout loading to the interface manager
        self.if_mngr.load_config(device_cfg_obj)
+
    def launch_connection (self, device_cfg_obj):
        # open the control connection; invalidate the cached running image,
        # since a (re)connect may follow a reload with a different image
        self.running_image = None # clear the image name "cache"
        self.cmd_link.launch_platform_connectivity(device_cfg_obj)
+
    def reload_connection (self, device_cfg_obj):
        # drop and re-establish the platform control connection
        self.cmd_link.close_platform_connection()
        self.launch_connection(device_cfg_obj)
+
    def save_config_to_startup_config (self):
        """ save_config_to_startup_config(self) -> None

        Copies running-config into startup-config.
        """
        cache = CCommandCache()
        # 'wr' is the short form of 'write memory'; '\r' confirms the prompt
        cache.add('EXEC', ['wr', '\r'] )
        self.cmd_link.run_single_command(cache)
+
    def reload_platform(self, device_cfg_obj):
        """ reload_platform(self) -> None

        Reloads the platform: issues 'reload', waits for the device to go
        down, then polls it with ping every sleep_time seconds until it
        answers (or ~20 polls elapse), and finally re-opens the connection.
        """
        from subprocess import call
        import os
        i = 0
        sleep_time = 30 # seconds

        try:
            cache = CCommandCache()

            # 'n' declines saving the config, '\r' confirms the reload prompt
            cache.add('EXEC', ['reload','n\r','\r'] )
            self.cmd_link.run_single_command( cache )

            progress_thread = CProgressDisp.ProgressThread(notifyMessage = "Reloading the platform, this may take a while...\n")
            progress_thread.start()
            time.sleep(60) # need delay for device to shut down before polling it
            # poll the platform until ping response is received.
            while True:
                time.sleep(sleep_time)
                try:
                    # ping exit code 0 == reply received; output discarded
                    # NOTE(review): bare except treats any ping failure as "no reply"
                    x = call(["ping", "-c 1", device_cfg_obj.get_ip_address()], stdout = open(os.devnull, 'wb'))
                except:
                    x = 1
                if x == 0:
                    break
                elif i > 20:
                    raise TimeoutError('Platform failed to reload after reboot for over {minutes} minutes!'.format(minutes = round(1 + i * sleep_time / 60)))
                else:
                    i += 1

            time.sleep(30)
            self.reload_connection(device_cfg_obj)
            progress_thread.join()
        except Exception as e:
            # NOTE(review): this also swallows the TimeoutError raised above,
            # so callers are not notified of a failed reload — confirm intended
            print(e)
+
    def get_if_manager(self):
        # accessor for the platform's interface manager
        return self.if_mngr
+
+ def dump_obj_config (self, object_name):
+ if object_name=='nat' and self.nat_config is not None:
+ self.nat_config.dump_config()
+ elif object_name=='static_route' and self.stat_route_config is not None:
+ self.stat_route_config.dump_config()
+ else:
+ raise UserWarning('No known configuration exists.')
+
    def toggle_duplicated_intf(self, action = 'down'):
        # bring all duplicated interfaces up or down ('up'/'down')

        dup_ifs = self.if_mngr.get_duplicated_if()
        self.__toggle_interfaces( dup_ifs, action = action )
+
+
+ def __toggle_interfaces (self, intf_list, action = 'up'):
+ cache = CCommandCache()
+ mode_str = 'no ' if action == 'up' else ''
+
+ for intf_obj in intf_list:
+ cache.add('IF', '{mode}shutdown'.format(mode = mode_str), intf_obj.get_name())
+
+ self.cmd_link.run_single_command( cache )
+
+
class CStaticRouteConfig(object):
    """Static-route test configuration derived from a config dictionary."""

    def __init__(self, static_route_dict):
        self.clients_start = static_route_dict['clients_start']
        self.servers_start = static_route_dict['servers_start']
        self.net_increment = misc_methods.gen_increment_dict(static_route_dict['dual_port_mask'])
        self.client_mask = static_route_dict['client_destination_mask']
        self.server_mask = static_route_dict['server_destination_mask']
        self.client_net_start = self.extract_net_addr(self.clients_start, self.client_mask)
        self.server_net_start = self.extract_net_addr(self.servers_start, self.server_mask)
        self.static_route_dict = static_route_dict

    def extract_net_addr (self, ip_addr, ip_mask):
        """Return the network address: octet-wise AND of ip_addr and ip_mask."""
        octet_pairs = zip(ip_addr.split('.'), ip_mask.split('.'))
        return '.'.join(str(int(addr) & int(mask)) for addr, mask in octet_pairs)

    def dump_config (self):
        """Pretty-print the raw configuration dict as YAML."""
        import yaml
        print(yaml.dump( self.static_route_dict , default_flow_style=False))
+
+
class CNatConfig(object):
    """NAT test configuration derived from a config dictionary."""

    def __init__(self, nat_dict):
        self.clients_net_start = nat_dict['clients_net_start']
        self.client_acl_wildcard= nat_dict['client_acl_wildcard_mask']
        self.net_increment = misc_methods.gen_increment_dict(nat_dict['dual_port_mask'])
        self.nat_pool_start = nat_dict['pool_start']
        self.nat_netmask = nat_dict['pool_netmask']
        self.nat_dict = nat_dict

    @staticmethod
    def calc_pool_end (nat_pool_start, netmask):
        """Return the last address of the pool implied by the start + netmask.

        Walks host bits from the least-significant octet upward: octets fully
        outside the mask saturate to 255; the first octet with a masked bit
        receives the remaining host-bit range.
        """
        end_octets = [int(octet) for octet in nat_pool_start.split('.')]
        mask_octets = [int(octet) for octet in netmask.split('.')]
        octet = 3  # start with the LSB octet
        bit = 1

        while True:
            if bit & mask_octets[octet]:
                # hit a network bit: add the accumulated host range and stop
                end_octets[octet] += bit - 1
                break
            if (bit << 1) > 255:
                # entire octet is host bits: saturate it, move one octet up
                end_octets[octet] = 255
                octet -= 1
                bit = 1
            else:
                bit <<= 1
        return '.'.join(str(octet_val) for octet_val in end_octets)

    def dump_config (self):
        """Pretty-print the raw configuration dict as YAML."""
        import yaml
        print(yaml.dump( self.nat_dict , default_flow_style=False))
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/CProgressDisp.py b/scripts/automation/regression/CProgressDisp.py
new file mode 100755
index 00000000..18df2f43
--- /dev/null
+++ b/scripts/automation/regression/CProgressDisp.py
@@ -0,0 +1,87 @@
+#!/router/bin/python
+from __future__ import print_function
+import threading
+import sys
+import time
+import outer_packages
+import termstyle
+import progressbar
+
+
class ProgressThread(threading.Thread):
    """Background thread printing a dot every 5 seconds until joined.

    Used to show liveness during long blocking operations (tftp copy,
    platform reload).
    """

    def __init__(self, notifyMessage = None):
        super(ProgressThread, self).__init__()
        self.stoprequest = threading.Event()   # set by join() to end the dot loop
        self.notifyMessage = notifyMessage     # optional message printed once at start

    def run(self):
        if self.notifyMessage is not None:
            print(self.notifyMessage, end=' ')

        # print '\b.' so consecutive dots overwrite the trailing space
        while not self.stoprequest.is_set():
            print("\b.", end=' ')
            sys.stdout.flush()
            time.sleep(5)

    def join(self, timeout=None):
        # print completion first, then signal run() to stop and wait for it
        if self.notifyMessage is not None:
            print(termstyle.green("Done!\n"), end=' ')
        self.stoprequest.set()
        super(ProgressThread, self).join(timeout)
+
+
class TimedProgressBar(threading.Thread):
    """Thread showing a progressbar that fills over a fixed time budget.

    join(isPlannedStop=True) snaps the bar to 100% before stopping;
    isPlannedStop=False freezes it at its current position.
    """

    def __init__(self, time_in_secs):
        super(TimedProgressBar, self).__init__()
        self.stoprequest = threading.Event()
        self.stopFlag = False                  # polled by run() to break the loop
        self.time_in_secs = time_in_secs + 15 # 80 # taking 15 seconds extra
        widgets = ['Running TRex: ', progressbar.Percentage(), ' ',
                   progressbar.Bar(marker='>',left='[',right=']'),
                   ' ', progressbar.ETA()]
        # two updates per second, hence maxval = seconds * 2
        self.pbar = progressbar.ProgressBar(widgets=widgets, maxval=self.time_in_secs*2)


    def run (self):
        # global g_stop
        print()
        self.pbar.start()

        try:
            for i in range(0, self.time_in_secs*2 + 1):
                if (self.stopFlag == True):
                    break
                time.sleep(0.5)
                self.pbar.update(i)
            # self.pbar.finish()

        except KeyboardInterrupt:
            # self.pbar.finish()
            print("\nInterrupted by user!!")
            self.join()
        finally:
            print()

    def join(self, isPlannedStop = True, timeout=None):
        if isPlannedStop:
            # jump the bar to 100% before stopping the worker loop
            self.pbar.update(self.time_in_secs*2)
            self.stopFlag = True
        else:
            self.stopFlag = True # Stop the progress bar in its current location
        self.stoprequest.set()
        super(TimedProgressBar, self).join(timeout)
+
+
def timedProgressBar(time_in_secs):
    """Blocking progress bar that fills over time_in_secs seconds.

    Updates twice per second; returns after the full duration elapsed.
    """
    widgets = ['Running TRex: ', progressbar.Percentage(), ' ',
               # fix: was bare 'Bar' (NameError) — must be qualified
               progressbar.Bar(marker='>',left='[',right=']'),
               ' ', progressbar.ETA()]
    pbar = progressbar.ProgressBar(widgets=widgets, maxval=time_in_secs*2)
    pbar.start()
    for i in range(0, time_in_secs*2 + 1):
        time.sleep(0.5)
        pbar.update(i)
    pbar.finish()
    print()
+
+
diff --git a/scripts/automation/regression/CShowParser.py b/scripts/automation/regression/CShowParser.py
new file mode 100755
index 00000000..3445c70e
--- /dev/null
+++ b/scripts/automation/regression/CShowParser.py
@@ -0,0 +1,228 @@
+#!/router/bin/python-2.7.4
+
+import re
+import misc_methods
+
class PlatformResponseMissmatch(Exception):
    """Raised when a 'show' response lacks the data a parser expected."""

    def __init__(self, message):
        # build the full user-facing explanation and hand it to Exception
        details = (message +
                   ' is not available for given platform state and data.\n'
                   'Please make sure the relevant features are turned on in the platform.')
        super(PlatformResponseMissmatch, self).__init__(details)
+
class PlatformResponseAmbiguity(Exception):
    """Raised when a filename lookup matched more than one file."""

    def __init__(self, message):
        # build the full user-facing explanation and hand it to Exception
        details = (message +
                   ' found more than one file matching the provided filename.\n'
                   'Please provide more distinct filename.')
        super(PlatformResponseAmbiguity, self).__init__(details)
+
+
+class CShowParser(object):
+
+ @staticmethod
+ def parse_drop_stats (query_response, interfaces_list):
+ res = {'total_drops' : 0}
+ response_lst = query_response.split('\r\n')
+ mtch_found = 0
+
+ for line in response_lst:
+ mtch = re.match("^\s*(\w+/\d/\d)\s+(\d+)\s+(\d+)", line)
+ if mtch:
+ mtch_found += 1
+ if (mtch.group(1) in interfaces_list):
+ res[mtch.group(1)] = (int(mtch.group(2)) + int(mtch.group(3)))
+ res['total_drops'] += (int(mtch.group(2)) + int(mtch.group(3)))
+# if mtch_found == 0: # no matches found at all
+# raise PlatformResponseMissmatch('Drop stats')
+# else:
+# return res
+ return res
+
+ @staticmethod
+ def parse_nbar_stats (query_response):
+ response_lst = query_response.split('\r\n')
+ stats = {}
+ final_stats = {}
+ mtch_found = 0
+
+ for line in response_lst:
+ mtch = re.match("\s*([\w-]+)\s*(\d+)\s*(\d+)\s+", line)
+ if mtch:
+ mtch_found += 1
+ key = mtch.group(1)
+ pkt_in = int(mtch.group(2))
+ pkt_out = int(mtch.group(3))
+
+ avg_pkt_cnt = ( pkt_in + pkt_out )/2
+ if avg_pkt_cnt == 0.0:
+ # escaping zero division case
+ continue
+ if key in stats:
+ stats[key] += avg_pkt_cnt
+ else:
+ stats[key] = avg_pkt_cnt
+
+ # Normalize the results to percents
+ for protocol in stats:
+ protocol_norm_stat = int(stats[protocol]*10000/stats['Total'])/100.0 # round the result to x.xx format
+ if (protocol_norm_stat != 0.0):
+ final_stats[protocol] = protocol_norm_stat
+
+ if mtch_found == 0: # no matches found at all
+ raise PlatformResponseMissmatch('NBAR classification stats')
+ else:
+ return { 'percentage' : final_stats, 'packets' : stats }
+
+ @staticmethod
+ def parse_nat_stats (query_response):
+ response_lst = query_response.split('\r\n')
+ res = {}
+ mtch_found = 0
+
+ for line in response_lst:
+ mtch = re.match("Total (active translations):\s+(\d+).*(\d+)\s+static,\s+(\d+)\s+dynamic", line)
+ if mtch:
+ mtch_found += 1
+ res['total_active_trans'] = int(mtch.group(2))
+ res['static_active_trans'] = int(mtch.group(3))
+ res['dynamic_active_trans'] = int(mtch.group(4))
+ continue
+
+ mtch = re.match("(Hits):\s+(\d+)\s+(Misses):\s+(\d+)", line)
+ if mtch:
+ mtch_found += 1
+ res['num_of_hits'] = int(mtch.group(2))
+ res['num_of_misses'] = int(mtch.group(4))
+
+ if mtch_found == 0: # no matches found at all
+ raise PlatformResponseMissmatch('NAT translations stats')
+ else:
+ return res
+
+ @staticmethod
+ def parse_cpu_util_stats (query_response):
+ response_lst = query_response.split('\r\n')
+ res = { 'cpu0' : 0,
+ 'cpu1' : 0 }
+ mtch_found = 0
+ for line in response_lst:
+ mtch = re.match("\W*Processing: Load\D*(\d+)\D*(\d+)\D*(\d+)\D*(\d+)\D*", line)
+ if mtch:
+ mtch_found += 1
+ res['cpu0'] += float(mtch.group(1))
+ res['cpu1'] += float(mtch.group(2))
+
+ if mtch_found == 0: # no matches found at all
+ raise PlatformResponseMissmatch('CPU utilization processing')
+ else:
+ res['cpu0'] = res['cpu0']/mtch_found
+ res['cpu1'] = res['cpu1']/mtch_found
+ return res
+
+ @staticmethod
+ def parse_cft_stats (query_response):
+ response_lst = query_response.split('\r\n')
+ res = {}
+ mtch_found = 0
+ for line in response_lst:
+ mtch = re.match("\W*(\w+)\W*([:]|[=])\W*(\d+)", line)
+ if mtch:
+ mtch_found += 1
+ res[ str( mix_string(m.group(1)) )] = float(m.group(3))
+ if mtch_found == 0: # no matches found at all
+ raise PlatformResponseMissmatch('CFT counters stats')
+ else:
+ return res
+
+
+ @staticmethod
+ def parse_cvla_memory_usage(query_response):
+ response_lst = query_response.split('\r\n')
+ res = {}
+ res2 = {}
+ cnt = 0
+ state = 0
+ name = ''
+ number = 0.0
+
+ for line in response_lst:
+ if state == 0:
+ mtch = re.match("\W*Entity name:\W*(\w[^\r\n]+)", line)
+ if mtch:
+ name = misc_methods.mix_string(mtch.group(1))
+ state = 1
+ cnt += 1
+ elif state == 1:
+ mtch = re.match("\W*Handle:\W*(\d+)", line)
+ if mtch:
+ state = state + 1
+ else:
+ state = 0;
+ elif state == 2:
+ mtch = re.match("\W*Number of allocations:\W*(\d+)", line)
+ if mtch:
+ state = state + 1
+ number=float(mtch.group(1))
+ else:
+ state = 0;
+ elif state == 3:
+ mtch = re.match("\W*Memory allocated:\W*(\d+)", line)
+ if mtch:
+ state = 0
+ res[name] = float(mtch.group(1))
+ res2[name] = number
+ else:
+ state = 0
+ if cnt == 0:
+ raise PlatformResponseMissmatch('CVLA memory usage stats')
+
+ return (res,res2)
+
+
+ @staticmethod
+ def parse_show_image_version(query_response):
+ response_lst = query_response.split('\r\n')
+ res = {}
+
+ for line in response_lst:
+ mtch = re.match("System image file is \"(\w+):(.*/)?(.+)\"", line)
+ if mtch:
+ res['drive'] = mtch.group(1)
+ res['image'] = mtch.group(3)
+ return res
+
+ raise PlatformResponseMissmatch('Running image info')
+
+
+ @staticmethod
+ def parse_image_existence(query_response, img_name):
+ response_lst = query_response.split('\r\n')
+ cnt = 0
+
+ for line in response_lst:
+ regex = re.compile(".* (?!include) %s" % img_name )
+ mtch = regex.match(line)
+ if mtch:
+ cnt += 1
+ if cnt == 1:
+ return True
+ elif cnt > 1:
+ raise PlatformResponseAmbiguity('Image existence')
+ else:
+ return False
+
+ @staticmethod
+ def parse_file_copy (query_response):
+ rev_response_lst = reversed(query_response.split('\r\n'))
+ lines_parsed = 0
+
+ for line in rev_response_lst:
+ mtch = re.match("\[OK - (\d+) bytes\]", line)
+ if mtch:
+ return True
+ lines_parsed += 1
+
+ if lines_parsed > 5:
+ return False
+ return False
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/CustomLogger.py b/scripts/automation/regression/CustomLogger.py
new file mode 100755
index 00000000..14ef1362
--- /dev/null
+++ b/scripts/automation/regression/CustomLogger.py
@@ -0,0 +1,36 @@
+
+import sys
+import os
+import logging
+
+
+# def setup_custom_logger(name, log_path = None):
+# logging.basicConfig(level = logging.INFO,
+# format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+# datefmt = '%m-%d %H:%M')
+
+
def setup_custom_logger(name, log_path = None):
    """Configure file logging (DEBUG) plus an ERROR-level console handler.

    Parameters
    ----------
    name : str
        logger name to attach the console handler to.
    log_path : str, optional
        destination log file; defaults to ./trex_log.log in the CWD.
        Missing parent directories are created.
    """
    # first make sure path available
    if log_path is None:
        log_path = os.getcwd()+'/trex_log.log'
    else:
        directory = os.path.dirname(log_path)
        # fix: dirname is '' for a bare filename and os.makedirs('') raises
        if directory and not os.path.exists(directory):
            os.makedirs(directory)
    logging.basicConfig(level = logging.DEBUG,
                        format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
                        datefmt = '%m-%d %H:%M',
                        filename= log_path,
                        filemode= 'w')

    # define a Handler which writes ERROR messages or higher to sys.stderr
    consoleLogger = logging.StreamHandler()
    consoleLogger.setLevel(logging.ERROR)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    # tell the handler to use this format
    consoleLogger.setFormatter(formatter)

    # add the handler to the logger
    logging.getLogger(name).addHandler(consoleLogger)
diff --git a/scripts/automation/regression/aggregate_results.py b/scripts/automation/regression/aggregate_results.py
new file mode 100755
index 00000000..c7c61ea6
--- /dev/null
+++ b/scripts/automation/regression/aggregate_results.py
@@ -0,0 +1,659 @@
+# -*- coding: utf-8 -*-
+import xml.etree.ElementTree as ET
+import outer_packages
+import argparse
+import glob
+from pprint import pprint
+import sys, os
+from collections import OrderedDict
+import copy
+import datetime, time
+try:
+ import cPickle as pickle
+except:
+ import pickle
+import subprocess, shlex
+from ansi2html import Ansi2HTMLConverter
+
+converter = Ansi2HTMLConverter(inline = True)
+convert = converter.convert
+
def ansi2html(text):
    # convert ANSI escape sequences to inline-styled HTML fragments
    # (full=False: no surrounding <html> document wrapper)
    return convert(text, full = False)
+
+FUNCTIONAL_CATEGORY = 'Functional' # how to display those categories
+ERROR_CATEGORY = 'Error'
+
+
def pad_tag(text, tag):
    """Wrap *text* in an opening/closing HTML *tag* pair."""
    return '<{0}>{1}</{0}>'.format(tag, text)
+
def mark_string(text, color, condition):
    """Return *text* bold-colored when *condition* is truthy, else unchanged."""
    if not condition:
        return text
    return '<font color=%s><b>%s</b></font>' % (color, text)
+
+
def is_functional_test_name(testname):
    """True when the test belongs to the functional_tests package.

    (Older prefix-list heuristic removed as dead commented-out code.)
    """
    return testname.startswith('functional_tests.')
+
def is_good_status(text):
    """Return True when *text* is one of the statuses counted as passing."""
    return text in {'Successful', 'Fixed', 'Passed', 'True', 'Pass'}
+
+# input: xml element with test result
+# output string: 'error', 'failure', 'skipped', 'passed'
# input: xml element with test result
# output string: 'error', 'failure', 'skipped', 'passed'
def get_test_result(test):
    """Classify a <testcase> element by its first status child tag.

    Returns 'error', 'failure' or 'skipped' when such a child exists,
    otherwise 'passed'.
    """
    # iterate the element directly: Element.getchildren() was deprecated
    # and removed in Python 3.9
    for child in test:
        if child.tag in ('error', 'failure', 'skipped'):
            return child.tag
    return 'passed'
+
+# returns row of table with <th> and <td> columns - key: value
def add_th_td(key, value):
    """One table row: header cell *key* followed by data cell *value*."""
    return '<tr><th>{}</th><td>{}</td></tr>\n'.format(key, value)
+
+# returns row of table with <td> and <td> columns - key: value
def add_td_td(key, value):
    """One table row of two plain data cells: *key* and *value*."""
    return '<tr><td>{}</td><td>{}</td></tr>\n'.format(key, value)
+
+# returns row of table with <th> and <th> columns - key: value
def add_th_th(key, value):
    """One table row of two header cells: *key* and *value*."""
    return '<tr><th>{}</th><th>{}</th></tr>\n'.format(key, value)
+
+# returns <div> with table of tests under given category.
+# category - string with name of category
+# tests - list of tests, derived from aggregated xml report, changed a little to get easily stdout etc.
+# tests_type - stateful or stateless
+# category_info_dir - folder to search for category info file
+# expanded - bool, false = outputs (stdout etc.) of tests are hidden by CSS
+# brief - bool, true = cut some part of tests outputs (useful for errors section with expanded flag)
def add_category_of_tests(category, tests, tests_type = None, category_info_dir = None, expanded = False, brief = False):
    """Render one category of tests as an HTML <table> fragment.

    Builds an optional info header (from report_<category>.info) followed by
    one clickable row per test with its status and duration, plus a hidden
    (or expanded) row carrying the test's outputs.
    Relies on module globals: FUNCTIONAL_CATEGORY, ERROR_CATEGORY,
    trex_info_dict and ansi2html.
    """
    is_actual_category = category not in (FUNCTIONAL_CATEGORY, ERROR_CATEGORY)
    category_id = '_'.join([category, tests_type]) if tests_type else category
    category_name = ' '.join([category, tests_type.capitalize()]) if tests_type else category
    html_output = ''
    if is_actual_category:
        html_output += '<br><table class="reference">\n'

        if category_info_dir:
            category_info_file = '%s/report_%s.info' % (category_info_dir, category)
            if os.path.exists(category_info_file):
                with open(category_info_file) as f:
                    for info_line in f.readlines():
                        key_value = info_line.split(':', 1)
                        if key_value[0].strip() in list(trex_info_dict.keys()) + ['User']: # always 'hhaim', no need to show
                            continue
                        html_output += add_th_td('%s:' % key_value[0], key_value[1])
            else:
                html_output += add_th_td('Info:', 'No info')
                print('add_category_of_tests: no category info %s' % category_info_file)
        if tests_type:
            html_output += add_th_td('Tests type:', tests_type.capitalize())
        if len(tests):
            total_duration = 0.0
            for test in tests:
                total_duration += float(test.attrib['time'])
            html_output += add_th_td('Tests duration:', datetime.timedelta(seconds = int(total_duration)))
        html_output += '</table>\n'

    if not len(tests):
        return html_output + pad_tag('<br><font color=red>No tests!</font>', 'b')
    html_output += '<br>\n<table class="reference" width="100%">\n<tr><th align="left">'

    if category == ERROR_CATEGORY:
        html_output += 'Setup</th><th align="left">Failed tests:'
    else:
        html_output += '%s tests:' % category_name
    html_output += '</th><th align="center">Final Result</th>\n<th align="center">Time (s)</th>\n</tr>\n'
    for test in tests:
        functional_test = is_functional_test_name(test.attrib['name'])
        # functional tests are listed under their own category, not per-setup
        if functional_test and is_actual_category:
            continue
        if category == ERROR_CATEGORY:
            test_id = ('err_' + test.attrib['classname'] + test.attrib['name']).replace('.', '_')
        else:
            test_id = (category_id + test.attrib['name']).replace('.', '_')
        if expanded:
            html_output += '<tr>\n<th>'
        else:
            # collapsed row toggles its detail row via the tgl_test JS helper
            html_output += '<tr onclick=tgl_test("%s") class=linktr>\n<td class=linktext>' % test_id
        if category == ERROR_CATEGORY:
            html_output += FUNCTIONAL_CATEGORY if functional_test else test.attrib['classname']
            if expanded:
                html_output += '</th><td>'
            else:
                html_output += '</td><td class=linktext>'
        html_output += '%s</td>\n<td align="center">' % test.attrib['name']
        test_result = get_test_result(test)
        if test_result == 'error':
            html_output += '<font color="red"><b>ERROR</b></font></td>'
        elif test_result == 'failure':
            html_output += '<font color="red"><b>FAILED</b></font></td>'
        elif test_result == 'skipped':
            html_output += '<font color="blue"><b>SKIPPED</b></font></td>'
        else:
            html_output += '<font color="green"><b>PASSED</b></font></td>'
        html_output += '<td align="center"> '+ test.attrib['time'] + '</td></tr>'

        # NOTE(review): 'result' is unpacked as a 2-tuple, so it is expected
        # to be set programmatically on the aggregated element (not a parsed
        # xml string attribute) — confirm against the aggregation code.
        result, result_text = test.attrib.get('result', ('', ''))
        if result_text:
            # cut tracebacks, keep only the human-readable failure summary
            start_index_errors_stl = result_text.find('STLError: \n******')
            if start_index_errors_stl > 0:
                result_text = result_text[start_index_errors_stl:].strip() # cut traceback
            start_index_errors = result_text.find('Exception: The test is failed, reasons:')
            if start_index_errors > 0:
                result_text = result_text[start_index_errors + 10:].strip() # cut traceback
            result_text = ansi2html(result_text)
            result_text = '<b style="color:000080;">%s:</b><br>%s<br><br>' % (result.capitalize(), result_text.replace('\n', '<br>'))
        stderr = '' if brief and result_text else test.get('stderr', '')
        if stderr:
            stderr = ansi2html(stderr)
            stderr = '<b style="color:000080;"><text color=000080>Stderr</text>:</b><br>%s<br><br>\n' % stderr.replace('\n', '<br>')
        stdout = '' if brief and result_text else test.get('stdout', '')
        if stdout:
            stdout = ansi2html(stdout)
            if brief: # cut off server logs
                stdout = stdout.split('>>>>>>>>>>>>>>>', 1)[0]
            stdout = '<b style="color:000080;">Stdout:</b><br>%s<br><br>\n' % stdout.replace('\n', '<br>')

        # hidden detail row (4 columns for the error table, 3 otherwise)
        html_output += '<tr style="%scolor:603000;" id="%s"><td colspan=%s>' % ('' if expanded else 'display:none;', test_id, 4 if category == ERROR_CATEGORY else 3)
        if result_text or stderr or stdout:
            html_output += '%s%s%s</td></tr>' % (result_text, stderr, stdout)
        else:
            html_output += '<b style="color:000080;">No output</b></td></tr>'

    html_output += '\n</table>'
    return html_output
+
+style_css = """
+html {overflow-y:scroll;}
+
+body {
+ font-size:12px;
+ color:#000000;
+ background-color:#ffffff;
+ margin:0px;
+ font-family:verdana,helvetica,arial,sans-serif;
+}
+
+div {width:100%;}
+
+table,th,td,input,textarea {
+ font-size:100%;
+}
+
+table.reference, table.reference_fail {
+ background-color:#ffffff;
+ border:1px solid #c3c3c3;
+ border-collapse:collapse;
+ vertical-align:middle;
+}
+
+table.reference th {
+ background-color:#e5eecc;
+ border:1px solid #c3c3c3;
+ padding:3px;
+}
+
+table.reference_fail th {
+ background-color:#ffcccc;
+ border:1px solid #c3c3c3;
+ padding:3px;
+}
+
+
+table.reference td, table.reference_fail td {
+ border:1px solid #c3c3c3;
+ padding:3px;
+}
+
+a.example {font-weight:bold}
+
+#a:link,a:visited {color:#900B09; background-color:transparent}
+#a:hover,a:active {color:#FF0000; background-color:transparent}
+
+.linktr {
+ cursor: pointer;
+}
+
+.linktext {
+ color:#0000FF;
+ text-decoration: underline;
+}
+"""
+
+
+# main
+if __name__ == '__main__':
+
+ # deal with input args
+ argparser = argparse.ArgumentParser(description='Aggregate test results of from ./reports dir, produces xml, html, mail report.')
+ argparser.add_argument('--input_dir', default='./reports',
+ help='Directory with xmls/setups info. Filenames: report_<setup name>.xml/report_<setup name>.info')
+ argparser.add_argument('--output_xml', default='./reports/aggregated_tests.xml',
+ dest = 'output_xmlfile', help='Name of output xml file with aggregated results.')
+ argparser.add_argument('--output_html', default='./reports/aggregated_tests.html',
+ dest = 'output_htmlfile', help='Name of output html file with aggregated results.')
+ argparser.add_argument('--output_mail', default='./reports/aggregated_tests_mail.html',
+ dest = 'output_mailfile', help='Name of output html file with aggregated results for mail.')
+ argparser.add_argument('--output_title', default='./reports/aggregated_tests_title.txt',
+ dest = 'output_titlefile', help='Name of output file to contain title of mail.')
+ argparser.add_argument('--build_status_file', default='./reports/build_status',
+ dest = 'build_status_file', help='Name of output file to save scenaries build results (should not be wiped).')
+ argparser.add_argument('--last_passed_commit', default='./reports/last_passed_commit',
+ dest = 'last_passed_commit', help='Name of output file to save last passed commit (should not be wiped).')
+ args = argparser.parse_args()
+
+
+##### get input variables/TRex commit info
+
+ scenario = os.environ.get('SCENARIO')
+ build_url = os.environ.get('BUILD_URL')
+ build_id = os.environ.get('BUILD_ID')
+ trex_repo = os.environ.get('TREX_CORE_REPO')
+ python_ver = os.environ.get('PYTHON_VER')
+ if not scenario:
+ print('Warning: no environment variable SCENARIO, using default')
+ scenario = 'TRex regression'
+ if not build_url:
+ print('Warning: no environment variable BUILD_URL')
+ if not build_id:
+ print('Warning: no environment variable BUILD_ID')
+ if not python_ver:
+ print('Warning: no environment variable PYTHON_VER')
+
+ trex_info_dict = OrderedDict()
+ for file in glob.glob('%s/report_*.info' % args.input_dir):
+ with open(file) as f:
+ file_lines = f.readlines()
+ if not len(file_lines):
+ continue # to next file
+ for info_line in file_lines:
+ key_value = info_line.split(':', 1)
+ not_trex_keys = ['Server', 'Router', 'User']
+ if key_value[0].strip() in not_trex_keys:
+ continue # to next parameters
+ trex_info_dict[key_value[0].strip()] = key_value[1].strip()
+ break
+
+ trex_last_commit_info = ''
+ trex_last_commit_hash = trex_info_dict.get('Git SHA')
+ if trex_last_commit_hash and trex_repo:
+ try:
+ print('Getting TRex commit with hash %s' % trex_last_commit_hash)
+ command = 'git --git-dir %s show %s --quiet' % (trex_repo, trex_last_commit_hash)
+ print('Executing: %s' % command)
+ proc = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (trex_last_commit_info, stderr) = proc.communicate()
+ print('Stdout:\n\t' + trex_last_commit_info.replace('\n', '\n\t'))
+ print('Stderr:', stderr)
+ print('Return code:', proc.returncode)
+ trex_last_commit_info = trex_last_commit_info.replace('\n', '<br>')
+ except Exception as e:
+ print('Error getting last commit: %s' % e)
+
+##### get xmls: report_<setup name>.xml
+
+ err = []
+ jobs_list = []
+ jobs_file = '%s/jobs_list.info' % args.input_dir
+ if os.path.exists(jobs_file):
+ with open('%s/jobs_list.info' % args.input_dir) as f:
+ for line in f.readlines():
+ line = line.strip()
+ if line:
+ jobs_list.append(line)
+ else:
+ message = '%s does not exist!' % jobs_file
+ print(message)
+ err.append(message)
+
+##### aggregate results to 1 single tree
+ aggregated_root = ET.Element('testsuite')
+ test_types = ('functional', 'stateful', 'stateless')
+ setups = {}
+ for job in jobs_list:
+ setups[job] = {}
+ for test_type in test_types:
+ xml_file = '%s/report_%s_%s.xml' % (args.input_dir, job, test_type)
+ if not os.path.exists(xml_file):
+ continue
+ if os.path.basename(xml_file) == os.path.basename(args.output_xmlfile):
+ continue
+ setups[job][test_type] = []
+ print('Processing report: %s.%s' % (job, test_type))
+ tree = ET.parse(xml_file)
+ root = tree.getroot()
+ for key, value in root.attrib.items():
+ if key in aggregated_root.attrib and value.isdigit(): # sum total number of failed tests etc.
+ aggregated_root.attrib[key] = str(int(value) + int(aggregated_root.attrib[key]))
+ else:
+ aggregated_root.attrib[key] = value
+ tests = root.getchildren()
+ if not len(tests): # there should be tests:
+ message = 'No tests in xml %s' % xml_file
+ print(message)
+ #err.append(message)
+ for test in tests:
+ setups[job][test_type].append(test)
+ test.attrib['name'] = test.attrib['classname'] + '.' + test.attrib['name']
+ test.attrib['classname'] = job
+ aggregated_root.append(test)
+ if not sum([len(x) for x in setups[job].values()]):
+ message = 'No reports from setup %s!' % job
+ print(message)
+ err.append(message)
+ continue
+
+ total_tests_count = int(aggregated_root.attrib.get('tests', 0))
+ error_tests_count = int(aggregated_root.attrib.get('errors', 0))
+ failure_tests_count = int(aggregated_root.attrib.get('failures', 0))
+ skipped_tests_count = int(aggregated_root.attrib.get('skip', 0))
+ passed_tests_count = total_tests_count - error_tests_count - failure_tests_count - skipped_tests_count
+
+ tests_count_string = mark_string('Total: %s' % total_tests_count, 'red', total_tests_count == 0) + ', '
+ tests_count_string += mark_string('Passed: %s' % passed_tests_count, 'red', error_tests_count + failure_tests_count > 0) + ', '
+ tests_count_string += mark_string('Error: %s' % error_tests_count, 'red', error_tests_count > 0) + ', '
+ tests_count_string += mark_string('Failure: %s' % failure_tests_count, 'red', failure_tests_count > 0) + ', '
+ tests_count_string += 'Skipped: %s' % skipped_tests_count
+
+##### save output xml
+
+ print('Writing output file: %s' % args.output_xmlfile)
+ ET.ElementTree(aggregated_root).write(args.output_xmlfile)
+
+
+##### build output html
+ error_tests = []
+ functional_tests = OrderedDict()
+ # categorize and get output of each test
+ for test in aggregated_root.getchildren(): # each test in xml
+ if is_functional_test_name(test.attrib['name']):
+ functional_tests[test.attrib['name']] = test
+ result_tuple = None
+ for child in test.getchildren(): # <system-out>, <system-err> (<failure>, <error>, <skipped> other: passed)
+# if child.tag in ('failure', 'error'):
+ #temp = copy.deepcopy(test)
+ #print temp._children
+ #print test._children
+# error_tests.append(test)
+ if child.tag == 'failure':
+ error_tests.append(test)
+ result_tuple = ('failure', child.text)
+ elif child.tag == 'error':
+ error_tests.append(test)
+ result_tuple = ('error', child.text)
+ elif child.tag == 'skipped':
+ result_tuple = ('skipped', child.text)
+ elif child.tag == 'system-out':
+ test.attrib['stdout'] = child.text
+ elif child.tag == 'system-err':
+ test.attrib['stderr'] = child.text
+ if result_tuple:
+ test.attrib['result'] = result_tuple
+
+ html_output = '''\
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<style type="text/css">
+'''
+ html_output += style_css
+ html_output +='''
+</style>
+</head>
+
+<body>
+<table class="reference">
+'''
+ if scenario:
+ html_output += add_th_td('Scenario:', scenario.capitalize())
+ if python_ver:
+ html_output += add_th_td('Python:', python_ver)
+ start_time_file = '%s/start_time.info' % args.input_dir
+ if os.path.exists(start_time_file):
+ with open(start_time_file) as f:
+ start_time = int(f.read())
+ total_time = int(time.time()) - start_time
+ html_output += add_th_td('Regression start:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M'))
+ html_output += add_th_td('Regression duration:', datetime.timedelta(seconds = total_time))
+ html_output += add_th_td('Tests count:', tests_count_string)
+ for key in trex_info_dict:
+ if key == 'Git SHA':
+ continue
+ html_output += add_th_td(key, trex_info_dict[key])
+ if trex_last_commit_info:
+ html_output += add_th_td('Last commit:', trex_last_commit_info)
+ html_output += '</table><br>\n'
+ if err:
+ html_output += '<font color=red>%s<font><br><br>\n' % '\n<br>'.join(err)
+
+#<table style="width:100%;">
+# <tr>
+# <td>Summary:</td>\
+#'''
+ #passed_quantity = len(result_types['passed'])
+ #failed_quantity = len(result_types['failed'])
+ #error_quantity = len(result_types['error'])
+ #skipped_quantity = len(result_types['skipped'])
+
+ #html_output += '<td>Passed: %s</td>' % passed_quantity
+ #html_output += '<td>Failed: %s</td>' % (pad_tag(failed_quantity, 'b') if failed_quantity else '0')
+ #html_output += '<td>Error: %s</td>' % (pad_tag(error_quantity, 'b') if error_quantity else '0')
+ #html_output += '<td>Skipped: %s</td>' % (pad_tag(skipped_quantity, 'b') if skipped_quantity else '0')
+# html_output += '''
+# </tr>
+#</table>'''
+
+ category_arr = [FUNCTIONAL_CATEGORY, ERROR_CATEGORY]
+
+# Adding buttons
+ # Error button
+ if len(error_tests):
+ html_output += '\n<button onclick=tgl_cat("cat_tglr_{error}")>{error}</button>'.format(error = ERROR_CATEGORY)
+ # Setups buttons
+ for category in sorted(setups.keys()):
+ category_arr.append(category)
+ html_output += '\n<button onclick=tgl_cat("cat_tglr_%s")>%s</button>' % (category_arr[-1], category)
+ # Functional buttons
+ if len(functional_tests):
+ html_output += '\n<button onclick=tgl_cat("cat_tglr_%s")>%s</button>' % (FUNCTIONAL_CATEGORY, FUNCTIONAL_CATEGORY)
+
+# Adding tests
+ # Error tests
+ if len(error_tests):
+ html_output += '<div style="display:block;" id="cat_tglr_%s">' % ERROR_CATEGORY
+ html_output += add_category_of_tests(ERROR_CATEGORY, error_tests)
+ html_output += '</div>'
+ # Setups tests
+ for category, tests in setups.items():
+ html_output += '<div style="display:none;" id="cat_tglr_%s">' % category
+ if 'stateful' in tests:
+ html_output += add_category_of_tests(category, tests['stateful'], 'stateful', category_info_dir=args.input_dir)
+ if 'stateless' in tests:
+ html_output += add_category_of_tests(category, tests['stateless'], 'stateless', category_info_dir=(None if 'stateful' in tests else args.input_dir))
+ html_output += '</div>'
+ # Functional tests
+ if len(functional_tests):
+ html_output += '<div style="display:none;" id="cat_tglr_%s">' % FUNCTIONAL_CATEGORY
+ html_output += add_category_of_tests(FUNCTIONAL_CATEGORY, functional_tests.values())
+ html_output += '</div>'
+
+ html_output += '\n\n<script type="text/javascript">\n var category_arr = %s\n' % ['cat_tglr_%s' % x for x in category_arr]
+ html_output += '''
+ function tgl_cat(id)
+ {
+ for(var i=0; i<category_arr.length; i++)
+ {
+ var e = document.getElementById(category_arr[i]);
+ if (id == category_arr[i])
+ {
+ if(e.style.display == 'block')
+ e.style.display = 'none';
+ else
+ e.style.display = 'block';
+ }
+ else
+ {
+ if (e) e.style.display = 'none';
+ }
+ }
+ }
+ function tgl_test(id)
+ {
+ var e = document.getElementById(id);
+ if(e.style.display == 'table-row')
+ e.style.display = 'none';
+ else
+ e.style.display = 'table-row';
+ }
+</script>
+</body>
+</html>\
+'''
+
+# save html
+ with open(args.output_htmlfile, 'w') as f:
+ print('Writing output file: %s' % args.output_htmlfile)
+ f.write(html_output)
+ html_output = None
+
+# mail report (only error tests, expanded)
+
+ mail_output = '''\
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<style type="text/css">
+'''
+ mail_output += style_css
+ mail_output +='''
+</style>
+</head>
+
+<body>
+<table class="reference">
+'''
+ if scenario:
+ mail_output += add_th_td('Scenario:', scenario.capitalize())
+ if python_ver:
+ mail_output += add_th_td('Python:', python_ver)
+ if build_url:
+ mail_output += add_th_td('Full HTML report:', '<a class="example" href="%s/HTML_Report">link</a>' % build_url)
+ start_time_file = '%s/start_time.info' % args.input_dir
+ if os.path.exists(start_time_file):
+ with open(start_time_file) as f:
+ start_time = int(f.read())
+ total_time = int(time.time()) - start_time
+ mail_output += add_th_td('Regression start:', datetime.datetime.fromtimestamp(start_time).strftime('%d/%m/%Y %H:%M'))
+ mail_output += add_th_td('Regression duration:', datetime.timedelta(seconds = total_time))
+ mail_output += add_th_td('Tests count:', tests_count_string)
+ for key in trex_info_dict:
+ if key == 'Git SHA':
+ continue
+ mail_output += add_th_td(key, trex_info_dict[key])
+
+ if trex_last_commit_info:
+ mail_output += add_th_td('Last commit:', trex_last_commit_info)
+ mail_output += '</table><br>\n<table width=100%><tr><td>\n'
+
+ for category in setups.keys():
+ failing_category = False
+ for test in error_tests:
+ if test.attrib['classname'] == category:
+ failing_category = True
+ if failing_category or not len(setups[category]) or not sum([len(x) for x in setups[category]]):
+ mail_output += '<table class="reference_fail" align=left style="Margin-bottom:10;Margin-right:10;">\n'
+ else:
+ mail_output += '<table class="reference" align=left style="Margin-bottom:10;Margin-right:10;">\n'
+ mail_output += add_th_th('Setup:', pad_tag(category.replace('.', '/'), 'b'))
+ category_info_file = '%s/report_%s.info' % (args.input_dir, category.replace('.', '_'))
+ if os.path.exists(category_info_file):
+ with open(category_info_file) as f:
+ for info_line in f.readlines():
+ key_value = info_line.split(':', 1)
+ if key_value[0].strip() in list(trex_info_dict.keys()) + ['User']: # always 'hhaim', no need to show
+ continue
+ mail_output += add_th_td('%s:' % key_value[0].strip(), key_value[1].strip())
+ else:
+ mail_output += add_th_td('Info:', 'No info')
+ mail_output += '</table>\n'
+ mail_output += '</td></tr></table>\n'
+
+ # Error tests
+ if len(error_tests) or err:
+ if err:
+ mail_output += '<font color=red>%s<font>' % '\n<br>'.join(err)
+ if len(error_tests) > 5:
+ mail_output += '\n<font color=red>More than 5 failed tests, showing brief output.<font>\n<br>'
+ # show only brief version (cut some info)
+ mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, expanded=True, brief=True)
+ else:
+ mail_output += add_category_of_tests(ERROR_CATEGORY, error_tests, expanded=True)
+ else:
+ mail_output += '<table><tr style="font-size:120;color:green;font-family:arial"><td>☺</td><td style="font-size:20">All passed.</td></tr></table>\n'
+ mail_output += '\n</body>\n</html>'
+
+##### save outputs
+
+
+# mail content
+ with open(args.output_mailfile, 'w') as f:
+ print('Writing output file: %s' % args.output_mailfile)
+ f.write(mail_output)
+
+# build status
+ category_dict_status = {}
+ if os.path.exists(args.build_status_file):
+ print('Reading: %s' % args.build_status_file)
+ with open(args.build_status_file, 'rb') as f:
+ try:
+ category_dict_status = pickle.load(f)
+ except Exception as e:
+ print('Error during pickle load: %s' % e)
+ if type(category_dict_status) is not dict:
+ print('%s is corrupt, truncating' % args.build_status_file)
+ category_dict_status = {}
+
+ last_status = category_dict_status.get(scenario, 'Successful') # assume last is passed if no history
+ if err or len(error_tests): # has fails
+ exit_status = 1
+ if is_good_status(last_status):
+ current_status = 'Failure'
+ else:
+ current_status = 'Still Failing'
+ else:
+ exit_status = 0
+ if is_good_status(last_status):
+ current_status = 'Successful'
+ else:
+ current_status = 'Fixed'
+ category_dict_status[scenario] = current_status
+
+ with open(args.build_status_file, 'wb') as f:
+ print('Writing output file: %s' % args.build_status_file)
+ pickle.dump(category_dict_status, f)
+
+# last successful commit
+ if (current_status in ('Successful', 'Fixed')) and trex_last_commit_hash and jobs_list > 0 and scenario == 'nightly':
+ with open(args.last_passed_commit, 'w') as f:
+ print('Writing output file: %s' % args.last_passed_commit)
+ f.write(trex_last_commit_hash)
+
+# mail title
+ mailtitle_output = scenario.capitalize()
+ if build_id:
+ mailtitle_output += ' - Build #%s' % build_id
+ mailtitle_output += ' - %s!' % current_status
+
+ with open(args.output_titlefile, 'w') as f:
+ print('Writing output file: %s' % args.output_titlefile)
+ f.write(mailtitle_output)
+
+# exit
+ sys.exit(exit_status)
diff --git a/scripts/automation/regression/functional_tests/config.yaml b/scripts/automation/regression/functional_tests/config.yaml
new file mode 100644
index 00000000..e1bc2016
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/config.yaml
@@ -0,0 +1,74 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : hostname
+ password : root password
+ version_path : not used
+ cores : 1
+
+router:
+ model : device model
+ hostname : device hostname
+ ip_address : device ip
+ image : device image name
+ line_password : telnet pass
+ en_password : enable pass
+ mgmt_interface : GigabitEthernet0/0/0
+ clean_config : path to clean_config file
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : GigabitEthernet0/0/1
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.1000.0000
+ server :
+ name : GigabitEthernet0/0/2
+ src_mac_addr : 0000.0002.0000
+ dest_mac_addr : 0000.2000.0000
+ vrf_name : null
+ - client :
+ name : GigabitEthernet0/0/3
+ src_mac_addr : 0000.0003.0000
+ dest_mac_addr : 0000.3000.0000
+ server :
+ name : GigabitEthernet0/0/4
+ src_mac_addr : 0000.0004.0000
+ dest_mac_addr : 0000.4000.0000
+ vrf_name : dup
+
+
+tftp:
+ hostname : tftp hostname
+ ip_address : tftp ip
+ root_dir : tftp root dir
+ images_path : path related to root dir
diff --git a/scripts/automation/regression/functional_tests/cpp_gtests_test.py b/scripts/automation/regression/functional_tests/cpp_gtests_test.py
new file mode 100644
index 00000000..6535da84
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/cpp_gtests_test.py
@@ -0,0 +1,46 @@
+import outer_packages
+from nose.plugins.attrib import attr
+import functional_general_test
+from trex import CTRexScenario
+import os, sys
+from subprocess import Popen, STDOUT
+import shlex
+import time
+import errno
+import tempfile
+
+# runs command
def run_command(command, timeout = 15, poll_rate = 0.1, cwd = None):
    '''Run a shell command, return (returncode, combined stdout+stderr text).

    Output is captured through a temporary file instead of PIPE, because pipe
    buffers can deadlock on large output even with a timeout.
    If the command does not finish within `timeout` seconds it is killed and
    errno.ETIME is returned as the status, with a note appended to the output.
    '''
    with tempfile.TemporaryFile() as stdout_file:
        proc = Popen(shlex.split(command), stdout = stdout_file, stderr = STDOUT, cwd = cwd, close_fds = True, universal_newlines = True)
        if timeout > 0:
            for i in range(int(timeout/poll_rate)):
                time.sleep(poll_rate)
                if proc.poll() is not None: # process stopped
                    break
        if proc.poll() is None:
            proc.kill() # timeout
            proc.wait() # reap the killed child, otherwise it lingers as a zombie
            stdout_file.seek(0)
            return (errno.ETIME, '%s\n\n...Timeout of %s second(s) is reached!' % (stdout_file.read().decode(errors = 'replace'), timeout))
        else:
            proc.wait()
            stdout_file.seek(0)
            return (proc.returncode, stdout_file.read().decode(errors = 'replace'))
+
@attr('run_on_trex')
class CPP_Test(functional_general_test.CGeneralFunctional_Test):
    '''Wraps the C++ unit tests (gtests) of bp-sim as functional regression tests.'''

    def _run_and_verify(self, command, error_fmt):
        # shared helper: run command from the scripts dir, dump its output,
        # raise if the exit status is non-zero
        print('')
        ret, out = run_command(command, cwd = CTRexScenario.scripts_path)
        print('Output:\n%s' % out)
        if ret:
            raise Exception(error_fmt % ret)

    def test_gtests_all(self):
        # full gtest suite of the bp-sim-64 simulator
        bp_sim = os.path.join(CTRexScenario.scripts_path, 'bp-sim-64')
        self._run_and_verify('%s --ut' % bp_sim,
                             'Non zero return status of gtests (%s)')

    def test_gtests_valgrind(self):
        # same gtests, executed under Valgrind via the run-gtest-clean wrapper
        self._run_and_verify(os.path.join(CTRexScenario.scripts_path, 'run-gtest-clean'),
                             'Non zero return status of Valgrind gtests (%s)')
diff --git a/scripts/automation/regression/functional_tests/filters_test.py b/scripts/automation/regression/functional_tests/filters_test.py
new file mode 100644
index 00000000..fbb8a126
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/filters_test.py
@@ -0,0 +1,100 @@
+#!/router/bin/python
+
+import functional_general_test
+from trex_stl_lib.utils import filters
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import assert_true, assert_false
+from nose.tools import raises
+
+
class ToggleFilter_Test(functional_general_test.CGeneralFunctional_Test):
    '''Unit tests of filters.ToggleFilter over dict/list/set/tuple sources.'''

    def setUp(self):
        # fresh source databases for every test
        self.list_db = [1, 2, 3, 4, 5]
        self.set_db = {1, 2, 3, 4, 5}
        self.tuple_db = (1, 2, 3, 4, 5)
        self.dict_db = {str(x): x ** 2 for x in range(5)}

    def tearDown(self):
        pass

    def test_init_with_dict(self):
        flt = filters.ToggleFilter(self.dict_db)
        assert_equal(flt._toggle_db, set(self.dict_db.keys()))
        assert_equal(flt.filter_items(), self.dict_db)

    def test_init_with_list(self):
        flt = filters.ToggleFilter(self.list_db)
        assert_equal(flt._toggle_db, set(self.list_db))
        assert_equal(flt.filter_items(), self.list_db)

    def test_init_with_set(self):
        flt = filters.ToggleFilter(self.set_db)
        assert_equal(flt._toggle_db, self.set_db)
        assert_equal(flt.filter_items(), self.set_db)

    def test_init_with_tuple(self):
        flt = filters.ToggleFilter(self.tuple_db)
        assert_equal(flt._toggle_db, set(self.tuple_db))
        assert_equal(flt.filter_items(), self.tuple_db)

    @raises(TypeError)
    def test_init_with_non_iterable(self):
        # a non-iterable source is rejected at construction time
        filters.ToggleFilter(15)

    def test_dict_toggeling(self):
        flt = filters.ToggleFilter(self.dict_db)
        # toggle a key off, then back on
        assert_false(flt.toggle_item("3"))
        assert_equal(flt._toggle_db, {'0', '1', '2', '4'})
        assert_true(flt.toggle_item("3"))
        assert_equal(flt._toggle_db, {'0', '1', '2', '3', '4'})
        assert_false(flt.toggle_item("2"))
        assert_false(flt.toggle_item("4"))
        # keys added to the source dict after construction can be toggled too
        self.dict_db.update({'5': 25, '6': 36})
        assert_true(flt.toggle_item("6"))
        assert_equal(flt.filter_items(), {'0': 0, '1': 1, '3': 9, '6': 36})
        # deletions from the source dict drop out of the filtered view
        del self.dict_db['1']
        assert_equal(flt.filter_items(), {'0': 0, '3': 9, '6': 36})

    def test_dict_toggeling_negative(self):
        # toggling a key missing from the source raises KeyError
        flt = filters.ToggleFilter(self.dict_db)
        assert_raises(KeyError, flt.toggle_item, "100")

    def test_list_toggeling(self):
        flt = filters.ToggleFilter(self.list_db)
        assert_false(flt.toggle_item(3))
        assert_equal(flt._toggle_db, {1, 2, 4, 5})
        assert_true(flt.toggle_item(3))
        assert_equal(flt._toggle_db, {1, 2, 3, 4, 5})
        assert_false(flt.toggle_item(2))
        assert_false(flt.toggle_item(4))
        # values appended to the source list after construction can be toggled too
        self.list_db.extend([6, 7])
        assert_true(flt.toggle_item(6))
        assert_equal(flt.filter_items(), [1, 3, 5, 6])
        # removals from the source list drop out of the filtered view
        self.list_db.remove(1)
        assert_equal(flt.filter_items(), [3, 5, 6])

    def test_list_toggling_negative(self):
        # toggling a value missing from the source raises KeyError
        flt = filters.ToggleFilter(self.list_db)
        assert_raises(KeyError, flt.toggle_item, 10)

    def test_toggle_multiple_items(self):
        # several items can be toggled in one call
        flt = filters.ToggleFilter(self.list_db)
        assert_false(flt.toggle_items(1, 3, 5))
        assert_equal(flt._toggle_db, {2, 4})
        assert_true(flt.toggle_items(1, 5))
        assert_equal(flt._toggle_db, {1, 2, 4, 5})

    def test_dont_show_after_init(self):
        # with show_by_default=False the filter starts out hiding everything
        flt = filters.ToggleFilter(self.list_db, show_by_default = False)
        assert_equal(flt._toggle_db, set())
        assert_equal(flt.filter_items(), [])
diff --git a/scripts/automation/regression/functional_tests/functional_general_test.py b/scripts/automation/regression/functional_tests/functional_general_test.py
new file mode 100755
index 00000000..525b58d2
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/functional_general_test.py
@@ -0,0 +1,22 @@
+#!/router/bin/python
+
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+
+
class CGeneralFunctional_Test(object):
    '''Base class for functional (no-hardware) regression tests.

    Provides empty setUp/tearDown hooks so that subclasses
    override only the pieces they actually need.
    '''

    def __init__(self):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        pass
+
# nothing to run when executed directly; this module only provides a base class
if __name__ == "__main__":
    pass
diff --git a/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap
new file mode 100644
index 00000000..6ca32299
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap
new file mode 100644
index 00000000..43ae2368
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap
new file mode 100644
index 00000000..7d5e7ec2
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/golden/udp_590.cap b/scripts/automation/regression/functional_tests/golden/udp_590.cap
new file mode 100644
index 00000000..29302f22
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/udp_590.cap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py b/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py
new file mode 100755
index 00000000..c6b477aa
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py
@@ -0,0 +1,629 @@
+#!/router/bin/python
+
+import os
+import unittest
+from trex_stl_lib.trex_stl_hltapi import STLHltStream
+from trex_stl_lib.trex_stl_types import validate_type
+from nose.plugins.attrib import attr
+from nose.tools import nottest
+
def compare_yamls(yaml1, yaml2):
    '''Assert that two YAML strings are equal line by line.

    Raises AssertionError naming the first mismatching line.
    Also fails when one yaml is a strict prefix of the other:
    bare zip() truncates at the shorter input, which previously
    let yamls of different lengths pass as equal.
    '''
    validate_type('yaml1', yaml1, str)
    validate_type('yaml2', yaml2, str)
    lines1 = yaml1.strip().split('\n')
    lines2 = yaml2.strip().split('\n')
    for i, (line1, line2) in enumerate(zip(lines1, lines2), start = 1):
        assert line1 == line2, 'yamls are not equal starting from line %s:\n%s\n Golden <-> Generated\n%s' % (i, line1.strip(), line2.strip())
    assert len(lines1) == len(lines2), 'yamls are not equal: line count differs (%s <-> %s)' % (len(lines1), len(lines2))
+
+# TODO: move the tests to compare pcaps, not yamls
+@nottest
+class CTRexHltApi_Test(unittest.TestCase):
+ ''' Checks correct HLTAPI creation of packet/VM '''
+
+ def setUp(self):
+ self.golden_yaml = None
+ self.test_yaml = None
+
    def tearDown(self):
        # the real assertion of every test: generated yaml must equal the golden one
        compare_yamls(self.golden_yaml, self.test_yaml)
+
+ # Eth/IP/TCP, all values default, no VM instructions + test MACs correction
+ def test_hlt_basic(self):
+ STLHltStream(mac_src = 'a0:00:01:::01', mac_dst = '0d 00 01 00 00 01',
+ mac_src2 = '{00 b0 01 00 00 01}', mac_dst2 = 'd0.00.01.00.00.01')
+ with self.assertRaises(Exception):
+ STLHltStream(mac_src2 = '00:00:00:00:00:0k')
+ with self.assertRaises(Exception):
+ STLHltStream(mac_dst2 = '100:00:00:00:00:00')
+ # wrong encap
+ with self.assertRaises(Exception):
+ STLHltStream(l2_encap = 'ethernet_sdfgsdfg')
+ # all default values
+ test_stream = STLHltStream()
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEAGusUAAAAAwAAAAQQAAFAAAAABAAAAAVAAD+U1/QAAISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions: []
+ split_by_var: ''
+'''
+
+ # Eth/IP/TCP, test MAC fields VM, wait for masking of variables for MAC
    @nottest
    def test_macs_vm(self):
        # disabled via nottest: waiting for masking of VM variables for MAC fields;
        # the golden yaml is a 'TBD' placeholder, so the tearDown comparison would fail
        test_stream = STLHltStream(name = 'stream-0', )
        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
        self.golden_yaml = '''
TBD
'''
+
+
+ # Eth/IP/TCP, ip src and dest is changed by VM
+ def test_ip_ranges(self):
+ # running on single core not implemented yet
+ with self.assertRaises(Exception):
+ test_stream = STLHltStream(split_by_cores = 'single',
+ ip_src_addr = '192.168.1.1',
+ ip_src_mode = 'increment',
+ ip_src_count = 5,)
+ # wrong type
+ with self.assertRaises(Exception):
+ test_stream = STLHltStream(split_by_cores = 12345,
+ ip_src_addr = '192.168.1.1',
+ ip_src_mode = 'increment',
+ ip_src_count = 5,)
+
+ test_stream = STLHltStream(split_by_cores = 'duplicate',
+ ip_src_addr = '192.168.1.1',
+ ip_src_mode = 'increment',
+ ip_src_count = 5,
+ ip_dst_addr = '5.5.5.5',
+ ip_dst_count = 2,
+ ip_dst_mode = 'random',
+ name = 'test_ip_ranges',
+ rate_pps = 1)
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_ip_ranges
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ pps: 1.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEAGrxPAqAEBBQUFBQQAAFAAAAABAAAAAVAAD+UqSwAAISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 0
+ max_value: 4
+ min_value: 0
+ name: inc_4_4_1
+ op: inc
+ size: 4
+ step: 1
+ type: flow_var
+ - add_value: 3232235777
+ is_big_endian: true
+ name: inc_4_4_1
+ pkt_offset: 26
+ type: write_flow_var
+ - init_value: 0
+ max_value: 4294967295
+ min_value: 0
+ name: ip_dst_random
+ op: random
+ size: 4
+ step: 1
+ type: flow_var
+ - add_value: 0
+ is_big_endian: true
+ name: ip_dst_random
+ pkt_offset: 30
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: ''
+'''
+
+ # Eth / IP / TCP, tcp ports are changed by VM
+ def test_tcp_ranges(self):
+ test_stream = STLHltStream(tcp_src_port_mode = 'decrement',
+ tcp_src_port_count = 10,
+ tcp_dst_port_mode = 'random',
+ tcp_dst_port_count = 10,
+ tcp_dst_port = 1234,
+ name = 'test_tcp_ranges',
+ rate_pps = '2')
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_tcp_ranges
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ pps: 2.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEAGusUAAAAAwAAAAQQABNIAAAABAAAAAVAAD+UxewAAISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 9
+ max_value: 9
+ min_value: 0
+ name: dec_2_9_1
+ op: dec
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 1015
+ is_big_endian: true
+ name: dec_2_9_1
+ pkt_offset: 34
+ type: write_flow_var
+ - init_value: 0
+ max_value: 65535
+ min_value: 0
+ name: tcp_dst_random
+ op: random
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 0
+ is_big_endian: true
+ name: tcp_dst_random
+ pkt_offset: 36
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: dec_2_9_1
+'''
+
+ # Eth / IP / UDP, udp ports are changed by VM
+ def test_udp_ranges(self):
+ # UDP is not set, expecting ignore of wrong UDP arguments
+ STLHltStream(udp_src_port_mode = 'qwerqwer',
+ udp_src_port_count = 'weqwer',
+ udp_src_port = 'qwerqwer',
+ udp_dst_port_mode = 'qwerqwe',
+ udp_dst_port_count = 'sfgsdfg',
+ udp_dst_port = 'sdfgsdfg')
+ # UDP is set, expecting fail due to wrong UDP arguments
+ with self.assertRaises(Exception):
+ STLHltStream(l4_protocol = 'udp',
+ udp_src_port_mode = 'qwerqwer',
+ udp_src_port_count = 'weqwer',
+ udp_src_port = 'qwerqwer',
+ udp_dst_port_mode = 'qwerqwe',
+ udp_dst_port_count = 'sfgsdfg',
+ udp_dst_port = 'sdfgsdfg')
+ # generate it already with correct arguments
+ test_stream = STLHltStream(l4_protocol = 'udp',
+ udp_src_port_mode = 'decrement',
+ udp_src_port_count = 10,
+ udp_src_port = 1234,
+ udp_dst_port_mode = 'increment',
+ udp_dst_port_count = 10,
+ udp_dst_port = 1234,
+ name = 'test_udp_ranges',
+ rate_percent = 20,)
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_udp_ranges
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 20.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEARuroAAAAAwAAAAQTSBNIAHsmgISEhISEhISEhISEhISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 9
+ max_value: 9
+ min_value: 0
+ name: dec_2_9_1
+ op: dec
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 1225
+ is_big_endian: true
+ name: dec_2_9_1
+ pkt_offset: 34
+ type: write_flow_var
+ - init_value: 0
+ max_value: 9
+ min_value: 0
+ name: inc_2_9_1
+ op: inc
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 1234
+ is_big_endian: true
+ name: inc_2_9_1
+ pkt_offset: 36
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: dec_2_9_1
+'''
+
+ # Eth/IP/TCP, packet length is changed in VM by frame_size
+ def test_pkt_len_by_framesize(self):
+ # just check errors, no compare to golden
+ STLHltStream(length_mode = 'increment',
+ frame_size_min = 100,
+ frame_size_max = 3000)
+ test_stream = STLHltStream(length_mode = 'decrement',
+ frame_size_min = 100,
+ frame_size_max = 3000,
+ name = 'test_pkt_len_by_framesize',
+ rate_bps = 1000)
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_pkt_len_by_framesize
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ bps_L2: 1000.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAuqAAAAAEAGr00AAAAAwAAAAQQAAFAAAAABAAAAAVAAD+UwiwAAISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhIS
EhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhIS
EhISEhISEh
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 3000
+ max_value: 3000
+ min_value: 100
+ name: pkt_len
+ op: dec
+ size: 2
+ step: 1
+ type: flow_var
+ - name: pkt_len
+ type: trim_pkt_size
+ - add_value: -14
+ is_big_endian: true
+ name: pkt_len
+ pkt_offset: 16
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: pkt_len
+'''
+
+ # Eth/IP/UDP, packet length is changed in VM by l3_length
+ def test_pkt_len_by_l3length(self):
+ test_stream = STLHltStream(l4_protocol = 'udp',
+ length_mode = 'random',
+ l3_length_min = 100,
+ l3_length_max = 400,
+ name = 'test_pkt_len_by_l3length')
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_pkt_len_by_l3length
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABCABFAAGQAAAAAEARuVwAAAAAwAAAAQQAAFABfCaTISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEh
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 114
+ max_value: 414
+ min_value: 114
+ name: pkt_len
+ op: random
+ size: 2
+ step: 1
+ type: flow_var
+ - name: pkt_len
+ type: trim_pkt_size
+ - add_value: -14
+ is_big_endian: true
+ name: pkt_len
+ pkt_offset: 16
+ type: write_flow_var
+ - add_value: -34
+ is_big_endian: true
+ name: pkt_len
+ pkt_offset: 38
+ type: write_flow_var
+ - pkt_offset: 14
+ type: fix_checksum_ipv4
+ split_by_var: ''
+'''
+
+ # Eth/IP/TCP, with vlan, no VM
+ def test_vlan_basic(self):
+ with self.assertRaises(Exception):
+ STLHltStream(l2_encap = 'ethernet_ii',
+ vlan_id = 'sdfgsdgf')
+ test_stream = STLHltStream(l2_encap = 'ethernet_ii')
+ assert ':802.1Q:' not in test_stream.get_pkt_type(), 'Default packet should not include dot1q'
+
+ test_stream = STLHltStream(name = 'test_vlan_basic', l2_encap = 'ethernet_ii_vlan')
+ assert ':802.1Q:' in test_stream.get_pkt_type(), 'No dot1q in packet with encap ethernet_ii_vlan'
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_vlan_basic
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABgQAwAAgARQAALgAAAABABrrJAAAAAMAAAAEEAABQAAAAAQAAAAFQAA/leEMAACEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions: []
+ split_by_var: ''
+'''
+
+ # Eth/IP/TCP, with 4 vlan
+ def test_vlan_multiple(self):
+        # default frame size should not be enough for 4 vlans; expecting fail
+ with self.assertRaises(Exception):
+ STLHltStream(vlan_id = [1, 2, 3, 4])
+ test_stream = STLHltStream(name = 'test_vlan_multiple', frame_size = 100,
+ vlan_id = [1, 2, 3, 4], # can be either array or string separated by spaces
+ vlan_protocol_tag_id = '8100 0x8100')
+ pkt_layers = test_stream.get_pkt_type()
+ assert '802.1Q:' * 4 in pkt_layers, 'No four dot1q layers in packet: %s' % pkt_layers
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_vlan_multiple
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABgQAwAYEAMAKBADADgQAwBAgARQAARgAAAABABrqxAAAAAMAAAAEEAABQAAAAAQAAAAFQAA/l6p0AACEhISEhISEhISEhISEhISEhISEhISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions: []
+ split_by_var: ''
+'''
+
+ # Eth/IP/TCP, with 5 vlans and VMs on vlan_id
+ def test_vlan_vm(self):
+ test_stream = STLHltStream(name = 'test_vlan_vm', frame_size = 100,
+ vlan_id = '1 2 1000 4 5', # 5 vlans
+ vlan_id_mode = 'increment fixed decrement random', # 5th vlan will be default fixed
+ vlan_id_step = 2, # 1st vlan step will be 2, others - default 1
+ vlan_id_count = [4, 1, 10], # 4th independent on count, 5th will be fixed
+ )
+ pkt_layers = test_stream.get_pkt_type()
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ assert '802.1Q:' * 5 in pkt_layers, 'No five dot1q layers in packet: %s' % pkt_layers
+ self.golden_yaml = '''
+- name: test_vlan_vm
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABgQAwAYEAMAKBADPogQAwBIEAMAUIAEUAAEIAAAAAQAa6tQAAAADAAAABBAAAUAAAAAEAAAABUAAP5SzkAAAhISEhISEhISEhISEhISEhISEhISEhISEhIQ==
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 0
+ max_value: 6
+ min_value: 0
+ name: dec_2_3_2
+ op: inc
+ size: 2
+ step: 2
+ type: flow_var
+ - add_value: 1
+ is_big_endian: true
+ mask: 4095
+ name: dec_2_3_2
+ pkt_cast_size: 2
+ pkt_offset: 14
+ shift: 0
+ type: write_mask_flow_var
+ - init_value: 9
+ max_value: 9
+ min_value: 0
+ name: dec_2_9_1
+ op: dec
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 991
+ is_big_endian: true
+ mask: 4095
+ name: dec_2_9_1
+ pkt_cast_size: 2
+ pkt_offset: 22
+ shift: 0
+ type: write_mask_flow_var
+ - init_value: 0
+ max_value: 65535
+ min_value: 0
+ name: vlan_id_random
+ op: random
+ size: 2
+ step: 1
+ type: flow_var
+ - add_value: 0
+ is_big_endian: true
+ mask: 4095
+ name: vlan_id_random
+ pkt_cast_size: 2
+ pkt_offset: 26
+ shift: 0
+ type: write_mask_flow_var
+ split_by_var: dec_2_9_1
+'''
+
+
+ # Eth/IPv6/TCP, no VM
+ def test_ipv6_basic(self):
+        # default frame size should not be enough for IPv6; expecting fail
+ with self.assertRaises(Exception):
+ STLHltStream(l3_protocol = 'ipv6')
+        # invalid ipv6_src_addr should not matter while l3_protocol is not ipv6
+ STLHltStream(ipv6_src_addr = 'asdfasdfasgasdf')
+        # invalid ipv6_src_addr should raise once l3_protocol is ipv6
+ with self.assertRaises(Exception):
+ STLHltStream(l3_protocol = 'ipv6', ipv6_src_addr = 'asdfasdfasgasdf')
+ test_stream = STLHltStream(name = 'test_ipv6_basic', l3_protocol = 'ipv6', length_mode = 'fixed', l3_length = 150, )
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_ipv6_basic
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABht1gAAAAAG4GQP6AAAAAAAAAAAAAAAAAABL+gAAAAAAAAAAAAAAAAAAiBAAAUAAAAAEAAAABUAAP5ctLAAAhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISE=
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions: []
+ split_by_var: ''
+'''
+
+ # Eth/IPv6/UDP, VM on ipv6 fields
+ def test_ipv6_src_dst_ranges(self):
+ test_stream = STLHltStream(name = 'test_ipv6_src_dst_ranges', l3_protocol = 'ipv6', l3_length = 150, l4_protocol = 'udp',
+ ipv6_src_addr = '1111:2222:3333:4444:5555:6666:7777:8888',
+ ipv6_dst_addr = '1111:1111:1111:1111:1111:1111:1111:1111',
+ ipv6_src_mode = 'increment', ipv6_src_step = 5, ipv6_src_count = 10,
+ ipv6_dst_mode = 'decrement', ipv6_dst_step = '1111:1111:1111:1111:1111:1111:0000:0011', ipv6_dst_count = 150,
+ )
+ self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+ self.golden_yaml = '''
+- name: test_ipv6_src_dst_ranges
+ stream:
+ action_count: 0
+ enabled: true
+ flags: 3
+ isg: 0.0
+ mode:
+ percentage: 10.0
+ type: continuous
+ packet:
+ binary: AAAAAAAAAAABAAABht1gAAAAAG4RQBERIiIzM0REVVVmZnd3iIgRERERERERERERERERERERBAAAUABucjohISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISE=
+ meta: ''
+ flow_stats:
+ enabled: false
+ self_start: true
+ vm:
+ instructions:
+ - init_value: 0
+ max_value: 45
+ min_value: 0
+ name: inc_4_9_5
+ op: inc
+ size: 4
+ step: 5
+ type: flow_var
+ - add_value: 2004322440
+ is_big_endian: true
+ name: inc_4_9_5
+ pkt_offset: 34
+ type: write_flow_var
+ - init_value: 2533
+ max_value: 2533
+ min_value: 0
+ name: dec_4_149_17
+ op: dec
+ size: 4
+ step: 17
+ type: flow_var
+ - add_value: 286328620
+ is_big_endian: true
+ name: dec_4_149_17
+ pkt_offset: 50
+ type: write_flow_var
+ split_by_var: dec_4_149_17
+'''
+
+
+
+
+
+ def yaml_save_location(self):
+        #return os.devnull
+        # for debug/development, comment out the line above
+ return '/tmp/%s.yaml' % self._testMethodName
+
+
diff --git a/scripts/automation/regression/functional_tests/misc_methods_test.py b/scripts/automation/regression/functional_tests/misc_methods_test.py
new file mode 100755
index 00000000..096f86d8
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/misc_methods_test.py
@@ -0,0 +1,61 @@
+#!/router/bin/python
+
+import functional_general_test
+import misc_methods
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+
+
+class MiscMethods_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.ipv4_gen = misc_methods.get_network_addr()
+ self.ipv6_gen = misc_methods.get_network_addr(ip_type = 'ipv6')
+ pass
+
+ def test_ipv4_gen(self):
+ for i in range(1, 255):
+ assert_equal( next(self.ipv4_gen), [".".join( map(str, [1, 1, i, 0])), '255.255.255.0'] )
+
+ def test_ipv6_gen(self):
+ tmp_ipv6_addr = ['2001', 'DB8', 0, '2222', 0, 0, 0, 0]
+ for i in range(0, 255):
+ tmp_ipv6_addr[2] = hex(i)[2:]
+ assert_equal( next(self.ipv6_gen), ":".join( map(str, tmp_ipv6_addr)) )
+
+ def test_get_ipv4_client_addr(self):
+ tmp_ipv4_addr = next(self.ipv4_gen)[0]
+ assert_equal ( misc_methods.get_single_net_client_addr(tmp_ipv4_addr), '1.1.1.1')
+ assert_raises (ValueError, misc_methods.get_single_net_client_addr, tmp_ipv4_addr, {'3' : 255} )
+
+ def test_get_ipv6_client_addr(self):
+ tmp_ipv6_addr = next(self.ipv6_gen)
+ assert_equal ( misc_methods.get_single_net_client_addr(tmp_ipv6_addr, {'7' : 1}, ip_type = 'ipv6'), '2001:DB8:0:2222:0:0:0:1')
+ assert_equal ( misc_methods.get_single_net_client_addr(tmp_ipv6_addr, {'7' : 2}, ip_type = 'ipv6'), '2001:DB8:0:2222:0:0:0:2')
+ assert_raises (ValueError, misc_methods.get_single_net_client_addr, tmp_ipv6_addr, {'7' : 70000} )
+
+
+ @raises(ValueError)
+ def test_ipv4_client_addr_exception(self):
+ tmp_ipv4_addr = next(self.ipv4_gen)[0]
+ misc_methods.get_single_net_client_addr(tmp_ipv4_addr, {'4' : 1})
+
+ @raises(ValueError)
+ def test_ipv6_client_addr_exception(self):
+ tmp_ipv6_addr = next(self.ipv6_gen)
+ misc_methods.get_single_net_client_addr(tmp_ipv6_addr, {'8' : 1}, ip_type = 'ipv6')
+
+ @raises(StopIteration)
+ def test_gen_ipv4_to_limit (self):
+ while(True):
+ next(self.ipv4_gen)
+
+ @raises(StopIteration)
+ def test_gen_ipv6_to_limit (self):
+ while(True):
+ next(self.ipv6_gen)
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/pkt_bld_general_test.py b/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
new file mode 100755
index 00000000..9a1b708a
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
@@ -0,0 +1,28 @@
+#!/router/bin/python
+
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+import sys
+import outer_packages
+
+
+class CGeneralPktBld_Test(object):
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def print_packet(pkt_obj):
+ print("\nGenerated packet:\n{}".format(repr(pkt_obj)))
+
+
+ def setUp(self):
+ pass
+
+
+ def tearDown(self):
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py b/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
new file mode 100755
index 00000000..0be21280
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
@@ -0,0 +1,60 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CCommandCache_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.cache = CCommandCache()
+ self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/1')
+ self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/2')
+ self.cache.add('conf', "arp 1.1.1.1 0000.0001.0000 arpa")
+ self.cache.add('conf', "arp 1.1.2.1 0000.0002.0000 arpa")
+ self.cache.add('exec', "show ip nbar protocol-discovery stats packet-count")
+
+ def test_add(self):
+ assert_equal(self.cache.cache['IF'],
+ {'GigabitEthernet0/0/1' : ['ip nbar protocol-discovery'],
+ 'GigabitEthernet0/0/2' : ['ip nbar protocol-discovery']
+ })
+ assert_equal(self.cache.cache['CONF'],
+ ["arp 1.1.1.1 0000.0001.0000 arpa",
+ "arp 1.1.2.1 0000.0002.0000 arpa"]
+ )
+ assert_equal(self.cache.cache['EXEC'],
+ ["show ip nbar protocol-discovery stats packet-count"])
+
+ def test_dump_config (self):
+ import sys
+ from io import StringIO, BytesIO
+ saved_stdout = sys.stdout
+ try:
+ out = BytesIO() if sys.version_info < (3,0) else StringIO()
+ sys.stdout = out
+ self.cache.dump_config()
+ output = out.getvalue().strip()
+ assert_equal(output,
+ "configure terminal\ninterface GigabitEthernet0/0/1\nip nbar protocol-discovery\ninterface GigabitEthernet0/0/2\nip nbar protocol-discovery\nexit\narp 1.1.1.1 0000.0001.0000 arpa\narp 1.1.2.1 0000.0002.0000 arpa\nexit\nshow ip nbar protocol-discovery stats packet-count"
+ )
+ finally:
+ sys.stdout = saved_stdout
+
+ def test_get_config_list (self):
+ assert_equal(self.cache.get_config_list(),
+ ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"]
+ )
+
+ def test_clear_cache (self):
+ self.cache.clear_cache()
+ assert_equal(self.cache.cache,
+ {"IF" : {},
+ "CONF" : [],
+ "EXEC" : []}
+ )
+
+ def tearDown(self):
+ self.cache.clear_cache()
diff --git a/scripts/automation/regression/functional_tests/platform_cmd_link_test.py b/scripts/automation/regression/functional_tests/platform_cmd_link_test.py
new file mode 100755
index 00000000..7a31815b
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_cmd_link_test.py
@@ -0,0 +1,62 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CCommandLink_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.cache = CCommandCache()
+ self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/1')
+ self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/2')
+ self.cache.add('conf', "arp 1.1.1.1 0000.0001.0000 arpa")
+ self.cache.add('conf', "arp 1.1.2.1 0000.0002.0000 arpa")
+ self.cache.add('exec', "show ip nbar protocol-discovery stats packet-count")
+ self.com_link = CCommandLink()
+
+ def test_transmit(self):
+        # placeholder: test future implementation of the platform physical link here
+ pass
+
+ def test_run_cached_command (self):
+ self.com_link.run_command([self.cache])
+
+ assert_equal (self.com_link.get_history(),
+ ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"]
+ )
+
+ self.com_link.clear_history()
+ self.com_link.run_single_command(self.cache)
+ assert_equal (self.com_link.get_history(),
+ ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"]
+ )
+
+ def test_run_single_command(self):
+ self.com_link.run_single_command("show ip nbar protocol-discovery stats packet-count")
+ assert_equal (self.com_link.get_history(),
+ ["show ip nbar protocol-discovery stats packet-count"]
+ )
+
+ def test_run_mixed_commands (self):
+ self.com_link.run_single_command("show ip nbar protocol-discovery stats packet-count")
+ self.com_link.run_command([self.cache])
+ self.com_link.run_command(["show ip interface brief"])
+
+ assert_equal (self.com_link.get_history(),
+ ["show ip nbar protocol-discovery stats packet-count",
+ "configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count",
+ "show ip interface brief"]
+ )
+
+ def test_clear_history (self):
+ self.com_link.run_command(["show ip interface brief"])
+ self.com_link.clear_history()
+ assert_equal (self.com_link.get_history(), [])
+
+ def tearDown(self):
+ self.cache.clear_cache()
+
+
diff --git a/scripts/automation/regression/functional_tests/platform_device_cfg_test.py b/scripts/automation/regression/functional_tests/platform_device_cfg_test.py
new file mode 100755
index 00000000..c60635fe
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_device_cfg_test.py
@@ -0,0 +1,20 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CDeviceCfg_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.dev_cfg = CDeviceCfg('./functional_tests/config.yaml')
+
+ def test_get_interfaces_cfg(self):
+ assert_equal (self.dev_cfg.get_interfaces_cfg(),
+ [{'client': {'src_mac_addr': '0000.0001.0000', 'name': 'GigabitEthernet0/0/1', 'dest_mac_addr': '0000.1000.0000'}, 'vrf_name': None, 'server': {'src_mac_addr': '0000.0002.0000', 'name': 'GigabitEthernet0/0/2', 'dest_mac_addr': '0000.2000.0000'}}, {'client': {'src_mac_addr': '0000.0003.0000', 'name': 'GigabitEthernet0/0/3', 'dest_mac_addr': '0000.3000.0000'}, 'vrf_name': 'dup', 'server': {'src_mac_addr': '0000.0004.0000', 'name': 'GigabitEthernet0/0/4', 'dest_mac_addr': '0000.4000.0000'}}]
+ )
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py b/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
new file mode 100755
index 00000000..a97a3305
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CDualIfObj_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.if_1 = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', 0, IFType.Client)
+ self.if_2 = CIfObj('gig0/0/2', '1.1.2.1', '2001:DB8:1:2222:0:0:0:1', '0000.0002.0000', '0000.0002.0000', 0, IFType.Server)
+ self.if_3 = CIfObj('gig0/0/3', '1.1.3.1', '2001:DB8:2:2222:0:0:0:1', '0000.0003.0000', '0000.0003.0000', 0, IFType.Client)
+ self.if_4 = CIfObj('gig0/0/4', '1.1.4.1', '2001:DB8:3:2222:0:0:0:1', '0000.0004.0000', '0000.0004.0000', 0, IFType.Server)
+ self.dual_1 = CDualIfObj(None, self.if_1, self.if_2)
+ self.dual_2 = CDualIfObj('dup', self.if_3, self.if_4)
+
+ def test_id_allocation(self):
+ assert (self.dual_1.get_id() < self.dual_2.get_id() < CDualIfObj._obj_id)
+
+ def test_get_vrf_name (self):
+ assert_equal ( self.dual_1.get_vrf_name() , None )
+ assert_equal ( self.dual_2.get_vrf_name() , 'dup' )
+
+ def test_is_duplicated (self):
+ assert_equal ( self.dual_1.is_duplicated() , False )
+ assert_equal ( self.dual_2.is_duplicated() , True )
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/platform_if_manager_test.py b/scripts/automation/regression/functional_tests/platform_if_manager_test.py
new file mode 100755
index 00000000..72015f55
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_if_manager_test.py
@@ -0,0 +1,40 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CIfManager_Test(functional_general_test.CGeneralFunctional_Test):
+
+ def setUp(self):
+ self.dev_cfg = CDeviceCfg('./functional_tests/config.yaml')
+ self.if_mng = CIfManager()
+
+ # main testing method to check the entire class
+ def test_load_config (self):
+ self.if_mng.load_config(self.dev_cfg)
+
+        # check the number of items in each query
+ assert_equal( len(self.if_mng.get_if_list()), 4 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client)), 2 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client, is_duplicated = True)), 1 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client, is_duplicated = False)), 1 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server)), 2 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server, is_duplicated = True)), 1 )
+ assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server, is_duplicated = False)), 1 )
+ assert_equal( len(self.if_mng.get_duplicated_if()), 2 )
+ assert_equal( len(self.if_mng.get_dual_if_list()), 2 )
+
+ # check the classification with intf name
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list()) ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2','GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = True)) ), ['GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = False)) ), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2'] )
+ assert_equal( list(map(CIfObj.get_name, self.if_mng.get_duplicated_if()) ), ['GigabitEthernet0/0/3', 'GigabitEthernet0/0/4'] )
+
+ # check the classification with vrf name
+ assert_equal( list(map(CDualIfObj.get_vrf_name, self.if_mng.get_dual_if_list()) ), [None, 'dup'] )
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/platform_if_obj_test.py b/scripts/automation/regression/functional_tests/platform_if_obj_test.py
new file mode 100755
index 00000000..2412d3cc
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/platform_if_obj_test.py
@@ -0,0 +1,49 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
+class CIfObj_Test(functional_general_test.CGeneralFunctional_Test):
+ test_idx = 1
+
+ def setUp(self):
+ self.if_1 = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', 0, IFType.Client)
+ self.if_2 = CIfObj('TenGig0/0/0', '1.1.2.1', '2001:DB8:1:2222:0:0:0:1', '0000.0002.0000', '0000.0002.0000', 0, IFType.Server)
+ CIfObj_Test.test_idx += 1
+
+ def test_id_allocation(self):
+ assert (self.if_1.get_id() < self.if_2.get_id() < CIfObj._obj_id)
+
+ def test_isClient(self):
+ assert_equal (self.if_1.is_client(), True)
+
+ def test_isServer(self):
+ assert_equal (self.if_2.is_server(), True)
+
+ def test_get_name (self):
+ assert_equal (self.if_1.get_name(), 'gig0/0/1')
+ assert_equal (self.if_2.get_name(), 'TenGig0/0/0')
+
+ def test_get_src_mac_addr (self):
+ assert_equal (self.if_1.get_src_mac_addr(), '0000.0001.0000')
+
+ def test_get_dest_mac (self):
+ assert_equal (self.if_2.get_dest_mac(), '0000.0002.0000')
+
+ def test_get_ipv4_addr (self):
+ assert_equal (self.if_1.get_ipv4_addr(), '1.1.1.1' )
+ assert_equal (self.if_2.get_ipv4_addr(), '1.1.2.1' )
+
+ def test_get_ipv6_addr (self):
+ assert_equal (self.if_1.get_ipv6_addr(), '2001:DB8:0:2222:0:0:0:1' )
+ assert_equal (self.if_2.get_ipv6_addr(), '2001:DB8:1:2222:0:0:0:1' )
+
+ def test_get_type (self):
+ assert_equal (self.if_1.get_if_type(), IFType.Client)
+ assert_equal (self.if_2.get_if_type(), IFType.Server)
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
new file mode 100644
index 00000000..5d34e5df
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
@@ -0,0 +1,369 @@
+#!/router/bin/python
+
+import pkt_bld_general_test
+
+#HACK FIX ME START
+import sys
+import os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CURRENT_PATH, '../../../trex_control_plane/stl/'))
+#HACK FIX ME END
+from trex_stl_lib.trex_stl_packet_builder_scapy import *
+
+from scapy.all import *
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+import os
+import random
+import pprint
+
class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
    """Sanity tests for the Scapy-based STL packet builder.

    Each test builds a Scapy packet (optionally attaching a VM program),
    compiles it through STLPktBuilder, and asserts on the resulting VM
    instruction dicts or on byte offsets of Scapy fields.  The expected
    offsets are tied to the exact wire format of the packets built here.
    """

    def setUp(self):
        pass

    def test_simple_vm1(self):
        """Inc flow-var written to IP.src plus IPv4 checksum fix -> exact VM dict."""
        raw1 = STLScVmRaw( [ STLVmFlowVar(name="a",min_value="16.0.0.1",max_value="16.0.0.10",init_value="16.0.0.1",size=4,op="inc"),
                             STLVmWrFlowVar(fv_name="a",pkt_offset= "IP.src"),
                            STLVmFixIpv4(offset = "IP")]
                           );

        pkt_builder = STLPktBuilder();

        py='5'*128
        pkt=Ether()/ \
            IP(src="16.0.0.1",dst="48.0.0.1")/ \
            UDP(dport=12,sport=1025)/IP()/py

        # set packet
        pkt_builder.set_packet(pkt);
        pkt_builder.add_command ( raw1 )
        pkt_builder.compile();

        pkt_builder.dump_scripts ()

        print(pkt_builder.get_vm_data())

        # IPv4 addresses are encoded as 32-bit ints: 16.0.0.1 == 268435457.
        # pkt_offset 26 == Ether(14) + IP.src field offset (12).
        assert_equal( pkt_builder.get_vm_data(), {'split_by_var': '', 'instructions': [{'name': 'a', 'max_value': 268435466, 'min_value': 268435457, 'init_value': 268435457, 'size': 4, 'type': 'flow_var', 'step':1,'op': 'inc'}, {'is_big_endian': True, 'pkt_offset': 26, 'type': 'write_flow_var', 'name': 'a', 'add_value': 0}, {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}]} )



    def test_simple_no_vm1(self):
        """A packet compiled without any VM commands yields an empty program."""

        pkt_builder = STLPktBuilder();

        py='5'*128
        pkt=Ether()/ \
            IP(src="16.0.0.1",dst="48.0.0.1")/ \
            UDP(dport=12,sport=1025)/IP()/py

        # set packet
        pkt_builder.set_packet(pkt);

        pkt_builder.compile();

        pkt_builder.dump_scripts ()

        assert_equal( pkt_builder.get_vm_data(),
                { 'instructions': [ ],
                  'split_by_var': ''}
                     )


    def test_simple_mac_default(self):
        """Default-MAC detection must flag only the side the user did not set."""

        pkt = Ether()/IP()/UDP()


        pkt_builder = STLPktBuilder(pkt = pkt);

        assert_equal( pkt_builder.is_default_src_mac () ,True)
        assert_equal( pkt_builder.is_default_dst_mac () ,True)

        pkt = Ether(src="00:00:00:00:00:01")/IP()/UDP()

        pkt_builder = STLPktBuilder(pkt = pkt);

        assert_equal( pkt_builder.is_default_src_mac (), False)
        assert_equal( pkt_builder.is_default_dst_mac (), True)

        pkt = Ether(dst="00:00:00:00:00:01")/IP()/UDP()

        pkt_builder = STLPktBuilder(pkt = pkt);

        assert_equal( pkt_builder.is_default_src_mac (),True)
        assert_equal( pkt_builder.is_default_dst_mac (),False)




    def test_simple_teredo(self):
        """IPv6-in-UDP (Teredo) tunnel: inner IPv6 field offsets are resolved."""

        pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=3797,sport=3544)/IPv6(src="2001:0:4137:9350:8000:f12a:b9c8:2815",dst="2001:4860:0:2001::68")/UDP(dport=12,sport=1025)/ICMPv6Unknown()

        pkt.build();
        p_utl=CTRexScapyPktUtl(pkt);

        # (offset, size): IPv6 addresses are 16 bytes each.
        assert_equal( p_utl.get_field_offet_by_str("IPv6.src"), (50,16) )
        assert_equal( p_utl.get_field_offet_by_str("IPv6.dst"), (66,16) )




    def test_simple_scapy_vlan(self):
        """Double-tagged (QinQ) packet: per-index 802.1Q layer/field lookup."""

        py='5'*(9)
        p1=Ether(src="00:00:00:01:00:00",dst="00:00:00:01:00:00")/ \
            Dot1Q(vlan=12)/ \
            Dot1Q(vlan=17)/ \
            IP(src="10.0.0.10",dst="48.0.0.1")/ \
            UDP(dport=12,sport=1025)/py

        p1.build();
        p1.dump_layers_offset()
        p1.show2();
        hexdump(p1);
        #wrpcap("ipv4_udp_9k.pcap", p1);

        p_utl=CTRexScapyPktUtl(p1);

        assert_equal(p_utl.get_pkt_layers(),"Ethernet:802.1Q:802.1Q:IP:UDP:Raw")
        # "802|1Q" escapes the '.' in the layer name; ":1" selects the 2nd tag.
        assert_equal(p_utl.layer_offset("802.1Q",0),14);
        assert_equal(p_utl.layer_offset("802.1Q",1),18);
        assert_equal(p_utl.get_field_offet_by_str("802|1Q.vlan"),(14,0));
        assert_equal(p_utl.get_field_offet_by_str("802|1Q:1.vlan"),(18,0));
        assert_equal(p_utl.get_field_offet_by_str("IP.src"),(34,4));

    def test_simple_scapy_128_udp(self):
        """
        build 128 byte packet with 0x35 as pyld
        """


        pkt_size =128
        p1=Ether(src="00:00:00:01:00:00",dst="00:00:00:01:00:00")/ \
            IP(src="16.0.0.1",dst="48.0.0.1")/ \
            UDP(dport=12,sport=1025)
        pyld_size=pkt_size-len(p1);

        pkt=p1/('5'*(pyld_size))

        pkt.show2();
        hexdump(pkt);
        assert_equal(len(pkt),128)

    def test_simple_scapy_9k_ip_len(self):
        """
        build 9k ipv4 len packet
        """


        ip_pkt_size =9*1024
        p_l2=Ether(src="00:00:00:01:00:00",dst="00:00:00:01:00:00");
        p_l3= IP(src="16.0.0.1",dst="48.0.0.1")/ \
            UDP(dport=12,sport=1025)
        pyld_size = ip_pkt_size-len(p_l3);

        pkt=p_l2/p_l3/('\x55'*(pyld_size))

        #pkt.show2();
        #hexdump(pkt);
        # total frame length = 9K of L3 payload + 14-byte Ethernet header
        assert_equal(len(pkt),9*1024+14)

    def test_simple_scapy_ipv6_1(self):
        """
        build ipv6 packet
        """

        print("start ")
        py='\x55'*(64)

        p=Ether()/IPv6()/UDP(dport=12,sport=1025)/py
        #p.build();
        #p.dump_layers_offset()
        hexdump(p);
        p.show2();

        p_utl=CTRexScapyPktUtl(p);

        # Ether(14) + IPv6 src-address offset (8) == 22, 16-byte field.
        assert_equal(p_utl.get_field_offet_by_str("IPv6.src"),(22,16));


    def test_simple_vm2(self):
        """Write a flow-var into a VLAN tag via manual offset fixup."""
        raw1 = STLScVmRaw( [ STLVmFlowVar(name="my_valn",min_value=0,max_value=10,init_value=2,size=1,op="inc"),
                             STLVmWrFlowVar (fv_name="my_valn",pkt_offset= "802|1Q.vlan" ,offset_fixup=3) # fix the offset as valn is bitfield and not supported right now
                           ]
                          );

        pkt_builder = STLPktBuilder();

        py='5'*128
        pkt=Ether()/ \
            Dot1Q(vlan=12)/ \
            IP(src="16.0.0.1",dst="48.0.0.1")/ \
            UDP(dport=12,sport=1025)/IP()/py

        # set packet
        pkt_builder.set_packet(pkt);
        pkt_builder.add_command ( raw1 )
        pkt_builder.compile();


        d= pkt_builder.get_vm_data()
        # 14 (802.1Q layer offset) + 3 (offset_fixup) == 17
        assert_equal(d['instructions'][1]['pkt_offset'],17)

    def test_simple_vm3(self):
        """Referencing an undefined flow-var must raise errcode -11."""
        try:
            raw1 = STLScVmRaw( [ STLVmFlowVar(name="my_valn",min_value=0,max_value=10,init_value=2,size=1,op="inc"),
                                 STLVmWrFlowVar(fv_name="my_valn_err",pkt_offset= "802|1Q.vlan" ,offset_fixup=3) # fix the offset as valn is bitfield and not supported right now
                               ]
                              );

            pkt_builder = STLPktBuilder();

            py='5'*128
            pkt=Ether()/ \
                Dot1Q(vlan=12)/ \
                IP(src="16.0.0.1",dst="48.0.0.1")/ \
                UDP(dport=12,sport=1025)/IP()/py

            # set packet
            pkt_builder.set_packet(pkt);
            pkt_builder.add_command ( raw1 )
            pkt_builder.compile();


            d= pkt_builder.get_vm_data()
        except CTRexPacketBuildException as e:
            error=str(e)
            # the error string must start with the expected errcode marker
            assert_equal(error.find("[errcode:-11]"),0);

    def test_simple_tuple_gen(self):
        """Tuple generator feeding IP.src and UDP.sport: check write offsets."""
        vm = STLScVmRaw( [ STLVmTupleGen(name="tuple"), # define tuple gen
                           STLVmWrFlowVar(fv_name="tuple.ip", pkt_offset= "IP.src" ), # write ip to packet IP.src
                           STLVmFixIpv4(offset = "IP"), # fix checksum
                           STLVmWrFlowVar (fv_name="tuple.port", pkt_offset= "UDP.sport" ) #write udp.port
                         ]
                        );
        pkt_builder = STLPktBuilder();

        py='5'*128
        pkt=Ether()/ \
            Dot1Q(vlan=12)/ \
            IP(src="16.0.0.1",dst="48.0.0.1")/ \
            UDP(dport=12,sport=1025)/IP()/py

        # set packet
        pkt_builder.set_packet(pkt);
        pkt_builder.add_command ( vm )
        pkt_builder.compile();
        d= pkt_builder.get_vm_data()
        pkt_builder.dump_vm_data_as_yaml()

        # offsets include the 4-byte 802.1Q tag: IP.src at 30, UDP.sport at 38
        assert_equal(d['instructions'][1]['pkt_offset'],30)
        assert_equal(d['instructions'][3]['pkt_offset'],38)

    def test_simple_random_pkt_size(self):
        """Random-trim VM: L3/L4 length fields get negative header fixups."""

        ip_pkt_size = 9*1024
        p_l2 = Ether();
        p_l3 = IP(src="16.0.0.1",dst="48.0.0.1")
        p_l4 = UDP(dport=12,sport=1025)
        pyld_size = ip_pkt_size-len(p_l3/p_l4);

        pkt =p_l2/p_l3/p_l4/('\x55'*(pyld_size))

        # IP.len / UDP.len exclude the preceding headers, hence the
        # negative corrections below.
        l3_len_fix =-(len(p_l2));
        l4_len_fix =-(len(p_l2/p_l3));

        vm = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", min_value=64, max_value=len(pkt), size=2, op="random"),
                           STLVmTrimPktSize("fv_rand"), # total packet size
                           STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "IP.len", add_val=l3_len_fix),
                           STLVmFixIpv4(offset = "IP"), # fix checksum
                           STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "UDP.len", add_val=l4_len_fix)
                         ]
                       )
        pkt_builder = STLPktBuilder();

        # set packet
        pkt_builder.set_packet(pkt);
        pkt_builder.add_command ( vm )
        pkt_builder.compile();
        d= pkt_builder.get_vm_data()
        pkt_builder.dump_vm_data_as_yaml()

        # 9216 (payload target) + 14-byte Ether == 9230
        assert_equal(d['instructions'][0]['max_value'],9230)
        assert_equal(d['instructions'][2]['pkt_offset'],16)
        assert_equal(d['instructions'][4]['pkt_offset'],38)

    def test_simple_pkt_loader(self):
        """Smoke-test: read and dump the first record of a golden pcap."""
        p=RawPcapReader("functional_tests/golden/basic_imix_golden.cap")
        print("")
        for pkt in p:
            print(pkt[1])
            # NOTE(review): pkt[0] is bytes; under Python 3 str(bytes) yields
            # its repr ("b'...'"), so this hexdumps the repr text, not the raw
            # packet bytes - confirm whether hexdump(pkt[0]) was intended.
            print(hexdump(str(pkt[0])))
            break;

    def test_simple_pkt_loader1(self):
        """Load a 590-byte pcap as raw bytes and spot-check known offsets."""

        pkt_builder = STLPktBuilder(pkt = "functional_tests/golden/udp_590.cap", build_raw = False);
        print("")
        pkt_builder.dump_as_hex()
        r = pkt_builder.pkt_raw
        # safe_ord() bridges py2 (str bytes) and py3 (int bytes)
        assert_equal(safe_ord(r[1]),0x50)
        assert_equal(safe_ord(r[0]),0x00)
        assert_equal(safe_ord(r[0x240]),0x16)
        assert_equal(safe_ord(r[0x24d]),0x79)
        assert_equal(len(r),590)

        print(len(r))

    def test_simple_pkt_loader2(self):
        """Loading a pcap must produce the expected scapy layer chain."""

        pkt_builder = STLPktBuilder(pkt = "functional_tests/golden/basic_imix_golden.cap");
        assert_equal(pkt_builder.pkt_layers_desc (), "Ethernet:IP:UDP:Raw");

    def test_simple_pkt_loader3(self):
        """Round-trip a built packet through bytes() and re-parse with Ether()."""

        #pkt_builder = STLPktBuilder(pkt = "stl/golden/basic_imix_golden.cap");
        #r = pkt_builder.pkt_raw
        #print ""
        #hexdump(str(r))


        #print pkt_builder.pkt_layers_desc ()


        #pkt_builder.set_packet(pkt);

        py='\x55'*(64)

        p=Ether()/IP()/UDP(dport=12,sport=1025)/py
        pkt_str = bytes(p);
        print("")
        hexdump(pkt_str);
        scapy_pkt = Ether(pkt_str);
        scapy_pkt.show2();

    def tearDown(self):
        pass
+
class CTRexPktBuilderScapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
    """Placeholder suite for future Scapy packet-builder tests.

    The earlier dpkt-based fixture was retired; no fixtures are set up yet.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass
+
+
# This module is intended to be collected by nose; direct execution is a no-op.
if __name__ == "__main__":
    pass
+
diff --git a/scripts/automation/regression/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
new file mode 100644
index 00000000..bc5bc4d5
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/stl_basic_tests.py
@@ -0,0 +1,367 @@
+
+import outer_packages
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import nottest
+from nose.plugins.attrib import attr
+from trex import CTRexScenario
+from trex_stl_lib import trex_stl_sim
+from trex_stl_lib.trex_stl_streams import STLProfile
+from trex_stl_lib.trex_stl_packet_builder_scapy import RawPcapReader, RawPcapWriter, Ether
+from trex_stl_lib.utils.text_opts import *
+
+import sys
+
+if sys.version_info > (3,0):
+ from io import StringIO
+else:
+ from cStringIO import StringIO
+
+import os
+import subprocess
+import shlex
+from threading import Thread
+from collections import defaultdict
+
@attr('run_on_trex')
class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
    """Runs the STL simulator (stl-sim / bp-sim-64-debug) over the bundled
    profiles and compares the produced pcaps against golden captures.

    setUp() chdirs into the scripts directory; tearDown() restores the
    original working directory, so all profile paths below are relative
    to the scripts tree.
    """

    def setUp (self):
        """Resolve tool/profile paths, verify they exist, and chdir to scripts."""
        self.test_path = os.path.abspath(os.getcwd())
        self.scripts_path = CTRexScenario.scripts_path

        self.verify_exists(os.path.join(self.scripts_path, "bp-sim-64-debug"))

        self.stl_sim = os.path.join(self.scripts_path, "stl-sim")

        self.verify_exists(self.stl_sim)

        self.profiles_path = os.path.join(self.scripts_path, "stl/yaml/")

        self.profiles = {}
        self.profiles['imix_3pkt'] = os.path.join(self.profiles_path, "imix_3pkt.yaml")
        self.profiles['imix_3pkt_vm'] = os.path.join(self.profiles_path, "imix_3pkt_vm.yaml")
        self.profiles['random_size_9k'] = os.path.join(self.profiles_path, "../udp_rand_len_9k.py")
        self.profiles['imix_tuple_gen'] = os.path.join(self.profiles_path, "imix_1pkt_tuple_gen.yaml")

        for k, v in self.profiles.items():
            self.verify_exists(v)

        # subset of profiles exercised under valgrind (no output comparison)
        self.valgrind_profiles = [ self.profiles['imix_3pkt_vm'],
                                   self.profiles['random_size_9k'],
                                   self.profiles['imix_tuple_gen'] ]

        self.golden_path = os.path.join(self.test_path,"stl/golden/")

        os.chdir(self.scripts_path)


    def tearDown (self):
        # restore the cwd changed by setUp()
        os.chdir(self.test_path)



    def get_golden (self, name):
        """Return the absolute path of golden capture *name*, verifying it exists."""
        golden = os.path.join(self.golden_path, name)
        self.verify_exists(golden)
        return golden


    def verify_exists (self, name):
        """Raise a descriptive Exception if path *name* does not exist."""
        if not os.path.exists(name):
            raise Exception("cannot find '{0}'".format(name))


    def scapy_pkt_show_to_str (self, scapy_pkt):
        """Capture scapy's show() output (which prints to stdout) as a string."""
        capture = StringIO()
        save_stdout = sys.stdout
        sys.stdout = capture
        scapy_pkt.show()
        sys.stdout = save_stdout
        return capture.getvalue()


    def compare_caps (self, output, golden, max_diff_sec = 0.01):
        """Compare two pcap files packet-by-packet (timestamps and raw bytes).

        Packets sharing the same timestamp are sorted within their bucket so
        that per-timestamp ordering differences do not cause false failures.
        Raises AssertionError with a field-level diff on mismatch.
        NOTE(review): max_diff_sec is never used - the timestamp tolerance is
        hard-coded to 5e-6 below; confirm which value is intended.
        """
        pkts1 = []
        pkts2 = []
        pkts_ts_buckets = defaultdict(list)

        for pkt in RawPcapReader(output):
            ts = pkt[1][0] * 1e6 + pkt[1][1]
            pkts_ts_buckets[ts].append(pkt)
        # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
        #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
        for ts in sorted(pkts_ts_buckets.keys()):
            pkts1.extend(sorted(pkts_ts_buckets[ts]))
        pkts_ts_buckets.clear()

        for pkt in RawPcapReader(golden):
            ts = pkt[1][0] * 1e6 + pkt[1][1]
            pkts_ts_buckets[ts].append(pkt)
        # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
        #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
        for ts in sorted(pkts_ts_buckets.keys()):
            pkts2.extend(sorted(pkts_ts_buckets[ts]))

        assert_equal(len(pkts1), len(pkts2), 'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))

        # NOTE(review): range(1, len(pkts1)) has len-1 elements, so zip() stops
        # one short and the final packet pair is never compared - confirm
        # whether this off-by-one is intentional.
        for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1))):
            ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
            ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)

            if abs(ts1-ts2) > 0.000005: # 5 nsec
                raise AssertionError("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'".format(output, golden, i, ts1, ts2))

            if pkt1[0] != pkt2[0]:
                errmsg = "RAW error: output file '{0}', differs from golden '{1}' in cap #{2}".format(output, golden, i)
                print(errmsg)

                # show only the scapy-decoded lines that differ, both directions
                print(format_text("\ndifferent fields for packet #{0}:".format(i), 'underline'))

                scapy_pkt1_info = self.scapy_pkt_show_to_str(Ether(pkt1[0])).split('\n')
                scapy_pkt2_info = self.scapy_pkt_show_to_str(Ether(pkt2[0])).split('\n')

                print(format_text("\nGot:\n", 'bold', 'underline'))
                for line, ref in zip(scapy_pkt1_info, scapy_pkt2_info):
                    if line != ref:
                        print(format_text(line, 'bold'))

                print(format_text("\nExpected:\n", 'bold', 'underline'))
                for line, ref in zip(scapy_pkt2_info, scapy_pkt1_info):
                    if line != ref:
                        print(format_text(line, 'bold'))

                print("\n")
                raise AssertionError(errmsg)


    def run_sim (self, yaml, output, options = "", silent = False, obj = None, tunables = None):
        """Invoke the STL simulator on profile *yaml*; return True on rc == 0.

        output   : pcap path or None to skip writing a capture.
        obj      : optional dict that receives {'rc': bool} (used when the
                   simulation runs in a worker thread and the return value
                   cannot be observed directly).
        tunables : dict appended as '-t k=v ...' to the command line.
        """
        if output:
            user_cmd = "-f {0} -o {1} {2} -p {3}".format(yaml, output, options, self.scripts_path)
        else:
            user_cmd = "-f {0} {1} -p {2}".format(yaml, options, self.scripts_path)

        if silent:
            user_cmd += " --silent"

        if tunables:
            user_cmd += " -t"
            for k, v in tunables.items():
                user_cmd += " {0}={1}".format(k, v)

        rc = trex_stl_sim.main(args = shlex.split(user_cmd))
        if obj:
            obj['rc'] = (rc == 0)

        return (rc == 0)



    def run_py_profile_path (self,
                             profile,
                             options,
                             silent = False,
                             do_no_remove = False,
                             compare = True,
                             test_generated = True,
                             do_no_remove_generated = False,
                             tunables = None):
        """Simulate *profile*, optionally compare to its golden pcap, and
        (when test_generated) round-trip the profile through generated code.

        The golden file name is derived from the profile base name under
        'exp/'.  Temporary outputs are removed unless the do_no_remove*
        flags say otherwise.
        """

        print('Testing profile: %s' % profile)
        output_cap = "a.pcap"
        input_file = os.path.join('stl/', profile)
        golden_file = os.path.join('exp',os.path.basename(profile).split('.')[0]+'.pcap');
        if os.path.exists(output_cap):
            os.unlink(output_cap)
        try:
            rc = self.run_sim(yaml = input_file,
                              output = output_cap,
                              options = options,
                              silent = silent,
                              tunables = tunables)
            assert_equal(rc, True, 'Simulation on profile %s failed.' % profile)
            #s='cp '+output_cap+' '+golden_file;
            #print s
            #os.system(s)

            if compare:
                self.compare_caps(output_cap, golden_file)
        finally:
            if not do_no_remove:
                os.unlink(output_cap)

        if test_generated:
            try:
                # dump the profile to generated Python code and re-simulate it,
                # expecting an identical capture
                generated_filename = input_file.replace('.py', '_GENERATED.py').replace('.yaml', '_GENERATED.py')
                if input_file.endswith('.py'):
                    profile = STLProfile.load_py(input_file, **(tunables if tunables else {}))
                elif input_file.endswith('.yaml'):
                    profile = STLProfile.load_yaml(input_file)

                profile.dump_to_code(generated_filename)

                rc = self.run_sim(yaml = generated_filename,
                                  output = output_cap,
                                  options = options,
                                  silent = silent)
                assert_equal(rc, True, 'Simulation on profile %s (generated) failed.' % profile)

                if compare:
                    self.compare_caps(output_cap, golden_file)


            finally:
                if not do_no_remove_generated:
                    os.unlink(generated_filename)
                    # python 3 does not generate PYC under the same dir
                    if os.path.exists(generated_filename + 'c'):
                        os.unlink(generated_filename + 'c')
                if not do_no_remove:
                    os.unlink(output_cap)


    def test_stl_profiles (self):
        """Run every bundled STL profile; entries are
        [profile, sim options, compare-to-golden, (optional) test_generated]."""
        p = [
            ["udp_1pkt_1mac_override.py","-m 1 -l 50",True],
            ["syn_attack.py","-m 1 -l 50",True],
            ["udp_1pkt_1mac.py","-m 1 -l 50",True],
            ["udp_1pkt_mac.py","-m 1 -l 50",True],
            ["udp_1pkt.py","-m 1 -l 50",True],
            ["udp_1pkt_tuple_gen.py","-m 1 -l 50",True],
            ["udp_rand_len_9k.py","-m 1 -l 50",True],
            ["udp_1pkt_mpls.py","-m 1 -l 50",True],
            ["udp_1pkt_mpls_vm.py","-m 1 ",True],
            ["imix.py","-m 1 -l 100",True],
            ["udp_inc_len_9k.py","-m 1 -l 100",True],
            ["udp_1pkt_range_clients.py","-m 1 -l 100",True],
            ["multi_burst_2st_1000pkt.py","-m 1 -l 100",True],
            ["pcap.py", "-m 1", True, False],
            ["pcap_with_vm.py", "-m 1", True, False],
            ["flow_stats.py", "-m 1 -l 1", True],
            ["flow_stats_latency.py", "-m 1 -l 1", True],

            # YAML test
            ["yaml/burst_1000_pkt.yaml","-m 1 -l 100",True],
            ["yaml/burst_1pkt_1burst.yaml","-m 1 -l 100",True],
            ["yaml/burst_1pkt_vm.yaml","-m 1 -l 100",True],
            ["yaml/imix_1pkt.yaml","-m 1 -l 100",True],
            ["yaml/imix_1pkt_2.yaml","-m 1 -l 100",True],
            ["yaml/imix_1pkt_tuple_gen.yaml","-m 1 -l 100",True],
            ["yaml/imix_1pkt_vm.yaml","-m 1 -l 100",True],
            ["udp_1pkt_pcap.py","-m 1 -l 10",True, False],
            ["udp_3pkt_pcap.py","-m 1 -l 10",True, False],
            #["udp_1pkt_simple.py","-m 1 -l 3",True],
            ["udp_1pkt_pcap_relative_path.py","-m 1 -l 3",True, False],
            ["udp_1pkt_tuple_gen_split.py","-m 1 -l 100",True],
            ["udp_1pkt_range_clients_split.py","-m 1 -l 100",True],
            ["udp_1pkt_vxlan.py","-m 1 -l 17",True, False], # can't generate: no VXLAN in Scapy, only in profile
            ["udp_1pkt_ipv6_in_ipv4.py","-m 1 -l 17",True],
            ["yaml/imix_3pkt.yaml","-m 50kpps --limit 20",True],
            ["yaml/imix_3pkt_vm.yaml","-m 50kpps --limit 20",True],
            ["udp_1pkt_simple_mac_dst.py","-m 1 -l 1 ",True],
            ["udp_1pkt_simple_mac_src.py","-m 1 -l 1 ",True],
            ["udp_1pkt_simple_mac_dst_src.py","-m 1 -l 1 ",True],
            ["burst_3st_loop_x_times.py","-m 1 -l 20 ",True],
            ["udp_1pkt_mac_step.py","-m 1 -l 20 ",True],
            ["udp_1pkt_mac_mask1.py","-m 1 -l 20 ",True] ,
            ["udp_1pkt_mac_mask2.py","-m 1 -l 20 ",True],
            ["udp_1pkt_mac_mask3.py","-m 1 -l 20 ",True],
            ["udp_1pkt_simple_test2.py","-m 1 -l 10 ",True, False], # test split of packet with ip option
            ["udp_1pkt_simple_test.py","-m 1 -l 10 ",True, False],
            ["udp_1pkt_mac_mask5.py","-m 1 -l 30 ",True],
            ["udp_1pkt_range_clients_split_garp.py","-m 1 -l 50",True],
            ["udp_1pkt_src_ip_split.py","-m 1 -l 50",True],
            ["udp_1pkt_repeat_random.py","-m 1 -l 50",True],
            ];

        # NOTE(review): p1 is unused - presumably a leftover single-profile
        # shortcut for local debugging.
        p1 = [ ["udp_1pkt_repeat_random.py","-m 1 -l 50",True] ];

        for obj in p:
            try:
                test_generated = obj[3]
            except: # check generated if not said otherwise
                test_generated = True
            self.run_py_profile_path (obj[0],obj[1],compare =obj[2], test_generated = test_generated, do_no_remove=True, do_no_remove_generated = False)


    def test_hlt_profiles (self):
        """Run the HLT profile set; entries are
        (profile, sim options, compare-to-golden, tunables-or-None)."""
        p = (
            ['hlt/hlt_udp_inc_dec_len_9k.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_imix_default.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_imix_4rates.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_david1.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_david2.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_david3.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_david4.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_wentong1.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_wentong2.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_tcp_ranges.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_udp_ports.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_udp_random_ports.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_ip_ranges.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_framesize_vm.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_l3_length_vm.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_vlan_default.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_4vlans.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_vlans_vm.py', '-m 1 -l 20', True, {'random_seed': 1}],
            ['hlt/hlt_ipv6_default.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_ipv6_ranges.py', '-m 1 -l 20', True, None],
            ['hlt/hlt_mac_ranges.py', '-m 1 -l 20', True, None],
            )

        for obj in p:
            self.run_py_profile_path (obj[0], obj[1], compare =obj[2], do_no_remove=True, do_no_remove_generated = False, tunables = obj[3])

    # valgrind tests - this runs in multi thread as it safe (no output)
    def test_valgrind_various_profiles (self):
        """Run selected profiles under valgrind, one thread per profile.

        Results are collected through the obj dicts because a thread's
        target return value is not observable directly.
        """
        print("\n")
        threads = []
        for profile in self.valgrind_profiles:
            print("\n*** VALGRIND: testing profile '{0}' ***\n".format(profile))
            obj = {'t': None, 'rc': None}
            t = Thread(target = self.run_sim,
                       kwargs = {'obj': obj, 'yaml': profile, 'output':None, 'options': "--cores 8 --limit 20 --valgrind", 'silent': True})
            obj['t'] = t

            threads.append(obj)
            t.start()

        for obj in threads:
            obj['t'].join()

        for obj in threads:
            assert_equal(obj['rc'], True)



    def test_multicore_scheduling (self):
        """Verify multicore scheduling correctness for flow vars, tuple
        generators and several stream topologies (--test_multi_core)."""

        # NOTE(review): 'time' is not imported explicitly in this module; it
        # presumably arrives via a star import above - confirm, or add
        # 'import time' at the top of the file.
        seed = time.time()

        # test with simple vars
        print(format_text("\nTesting multiple flow vars for multicore\n", 'underline'))
        rc = self.run_sim('stl/tests/multi_core_test.py', output = None, options = '--test_multi_core --limit=840 -t test_type=plain#seed={0} -m 27kpps'.format(seed), silent = True)
        assert_equal(rc, True)


        # test with tuple
        print(format_text("\nTesting multiple tuple generators for multicore\n", 'underline'))
        rc = self.run_sim('stl/tests/multi_core_test.py', output = None, options = '--test_multi_core --limit=840 -t test_type=tuple#seed={0} -m 27kpps'.format(seed), silent = True)
        assert_equal(rc, True)

        # some tests
        mc_tests = [
                     'stl/tests/single_cont.py',
                     'stl/tests/single_burst.py',
                     'stl/tests/multi_burst.py',
                   ]

        for mc_test in mc_tests:
            print(format_text("\ntesting {0} for multicore...\n".format(mc_test), 'underline'))
            rc = self.run_sim(mc_test, output = None, options = '--test_multi_core --limit=840 -m 27kpps', silent = True)
            assert_equal(rc, True)

        return
+
+
diff --git a/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
new file mode 100755
index 00000000..5ff6b318
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/trex_cfg_creator_test.py
@@ -0,0 +1,698 @@
+#!/usr/bin/python
+
+import sys
+import copy
+from collections import OrderedDict
+from trex import CTRexScenario
+sys.path.append(CTRexScenario.scripts_path)
+from dpdk_setup_ports import ConfigCreator, DpdkSetup
+sys.path.remove(CTRexScenario.scripts_path)
+from nose.tools import assert_raises
+import yaml
+
class CompareLinesDiff(Exception):
    """A generated line differs from the corresponding golden line."""


class CompareLinesNumDiff(Exception):
    """Generated output and golden reference have different line counts."""


class CompareTypeErr(Exception):
    """compare_lines() received a non-string argument."""
+
def compare_lines(golden, output):
    """Compare two multi-line strings line by line after stripping outer whitespace.

    Raises CompareTypeErr for non-string arguments, CompareLinesNumDiff when
    the line counts differ, and CompareLinesDiff on the first mismatching
    line.  Returns None when the texts match.
    """
    # exact-type check (not isinstance) mirrors what callers feed in
    for arg_name, arg_value in (('golden', golden), ('output', output)):
        if type(arg_value) is not str:
            raise CompareTypeErr('Type of %s should be str, got: %s' % (arg_name, type(arg_value)))
    expected_lines = golden.strip().splitlines()
    produced_lines = output.strip().splitlines()
    if len(expected_lines) != len(produced_lines):
        raise CompareLinesNumDiff('Number of lines on golden is: %s, in output: %s\nGolden:\n%s\nGenerated:\n%s\n' % (len(expected_lines), len(produced_lines), golden, output))
    line_no = 0
    for expected_line, produced_line in zip(expected_lines, produced_lines):
        line_no += 1
        if expected_line != produced_line:
            raise CompareLinesDiff('Produced YAML differs from golden at line %s.Golden: %s <-> Output: %s' % (line_no, expected_line, produced_line))
+
def create_config(cpu_topology, interfaces, *args, **kwargs):
    """Build a ConfigCreator for the given topology/interfaces and return
    its rendered configuration text (extra args are passed straight through)."""
    return ConfigCreator(cpu_topology, interfaces, *args, **kwargs).create_config()
+
def verify_master_core0(output):
    """Assert that *output* is a one-element YAML list whose platform section
    pins the master thread to core 0.

    output: YAML text as produced by ConfigCreator.create_config().
    Raises AssertionError describing the first structural mismatch.
    """
    output_yaml = yaml.safe_load(output)
    assert type(output_yaml) is list, 'Generated YAML should be list'
    # '==' instead of 'is': identity comparison with int literals only works
    # by accident of CPython small-int caching and raises SyntaxWarning on
    # Python >= 3.8.
    assert len(output_yaml) == 1, 'Generated YAML should be list with 1 element'
    output_yaml = output_yaml[0]
    assert 'platform' in output_yaml, 'Generated YAML has no platform section:\n%s' % output
    assert 'master_thread_id' in output_yaml['platform'], 'Generated YAML does not specify master thread id:\n%s' % output
    assert output_yaml['platform']['master_thread_id'] == 0, 'Master thread id should be 0 in generated YAML, got:%s' % output_yaml['platform']['master_thread_id']
+
+class TRexCfgCreator_Test:
+
+ def test_vm_cfg(self):
+ cpu_topology = {0: OrderedDict([i, [i]] for i in range(5))}
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 1968,
+ 'Device_str': 'VMXNET3 Ethernet Controller',
+ 'Driver_str': 'vmxnet3',
+ 'Interface': 'ens192',
+ 'Interface_argv': '0b:00.0',
+ 'Module_str': 'igb_uio,vfio-pci,uio_pci_generic',
+ 'NUMA': -1,
+ 'PhySlot': '192',
+ 'PhySlot_str': '192',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '07b0',
+ 'SDevice_str': 'VMXNET3 Ethernet Controller',
+ 'SVendor': '15ad',
+ 'SVendor_str': 'VMware',
+ 'Slot': '0000:0b:00.0',
+ 'Slot_str': '0b:00.0',
+ 'Vendor': 5549,
+ 'Vendor_str': 'VMware',
+ 'dest_mac': '00:0c:29:92:f1:ca',
+ 'src_mac': '00:0c:29:92:f1:d4',
+ 'loopback_dest': True},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 1968,
+ 'Device_str': 'VMXNET3 Ethernet Controller',
+ 'Driver_str': 'vmxnet3',
+ 'Interface': 'ens160',
+ 'Interface_argv': '03:00.0',
+ 'Module_str': 'igb_uio,vfio-pci,uio_pci_generic',
+ 'NUMA': -1,
+ 'PhySlot': '160',
+ 'PhySlot_str': '160',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '07b0',
+ 'SDevice_str': 'VMXNET3 Ethernet Controller',
+ 'SVendor': '15ad',
+ 'SVendor_str': 'VMware',
+ 'Slot': '0000:03:00.0',
+ 'Slot_str': '03:00.0',
+ 'Vendor': 5549,
+ 'Vendor_str': 'VMware',
+ 'dest_mac': '00:0c:29:92:f1:d4',
+ 'src_mac': '00:0c:29:92:f1:ca'}]
+ golden = '''
+### Config file generated by dpdk_setup_ports.py ###
+
+- port_limit: 2
+ version: 2
+ interfaces: ['0b:00.0', '03:00.0']
+ port_info:
+ - dest_mac: 00:0c:29:92:f1:ca # MAC OF LOOPBACK TO IT'S DUAL INTERFACE
+ src_mac: 00:0c:29:92:f1:d4
+ - dest_mac: 00:0c:29:92:f1:d4
+ src_mac: 00:0c:29:92:f1:ca
+
+ platform:
+ master_thread_id: 0
+ latency_thread_id: 1
+ dual_if:
+ - socket: 0
+ threads: [2]
+'''
+ output = create_config(cpu_topology, interfaces)
+ verify_master_core0(output)
+ compare_lines(golden, output)
+ with assert_raises(CompareLinesNumDiff):
+ compare_lines('1' + golden, output)
+ output = create_config(cpu_topology, interfaces, exclude_lcores = [0])
+ with assert_raises(AssertionError):
+ verify_master_core0(output)
+ output = create_config(cpu_topology, interfaces, include_lcores = [1,2,3,4])
+ with assert_raises(AssertionError):
+ verify_master_core0(output)
+ output = create_config(cpu_topology, interfaces, include_lcores = [0,2,3,4])
+ verify_master_core0(output)
+ output = create_config(cpu_topology, interfaces, include_lcores = [0,2,3,4], exclude_lcores = [0])
+ with assert_raises(AssertionError):
+ verify_master_core0(output)
+
+ def test_trex08_cfg(self):
+ cpu_topology = OrderedDict([(0, OrderedDict([(0, [0, 16]), (1, [1, 17]), (2, [2, 18]), (3, [3, 19]), (4, [4, 20]), (5, [5, 21]), (6, [6, 22]), (7, [7, 23])])), (1, OrderedDict([(0, [8, 24]), (1, [9, 25]), (2, [10, 26]), (3, [11, 27]), (4, [12, 28]), (5, [13, 29]), (6, [14, 30]), (7, [15, 31])]))])
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.0',
+ 'Slot_str': '02:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '02:00:02:00:00:00',
+ 'src_mac': '01:00:01:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.1',
+ 'Slot_str': '02:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '01:00:01:00:00:00',
+ 'src_mac': '02:00:02:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:84:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 1,
+ 'PhySlot': '0-8',
+ 'PhySlot_str': '0-8',
+ 'ProgIf': '20',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:84:00.0',
+ 'Slot_str': '84:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '04:00:04:00:00:00',
+ 'src_mac': '03:00:03:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:84:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 1,
+ 'PhySlot': '0-8',
+ 'PhySlot_str': '0-8',
+ 'ProgIf': '20',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:84:00.1',
+ 'Slot_str': '84:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '03:00:03:00:00:00',
+ 'src_mac': '04:00:04:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '05:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-3',
+ 'PhySlot_str': '0-3',
+ 'ProgIf': '01',
+ 'Rev': '02',
+ 'Rev_str': '02',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:05:00.0',
+ 'Slot_str': '05:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '06:00:06:00:00:00',
+ 'src_mac': '05:00:05:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '05:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-3',
+ 'PhySlot_str': '0-3',
+ 'ProgIf': '01',
+ 'Rev': '02',
+ 'Rev_str': '02',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:05:00.1',
+ 'Slot_str': '05:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '05:00:05:00:00:00',
+ 'src_mac': '06:00:06:00:00:00'}]
+ golden = '''
+### Config file generated by dpdk_setup_ports.py ###
+
+- port_limit: 6
+ version: 2
+ interfaces: ['02:00.0', '02:00.1', '84:00.0', '84:00.1', '05:00.0', '05:00.1']
+ port_bandwidth_gb: 40
+ port_info:
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
+
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
+
+ - dest_mac: 06:00:06:00:00:00
+ src_mac: 05:00:05:00:00:00
+ - dest_mac: 05:00:05:00:00:00
+ src_mac: 06:00:06:00:00:00
+
+ platform:
+ master_thread_id: 0
+ latency_thread_id: 16
+ dual_if:
+ - socket: 0
+ threads: [1,17,2,18,3,19,4]
+
+ - socket: 1
+ threads: [8,24,9,25,10,26,11]
+
+ - socket: 0
+ threads: [20,5,21,6,22,7,23]
+'''
+ output = create_config(cpu_topology, interfaces)
+ verify_master_core0(output)
+ compare_lines(golden, output)
+
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.0',
+ 'Slot_str': '02:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '02:00:02:00:00:00',
+ 'src_mac': '01:00:01:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.1',
+ 'Slot_str': '02:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '01:00:01:00:00:00',
+ 'src_mac': '02:00:02:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:84:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 1,
+ 'PhySlot': '0-8',
+ 'PhySlot_str': '0-8',
+ 'ProgIf': '20',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:84:00.0',
+ 'Slot_str': '84:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '04:00:04:00:00:00',
+ 'src_mac': '03:00:03:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:84:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 1,
+ 'PhySlot': '0-8',
+ 'PhySlot_str': '0-8',
+ 'ProgIf': '20',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:84:00.1',
+ 'Slot_str': '84:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '03:00:03:00:00:00',
+ 'src_mac': '04:00:04:00:00:00'}]
+ golden = '''
+### Config file generated by dpdk_setup_ports.py ###
+
+- port_limit: 4
+ version: 2
+ interfaces: ['02:00.0', '02:00.1', '84:00.0', '84:00.1']
+ port_bandwidth_gb: 40
+ port_info:
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
+
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
+
+ platform:
+ master_thread_id: 0
+ latency_thread_id: 31
+ dual_if:
+ - socket: 0
+ threads: [1,17,2,18,3,19,4,20,5,21,6,22,7,23,16]
+
+ - socket: 1
+ threads: [8,24,9,25,10,26,11,27,12,28,13,29,14,30,15]
+'''
+ output = create_config(cpu_topology, interfaces)
+ verify_master_core0(output)
+ compare_lines(golden, output)
+
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.0',
+ 'Slot_str': '02:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '02:00:02:00:00:00',
+ 'src_mac': '01:00:01:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.1',
+ 'Slot_str': '02:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '01:00:01:00:00:00',
+ 'src_mac': '02:00:02:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '05:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-3',
+ 'PhySlot_str': '0-3',
+ 'ProgIf': '01',
+ 'Rev': '02',
+ 'Rev_str': '02',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:05:00.0',
+ 'Slot_str': '05:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '04:00:04:00:00:00',
+ 'src_mac': '03:00:03:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '05:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-3',
+ 'PhySlot_str': '0-3',
+ 'ProgIf': '01',
+ 'Rev': '02',
+ 'Rev_str': '02',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:05:00.1',
+ 'Slot_str': '05:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '03:00:03:00:00:00',
+ 'src_mac': '04:00:04:00:00:00'}]
+ golden = '''
+### Config file generated by dpdk_setup_ports.py ###
+
+- port_limit: 4
+ version: 2
+ interfaces: ['02:00.0', '02:00.1', '05:00.0', '05:00.1']
+ port_bandwidth_gb: 40
+ port_info:
+ - dest_mac: 02:00:02:00:00:00
+ src_mac: 01:00:01:00:00:00
+ - dest_mac: 01:00:01:00:00:00
+ src_mac: 02:00:02:00:00:00
+
+ - dest_mac: 04:00:04:00:00:00
+ src_mac: 03:00:03:00:00:00
+ - dest_mac: 03:00:03:00:00:00
+ src_mac: 04:00:04:00:00:00
+
+ platform:
+ master_thread_id: 0
+ latency_thread_id: 16
+ dual_if:
+ - socket: 0
+ threads: [1,17,2,18,3,19,4]
+
+ - socket: 0
+ threads: [20,5,21,6,22,7,23]
+'''
+ output = create_config(cpu_topology, interfaces)
+ verify_master_core0(output)
+ compare_lines(golden, output)
+
+ def test_cfg_negative(self):
+ """Negative tests for create_config: bad argument types and impossible CPU/NIC layouts."""
+ cpu_topology = OrderedDict([(0, OrderedDict([(0, [0, 16]), (1, [1, 17]), (2, [2, 18]), (3, [3, 19]), (4, [4, 20]), (5, [5, 21]), (6, [6, 22]), (7, [7, 23])])), (1, OrderedDict([(0, [8, 24]), (1, [9, 25]), (2, [10, 26]), (3, [11, 27]), (4, [12, 28]), (5, [13, 29]), (6, [14, 30]), (7, [15, 31])]))])
+ # Base fixture: one valid dual-port XL710 pair, both ports on NUMA node 0.
+ interfaces = [{'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.0',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0002',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.0',
+ 'Slot_str': '02:00.0',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '02:00:02:00:00:00',
+ 'src_mac': '01:00:01:00:00:00'},
+ {'Active': '',
+ 'Class': '0200',
+ 'Class_str': 'Ethernet controller',
+ 'Device': 5507,
+ 'Device_str': 'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'Driver_str': 'igb_uio',
+ 'Interface': '',
+ 'Interface_argv': '0000:02:00.1',
+ 'Module_str': 'vfio-pci,uio_pci_generic',
+ 'NUMA': 0,
+ 'PhySlot': '0-1',
+ 'PhySlot_str': '0-1',
+ 'ProgIf': '01',
+ 'Rev': '01',
+ 'Rev_str': '01',
+ 'SDevice': '0000',
+ 'SDevice_str': 'Ethernet Converged Network Adapter XL710-Q2',
+ 'SVendor': '8086',
+ 'SVendor_str': 'Intel Corporation',
+ 'Slot': '0000:02:00.1',
+ 'Slot_str': '02:00.1',
+ 'Vendor': 32902,
+ 'Vendor_str': 'Intel Corporation',
+ 'dest_mac': '01:00:01:00:00:00',
+ 'src_mac': '02:00:02:00:00:00'}]
+ # type errors: create_config must assert on None/empty topology or interface list
+ with assert_raises(AssertionError):
+ create_config(None, None)
+ with assert_raises(AssertionError):
+ create_config(cpu_topology, None)
+ with assert_raises(AssertionError):
+ create_config(None, interfaces)
+ with assert_raises(AssertionError):
+ create_config(cpu_topology, [])
+ with assert_raises(AssertionError):
+ create_config({}, interfaces)
+ with assert_raises(AssertionError):
+ create_config({}, [])
+ # not enough cores at NUMA 0 (NICs are on NUMA 0, so its cores are required)
+ with assert_raises(DpdkSetup):
+ create_config({0:{0:[]}, 1:{0:[1,2,3,4,5,6,7]}}, interfaces)
+ with assert_raises(DpdkSetup):
+ create_config({0:{0:[1]}, 1:{0:[3]}}, interfaces)
+ with assert_raises(DpdkSetup):
+ create_config({0:{0:[1,2]}}, interfaces)
+ # no NUMA 0 info at all while NICs sit at NUMA 0 -> plain KeyError
+ cpu_topo1 = copy.deepcopy(cpu_topology)
+ del cpu_topo1[0]
+ with assert_raises(KeyError):
+ create_config(cpu_topo1, interfaces)
+ int1 = copy.deepcopy(interfaces)
+ for interface in int1:
+ interface['NUMA'] = 1
+ # now should work, as interfaces use NUMA 1
+ create_config(cpu_topo1, int1)
+ int2 = copy.deepcopy(interfaces)
+ int2[1]['NUMA'] = 1
+ # ports of the same dual-if pair on different NUMAs is rejected
+ with assert_raises(DpdkSetup):
+ create_config(cpu_topology, int2)
+
+
+ def test_inner_comparator(self):
+ """Sanity-check the compare_lines test helper itself: equal inputs pass;
+ line-count, content and argument-type mismatches raise their dedicated exceptions."""
+ compare_lines('', '')
+ compare_lines('one\ntwo', 'one\ntwo')
+ # differing number of lines
+ with assert_raises(CompareLinesNumDiff):
+ compare_lines('one\ntwo', 'one\ntwo\nthree')
+ # same number of lines, differing content
+ with assert_raises(CompareLinesDiff):
+ compare_lines('one\ntwo', 'one\ntwo1')
+ with assert_raises(CompareLinesDiff):
+ compare_lines('one\ntwo', 'one\nthree')
+ # non-string inputs are rejected
+ with assert_raises(CompareTypeErr):
+ compare_lines(None, 'one\nthree')
+ with assert_raises(CompareTypeErr):
+ compare_lines('one\ntwo', None)
+ with assert_raises(CompareTypeErr):
+ compare_lines(None, None)
+
+ @classmethod
+ def tearDownClass(cls):
+ # Undo setUpClass side effects: remove the scripts dir from sys.path and evict
+ # the imported dpdk_setup_ports module so later tests re-import a fresh copy.
+ sys.path.remove(CTRexScenario.scripts_path)
+ del sys.modules['dpdk_setup_ports']
diff --git a/scripts/automation/regression/hltapi_playground.py b/scripts/automation/regression/hltapi_playground.py
new file mode 100755
index 00000000..b790fe25
--- /dev/null
+++ b/scripts/automation/regression/hltapi_playground.py
@@ -0,0 +1,193 @@
+#!/router/bin/python
+
+import outer_packages
+#from trex_stl_lib.trex_stl_hltapi import CTRexHltApi, CStreamsPerPort
+from trex_stl_lib.trex_stl_hltapi import *
+import traceback
+import sys, time
+from pprint import pprint
+import argparse
+
+def error(err = None):
+ """Print an error (normalized with an '[ERR]' prefix) and exit with status 1.
+
+ If err is falsy there is nothing to report, so raise instead.
+ (Python 2 module: uses the print statement.)
+ """
+ if not err:
+ raise Exception('Unknown exception, look traceback')
+ if type(err) is str and not err.startswith('[ERR]'):
+ err = '[ERR] ' + err
+ print err
+ sys.exit(1)
+
+def check_res(res):
+ """Abort via error() when an HLT result dict reports failure (status == 0); otherwise return it unchanged."""
+ if res['status'] == 0:
+ error('Encountered error:\n%s' % res['log'])
+ return res
+
+def print_brief_stats(res):
+ """Print a per-port TX/RX total-packet summary from an HLT traffic_stats result.
+
+ Only integer keys of res are port ids; other keys (e.g. 'status') are skipped.
+ (Python 2 module: dict.iteritems and print statements.)
+ """
+ title_str = ' '*3
+ tx_str = 'TX:'
+ rx_str = 'RX:'
+ for port_id, stat in res.iteritems():
+ if type(port_id) is not int:
+ continue
+ title_str += ' '*10 + 'Port%s' % port_id
+ tx_str += '%15s' % res[port_id]['aggregate']['tx']['total_pkts']
+ rx_str += '%15s' % res[port_id]['aggregate']['rx']['total_pkts']
+ print(title_str)
+ print(tx_str)
+ print(rx_str)
+
+def wait_with_progress(seconds):
+ """Sleep for the given number of seconds, printing a dot per second as progress."""
+ for i in range(0, seconds):
+ time.sleep(1)
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ print('')
+
+if __name__ == "__main__":
+ try:
+ # Demo / playground driver: connect to a TRex server via the HLT API,
+ # create streams, run traffic and print stats. Python 2 script.
+ parser = argparse.ArgumentParser(description='Example of using stateless TRex via HLT API.', formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument('-v', dest = 'verbose', default = 0, help='Stateless API verbosity:\n0: No prints\n1: Commands and their status\n2: Same as 1 + ZMQ in&out')
+ parser.add_argument('--device', dest = 'device', default = 'localhost', help='Address of TRex server')
+ args = parser.parse_args()
+ hlt_client = CTRexHltApi(verbose = int(args.verbose))
+
+ print('Connecting to %s...' % args.device)
+ res = check_res(hlt_client.connect(device = args.device, port_list = [0, 1], username = 'danklei', break_locks = True, reset = True))
+ port_handle = res['port_handle']
+ print('Connected, got port handles %s' % port_handle)
+ ports_streams_dict = CStreamsPerPort()
+ print hlt_client.traffic_control(action = 'poll')
+
+ print hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
+ l3_protocol = 'ipv4',
+ #length_mode = 'imix', l3_length = 200,
+ ipv6_dst_mode = 'decrement', ipv6_dst_count = 300, ipv6_dst_addr = 'fe80:0:0:0:0:0:0:000f',
+ port_handle = port_handle, port_handle2 = port_handle[1],
+ #save_to_yaml = '/tmp/d1.yaml',
+ #stream_id = 1,
+ )
+ print hlt_client.traffic_control(action = 'poll')
+ print hlt_client.traffic_control(action = 'run')
+ print hlt_client.traffic_control(action = 'poll')
+ wait_with_progress(2)
+ print hlt_client.traffic_control(action = 'poll')
+ print hlt_client.traffic_control(action = 'stop')
+ print hlt_client.traffic_control(action = 'poll')
+ print hlt_client.traffic_stats(mode = 'aggregate')
+ print hlt_client.traffic_control(action = 'clear_stats')
+ wait_with_progress(1)
+ print hlt_client.traffic_stats(mode = 'aggregate')
+
+ wait_with_progress(1)
+ print hlt_client.traffic_stats(mode = 'aggregate')
+ wait_with_progress(1)
+ print hlt_client.traffic_stats(mode = 'aggregate')
+ wait_with_progress(1)
+ print hlt_client.traffic_stats(mode = 'aggregate')
+ #print res
+ #print hlt_client._streams_history
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[1])
+ #ports_streams_dict.add_streams_from_res(res)
+ sys.exit(0)
+ # NOTE(review): everything below the sys.exit(0) above is unreachable
+ # scratch/playground code, apparently kept for manual experimentation.
+ res = check_res(hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
+ port_handle = port_handle[0], port_handle2 = port_handle[1], save_to_yaml = '/tmp/d1.yaml',
+ l4_protocol = 'udp',
+ #udp_src_port_mode = 'decrement',
+ #udp_src_port_count = 10, udp_src_port = 5,
+ ))
+ ports_streams_dict.add_streams_from_res(res)
+ sys.exit(0)
+ #print ports_streams_dict
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
+ res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
+ mac_src = '1-2-3:4:5:6', l4_protocol = 'udp', save_to_yaml = '/tmp/d2.yaml'))
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
+ #print hlt_client._streams_history
+ res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
+ mac_dst = '{ 7 7 7-7:7:7}', save_to_yaml = '/tmp/d3.yaml'))
+ #print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
+ check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle))
+
+ res = check_res(hlt_client.traffic_config(mode = 'create', bidirectional = True, length_mode = 'fixed',
+ port_handle = port_handle[0], port_handle2 = port_handle[1],
+ transmit_mode = 'single_burst', pkts_per_burst = 100, rate_pps = 100,
+ mac_src = '1-2-3-4-5-6',
+ mac_dst = '6:5:4:4:5:6',
+ save_to_yaml = '/tmp/imix.yaml'))
+ ports_streams_dict.add_streams_from_res(res)
+
+ print('Create single_burst 100 packets rate_pps=100 on port 0')
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], transmit_mode = 'single_burst',
+ pkts_per_burst = 100, rate_pps = 100))
+ ports_streams_dict.add_streams_from_res(res)
+
+ # playground - creating various streams on port 1
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt2.yaml',
+ tcp_src_port_mode = 'decrement',
+ tcp_src_port_count = 10, tcp_dst_port_count = 10, tcp_dst_port_mode = 'random'))
+ ports_streams_dict.add_streams_from_res(res)
+
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt3.yaml',
+ l4_protocol = 'udp',
+ udp_src_port_mode = 'decrement',
+ udp_src_port_count = 10, udp_dst_port_count = 10, udp_dst_port_mode = 'random'))
+ ports_streams_dict.add_streams_from_res(res)
+
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt4.yaml',
+ length_mode = 'increment',
+ #ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
+ ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
+ ports_streams_dict.add_streams_from_res(res)
+
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt5.yaml',
+ length_mode = 'decrement', frame_size_min = 100, frame_size_max = 3000,
+ #ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
+ #ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2
+ ))
+ ports_streams_dict.add_streams_from_res(res)
+
+ # remove the playground
+ check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[1]))
+
+ print('Create continuous stream for port 1, rate_pps = 1')
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt1.yaml',
+ #length_mode = 'increment', l3_length_min = 200,
+ ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
+ ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
+
+ check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle))
+ wait_with_progress(1)
+ print('Sample after 1 seconds (only packets count)')
+ res = check_res(hlt_client.traffic_stats(mode = 'all', port_handle = port_handle))
+ print_brief_stats(res)
+ print ''
+
+ print('Port 0 has finished the burst, put continuous instead with rate 1000. No stopping of other ports.')
+ check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[0]))
+ check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[0]))
+ res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], rate_pps = 1000))
+ ports_streams_dict.add_streams_from_res(res)
+ check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle[0]))
+ wait_with_progress(5)
+ print('Sample after another 5 seconds (only packets count)')
+ res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
+ print_brief_stats(res)
+ print ''
+
+ print('Stop traffic at port 1')
+ res = check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[1]))
+ wait_with_progress(5)
+ print('Sample after another %s seconds (only packets count)' % 5)
+ res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
+ print_brief_stats(res)
+ print ''
+ print('Full HLT stats:')
+ pprint(res)
+
+ check_res(hlt_client.cleanup_session())
+ except Exception as e:
+ print(traceback.print_exc())
+ print(e)
+ raise
+ finally:
+ print('Done.')
diff --git a/scripts/automation/regression/interactive_platform b/scripts/automation/regression/interactive_platform
new file mode 100755
index 00000000..5c5e920e
--- /dev/null
+++ b/scripts/automation/regression/interactive_platform
@@ -0,0 +1,4 @@
+#!/bin/bash
+/router/bin/python-2.7.4 interactive_platform.py $@
+sts=$?
+exit $sts \ No newline at end of file
diff --git a/scripts/automation/regression/interactive_platform.py b/scripts/automation/regression/interactive_platform.py
new file mode 100755
index 00000000..10e89910
--- /dev/null
+++ b/scripts/automation/regression/interactive_platform.py
@@ -0,0 +1,338 @@
+#!/router/bin/python-2.7.4
+
+from CPlatform import *
+import cmd
+import outer_packages
+import termstyle
+import os
+from misc_methods import load_object_config_file
+from optparse import OptionParser
+from CShowParser import PlatformResponseMissmatch, PlatformResponseAmbiguity
+
+class InteractivePlatform(cmd.Cmd):
+ """Interactive cmd-based shell around CPlatform.
+
+ Each do_* method is one shell command for configuring or querying a remote
+ Cisco IOS platform (PBR, NBAR, NAT, static routes, stats, image management).
+ In virtual mode no real connection to the device is established.
+ """
+
+ intro = termstyle.green("\nInteractive shell to control a remote Cisco IOS platform.\nType help to view available pre-defined configurations\n(c) All rights reserved.\n")
+ prompt = '> '
+
+ def __init__(self, cfg_yaml_path = None, silent_mode = False, virtual_mode = False ):
+# super(InteractivePlatform, self).__init__()
+ # NOTE: direct base-class __init__ call; presumably because cmd.Cmd is an
+ # old-style class on Python 2, where super() is unusable — confirm.
+ cmd.Cmd.__init__(self)
+ self.virtual_mode = virtual_mode
+ self.platform = CPlatform(silent_mode)
+ # Prompt interactively for a config path when none was given.
+ if cfg_yaml_path is None:
+ try:
+ cfg_yaml_path = raw_input(termstyle.cyan("Please enter a readable .yaml configuration file path: "))
+ cfg_yaml_path = os.path.abspath(cfg_yaml_path)
+ except KeyboardInterrupt:
+ exit(-1)
+ try:
+ self.device_cfg = CDeviceCfg(cfg_yaml_path)
+ self.platform.load_platform_data_from_file(self.device_cfg)
+ if not virtual_mode:
+ # if not virtual mode, try to establish a physical connection to platform
+ self.platform.launch_connection(self.device_cfg)
+
+ except Exception as inst:
+ # Any failure while loading config / connecting is fatal for the shell.
+ print(termstyle.magenta(inst))
+ exit(-1)
+
+ def do_show_cfg (self, line):
+ """Outputs the loaded interface configuration"""
+ self.platform.get_if_manager().dump_if_config()
+ print(termstyle.green("*** End of interface configuration ***"))
+
+ def do_show_nat_cfg (self, line):
+ """Outputs the loaded nat provided configuration"""
+ try:
+ self.platform.dump_obj_config('nat')
+ print(termstyle.green("*** End of nat configuration ***"))
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+
+
+ def do_show_static_route_cfg (self, line):
+ """Outputs the loaded static route configuration"""
+ try:
+ self.platform.dump_obj_config('static_route')
+ print(termstyle.green("*** End of static route configuration ***"))
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+
+ def do_switch_cfg (self, cfg_file_path):
+ """Switch the current platform interface configuration with another one"""
+ if cfg_file_path:
+ cfg_yaml_path = os.path.abspath(cfg_file_path)
+ self.device_cfg = CDeviceCfg(cfg_yaml_path)
+ self.platform.load_platform_data_from_file(self.device_cfg)
+ if not self.virtual_mode:
+ self.platform.reload_connection(self.device_cfg)
+ print(termstyle.green("Configuration switching completed successfully."))
+ else:
+ print(termstyle.magenta("Configuration file is missing. Please try again."))
+
+ def do_load_clean (self, arg):
+ """Loads a clean configuration file onto the platform
+ Specifying no arguments will load 'clean_config.cfg' file from bootflash disk
+ First argument is clean config filename
+ Second argument is platform file's disk"""
+ if arg:
+ in_val = arg.split(' ')
+ if len(in_val)==2:
+ self.platform.load_clean_config(in_val[0], in_val[1])
+ else:
+ print(termstyle.magenta("One of the config inputs is missing."))
+ else:
+ self.platform.load_clean_config()
+# print termstyle.magenta("Configuration file definition is missing. use 'help load_clean' for further info.")
+
+ def do_basic_if_config(self, line):
+ """Apply basic interfaces configuration to all platform interfaces"""
+ self.platform.configure_basic_interfaces()
+ print(termstyle.green("Basic interfaces configuration applied successfully."))
+
+ def do_pbr(self, line):
+ """Apply IPv4 PBR configuration on all interfaces"""
+ self.platform.config_pbr()
+ print(termstyle.green("IPv4 PBR configuration applied successfully."))
+
+ def do_no_pbr(self, line):
+ """Removes IPv4 PBR configuration from all interfaces"""
+ self.platform.config_no_pbr()
+ print(termstyle.green("IPv4 PBR configuration removed successfully."))
+
+ def do_nbar(self, line):
+ """Apply NBAR PD configuration on all interfaces"""
+ self.platform.config_nbar_pd()
+ print(termstyle.green("NBAR configuration applied successfully."))
+
+ def do_no_nbar(self, line):
+ """Removes NBAR PD configuration from all interfaces"""
+ self.platform.config_no_nbar_pd()
+ print(termstyle.green("NBAR configuration removed successfully."))
+
+ def do_static_route(self, arg):
+ """Apply IPv4 static routing configuration on all interfaces
+ Specifying no arguments will apply static routing with following config:
+ 1. clients_start - 16.0.0.1
+ 2. servers_start - 48.0.0.1
+ 3. dual_port_mask - 1.0.0.0
+ 4. client_destination_mask - 255.0.0.0
+ 5. server_destination_mask - 255.0.0.0
+ """
+ if arg:
+ # Optional argument: path of a yaml file with the static-route parameters.
+ stat_route_dict = load_object_config_file(arg)
+# else:
+# print termstyle.magenta("Unknown configutaion option requested. use 'help static_route' for further info.")
+ else:
+ stat_route_dict = { 'clients_start' : '16.0.0.1',
+ 'servers_start' : '48.0.0.1',
+ 'dual_port_mask': '1.0.0.0',
+ 'client_destination_mask' : '255.0.0.0',
+ 'server_destination_mask' : '255.0.0.0' }
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.platform.config_static_routing(stat_route_obj)
+ print(termstyle.green("IPv4 static routing configuration applied successfully."))
+# print termstyle.magenta("Specific configutaion is missing. use 'help static_route' for further info.")
+
+ def do_no_static_route(self, line):
+ """Removes IPv4 static route configuration from all non-duplicated interfaces"""
+ try:
+ self.platform.config_no_static_routing()
+ print(termstyle.green("IPv4 static routing configuration removed successfully."))
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+
+ def do_nat(self, arg):
+ """Apply NAT configuration on all non-duplicated interfaces
+ Specifying no arguments will apply NAT with following config:
+ 1. clients_net_start - 16.0.0.0
+ 2. client_acl_wildcard_mask - 0.0.0.255
+ 3. dual_port_mask - 1.0.0.0
+ 4. pool_start - 200.0.0.0
+ 5. pool_netmask - 255.255.255.0
+ """
+ if arg:
+ # Optional argument: path of a yaml file with the NAT parameters.
+ nat_dict = load_object_config_file(arg)
+# else:
+# print termstyle.magenta("Unknown nat configutaion option requested. use 'help nat' for further info.")
+ else:
+# print termstyle.magenta("Specific nat configutaion is missing. use 'help nat' for further info.")
+ nat_dict = { 'clients_net_start' : '16.0.0.0',
+ 'client_acl_wildcard_mask' : '0.0.0.255',
+ 'dual_port_mask' : '1.0.0.0',
+ 'pool_start' : '200.0.0.0',
+ 'pool_netmask' : '255.255.255.0' }
+ nat_obj = CNatConfig(nat_dict)
+ self.platform.config_nat(nat_obj)
+ print(termstyle.green("NAT configuration applied successfully."))
+
+ def do_no_nat(self, arg):
+ """Removes NAT configuration from all non-duplicated interfaces"""
+ try:
+ self.platform.config_no_nat()
+ print(termstyle.green("NAT configuration removed successfully."))
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+
+
+ def do_ipv6_pbr(self, line):
+ """Apply IPv6 PBR configuration on all interfaces"""
+ self.platform.config_ipv6_pbr()
+ print(termstyle.green("IPv6 PBR configuration applied successfully."))
+
+ def do_no_ipv6_pbr(self, line):
+ """Removes IPv6 PBR configuration from all interfaces"""
+ self.platform.config_no_ipv6_pbr()
+ print(termstyle.green("IPv6 PBR configuration removed successfully."))
+
+ def do_zbf(self, line):
+ """Apply Zone-Based policy Firewall configuration on all interfaces"""
+ self.platform.config_zbf()
+ print(termstyle.green("Zone-Based policy Firewall configuration applied successfully."))
+
+ def do_no_zbf(self, line):
+ """Removes Zone-Based policy Firewall configuration from all interfaces"""
+ self.platform.config_no_zbf()
+ print(termstyle.green("Zone-Based policy Firewall configuration removed successfully."))
+
+ def do_show_cpu_util(self, line):
+ """Fetches CPU utilization stats from the platform"""
+ try:
+ print(self.platform.get_cpu_util())
+ print(termstyle.green("*** End of show_cpu_util output ***"))
+ except PlatformResponseMissmatch as inst:
+ print(termstyle.magenta(inst))
+
+ def do_show_drop_stats(self, line):
+ """Fetches packet drop stats from the platform.\nDrop are summed and presented for both input and output traffic of each interface"""
+ print(self.platform.get_drop_stats())
+ print(termstyle.green("*** End of show_drop_stats output ***"))
+
+ def do_show_nbar_stats(self, line):
+ """Fetches NBAR classification stats from the platform.\nStats are available both as raw data and as percentage data."""
+ try:
+ print(self.platform.get_nbar_stats())
+ print(termstyle.green("*** End of show_nbar_stats output ***"))
+ except PlatformResponseMissmatch as inst:
+ print(termstyle.magenta(inst))
+
+ def do_show_nat_stats(self, line):
+ """Fetches NAT translations stats from the platform"""
+ print(self.platform.get_nat_stats())
+ print(termstyle.green("*** End of show_nat_stats output ***"))
+
+ def do_show_cft_stats(self, line):
+ """Fetches CFT stats from the platform"""
+ print(self.platform.get_cft_stats())
+ print(termstyle.green("*** End of show_sft_stats output ***"))
+
+ def do_show_cvla_memory_usage(self, line):
+ """Fetches CVLA memory usage stats from the platform"""
+ (res, res2) = self.platform.get_cvla_memory_usage()
+ print(res)
+ print(res2)
+ print(termstyle.green("*** End of show_cvla_memory_usage output ***"))
+
+ def do_clear_counters(self, line):
+ """Clears interfaces counters"""
+ self.platform.clear_counters()
+ print(termstyle.green("*** clear counters completed ***"))
+
+ def do_clear_nbar_stats(self, line):
+ """Clears NBAR protocol-discovery statistics"""
+ self.platform.clear_nbar_stats()
+ print(termstyle.green("*** clear nbar stats completed ***"))
+
+ def do_clear_cft_counters(self, line):
+ """Clears CFT counters"""
+ self.platform.clear_cft_counters()
+ print(termstyle.green("*** clear cft counters completed ***"))
+
+ def do_clear_drop_stats(self, line):
+ """Clears packet drop statistics"""
+ self.platform.clear_packet_drop_stats()
+ print(termstyle.green("*** clear packet drop stats completed ***"))
+
+ def do_clear_nat_translations(self, line):
+ """Clears nat translations"""
+ self.platform.clear_nat_translations()
+ print(termstyle.green("*** clear nat translations completed ***"))
+
+ def do_set_tftp_server (self, line):
+ """Configures TFTP access on platform"""
+ self.platform.config_tftp_server(self.device_cfg)
+ print(termstyle.green("*** TFTP config deployment completed ***"))
+
+ def do_show_running_image (self, line):
+ """Fetches currently loaded image of the platform"""
+ res = self.platform.get_running_image_details()
+ print(res)
+ print(termstyle.green("*** Show running image completed ***"))
+
+ def do_check_image_existence(self, arg):
+ """Check if specific image file (usually *.bin) is already stored in platform drive"""
+ if arg:
+ try:
+ res = self.platform.check_image_existence(arg.split(' ')[0])
+ print(res)
+ print(termstyle.green("*** Check image existence completed ***"))
+ except PlatformResponseAmbiguity as inst:
+ print(termstyle.magenta(inst))
+ else:
+ print(termstyle.magenta("Please provide an image name in order to check for existance."))
+
+ def do_load_image (self, arg):
+ """Loads a given image filename from tftp server (if not available on disk) and sets it as the boot image on the platform"""
+ if arg:
+ try:
+ # NOTE(review): the provided argument is ignored — a hard-coded image
+ # filename is loaded instead (the intended arg use is in the trailing comment).
+ self.platform.load_platform_image('asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150324_100047-std.bin')#arg.split(' ')[0])
+ except UserWarning as inst:
+ print(termstyle.magenta(inst))
+ else:
+ print(termstyle.magenta("Image filename is missing."))
+
+ def do_reload (self, line):
+ """Reloads the platform"""
+
+ # NOTE(review): uses module 'misc_methods', but this file only imports
+ # load_object_config_file from it — confirm 'import misc_methods' exists, else this raises NameError.
+ ans = misc_methods.query_yes_no('This will reload the platform. Are you sure?', default = None)
+ if ans:
+ # user confirmed he wishes to reload the platform
+ self.platform.reload_platform(self.device_cfg)
+ print(termstyle.green("*** Platform reload completed ***"))
+ else:
+ print(termstyle.green("*** Platform reload aborted ***"))
+
+ def do_quit(self, arg):
+ """Quits the application"""
+ return True
+
+ def do_exit(self, arg):
+ """Quits the application"""
+ return True
+
+ def do_all(self, arg):
+ """Configures bundle of commands to set PBR routing"""
+ # Convenience macro: clean config, TFTP, basic interfaces, then IPv4 + IPv6 PBR.
+ self.do_load_clean('')
+ self.do_set_tftp_server('')
+ self.do_basic_if_config('')
+ self.do_pbr('')
+ self.do_ipv6_pbr('')
+
+
+
+if __name__ == "__main__":
+ # CLI front end: option names deliberately match InteractivePlatform.__init__
+ # keyword arguments, so vars(options) can be splatted straight into it.
+ parser = OptionParser(version="%prog 1.0 \t (C) Cisco Systems Inc.\n")
+ # NOTE(review): the help text below contains a user-visible typo ("applicatino");
+ # it is a runtime string, so it is flagged here rather than changed.
+ parser.add_option("-c", "--config-file", dest="cfg_yaml_path",
+ action="store", help="Define the interface configuration to load the applicatino with.", metavar="FILE_PATH")
+ parser.add_option("-s", "--silent", dest="silent_mode", default = False,
+ action="store_true", help="Silence the generated input when commands launched.")
+ parser.add_option("-v", "--virtual", dest="virtual_mode", default = False,
+ action="store_true", help="Interact with a virtual router, no actual link will apply. Show commands are NOT available in this mode.")
+ (options, args) = parser.parse_args()
+
+ try:
+ InteractivePlatform(**vars(options)).cmdloop()
+
+ except KeyboardInterrupt:
+ exit(-1)
+
diff --git a/scripts/automation/regression/interfaces_e.py b/scripts/automation/regression/interfaces_e.py
new file mode 100755
index 00000000..0c2ce5d2
--- /dev/null
+++ b/scripts/automation/regression/interfaces_e.py
@@ -0,0 +1,8 @@
+#!/router/bin/python
+
+import outer_packages
+from enum import Enum
+
+
+# define the interface types (Client / Server / All) used across the regression tests
+IFType = Enum('IFType', 'Client Server All')
diff --git a/scripts/automation/regression/misc_methods.py b/scripts/automation/regression/misc_methods.py
new file mode 100755
index 00000000..99071f81
--- /dev/null
+++ b/scripts/automation/regression/misc_methods.py
@@ -0,0 +1,284 @@
+#!/router/bin/python
+import sys
+if sys.version_info >= (3, 0):
+ import configparser
+else:
+ import ConfigParser
+
+import outer_packages
+import yaml
+from collections import namedtuple
+import subprocess, shlex
+import os
+
+TRexConfig = namedtuple('TRexConfig', 'trex, router, tftp')
+
+# debug/development purpose, lists object's attributes and their values
+def print_r(obj):
+ for attr in dir(obj):
+ print('obj.%s %s' % (attr, getattr(obj, attr)))
+
+def mix_string (str):
+ """Convert all string to lowercase letters, and replaces spaces with '_' char"""
+ return str.replace(' ', '_').lower()
+
+# executes given command, returns tuple (return_code, stdout, stderr)
+def run_command(cmd, background = False):
+ if background:
+ print('Running command in background: %s' % cmd)
+ with open(os.devnull, 'w') as tempf:
+ subprocess.Popen(shlex.split(cmd), stdin=tempf, stdout=tempf, stderr=tempf)
+ return (None,)*3
+ else:
+ print('Running command: %s' % cmd)
+ proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ stdout = stdout.decode()
+ stderr = stderr.decode()
+ if stdout:
+ print('Stdout:\n%s' % stdout)
+ if proc.returncode:
+ if stderr:
+ print('Stderr:\n%s' % stderr)
+ print('Return code: %s' % proc.returncode)
+ return (proc.returncode, stdout, stderr)
+
+
+def run_remote_command(host, command_string, background = False, timeout = 20):
+ cmd = 'ssh -tt %s \'sudo%s sh -ec "%s"\'' % (host, (' timeout %s' % timeout) if (timeout and not background) else '', command_string)
+ return run_command(cmd, background)
+
+
+def generate_intf_lists (interfacesList):
+ retDict = {
+ 'relevant_intf' : [],
+ 'relevant_ip_addr' : [],
+ 'relevant_mac_addr' : [],
+ 'total_pairs' : None
+ }
+
+ for intf in interfacesList:
+ retDict['relevant_intf'].append(intf['client'])
+ retDict['relevant_ip_addr'].append(intf['client_config']['ip_addr'])
+ retDict['relevant_mac_addr'].append(intf['client_config']['mac_addr'])
+ retDict['relevant_intf'].append(intf['server'])
+ retDict['relevant_ip_addr'].append(intf['server_config']['ip_addr'])
+ retDict['relevant_mac_addr'].append(intf['server_config']['mac_addr'])
+
+ retDict['total_pairs'] = len(interfacesList)
+
+ return retDict
+
+def get_single_net_client_addr (ip_addr, octetListDict = {'3' : 1}, ip_type = 'ipv4'):
+ """ get_single_net_client_addr(ip_addr, octetListDict, ip_type) -> str
+
+ Parameters
+ ----------
+ ip_addr : str
+ a string an IP address (by default, of type A.B.C.D)
+ octetListDict : dict
+        a dictionary representing the octets on which to act such that ip[octet_key] = ip[octet_key] + octet_value
+ ip_type : str
+ a string that defines the ip type to parse. possible inputs are 'ipv4', 'ipv6'
+
+ By default- Returns a new ip address - A.B.C.(D+1)
+ """
+ if ip_type == 'ipv4':
+ ip_lst = ip_addr.split('.')
+
+ for octet,increment in octetListDict.items():
+ int_octet = int(octet)
+ if ((int_octet < 0) or (int_octet > 3)):
+ raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )
+ else:
+ if (int(ip_lst[int_octet]) + increment) < 255:
+ ip_lst[int_octet] = str(int(ip_lst[int_octet]) + increment)
+ else:
+ raise ValueError('the requested increment exceeds 255 client address limit')
+
+ return '.'.join(ip_lst)
+
+ else: # this is a ipv6 address, handle accordingly
+ ip_lst = ip_addr.split(':')
+
+ for octet,increment in octetListDict.items():
+ int_octet = int(octet)
+ if ((int_octet < 0) or (int_octet > 7)):
+ raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )
+ else:
+ if (int(ip_lst[int_octet]) + increment) < 65535:
+ ip_lst[int_octet] = format( int(ip_lst[int_octet], 16) + increment, 'X')
+ else:
+ raise ValueError('the requested increment exceeds 65535 client address limit')
+
+ return ':'.join(ip_lst)
+
+
+def load_complete_config_file (filepath):
+ """load_complete_config_file(filepath) -> list
+
+ Loads a configuration file (.yaml) for both trex config and router config
+ Returns a list with a dictionary to each of the configurations
+ """
+
+ # create response dictionaries
+ trex_config = {}
+ rtr_config = {}
+ tftp_config = {}
+
+ try:
+ with open(filepath, 'r') as f:
+ config = yaml.load(f)
+
+ # Handle TRex configuration
+ trex_config['trex_name'] = config["trex"]["hostname"]
+ trex_config['trex_password'] = config["trex"].get("password")
+ #trex_config['trex_is_dual'] = config["trex"]["is_dual"]
+ trex_config['trex_cores'] = int(config["trex"]["cores"])
+ #trex_config['trex_latency'] = int(config["trex"]["latency"])
+# trex_config['trex_version_path'] = config["trex"]["version_path"]
+ trex_config['modes'] = config['trex'].get('modes', [])
+
+ if 'loopback' not in trex_config['modes']:
+ trex_config['router_interface'] = config["router"]["ip_address"]
+
+ # Handle Router configuration
+ rtr_config['model'] = config["router"]["model"]
+ rtr_config['hostname'] = config["router"]["hostname"]
+ rtr_config['ip_address'] = config["router"]["ip_address"]
+ rtr_config['image'] = config["router"]["image"]
+ rtr_config['line_pswd'] = config["router"]["line_password"]
+ rtr_config['en_pswd'] = config["router"]["en_password"]
+ rtr_config['interfaces'] = config["router"]["interfaces"]
+ rtr_config['clean_config'] = config["router"]["clean_config"]
+ rtr_config['intf_masking'] = config["router"]["intf_masking"]
+ rtr_config['ipv6_mask'] = config["router"]["ipv6_mask"]
+ rtr_config['mgmt_interface'] = config["router"]["mgmt_interface"]
+
+ # Handle TFTP configuration
+ tftp_config['hostname'] = config["tftp"]["hostname"]
+ tftp_config['ip_address'] = config["tftp"]["ip_address"]
+ tftp_config['images_path'] = config["tftp"]["images_path"]
+
+ if rtr_config['clean_config'] is None:
+ raise ValueError('A clean router configuration wasn`t provided.')
+
+ except ValueError:
+ print("")
+ raise
+
+ except Exception as inst:
+ print("\nBad configuration file provided: '{0}'\n".format(filepath))
+ raise inst
+
+ return TRexConfig(trex_config, rtr_config, tftp_config)
+
+def load_object_config_file (filepath):
+ try:
+ with open(filepath, 'r') as f:
+ config = yaml.load(f)
+ return config
+ except Exception as inst:
+ print("\nBad configuration file provided: '{0}'\n".format(filepath))
+ print(inst)
+ exit(-1)
+
+
+def query_yes_no(question, default="yes"):
+ """Ask a yes/no question via raw_input() and return their answer.
+
+ "question" is a string that is presented to the user.
+ "default" is the presumed answer if the user just hits <Enter>.
+ It must be "yes" (the default), "no" or None (meaning
+ an answer is required of the user).
+
+ The "answer" return value is True for "yes" or False for "no".
+ """
+ valid = { "yes": True, "y": True, "ye": True,
+ "no": False, "n": False }
+ if default is None:
+ prompt = " [y/n] "
+ elif default == "yes":
+ prompt = " [Y/n] "
+ elif default == "no":
+ prompt = " [y/N] "
+ else:
+ raise ValueError("invalid default answer: '%s'" % default)
+
+ while True:
+ sys.stdout.write(question + prompt)
+ choice = input().lower()
+ if default is not None and choice == '':
+ return valid[default]
+ elif choice in valid:
+ return valid[choice]
+ else:
+ sys.stdout.write("Please respond with 'yes' or 'no' "
+ "(or 'y' or 'n').\n")
+
+
+def load_benchmark_config_file (filepath):
+ """load_benchmark_config_file(filepath) -> list
+
+ Loads a configuration file (.yaml) for both trex config and router config
+ Returns a list with a dictionary to each of the configurations
+ """
+
+ # create response dictionary
+ benchmark_config = {}
+
+ try:
+ with open(filepath, 'r') as f:
+ benchmark_config = yaml.load(f)
+
+ except Exception as inst:
+ print("\nBad configuration file provided: '{0}'\n".format(filepath))
+ print(inst)
+ exit(-1)
+
+ return benchmark_config
+
+
+def get_benchmark_param (benchmark_path, test_name, param, sub_param = None):
+
+ config = load_benchmark_config_file(benchmark_path)
+ if sub_param is None:
+ return config[test_name][param]
+ else:
+ return config[test_name][param][sub_param]
+
+def gen_increment_dict (dual_port_mask):
+ addr_lst = dual_port_mask.split('.')
+ result = {}
+ for idx, octet_increment in enumerate(addr_lst):
+ octet_int = int(octet_increment)
+ if octet_int>0:
+ result[str(idx)] = octet_int
+
+ return result
+
+
+def get_network_addr (ip_type = 'ipv4'):
+ ipv4_addr = [1, 1, 1, 0] # base ipv4 address to start generating from- 1.1.1.0
+ ipv6_addr = ['2001', 'DB8', 0, '2222', 0, 0, 0, 0] # base ipv6 address to start generating from- 2001:DB8:1111:2222:0:0
+ while True:
+ if ip_type == 'ipv4':
+ if (ipv4_addr[2] < 255):
+ yield [".".join( map(str, ipv4_addr) ), '255.255.255.0']
+ ipv4_addr[2] += 1
+ else: # reached defined maximum limit of address allocation
+ return
+ else: # handling ipv6 addressing
+ if (ipv6_addr[2] < 4369):
+ tmp_ipv6_addr = list(ipv6_addr)
+ tmp_ipv6_addr[2] = hex(tmp_ipv6_addr[2])[2:]
+ yield ":".join( map(str, tmp_ipv6_addr) )
+ ipv6_addr[2] += 1
+ else: # reached defined maximum limit of address allocation
+ return
+
+
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/outer_packages.py b/scripts/automation/regression/outer_packages.py
new file mode 100755
index 00000000..61ddc5cd
--- /dev/null
+++ b/scripts/automation/regression/outer_packages.py
@@ -0,0 +1,71 @@
+#!/router/bin/python
+
+import sys, site
+import platform, os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__)) # alternate use with: os.getcwd()
+TREX_PATH = os.getenv('TREX_UNDER_TEST') # path to <trex-core>/scripts directory, env. variable TREX_UNDER_TEST should override it.
+if not TREX_PATH or not os.path.isfile('%s/trex_daemon_server' % TREX_PATH):
+ TREX_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir))
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(TREX_PATH, 'external_libs'))
+PATH_TO_CTRL_PLANE = os.path.abspath(os.path.join(TREX_PATH, 'automation', 'trex_control_plane'))
+PATH_STF_API = os.path.abspath(os.path.join(PATH_TO_CTRL_PLANE, 'stf'))
+PATH_STL_API = os.path.abspath(os.path.join(PATH_TO_CTRL_PLANE, 'stl'))
+
+
+NIGHTLY_MODULES = [ {'name': 'ansi2html'},
+ {'name': 'enum34-1.0.4'},
+ {'name': 'rednose-0.4.1'},
+ {'name': 'progressbar-2.2'},
+ {'name': 'termstyle'},
+ {'name': 'pyyaml-3.11', 'py-dep': True},
+ {'name': 'nose-1.3.4', 'py-dep': True}
+ ]
+
+
+def generate_module_path (module, is_python3, is_64bit, is_cel):
+ platform_path = [module['name']]
+
+ if module.get('py-dep'):
+ platform_path.append('python3' if is_python3 else 'python2')
+
+ if module.get('arch-dep'):
+ platform_path.append('cel59' if is_cel else 'fedora18')
+ platform_path.append('64bit' if is_64bit else '32bit')
+
+ return os.path.normcase(os.path.join(PATH_TO_PYTHON_LIB, *platform_path))
+
+
+def import_module_list(modules_list):
+
+ # platform data
+ is_64bit = platform.architecture()[0] == '64bit'
+ is_python3 = (sys.version_info >= (3, 0))
+ is_cel = os.path.exists('/etc/system-profile')
+
+ # regular modules
+ for p in modules_list:
+ full_path = generate_module_path(p, is_python3, is_64bit, is_cel)
+
+ if not os.path.exists(full_path):
+ print("Unable to find required module library: '{0}'".format(p['name']))
+ print("Please provide the correct path using PATH_TO_PYTHON_LIB variable")
+ print("current path used: '{0}'".format(full_path))
+ exit(0)
+
+ sys.path.insert(1, full_path)
+
+
+def import_nightly_modules ():
+ sys.path.append(TREX_PATH)
+ #sys.path.append(PATH_TO_CTRL_PLANE)
+ sys.path.append(PATH_STL_API)
+ sys.path.append(PATH_STF_API)
+ import_module_list(NIGHTLY_MODULES)
+
+
+import_nightly_modules()
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/platform_cmd_link.py b/scripts/automation/regression/platform_cmd_link.py
new file mode 100755
index 00000000..275da656
--- /dev/null
+++ b/scripts/automation/regression/platform_cmd_link.py
@@ -0,0 +1,488 @@
+#!/router/bin/python
+
+from interfaces_e import IFType
+import CustomLogger
+import misc_methods
+import telnetlib
+import socket
+import time
+from collections import OrderedDict
+
+class CCommandCache(object):
+ def __init__(self):
+ self.__gen_clean_data_structure()
+
+ def __gen_clean_data_structure (self):
+ self.cache = {"IF" : OrderedDict(),
+ "CONF" : [],
+ "EXEC" : []}
+
+ def __list_append (self, dest_list, cmd):
+ if isinstance(cmd, list):
+ dest_list.extend( cmd )
+ else:
+ dest_list.append( cmd )
+
+ def add (self, cmd_type, cmd, interface = None):
+
+ if interface is not None: # this is an interface ("IF") config command
+ if interface in self.cache['IF']:
+ # interface commands already exists
+ self.__list_append(self.cache['IF'][interface], cmd)
+ else:
+                # no cached commands for this interface
+ self.cache['IF'][interface] = []
+ self.__list_append(self.cache['IF'][interface], cmd)
+ else: # this is either a CONF or EXEC command
+ self.__list_append(self.cache[cmd_type.upper()], cmd)
+
+ def dump_config (self):
+ # dump IF config:
+ print("configure terminal")
+ for intf, intf_cmd_list in self.cache['IF'].items():
+ print("interface {if_name}".format( if_name = intf ))
+ print('\n'.join(intf_cmd_list))
+
+ if self.cache['IF']:
+ # add 'exit' note only if if config actually took place
+ print('exit') # exit to global config mode
+
+ # dump global config
+ if self.cache['CONF']:
+ print('\n'.join(self.cache['CONF']))
+
+ # exit back to en mode
+ print("exit")
+
+ # dump exec config
+ if self.cache['EXEC']:
+ print('\n'.join(self.cache['EXEC']))
+
+ def get_config_list (self):
+ conf_list = []
+
+ conf_list.append("configure terminal")
+ for intf, intf_cmd_list in self.cache['IF'].items():
+ conf_list.append( "interface {if_name}".format( if_name = intf ) )
+ conf_list.extend( intf_cmd_list )
+ if len(conf_list)>1:
+ # add 'exit' note only if if config actually took place
+ conf_list.append("exit")
+
+ conf_list.extend( self.cache['CONF'] )
+ conf_list.append("exit")
+ conf_list.extend( self.cache['EXEC'] )
+
+
+ return conf_list
+
+ def clear_cache (self):
+ # clear all pointers to cache data (erase the data structure)
+ self.cache.clear()
+ # Re-initialize the cache
+ self.__gen_clean_data_structure()
+
+ pass
+
+
+class CCommandLink(object):
+ def __init__(self, silent_mode = False, debug_mode = False):
+ self.history = []
+ self.virtual_mode = True
+ self.silent_mode = silent_mode
+ self.telnet_con = None
+ self.debug_mode = debug_mode
+
+
+ def __transmit (self, cmd_list, **kwargs):
+ self.history.extend(cmd_list)
+ if not self.silent_mode:
+ print('\n'.join(cmd_list)) # prompting the pushed platform commands
+ if not self.virtual_mode:
+ # transmit the command to platform.
+ return self.telnet_con.write_ios_cmd(cmd_list, debug_mode = self.debug_mode, **kwargs)
+
+ def run_command (self, cmd_list, **kwargs):
+ response = ''
+ for cmd in cmd_list:
+
+ # check which type of cmd we handle
+ if isinstance(cmd, CCommandCache):
+ tmp_response = self.__transmit( cmd.get_config_list(), **kwargs ) # join the commands with new-line delimiter
+ else:
+ tmp_response = self.__transmit([cmd], **kwargs)
+ if not self.virtual_mode:
+ response += tmp_response
+ return response
+
+ def run_single_command (self, cmd, **kwargs):
+ return self.run_command([cmd], **kwargs)
+
+ def get_history (self, as_string = False):
+ if as_string:
+ return '\n'.join(self.history)
+ else:
+ return self.history
+
+ def clear_history (self):
+ # clear all pointers to history data (erase the data structure)
+ del self.history[:]
+        # Re-initialize the history with a clear one
+ self.history = []
+
+ def launch_platform_connectivity (self, device_config_obj):
+ connection_info = device_config_obj.get_platform_connection_data()
+ self.telnet_con = CIosTelnet( **connection_info )
+ self.virtual_mode = False # if physical connectivity was successful, toggle virtual mode off
+
+ def close_platform_connection(self):
+ if self.telnet_con is not None:
+ self.telnet_con.close()
+
+
+
+class CDeviceCfg(object):
+ def __init__(self, cfg_yaml_path = None):
+ if cfg_yaml_path is not None:
+ (self.platform_cfg, self.tftp_cfg) = misc_methods.load_complete_config_file(cfg_yaml_path)[1:3]
+
+ self.interfaces_cfg = self.platform_cfg['interfaces'] # extract only the router interface configuration
+
+ def set_platform_config(self, config_dict):
+ self.platform_cfg = config_dict
+ self.interfaces_cfg = self.platform_cfg['interfaces']
+
+ def set_tftp_config(self, tftp_cfg):
+ self.tftp_cfg = tftp_cfg
+
+ def get_interfaces_cfg (self):
+ return self.interfaces_cfg
+
+ def get_ip_address (self):
+ return self.__get_attr('ip_address')
+
+ def get_line_password (self):
+ return self.__get_attr('line_pswd')
+
+ def get_en_password (self):
+ return self.__get_attr('en_pswd')
+
+ def get_mgmt_interface (self):
+ return self.__get_attr('mgmt_interface')
+
+ def get_platform_connection_data (self):
+ return { 'host' : self.get_ip_address(), 'line_pass' : self.get_line_password(), 'en_pass' : self.get_en_password() }
+
+ def get_tftp_info (self):
+ return self.tftp_cfg
+
+ def get_image_name (self):
+ return self.__get_attr('image')
+
+ def __get_attr (self, attr):
+ return self.platform_cfg[attr]
+
+ def dump_config (self):
+ import yaml
+ print(yaml.dump(self.interfaces_cfg, default_flow_style=False))
+
+class CIfObj(object):
+ _obj_id = 0
+
+ def __init__(self, if_name, ipv4_addr, ipv6_addr, src_mac_addr, dest_mac_addr, dest_ipv6_mac_addr, if_type):
+ self.__get_and_increment_id()
+ self.if_name = if_name
+ self.if_type = if_type
+ self.src_mac_addr = src_mac_addr
+ self.dest_mac_addr = dest_mac_addr
+ self.dest_ipv6_mac_addr = dest_ipv6_mac_addr
+ self.ipv4_addr = ipv4_addr
+ self.ipv6_addr = ipv6_addr
+ self.pair_parent = None # a pointer to CDualIfObj which holds this interface and its pair-complement
+
+ def __get_and_increment_id (self):
+ self._obj_id = CIfObj._obj_id
+ CIfObj._obj_id += 1
+
+ def get_name (self):
+ return self.if_name
+
+ def get_src_mac_addr (self):
+ return self.src_mac_addr
+
+ def get_dest_mac (self):
+ return self.dest_mac_addr
+
+ def get_ipv6_dest_mac (self):
+ if self.dest_mac_addr != 0:
+ return self.dest_mac_addr
+ else:
+ return self.dest_ipv6_mac_addr
+
+ def get_id (self):
+ return self._obj_id
+
+ def get_if_type (self):
+ return self.if_type
+
+ def get_ipv4_addr (self):
+ return self.ipv4_addr
+
+ def get_ipv6_addr (self):
+ return self.ipv6_addr
+
+ def set_ipv4_addr (self, addr):
+ self.ipv4_addr = addr
+
+ def set_ipv6_addr (self, addr):
+ self.ipv6_addr = addr
+
+ def set_pair_parent (self, dual_if_obj):
+ self.pair_parent = dual_if_obj
+
+ def get_pair_parent (self):
+ return self.pair_parent
+
+ def is_client (self):
+ return (self.if_type == IFType.Client)
+
+ def is_server (self):
+ return (self.if_type == IFType.Server)
+
+ pass
+
+
+class CDualIfObj(object):
+ _obj_id = 0
+
+ def __init__(self, vrf_name, client_if_obj, server_if_obj):
+ self.__get_and_increment_id()
+ self.vrf_name = vrf_name
+ self.client_if = client_if_obj
+ self.server_if = server_if_obj
+
+ # link if_objects to its parent dual_if
+ self.client_if.set_pair_parent(self)
+ self.server_if.set_pair_parent(self)
+ pass
+
+ def __get_and_increment_id (self):
+ self._obj_id = CDualIfObj._obj_id
+ CDualIfObj._obj_id += 1
+
+ def get_id (self):
+ return self._obj_id
+
+ def get_vrf_name (self):
+ return self.vrf_name
+
+ def is_duplicated (self):
+ return self.vrf_name != None
+
+class CIfManager(object):
+ _ipv4_gen = misc_methods.get_network_addr()
+ _ipv6_gen = misc_methods.get_network_addr(ip_type = 'ipv6')
+
+ def __init__(self):
+ self.interfarces = OrderedDict()
+ self.dual_intf = []
+ self.full_device_cfg = None
+
+ def __add_if_to_manager (self, if_obj):
+ self.interfarces[if_obj.get_name()] = if_obj
+
+ def __add_dual_if_to_manager (self, dual_if_obj):
+ self.dual_intf.append(dual_if_obj)
+
+ def __get_ipv4_net_client_addr(self, ipv4_addr):
+ return misc_methods.get_single_net_client_addr (ipv4_addr)
+
+ def __get_ipv6_net_client_addr(self, ipv6_addr):
+ return misc_methods.get_single_net_client_addr (ipv6_addr, {'7' : 1}, ip_type = 'ipv6')
+
+ def load_config (self, device_config_obj):
+ self.full_device_cfg = device_config_obj
+ # first, erase all current config
+ self.interfarces.clear()
+ del self.dual_intf[:]
+
+        # then, load the configuration
+ intf_config = device_config_obj.get_interfaces_cfg()
+
+ # finally, parse the information into data-structures
+ for intf_pair in intf_config:
+ # generate network addresses for client side, and initialize client if object
+ tmp_ipv4_addr = self.__get_ipv4_net_client_addr (next(CIfManager._ipv4_gen)[0])
+ tmp_ipv6_addr = self.__get_ipv6_net_client_addr (next(CIfManager._ipv6_gen))
+
+ if 'dest_mac_addr' in intf_pair['client']:
+ client_dest_mac = intf_pair['client']['dest_mac_addr']
+ else:
+ client_dest_mac = 0
+ if 'dest_ipv6_mac_addr' in intf_pair['client']:
+ client_dest_ipv6_mac = intf_pair['client']['dest_ipv6_mac_addr']
+ else:
+ client_dest_ipv6_mac = 0
+ client_obj = CIfObj(if_name = intf_pair['client']['name'],
+ ipv4_addr = tmp_ipv4_addr,
+ ipv6_addr = tmp_ipv6_addr,
+ src_mac_addr = intf_pair['client']['src_mac_addr'],
+ dest_mac_addr = client_dest_mac,
+ dest_ipv6_mac_addr = client_dest_ipv6_mac,
+ if_type = IFType.Client)
+
+ # generate network addresses for server side, and initialize server if object
+ tmp_ipv4_addr = self.__get_ipv4_net_client_addr (next(CIfManager._ipv4_gen)[0])
+ tmp_ipv6_addr = self.__get_ipv6_net_client_addr (next(CIfManager._ipv6_gen))
+
+ if 'dest_mac_addr' in intf_pair['server']:
+ server_dest_mac = intf_pair['server']['dest_mac_addr']
+ else:
+ server_dest_mac = 0
+ if 'dest_ipv6_mac_addr' in intf_pair['server']:
+ server_dest_ipv6_mac = intf_pair['server']['dest_ipv6_mac_addr']
+ else:
+ server_dest_ipv6_mac = 0
+ server_obj = CIfObj(if_name = intf_pair['server']['name'],
+ ipv4_addr = tmp_ipv4_addr,
+ ipv6_addr = tmp_ipv6_addr,
+ src_mac_addr = intf_pair['server']['src_mac_addr'],
+ dest_mac_addr = server_dest_mac,
+ dest_ipv6_mac_addr = server_dest_ipv6_mac,
+ if_type = IFType.Server)
+
+ dual_intf_obj = CDualIfObj(vrf_name = intf_pair['vrf_name'],
+ client_if_obj = client_obj,
+ server_if_obj = server_obj)
+
+ # update single interfaces pointers
+ client_obj.set_pair_parent(dual_intf_obj)
+ server_obj.set_pair_parent(dual_intf_obj)
+
+ # finally, update the data-structures with generated objects
+ self.__add_if_to_manager(client_obj)
+ self.__add_if_to_manager(server_obj)
+ self.__add_dual_if_to_manager(dual_intf_obj)
+
+
+ def get_if_list (self, if_type = IFType.All, is_duplicated = None):
+ result = []
+ for if_name,if_obj in self.interfarces.items():
+ if (if_type == IFType.All) or ( if_obj.get_if_type() == if_type) :
+ if (is_duplicated is None) or (if_obj.get_pair_parent().is_duplicated() == is_duplicated):
+ # append this if_obj only if matches both IFType and is_duplicated conditions
+ result.append(if_obj)
+ return result
+
+ def get_duplicated_if (self):
+ result = []
+ for dual_if_obj in self.dual_intf:
+ if dual_if_obj.get_vrf_name() is not None :
+ result.extend( (dual_if_obj.client_if, dual_if_obj.server_if) )
+ return result
+
+ def get_dual_if_list (self, is_duplicated = None):
+ result = []
+ for dual_if in self.dual_intf:
+ if (is_duplicated is None) or (dual_if.is_duplicated() == is_duplicated):
+ result.append(dual_if)
+ return result
+
+ def dump_if_config (self):
+ if self.full_device_cfg is None:
+ print("Device configuration isn't loaded.\nPlease load config and try again.")
+ else:
+ self.full_device_cfg.dump_config()
+
+
+class AuthError(Exception):
+ pass
+
+class CIosTelnet(telnetlib.Telnet):
+ AuthError = AuthError
+
+ # wrapper for compatibility with Python2/3, convert input to bytes
+ def str_to_bytes_wrapper(self, func, text, *args, **kwargs):
+ if type(text) in (list, tuple):
+ text = [elem.encode('ascii') if type(elem) is str else elem for elem in text]
+ res = func(self, text.encode('ascii') if type(text) is str else text, *args, **kwargs)
+ return res.decode() if type(res) is bytes else res
+
+ def read_until(self, *args, **kwargs):
+ return self.str_to_bytes_wrapper(telnetlib.Telnet.read_until, *args, **kwargs)
+
+ def write(self, *args, **kwargs):
+ return self.str_to_bytes_wrapper(telnetlib.Telnet.write, *args, **kwargs)
+
+ def expect(self, *args, **kwargs):
+ res = self.str_to_bytes_wrapper(telnetlib.Telnet.expect, *args, **kwargs)
+ return [elem.decode() if type(elem) is bytes else elem for elem in res]
+
+ def __init__ (self, host, line_pass, en_pass, port = 23, str_wait = "#"):
+ telnetlib.Telnet.__init__(self)
+ self.host = host
+ self.port = port
+ self.line_passwd = line_pass
+ self.enable_passwd = en_pass
+ self.pr = str_wait
+# self.set_debuglevel (1)
+ try:
+ self.open(self.host,self.port, timeout = 5)
+ self.read_until("word:",1)
+ self.write("{line_pass}\n".format(line_pass = self.line_passwd) )
+ res = self.read_until(">",1)
+ if 'Password' in res:
+ raise AuthError('Invalid line password was provided')
+ self.write("enable 15\n")
+ self.read_until("d:",1)
+ self.write("{en_pass}\n".format(en_pass = self.enable_passwd) )
+ res = self.read_until(self.pr,1)
+ if 'Password' in res:
+ raise AuthError('Invalid en password was provided')
+ self.write_ios_cmd(['terminal length 0'])
+
+ except socket.timeout:
+ raise socket.timeout('A timeout error has occured.\nCheck platform connectivity or the hostname defined in the config file')
+ except Exception as inst:
+ raise
+
+ def write_ios_cmd (self, cmd_list, result_from = 0, timeout = 60, **kwargs):
+ assert (isinstance (cmd_list, list) == True)
+ self.read_until(self.pr, timeout = 1)
+
+ res = ''
+ if 'read_until' in kwargs:
+ wf = kwargs['read_until']
+ else:
+ wf = self.pr
+
+ for idx, cmd in enumerate(cmd_list):
+ start_time = time.time()
+ self.write(cmd+'\r\n')
+ if kwargs.get('debug_mode'):
+ print('-->\n%s' % cmd)
+ if type(wf) is list:
+ output = self.expect(wf, timeout)[2]
+ else:
+ output = self.read_until(wf, timeout)
+ if idx >= result_from:
+ res += output
+ if kwargs.get('debug_mode'):
+ print('<-- (%ss)\n%s' % (round(time.time() - start_time, 2), output))
+ if time.time() - start_time > timeout - 1:
+ raise Exception('Timeout while performing telnet command: %s' % cmd)
+ if 'Invalid' in res:
+ print('Warning: telnet command probably failed.\nCommand: %s\nResponse: %s' % (cmd_list, res))
+# return res.split('\r\n')
+        return res # return the received response as a string, each line is separated by '\r\n'.
+
+
+if __name__ == "__main__":
+# dev_cfg = CDeviceCfg('config/config.yaml')
+# print dev_cfg.get_platform_connection_data()
+# telnet = CIosTelnet( **(dev_cfg.get_platform_connection_data() ) )
+
+# if_mng = CIfManager()
+# if_mng.load_config(dev_cfg)
+# if_mng.dump_config()
+ pass
diff --git a/scripts/automation/regression/reports/.keep b/scripts/automation/regression/reports/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/regression/reports/.keep
diff --git a/scripts/automation/regression/setups/dave/benchmark.yaml b/scripts/automation/regression/setups/dave/benchmark.yaml
new file mode 100755
index 00000000..aac2d805
--- /dev/null
+++ b/scripts/automation/regression/setups/dave/benchmark.yaml
@@ -0,0 +1,118 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+test_nbar_simple :
+ multiplier : 0.5
+ cores : 4
+ exp_gbps : 0.5
+ cpu_to_core_ratio : 37270000
+ cpu2core_custom_dev: YES
+ cpu2core_dev : 0.07
+ exp_max_latency : 1000
+
+ nbar_classification:
+ http : 29.95
+ rtp_audio : 20.75
+ oracle_sqlnet : 11.09
+ rtp : 10.9
+ exchange : 8.16
+ citrix : 5.54
+ rtsp : 2.85
+ sctp : 3.83
+ ssl : 2.41
+ sip : 0.09
+ dns : 1.92
+ smtp : 0.56
+ pop3 : 0.36
+ unknown : 3.15
+
+test_rx_check :
+ multiplier : 25
+ cores : 4
+ rx_sample_rate : 128
+ exp_gbps : 0.5
+ cpu_to_core_ratio : 37270000
+ exp_bw : 1
+ exp_latency : 1
+
+test_nat_simple :
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ nat_dict :
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+ multiplier : 400
+ cpu_to_core_ratio : 37270000
+ cores : 4
+ exp_bw : 1
+ exp_latency : 1
+ allow_timeout_dev : YES
+
+test_nat_learning :
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 400
+ cores : 4
+ nat_opened : 100000
+ cpu_to_core_ratio : 37270000
+ exp_bw : 1
+ exp_latency : 1
+ allow_timeout_dev : YES
+
+test_routing_imix_64 :
+ multiplier : 2500
+ cores : 4
+ cpu_to_core_ratio : 8900
+ exp_latency : 1
+
+test_routing_imix :
+ multiplier : 70
+ cores : 2
+ cpu_to_core_ratio : 8900
+ exp_latency : 1
+
+test_static_routing_imix :
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 70
+ cores : 2
+ cpu_to_core_ratio : 3766666
+ exp_latency : 1
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 36
+ cores : 1
+ cpu_to_core_ratio : 3766666
+ exp_latency : 1
+
+test_ipv6_simple :
+ multiplier : 36
+ cores : 4
+ cpu_to_core_ratio : 30070000
+ cpu2core_custom_dev: YES
+ cpu2core_dev : 0.07
+
+
+
diff --git a/scripts/automation/regression/setups/dave/config.yaml b/scripts/automation/regression/setups/dave/config.yaml
new file mode 100755
index 00000000..8aa763bc
--- /dev/null
+++ b/scripts/automation/regression/setups/dave/config.yaml
@@ -0,0 +1,94 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * virtual - virtual OS (accept low CPU utilization in tests)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : cpp-rtp-trex-01
+ cores : 4
+
+router:
+ model : ESP100
+ hostname : cpp-rtp-ts-15
+ ip_address : 172.18.4.34
+ port : 2054
+ image : trex_regression_v155_315.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : dummy
+ clean_config : dummy
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : TenGigabitEthernet0/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+ - client :
+ name : TenGigabitEthernet0/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+ - client :
+ name : TenGigabitEthernet1/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+ - client :
+ name : TenGigabitEthernet1/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.128.23
+ root_dir : /auto/avc-devtest/images/
+ images_path : /images/RP1/
diff --git a/scripts/automation/regression/setups/dummy/config.yaml b/scripts/automation/regression/setups/dummy/config.yaml
new file mode 100644
index 00000000..16e3b0cc
--- /dev/null
+++ b/scripts/automation/regression/setups/dummy/config.yaml
@@ -0,0 +1,11 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+# dummy setup, all Trex tests are expected to be skipped
+
+trex:
+ hostname : csi-trex-04
+ cores : 2
+ modes : [loopback, virtual, dummy_mode]
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
new file mode 100644
index 00000000..41688906
--- /dev/null
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -0,0 +1,298 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 55
+ cores : 1
+ bw_per_core : 647.305
+
+
+test_routing_imix:
+ multiplier : 32
+ cores : 2
+ bw_per_core : 39.131
+
+
+test_routing_imix_64:
+ multiplier : 2500
+ cores : 4
+ bw_per_core : 7.427
+
+
+test_static_routing_imix:
+ stat_route_dict : *stat_route_dict
+ multiplier : 32
+ cores : 2
+ bw_per_core : 39.039
+
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict : *stat_route_dict
+ multiplier : 16
+ cores : 1
+ bw_per_core : 38.796
+
+
+test_ipv6_simple:
+ multiplier : 32
+ cores : 4
+ bw_per_core : 19.283
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 10000
+ cores : 1
+ allow_timeout_dev : True
+ bw_per_core : 45.304
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning:
+ << : *test_nat_simple
+ nat_opened : 100000
+
+
+test_nbar_simple:
+ multiplier : 20
+ cores : 2
+ bw_per_core : 18.243
+ nbar_classification:
+ http : 30.41
+ rtp_audio : 21.22
+ rtp : 11.4
+ oracle_sqlnet : 11.3
+ exchange : 10.95
+ citrix : 5.65
+ rtsp : 2.67
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ sctp : 0.09
+ sip : 0.09
+ ssl : 0.06
+ unknown : 3.2
+
+
+test_rx_check_http: &rx_http
+ multiplier : 40000
+ cores : 2
+ rx_sample_rate : 32
+ error_tolerance : 0.01
+ bw_per_core : 38.071
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 46.733
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 25
+ cores : 4
+ rx_sample_rate : 32
+ error_tolerance : 0.01
+ bw_per_core : 16.915
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 20.323
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
+
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 11.5
+ max: 13.1
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 22.0
+ max: 25.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.5
+ max: 11.5
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 9.7
+ max: 12.5
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 19.0
+ max: 22.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 4
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 8.5
+ max: 10.5
+
diff --git a/scripts/automation/regression/setups/kiwi02/config.yaml b/scripts/automation/regression/setups/kiwi02/config.yaml
new file mode 100644
index 00000000..d6c13a22
--- /dev/null
+++ b/scripts/automation/regression/setups/kiwi02/config.yaml
@@ -0,0 +1,95 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : 10.56.217.210 #10.56.192.189
+ cores : 4
+
+router:
+ model : ESP100
+ hostname : csi-mcp-asr1k-40
+ ip_address : 10.56.192.57
+ image : BLD_V155_2_S_XE315_THROTTLE_LATEST_20150424_100040-std.bin # is in harddisk of router
+ #image : asr1000rp2-adventerprisek9.2014-11-10_18.33_etis.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : /tmp/asr1001_TRex_clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : TenGigabitEthernet0/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name : duplicate
+ - client :
+ name : TenGigabitEthernet0/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet0/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name : duplicate
+ - client :
+ name : TenGigabitEthernet1/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+ - client :
+ name : TenGigabitEthernet1/2/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : TenGigabitEthernet1/3/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name :
+
+
+tftp:
+ hostname : kiwi02_tftp_server
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : hhaim/
diff --git a/scripts/automation/regression/setups/trex-dan/benchmark.yaml b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
new file mode 100644
index 00000000..de56089b
--- /dev/null
+++ b/scripts/automation/regression/setups/trex-dan/benchmark.yaml
@@ -0,0 +1,253 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 67.030
+
+
+test_routing_imix:
+ multiplier : 1
+ cores : 1
+ bw_per_core : 3.979
+
+
+test_routing_imix_64:
+ multiplier : 150
+ cores : 4
+ bw_per_core : .681
+
+
+test_static_routing_imix:
+ stat_route_dict : *stat_route_dict
+ multiplier : 0.7
+ cores : 1
+ bw_per_core : 3.837
+
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict : *stat_route_dict
+ multiplier : 0.8
+ cores : 1
+ bw_per_core : 3.939
+
+
+test_ipv6_simple:
+ multiplier : 1.5
+ cores : 2
+ bw_per_core : 4.719
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 550
+ cores : 1
+ allow_timeout_dev : True
+ bw_per_core : 7.465
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning:
+ << : *test_nat_simple
+ bw_per_core : 7.377
+ nat_opened : 40000
+
+
+test_nbar_simple:
+ multiplier : 1.5
+ cores : 2
+ bw_per_core : 4.465
+ nbar_classification:
+ http : 30.3
+ rtp_audio : 21.06
+ oracle_sqlnet : 11.25
+ rtp : 11.1
+ exchange : 10.16
+ citrix : 5.6
+ rtsp : 2.84
+ sctp : 0.65
+ ssl : 0.8
+ sip : 0.09
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ unknown : 3.19
+
+
+test_rx_check_http: &rx_http
+ multiplier : 2200
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 8.142
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 8.591
+
+test_rx_check_http_negative:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ bw_per_core : 8.037
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 1.7
+ cores : 2
+ rx_sample_rate : 16
+ bw_per_core : 4.473
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 4.773
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex-dan/config.yaml b/scripts/automation/regression/setups/trex-dan/config.yaml
new file mode 100644
index 00000000..fbed3cb7
--- /dev/null
+++ b/scripts/automation/regression/setups/trex-dan/config.yaml
@@ -0,0 +1,68 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : trex-dan
+ cores : 2
+ modes : [VM]
+
+router:
+ model : 1RU
+ hostname : ASR1001_T-Rex
+ ip_address : 10.56.199.247
+ image : asr1001-universalk9.BLD_V155_1_S_XE314_THROTTLE_LATEST_20141112_090734-std.bin
+ #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150121_110036-std.bin
+ #image : asr1001-universalk9.BLD_V155_2_S_XE315_THROTTLE_LATEST_20150324_100047-std.bin
+ line_password : lab
+ en_password : lab
+ mgmt_interface : GigabitEthernet0/0/0
+ clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : GigabitEthernet0/0/1
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f20.e6ce
+ server :
+ name : GigabitEthernet0/0/2
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f20.e6cf
+ vrf_name : null
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.128.23
+ root_dir : /auto/avc-devtest/
+ images_path : /images/1RU/
diff --git a/scripts/automation/regression/setups/trex04/benchmark.yaml b/scripts/automation/regression/setups/trex04/benchmark.yaml
new file mode 100644
index 00000000..b366b3fb
--- /dev/null
+++ b/scripts/automation/regression/setups/trex04/benchmark.yaml
@@ -0,0 +1,155 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 106.652
+
+
+test_routing_imix:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 11.577
+
+
+test_routing_imix_64:
+ multiplier : 28
+ cores : 1
+ bw_per_core : 2.030
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.8
+ cores : 1
+ bw_per_core : 13.742
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 4, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex04/config.yaml b/scripts/automation/regression/setups/trex04/config.yaml
new file mode 100644
index 00000000..bf1c68e6
--- /dev/null
+++ b/scripts/automation/regression/setups/trex04/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the Trex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc. have their limitations in tests)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-04
+ cores : 1
+ modes : [loopback, virt_nics, VM]
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
new file mode 100644
index 00000000..0dc340b0
--- /dev/null
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -0,0 +1,244 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ rtp : 32.57
+ http : 30.25
+ oracle_sqlnet : 11.23
+ exchange : 10.80
+ citrix : 5.62
+ rtsp : 2.84
+ dns : 1.95
+ smtp : 0.57
+ pop3 : 0.36
+ ssl : 0.17
+ sctp : 0.13
+ sip : 0.09
+ unknown : 3.41
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex07/config.yaml b/scripts/automation/regression/setups/trex07/config.yaml
new file mode 100644
index 00000000..db6e9bf8
--- /dev/null
+++ b/scripts/automation/regression/setups/trex07/config.yaml
@@ -0,0 +1,66 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-07
+ cores : 4
+
+router:
+ model : ASR1001x
+ hostname : csi-asr-01
+ ip_address : 10.56.216.120
+ image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : Te0/0/0
+ src_mac_addr : 0000.0001.0002
+ dest_mac_addr : 0000.0001.0001
+ server :
+ name : Te0/0/1
+ src_mac_addr : 0000.0002.0002
+ dest_mac_addr : 0000.0002.0001
+ vrf_name : null
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : /asr1001x/
diff --git a/scripts/automation/regression/setups/trex08/benchmark.yaml b/scripts/automation/regression/setups/trex08/benchmark.yaml
new file mode 100644
index 00000000..8f83e8f9
--- /dev/null
+++ b/scripts/automation/regression/setups/trex08/benchmark.yaml
@@ -0,0 +1,181 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 150
+ cores : 2
+ bw_per_core : 962.464
+
+
+test_routing_imix:
+ multiplier : 80
+ cores : 4
+ bw_per_core : 55.130
+
+
+test_routing_imix_64:
+ multiplier : 8000
+ cores : 7
+ bw_per_core : 11.699
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 70
+ cores : 3
+ bw_per_core : 50.561
+
+
+test_ipv6_simple:
+ multiplier : 80
+ cores : 7
+ bw_per_core : 25.948
+
+
+test_rx_check_http: &rx_http
+ multiplier : 99000
+ cores : 3
+ rx_sample_rate : 128
+ bw_per_core : 49.464
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 80
+ cores : 7
+ rx_sample_rate : 128
+ bw_per_core : 20.871
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex08/config.yaml b/scripts/automation/regression/setups/trex08/config.yaml
new file mode 100644
index 00000000..affe9bc9
--- /dev/null
+++ b/scripts/automation/regression/setups/trex08/config.yaml
@@ -0,0 +1,40 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-08
+ cores : 7
+ modes : ['loopback']
+
diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml
new file mode 100644
index 00000000..d1f5f56c
--- /dev/null
+++ b/scripts/automation/regression/setups/trex09/benchmark.yaml
@@ -0,0 +1,234 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 110
+ cores : 1
+ bw_per_core : 767.198
+
+
+test_routing_imix:
+ multiplier : 64
+ cores : 2
+ bw_per_core : 35.889
+
+
+test_routing_imix_64:
+ multiplier : 5000
+ cores : 2
+ bw_per_core : 10.672
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 32
+ cores : 1
+ bw_per_core : 52.738
+
+
+test_ipv6_simple:
+ multiplier : 64
+ cores : 3
+ bw_per_core : 22.808
+
+
+test_rx_check_http: &rx_http
+ multiplier : 90000
+ cores : 2
+ rx_sample_rate : 32
+ bw_per_core : 46.075
+
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 50
+ cores : 3
+ rx_sample_rate : 32
+ bw_per_core : 20.469
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
+# performance tests
+
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 16.2
+ max: 17.3
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 29.5
+ max: 31.2
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 12.9
+ max: 14.5
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 2
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 15.2
+ max: 16.3
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 2
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 28.8
+ max: 29.5
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 2
+ mult : "90%"
+ mpps_per_core_golden :
+ min: 13.0
+ max: 13.8
+
diff --git a/scripts/automation/regression/setups/trex09/config.yaml b/scripts/automation/regression/setups/trex09/config.yaml
new file mode 100644
index 00000000..724de6e7
--- /dev/null
+++ b/scripts/automation/regression/setups/trex09/config.yaml
@@ -0,0 +1,38 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * virtual - virtual OS (accept low CPU utilization in tests)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-09
+ cores : 2
+ modes : ['loopback']
diff --git a/scripts/automation/regression/setups/trex10/benchmark.yaml b/scripts/automation/regression/setups/trex10/benchmark.yaml
new file mode 100644
index 00000000..fb900cbb
--- /dev/null
+++ b/scripts/automation/regression/setups/trex10/benchmark.yaml
@@ -0,0 +1,60 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+
+test_rx_check :
+ multiplier : 0.8
+ cores : 1
+ rx_sample_rate : 128
+ exp_gbps : 0.5
+ cpu_to_core_ratio : 37270000
+ exp_bw : 1
+ exp_latency : 1
+
+
+test_routing_imix_64 :
+ multiplier : 37
+ cores : 1
+ cpu_to_core_ratio : 280
+ exp_latency : 1
+
+test_routing_imix :
+ multiplier : 0.8
+ cores : 1
+ cpu_to_core_ratio : 1800
+ exp_latency : 1
+
+test_static_routing_imix :
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 0.8
+ cores : 1
+ cpu_to_core_ratio : 1800
+ exp_latency : 1
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict :
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+ multiplier : 0.8
+ cores : 1
+ cpu_to_core_ratio : 1800
+ exp_latency : 1
+
+test_ipv6_simple :
+ multiplier : 0.5
+ cores : 1
+ cpu_to_core_ratio : 30070000
+ cpu2core_custom_dev: YES
+ cpu2core_dev : 0.07
+
+
+
diff --git a/scripts/automation/regression/setups/trex10/config.yaml b/scripts/automation/regression/setups/trex10/config.yaml
new file mode 100644
index 00000000..8b031c88
--- /dev/null
+++ b/scripts/automation/regression/setups/trex10/config.yaml
@@ -0,0 +1,38 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency            - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * virtual - virtual OS (accept low CPU utilization in tests)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-10
+ cores : 2
+ modes : [loopback, virtual]
diff --git a/scripts/automation/regression/setups/trex11/benchmark.yaml b/scripts/automation/regression/setups/trex11/benchmark.yaml
new file mode 100644
index 00000000..b366b3fb
--- /dev/null
+++ b/scripts/automation/regression/setups/trex11/benchmark.yaml
@@ -0,0 +1,155 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 106.652
+
+
+test_routing_imix:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 11.577
+
+
+test_routing_imix_64:
+ multiplier : 28
+ cores : 1
+ bw_per_core : 2.030
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.8
+ cores : 1
+ bw_per_core : 13.742
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 4, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex11/config.yaml b/scripts/automation/regression/setups/trex11/config.yaml
new file mode 100644
index 00000000..782b7542
--- /dev/null
+++ b/scripts/automation/regression/setups/trex11/config.yaml
@@ -0,0 +1,38 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * virtual - virtual OS (accept low CPU utilization in tests)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-11
+ cores : 1
+ modes : ['loopback', 'VM', 'virt_nics']
diff --git a/scripts/automation/regression/setups/trex12/benchmark.yaml b/scripts/automation/regression/setups/trex12/benchmark.yaml
new file mode 100644
index 00000000..87bd3114
--- /dev/null
+++ b/scripts/automation/regression/setups/trex12/benchmark.yaml
@@ -0,0 +1,182 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 14
+ cores : 1
+ bw_per_core : 689.664
+
+
+test_routing_imix:
+ multiplier : 8
+ cores : 1
+ bw_per_core : 45.422
+
+
+test_routing_imix_64:
+ multiplier : 2200
+ cores : 1
+ bw_per_core : 11.655
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 4
+ cores : 1
+ bw_per_core : 45.294
+
+
+test_ipv6_simple:
+ multiplier : 8
+ cores : 1
+ bw_per_core : 29.332
+
+
+test_rx_check_http: &rx_http
+ multiplier : 11000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 47.813
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 55.607
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 8
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 24.203
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 28.867
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex12/config.yaml b/scripts/automation/regression/setups/trex12/config.yaml
new file mode 100644
index 00000000..f8c37c6b
--- /dev/null
+++ b/scripts/automation/regression/setups/trex12/config.yaml
@@ -0,0 +1,40 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-12
+ cores : 1
+ modes : ['loopback', '1G', 'VM']
+
diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml
new file mode 100644
index 00000000..04f13e79
--- /dev/null
+++ b/scripts/automation/regression/setups/trex14/benchmark.yaml
@@ -0,0 +1,245 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ http : 32.58
+ rtp-audio : 21.21
+ oracle_sqlnet : 11.41
+ exchange : 11.22
+ rtp : 11.2
+ citrix : 5.65
+ rtsp : 2.87
+ dns : 1.96
+ smtp : 0.57
+ pop3 : 0.37
+ ssl : 0.28
+ sctp : 0.13
+ sip : 0.09
+ unknown : 0.45
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex14/config.yaml b/scripts/automation/regression/setups/trex14/config.yaml
new file mode 100644
index 00000000..0fd6b70e
--- /dev/null
+++ b/scripts/automation/regression/setups/trex14/config.yaml
@@ -0,0 +1,67 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-14
+ cores : 4
+ modes : []
+
+router:
+ model : ASR1001x
+ hostname : csi-asr-01
+ ip_address : 10.56.216.103
+ image : asr1001x-universalk9.03.17.00.S.156-1.S-std.SPA.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : Te0/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : Te0/0/1
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name : null
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : /asr1001x/
diff --git a/scripts/automation/regression/setups/trex15/benchmark.yaml b/scripts/automation/regression/setups/trex15/benchmark.yaml
new file mode 100644
index 00000000..b366b3fb
--- /dev/null
+++ b/scripts/automation/regression/setups/trex15/benchmark.yaml
@@ -0,0 +1,155 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 106.652
+
+
+test_routing_imix:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 11.577
+
+
+test_routing_imix_64:
+ multiplier : 28
+ cores : 1
+ bw_per_core : 2.030
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.8
+ cores : 1
+ bw_per_core : 13.742
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 4, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex15/config.yaml b/scripts/automation/regression/setups/trex15/config.yaml
new file mode 100644
index 00000000..c5fc3b22
--- /dev/null
+++ b/scripts/automation/regression/setups/trex15/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the Trex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc. have their limitations in tests)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-15
+ cores : 1
+ modes : [loopback, virt_nics, VM]
diff --git a/scripts/automation/regression/setups/trex17/benchmark.yaml b/scripts/automation/regression/setups/trex17/benchmark.yaml
new file mode 100644
index 00000000..8bc9d29c
--- /dev/null
+++ b/scripts/automation/regression/setups/trex17/benchmark.yaml
@@ -0,0 +1,155 @@
+################################################################
+#### TRex benchmark configuration file ####
+################################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 66.489
+
+
+test_routing_imix:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 5.530
+
+
+test_routing_imix_64:
+ multiplier : 28
+ cores : 1
+ bw_per_core : 0.859
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 9.635
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex17/config.yaml b/scripts/automation/regression/setups/trex17/config.yaml
new file mode 100644
index 00000000..7ad6a20a
--- /dev/null
+++ b/scripts/automation/regression/setups/trex17/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the Trex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc. have their limitations in tests)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-17
+ cores : 1
+ modes : [loopback, virt_nics, VM]
diff --git a/scripts/automation/regression/setups/trex24/benchmark.yaml b/scripts/automation/regression/setups/trex24/benchmark.yaml
new file mode 100644
index 00000000..ddedd844
--- /dev/null
+++ b/scripts/automation/regression/setups/trex24/benchmark.yaml
@@ -0,0 +1,155 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 67.030
+
+
+test_routing_imix:
+ multiplier : 1
+ cores : 1
+ bw_per_core : 3.979
+
+
+test_routing_imix_64:
+ multiplier : 50
+ cores : 1
+ bw_per_core : .681
+
+
+test_static_routing_imix_asymmetric:
+ multiplier : 0.5
+ cores : 1
+ bw_per_core : 13.742
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 5, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex24/config.yaml b/scripts/automation/regression/setups/trex24/config.yaml
new file mode 100644
index 00000000..f4eecdf9
--- /dev/null
+++ b/scripts/automation/regression/setups/trex24/config.yaml
@@ -0,0 +1,39 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-24
+ cores : 1
+ modes : [VM, virt_nics, loopback]
+
diff --git a/scripts/automation/regression/setups/trex25/benchmark.yaml b/scripts/automation/regression/setups/trex25/benchmark.yaml
new file mode 100644
index 00000000..ccbdf6f5
--- /dev/null
+++ b/scripts/automation/regression/setups/trex25/benchmark.yaml
@@ -0,0 +1,254 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 6
+ cores : 1
+ bw_per_core : 443.970
+
+
+test_routing_imix:
+ multiplier : 4
+ cores : 1
+ bw_per_core : 26.509
+
+
+test_routing_imix_64:
+ multiplier : 600
+ cores : 1
+ bw_per_core : 6.391
+
+
+test_static_routing_imix:
+ stat_route_dict : *stat_route_dict
+ multiplier : 2.8
+ cores : 1
+ bw_per_core : 24.510
+
+
+
+test_static_routing_imix_asymmetric:
+ stat_route_dict : *stat_route_dict
+ multiplier : 3.2
+ cores : 1
+ bw_per_core : 28.229
+
+
+test_ipv6_simple:
+ multiplier : 6
+ cores : 1
+ bw_per_core : 19.185
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 2200
+ cores : 1
+ allow_timeout_dev : True
+ bw_per_core : 32.171
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning:
+ << : *test_nat_simple
+ nat_opened : 40000
+
+
+test_nbar_simple:
+ multiplier : 6
+ cores : 1
+ bw_per_core : 16.645
+ nbar_classification:
+ http : 24.55
+ rtp : 19.15
+ sqlnet : 10.38
+ secure-http : 5.11
+ citrix : 4.68
+ mapi : 4.04
+ dns : 1.56
+ sctp : 0.66
+ smtp : 0.48
+ pop3 : 0.30
+ novadigm : 0.09
+ sip : 0.08
+ h323 : 0.05
+ rtsp : 0.04
+ unknown : 28.52
+
+
+test_rx_check_http: &rx_http
+ multiplier : 8800
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 31.389
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 37.114
+
+test_rx_check_http_negative:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 6.8
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 16.063
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.663
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+# problem stabilizing CPU utilization at this setup
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 10}
+# cpu_util : 1
+# bw_per_core : 1
+
+# problem stabilizing CPU utilization at this setup
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 100}
+# cpu_util : 1
+# bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex25/config.yaml b/scripts/automation/regression/setups/trex25/config.yaml
new file mode 100644
index 00000000..c8190636
--- /dev/null
+++ b/scripts/automation/regression/setups/trex25/config.yaml
@@ -0,0 +1,93 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback, virtual etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-25
+ cores : 2
+ modes : ['1G']
+
+router:
+ model : ASR1004(RP2)
+ hostname : csi-mcp-asr1k-4ru-12
+ ip_address : 10.56.217.181
+ image : asr1000rp2-adventerprisek9.BLD_V151_1_S_XE32_THROTTLE_LATEST_20100926_034325_2.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0/0/0
+ clean_config : clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : GigabitEthernet0/1/0
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6a3c
+ server :
+ name : GigabitEthernet0/1/1
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6a3d
+ vrf_name :
+ - client :
+ name : GigabitEthernet0/1/2
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6a3e
+ server :
+ name : GigabitEthernet0/1/4
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6a3f
+ vrf_name :
+ - client :
+ name : GigabitEthernet0/1/5
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6b78
+ server :
+ name : GigabitEthernet0/1/3
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6b79
+ vrf_name :
+ - client :
+ name : GigabitEthernet0/1/6
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6b7a
+ server :
+ name : GigabitEthernet0/1/7
+ src_mac_addr : 0000.0001.0000
+ dest_ipv6_mac_addr : a036.9f4d.6b7b
+ vrf_name :
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.128.23
+ root_dir : /auto/avc-devtest/
+ images_path : /images/1RU/
diff --git a/scripts/automation/regression/sshpass.exp b/scripts/automation/regression/sshpass.exp
new file mode 100755
index 00000000..2262290f
--- /dev/null
+++ b/scripts/automation/regression/sshpass.exp
@@ -0,0 +1,17 @@
+#!/usr/cisco/bin/expect -f
+# sample command: ./ssh.exp password 192.168.1.11 id *
+set pass [lrange $argv 0 0]
+set server [lrange $argv 1 1]
+set name [lrange $argv 2 2]
+set cmd [lrange $argv 3 10]
+
+set cmd_str [join $cmd]
+
+spawn ssh -t $name@$server $cmd_str
+match_max 100000
+expect "*?assword:*"
+send -- "$pass\r"
+send -- "\r"
+expect eof
+wait
+#interact
diff --git a/scripts/automation/regression/stateful_tests/__init__.py b/scripts/automation/regression/stateful_tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/__init__.py
diff --git a/scripts/automation/regression/stateful_tests/tests_exceptions.py b/scripts/automation/regression/stateful_tests/tests_exceptions.py
new file mode 100755
index 00000000..360f44a5
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/tests_exceptions.py
@@ -0,0 +1,37 @@
+#!/router/bin/python
+
+class TRexInUseError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class TRexRunFailedError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class TRexIncompleteRunError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class TRexLowCpuUtilError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class AbnormalResultError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+class ClassificationMissmatchError(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
diff --git a/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py
new file mode 100755
index 00000000..892be966
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_client_pkg_test.py
@@ -0,0 +1,34 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test, CTRexScenario
+from misc_methods import run_command
+from nose.plugins.attrib import attr
+
+
+@attr('client_package')
+class CTRexClientPKG_Test(CTRexGeneral_Test):
+ """This class tests TRex client package"""
+
+ def setUp(self):
+ CTRexGeneral_Test.setUp(self)
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = 'config')
+ self.unzip_client_package()
+
+ def run_client_package_stf_example(self, python_version):
+ commands = [
+ 'cd %s' % CTRexScenario.scripts_path,
+ 'source find_python.sh --%s' % python_version,
+ 'which $PYTHON',
+ 'cd trex_client/stf/examples',
+ '$PYTHON stf_example.py -s %s' % self.configuration.trex['trex_name'],
+ ]
+ return_code, _, stderr = run_command("bash -ce '%s'" % '; '.join(commands))
+ if return_code:
+ self.fail('Error in running stf_example using %s: %s' % (python_version, stderr))
+
+ def test_client_python2(self):
+ self.run_client_package_stf_example(python_version = 'python2')
+
+ def test_client_python3(self):
+ self.run_client_package_stf_example(python_version = 'python3')
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
new file mode 100755
index 00000000..e968d380
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -0,0 +1,363 @@
+#!/router/bin/python
+
+__copyright__ = "Copyright 2014"
+
+"""
+Name:
+ trex_general_test.py
+
+
+Description:
+
+ This script creates the functionality to test the performance of the TRex traffic generator
+ The tested scenario is a TRex TG directly connected to a Cisco router.
+
+::
+
+ Topology:
+
+ ------- --------
+ | | Tx---1gig/10gig----Rx | |
+ | TRex | | router |
+ | | Rx---1gig/10gig----Tx | |
+ ------- --------
+
+"""
+from nose.plugins import Plugin
+from nose.plugins.skip import SkipTest
+import trex
+from trex import CTRexScenario
+import misc_methods
+import sys
+import os
+# from CPlatformUnderTest import *
+from CPlatform import *
+import termstyle
+import threading
+from .tests_exceptions import *
+from platform_cmd_link import *
+import unittest
+from glob import glob
+
+def setUpModule(module):
+ pass
+
+def tearDownModule(module):
+ pass
+
+class CTRexGeneral_Test(unittest.TestCase):
+ """This class defines the general stateful testcase of the TRex traffic generator"""
+ def __init__ (self, *args, **kwargs):
+ sys.stdout.flush()
+ unittest.TestCase.__init__(self, *args, **kwargs)
+ if CTRexScenario.is_test_list:
+ return
+ # Point test object to scenario global object
+ self.configuration = CTRexScenario.configuration
+ self.benchmark = CTRexScenario.benchmark
+ self.trex = CTRexScenario.trex
+ self.stl_trex = CTRexScenario.stl_trex
+ self.trex_crashed = CTRexScenario.trex_crashed
+ self.modes = CTRexScenario.modes
+ self.GAManager = CTRexScenario.GAManager
+ self.no_daemon = CTRexScenario.no_daemon
+ self.skipping = False
+ self.fail_reasons = []
+ if not hasattr(self, 'unsupported_modes'):
+ self.unsupported_modes = []
+ self.is_loopback = True if 'loopback' in self.modes else False
+ self.is_virt_nics = True if 'virt_nics' in self.modes else False
+ self.is_VM = True if 'VM' in self.modes else False
+
+ if not CTRexScenario.is_init:
+ if self.trex and not self.no_daemon: # stateful
+ CTRexScenario.trex_version = self.trex.get_trex_version()
+ if not self.is_loopback:
+                # initialize the scenario based on received configuration, once per entire testing session
+ CTRexScenario.router = CPlatform(CTRexScenario.router_cfg['silent_mode'])
+ device_cfg = CDeviceCfg()
+ device_cfg.set_platform_config(CTRexScenario.router_cfg['config_dict'])
+ device_cfg.set_tftp_config(CTRexScenario.router_cfg['tftp_config_dict'])
+ CTRexScenario.router.load_platform_data_from_file(device_cfg)
+ CTRexScenario.router.launch_connection(device_cfg)
+ if CTRexScenario.router_cfg['forceImageReload']:
+ running_image = CTRexScenario.router.get_running_image_details()['image']
+ print('Current router image: %s' % running_image)
+ needed_image = device_cfg.get_image_name()
+ if not CTRexScenario.router.is_image_matches(needed_image):
+ print('Setting router image: %s' % needed_image)
+ CTRexScenario.router.config_tftp_server(device_cfg)
+ CTRexScenario.router.load_platform_image(needed_image)
+ CTRexScenario.router.set_boot_image(needed_image)
+ CTRexScenario.router.reload_platform(device_cfg)
+ CTRexScenario.router.launch_connection(device_cfg)
+ running_image = CTRexScenario.router.get_running_image_details()['image'] # verify image
+ if not CTRexScenario.router.is_image_matches(needed_image):
+ self.fail('Unable to set router image: %s, current image is: %s' % (needed_image, running_image))
+ else:
+ print('Matches needed image: %s' % needed_image)
+ CTRexScenario.router_image = running_image
+
+ if self.modes:
+ print(termstyle.green('\t!!!\tRunning with modes: %s, not suitable tests will be skipped.\t!!!' % list(self.modes)))
+
+ CTRexScenario.is_init = True
+ print(termstyle.green("Done instantiating TRex scenario!\n"))
+
+# raise RuntimeError('CTRexScenario class is not initialized!')
+ self.router = CTRexScenario.router
+
+
+
+# def assert_dict_eq (self, dict, key, val, error=''):
+# v1 = int(dict[key]))
+# self.assertEqual(v1, int(val), error)
+#
+# def assert_dict_gt (self, d, key, val, error=''):
+# v1 = int(dict[key])
+# self.assert_gt(v1, int(val), error)
+
+ def assertEqual(self, v1, v2, s):
+ if v1 != v2:
+ error='ERROR '+str(v1)+' != '+str(v2)+ ' '+s;
+ self.fail(error)
+
+ def assert_gt(self, v1, v2, s):
+ if not v1 > v2:
+ error='ERROR {big} < {small} {str}'.format(big = v1, small = v2, str = s)
+ self.fail(error)
+
+ def check_results_eq (self,res,name,val):
+ if res is None:
+ self.fail('TRex results cannot be None !')
+ return
+
+ if name not in res:
+ self.fail('TRex results does not include key %s' % name)
+ return
+
+ if res[name] != float(val):
+ self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val))
+
+ def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 10, maximal_cpu = 85):
+ cpu_util = trex_res.get_avg_steady_state_value('trex-global.data.m_cpu_util_raw')
+ trex_tx_bps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ expected_norm_cpu = self.get_benchmark_param('bw_per_core')
+ cores = self.get_benchmark_param('cores')
+ ports_count = trex_res.get_ports_count()
+ if not (cpu_util and ports_count and cores):
+ print("Can't calculate CPU benchmark, need to divide by zero: cpu util: %s, ports: %s, cores: %s" % (cpu_util, ports_count, cores))
+ test_norm_cpu = -1
+ else:
+ test_norm_cpu = trex_tx_bps / (cpu_util * ports_count * cores * 2.5e6)
+
+ if '1G' in self.modes:
+ minimal_cpu /= 10.0
+
+ if not self.is_virt_nics:
+ if cpu_util > maximal_cpu:
+ self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
+ #if cpu_util < minimal_cpu:
+ # self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
+
+ print("TRex CPU utilization: %g%%, norm_cpu is : %g Gb/core" % (round(cpu_util, 2), round(test_norm_cpu, 2)))
+ if test_norm_cpu < 0:
+ return
+
+ if not expected_norm_cpu:
+ expected_norm_cpu = 1
+
+ calc_error_precent = abs(100.0 * test_norm_cpu / expected_norm_cpu - 100)
+ print('Err percent: %s' % calc_error_precent)
+ #if calc_error_precent > err and cpu_util > 10:
+ # self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
+
+ # report benchmarks
+ if self.GAManager:
+ try:
+ pass
+ #setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
+ #self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
+ #self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
+ #self.GAManager.emptyAndReportQ()
+ except Exception as e:
+ print('Sending GA failed: %s' % e)
+
+ def check_results_gt (self, res, name, val):
+ if res is None:
+ self.fail('TRex results canot be None !')
+ return
+
+ if name not in res:
+ self.fail('TRex results does not include key %s' % name)
+ return
+
+ if res[name]< float(val):
+ self.fail('TRex results[%s]<%f and not as expected greater than %f ' % (name, res[name], val))
+
+ def check_for_trex_crash(self):
+ pass
+
+ def get_benchmark_param (self, param, sub_param = None, test_name = None):
+ if not test_name:
+ test_name = self.get_name()
+ if test_name not in self.benchmark:
+ self.skip('No data in benchmark.yaml for test: %s, param: %s. Skipping.' % (test_name, param))
+ if sub_param:
+ return self.benchmark[test_name][param].get(sub_param)
+ else:
+ return self.benchmark[test_name].get(param)
+
+ def check_general_scenario_results (self, trex_res, check_latency = True):
+
+ try:
+ # check if test is valid
+ if not trex_res.is_done_warmup():
+ self.fail('TRex did not reach warm-up situtaion. Results are not valid.')
+
+ # check history size is enough
+ if len(trex_res._history) < 5:
+ self.fail('TRex results list is too short. Increase the test duration or check unexpected stopping.')
+
+ # check TRex number of drops
+ trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_pkts")
+ trex_drops = trex_res.get_total_drops()
+ trex_drop_rate = trex_res.get_drop_rate()
+ if ( trex_drops > 0.001 * trex_tx_pckt) and (trex_drop_rate > 0.0): # deliberately mask kickoff drops when TRex first initiated
+ self.fail('Number of packet drops larger than 0.1% of all traffic')
+
+ # check queue full, queue drop, allocation error
+ m_total_alloc_error = trex_res.get_last_value("trex-global.data.m_total_alloc_error")
+ m_total_queue_full = trex_res.get_last_value("trex-global.data.m_total_queue_full")
+ m_total_queue_drop = trex_res.get_last_value("trex-global.data.m_total_queue_drop")
+ self.assert_gt(1000, m_total_alloc_error, 'Got allocation errors. (%s), please review multiplier and templates configuration.' % m_total_alloc_error)
+ self.assert_gt(1000, m_total_queue_drop, 'Too much queue_drop (%s), please review multiplier.' % m_total_queue_drop)
+
+ if self.is_VM:
+ allowed_queue_full = 10000 + trex_tx_pckt / 100
+ else:
+ allowed_queue_full = 1000 + trex_tx_pckt / 1000
+ self.assert_gt(allowed_queue_full, m_total_queue_full, 'Too much queue_full (%s), please review multiplier.' % m_total_queue_full)
+
+ # # check TRex expected counters
+ #trex_exp_rate = trex_res.get_expected_tx_rate().get('m_tx_expected_bps')
+ #assert trex_exp_rate is not None
+ #trex_exp_gbps = trex_exp_rate/(10**9)
+
+ if check_latency:
+ # check that max latency does not exceed 1 msec
+ if self.configuration.trex['trex_name'] == '10.56.217.210': # temporary workaround for latency issue in kiwi02, remove it ASAP. http://trex-tgn.cisco.com/youtrack/issue/trex-194
+ allowed_latency = 8000
+ elif self.is_VM:
+ allowed_latency = 9999999
+ else: # no excuses, check 1ms
+ allowed_latency = 1000
+ if max(trex_res.get_max_latency().values()) > allowed_latency:
+ self.fail('LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency)
+
+ # check that avg latency does not exceed 1 msec
+ if self.is_VM:
+ allowed_latency = 9999999
+ else: # no excuses, check 1ms
+ allowed_latency = 1000
+ if max(trex_res.get_avg_latency().values()) > allowed_latency:
+ self.fail('LatencyError: Average latency exceeds %s (usec)' % allowed_latency)
+
+ if not self.is_loopback:
+ # check router number of drops --> deliberately masked- need to be figured out!!!!!
+ pkt_drop_stats = self.router.get_drop_stats()
+# assert pkt_drop_stats['total_drops'] < 20
+
+ # check for trex-router packet consistency
+ # TODO: check if it's ok
+ print('router drop stats: %s' % pkt_drop_stats)
+ print('TRex drop stats: %s' % trex_drops)
+ #self.assertEqual(pkt_drop_stats, trex_drops, "TRex's and router's drop stats don't match.")
+
+ except KeyError as e:
+ self.fail(e)
+ #assert False
+
+ # except AssertionError as e:
+ # e.args += ('TRex has crashed!')
+ # raise
+
+ @staticmethod
+ def unzip_client_package():
+ client_pkg_files = glob('%s/trex_client*.tar.gz' % CTRexScenario.scripts_path)
+ if not len(client_pkg_files):
+ raise Exception('Could not find client package')
+ if len(client_pkg_files) > 1:
+ raise Exception('Found more than one client packages')
+ if not os.path.exists('%s/trex_client' % CTRexScenario.scripts_path):
+ print('\nUnzipping package')
+ return_code, _, stderr = misc_methods.run_command("tar -xzf %s -C %s" % (client_pkg_files[0], CTRexScenario.scripts_path))
+ if return_code:
+ raise Exception('Could not untar the client package: %s' % stderr)
+ else:
+ print('\nClient package is untarred')
+
+ # We encountered error, don't fail the test immediately
+ def fail(self, reason = 'Unknown error'):
+ print('Error: %s' % reason)
+ self.fail_reasons.append(reason)
+
+ # skip running of the test, counts as 'passed' but prints 'skipped'
+ def skip(self, message = 'Unknown reason'):
+ print('Skip: %s' % message)
+ self.skipping = True
+ raise SkipTest(message)
+
+ # get name of currently running test
+ def get_name(self):
+ return self._testMethodName
+
+ def setUp(self):
+ test_setup_modes_conflict = self.modes & set(self.unsupported_modes)
+ if test_setup_modes_conflict:
+ self.skip("The test can't run with following modes of given setup: %s " % test_setup_modes_conflict)
+ if not self.stl_trex and not self.trex.is_idle():
+ print('Warning: TRex is not idle at setUp, trying to stop it.')
+ self.trex.force_kill(confirm = False)
+ if not self.is_loopback:
+ print('')
+ if not self.stl_trex: # stateful
+ self.router.load_clean_config()
+ self.router.clear_counters()
+ self.router.clear_packet_drop_stats()
+
+ ########################################################################
+ #### DO NOT ADD TESTS TO THIS FILE ####
+ #### Added tests here will held once for EVERY test sub-class ####
+ ########################################################################
+
+ # masked example to such test. uncomment to watch how it affects #
+# def test_isInitialized(self):
+# assert CTRexScenario.is_init == True
+ def tearDown(self):
+ if not self.stl_trex and not self.trex.is_idle():
+ print('Warning: TRex is not idle at tearDown, trying to stop it.')
+ self.trex.force_kill(confirm = False)
+ if not self.skipping:
+ # print server logs of test run
+ if self.trex and CTRexScenario.server_logs and not self.no_daemon:
+ try:
+ print(termstyle.green('\n>>>>>>>>>>>>>>> Daemon log <<<<<<<<<<<<<<<'))
+ daemon_log = self.trex.get_trex_daemon_log()
+ log_size = len(daemon_log)
+ print(''.join(daemon_log[CTRexScenario.daemon_log_lines:]))
+ CTRexScenario.daemon_log_lines = log_size
+ except Exception as e:
+ print("Can't get TRex daemon log:", e)
+ try:
+ print(termstyle.green('>>>>>>>>>>>>>>>> Trex log <<<<<<<<<<<<<<<<'))
+ print(''.join(self.trex.get_trex_log()))
+ except Exception as e:
+ print("Can't get TRex log:", e)
+ if len(self.fail_reasons):
+ sys.stdout.flush()
+ raise Exception('The test is failed, reasons:\n%s' % '\n'.join(self.fail_reasons))
+ sys.stdout.flush()
+
+ def check_for_trex_crash(self):
+ pass
diff --git a/scripts/automation/regression/stateful_tests/trex_imix_test.py b/scripts/automation/regression/stateful_tests/trex_imix_test.py
new file mode 100755
index 00000000..f8fe0ed1
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_imix_test.py
@@ -0,0 +1,213 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from CPlatform import CStaticRouteConfig
+from .tests_exceptions import *
+#import sys
+import time
+from nose.tools import nottest
+
+class CTRexIMIX_Test(CTRexGeneral_Test):
+ """This class defines the IMIX testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ # super(CTRexIMIX_Test, self).__init__()
+ CTRexGeneral_Test.__init__(self, *args, **kwargs)
+
+ def setUp(self):
+ super(CTRexIMIX_Test, self).setUp() # launch super test class setUp process
+ # CTRexGeneral_Test.setUp(self) # launch super test class setUp process
+ # self.router.clear_counters()
+ pass
+
+ def test_routing_imix_64(self):
+        # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+# self.trex.set_yaml_file('cap2/imix_64.yaml')
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+# trex_res = self.trex.run(multiplier = mult, cores = core, duration = 30, l = 1000, p = True)
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 30,
+ f = 'cap2/imix_64.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+
+ # the name intentionally not matches nose default pattern, including the test should be specified explicitly
+ def dummy(self):
+ ret = self.trex.start_trex(
+ c = 1,
+ m = 1,
+ p = True,
+ nc = True,
+ d = 5,
+ f = 'cap2/imix_fast_1g.yaml',
+ l = 1000,
+ trex_development = True)
+
+ trex_res = self.trex.sample_to_run_finish()
+ print(trex_res)
+
+ def test_routing_imix (self):
+        # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+# self.trex.set_yaml_file('cap2/imix_fast_1g.yaml')
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 60,
+ f = 'cap2/imix_fast_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+
+ self.check_CPU_benchmark(trex_res)
+
+
+ def test_static_routing_imix (self):
+ if self.is_loopback:
+ self.skip('In loopback mode the test is same as test_routing_imix')
+        # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+
+ # Configure static routing based on benchmark data input
+ stat_route_dict = self.get_benchmark_param('stat_route_dict')
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.router.config_static_routing(stat_route_obj, mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 60,
+ f = 'cap2/imix_fast_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+
+
+ def test_static_routing_imix_asymmetric (self):
+        # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+
+ # Configure static routing based on benchmark data input
+ stat_route_dict = self.get_benchmark_param('stat_route_dict')
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.router.config_static_routing(stat_route_obj, mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ nc = True,
+ d = 100,
+ f = 'cap2/imix_fast_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResults instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 25)
+
+
+ def test_jumbo(self, duration = 100, **kwargs):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces(mtu = 9216)
+ self.router.config_pbr(mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = duration,
+ f = 'cap2/imix_9k.yaml',
+ l = 1000,
+ **kwargs)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResults instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 0, maximal_cpu = 10)
+
+ # don't include it to regular nose search
+ @nottest
+ def test_warm_up(self):
+ try:
+ self._testMethodName = 'test_jumbo'
+ self.test_jumbo(duration = 5, trex_development = True)
+ except Exception as e:
+ print('Ignoring this error: %s' % e)
+ if self.fail_reasons:
+ print('Ignoring this error(s):\n%s' % '\n'.join(self.fail_reasons))
+ self.fail_reasons = []
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ # remove nbar config here
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateful_tests/trex_ipv6_test.py b/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
new file mode 100755
index 00000000..4d6f7953
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
@@ -0,0 +1,103 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from .tests_exceptions import *
+import time
+from nose.tools import assert_equal
+
+class CTRexIPv6_Test(CTRexGeneral_Test):
+ """This class defines the IPv6 testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ super(CTRexIPv6_Test, self).__init__(*args, **kwargs)
+
+ def setUp(self):
+ super(CTRexIPv6_Test, self).setUp() # launch super test class setUp process
+# print " before sleep setup !!"
+# time.sleep(100000);
+# pass
+
+ def test_ipv6_simple(self):
+ if self.is_virt_nics:
+ self.skip('--ipv6 flag does not work correctly in with virtual NICs') # TODO: fix
+        # test initialization
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+
+ self.router.config_pbr(mode = "config")
+ self.router.config_ipv6_pbr(mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ cfg = '/etc/trex_cfg_mac.yaml',
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ ipv6 = True,
+ d = 60,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+
+ self.check_CPU_benchmark (trex_res, 10.0)
+
+ assert True
+
+
+ def test_ipv6_negative (self):
+ if self.is_loopback:
+ self.skip('The test checks ipv6 drops by device and we are in loopback setup')
+        # test initialization
+ self.router.configure_basic_interfaces()
+
+ # NOT CONFIGURING IPv6 INTENTIONALLY TO GET DROPS!
+ self.router.config_pbr(mode = "config")
+
+ # same params as test_ipv6_simple
+ mult = self.get_benchmark_param('multiplier', test_name = 'test_ipv6_simple')
+ core = self.get_benchmark_param('cores', test_name = 'test_ipv6_simple')
+
+ ret = self.trex.start_trex(
+ cfg = '/etc/trex_cfg_mac.yaml',
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ ipv6 = True,
+ d = 60,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ trex_tx_pckt = float(trex_res.get_last_value("trex-global.data.m_total_tx_pkts"))
+ trex_drops = int(trex_res.get_total_drops())
+
+ trex_drop_rate = trex_res.get_drop_rate()
+
+ # make sure that at least 50% of the total transmitted packets failed
+ self.assert_gt((trex_drops/trex_tx_pckt), 0.5, 'packet drop ratio is not high enough')
+
+
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ # remove config here
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateful_tests/trex_nat_test.py b/scripts/automation/regression/stateful_tests/trex_nat_test.py
new file mode 100755
index 00000000..c23f67c4
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_nat_test.py
@@ -0,0 +1,169 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from .tests_exceptions import *
+import time
+from CPlatform import CStaticRouteConfig, CNatConfig
+from nose.tools import assert_equal
+
+
+class CTRexNoNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
+ """This class defines the NAT testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ super(CTRexNoNat_Test, self).__init__(*args, **kwargs)
+ self.unsupported_modes = ['loopback'] # NAT requires device
+
+ def setUp(self):
+ super(CTRexNoNat_Test, self).setUp() # launch super test class setUp process
+
+ def check_nat_stats (self, nat_stats):
+ pass
+
+
+ def test_nat_learning(self):
+        # test initialization
+ self.router.configure_basic_interfaces()
+
+ stat_route_dict = self.get_benchmark_param('stat_route_dict')
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.router.config_static_routing(stat_route_obj, mode = "config")
+
+ self.router.config_nat_verify() # shutdown duplicate interfaces
+
+# self.trex.set_yaml_file('cap2/http_simple.yaml')
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+# trex_res = self.trex.run(multiplier = mult, cores = core, duration = 100, l = 1000, learn_verify = True)
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ learn_verify = True,
+ d = 100,
+ f = 'cap2/http_simple.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
+
+
+ expected_nat_opened = self.get_benchmark_param('nat_opened')
+ learning_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data
+
+ if self.get_benchmark_param('allow_timeout_dev'):
+ nat_timeout_ratio = float(learning_stats['m_total_nat_time_out']) / learning_stats['m_total_nat_open']
+ if nat_timeout_ratio > 0.005:
+ self.fail('TRex nat_timeout ratio %f > 0.5%%' % nat_timeout_ratio)
+ else:
+ self.check_results_eq (learning_stats, 'm_total_nat_time_out', 0.0)
+ self.check_results_eq (learning_stats, 'm_total_nat_no_fid', 0.0)
+ self.check_results_gt (learning_stats, 'm_total_nat_learn_error', 0.0)
+#
+ self.check_results_gt (learning_stats, 'm_total_nat_open', expected_nat_opened)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 10, maximal_cpu = 85)
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ pass
+
+
+class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
+ """This class defines the NAT testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ super(CTRexNat_Test, self).__init__(*args, **kwargs)
+ self.unsupported_modes = ['loopback'] # NAT requires device
+
+ def setUp(self):
+ super(CTRexNat_Test, self).setUp() # launch super test class setUp process
+ # config nat here
+
+
+ def check_nat_stats (self, nat_stats):
+ pass
+
+
+ def test_nat_simple_mode1(self):
+ self.nat_simple_helper(learn_mode=1)
+
+ def test_nat_simple_mode2(self):
+ self.nat_simple_helper(learn_mode=2)
+
+ def test_nat_simple_mode3(self):
+ self.nat_simple_helper(learn_mode=3)
+
+ def nat_simple_helper(self, learn_mode=1):
+        # test initialization
+ self.router.configure_basic_interfaces()
+
+
+ stat_route_dict = self.get_benchmark_param('stat_route_dict')
+ stat_route_obj = CStaticRouteConfig(stat_route_dict)
+ self.router.config_static_routing(stat_route_obj, mode = "config")
+
+ nat_dict = self.get_benchmark_param('nat_dict')
+ nat_obj = CNatConfig(nat_dict)
+ self.router.config_nat(nat_obj)
+
+# self.trex.set_yaml_file('cap2/http_simple.yaml')
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+# trex_res = self.trex.run(nc=False,multiplier = mult, cores = core, duration = 100, l = 1000, learn = True)
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ learn_mode = learn_mode,
+ d = 100,
+ f = 'cap2/http_simple.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
+
+ trex_nat_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data
+ if self.get_benchmark_param('allow_timeout_dev'):
+ nat_timeout_ratio = float(trex_nat_stats['m_total_nat_time_out']) / trex_nat_stats['m_total_nat_open']
+ if nat_timeout_ratio > 0.005:
+ self.fail('TRex nat_timeout ratio %f > 0.5%%' % nat_timeout_ratio)
+ else:
+ self.check_results_eq (trex_nat_stats,'m_total_nat_time_out', 0.0)
+ self.check_results_eq (trex_nat_stats,'m_total_nat_no_fid', 0.0)
+ self.check_results_gt (trex_nat_stats,'m_total_nat_open', 6000)
+
+
+ self.check_general_scenario_results(trex_res, check_latency = False) # NAT can cause latency
+## test_norm_cpu = 2*(trex_res.result['total-tx']/(core*trex_res.result['cpu_utilization']))
+# trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_bps")
+# cpu_util = int(trex_res.get_last_value("trex-global.data.m_cpu_util"))
+# test_norm_cpu = 2*(trex_tx_pckt/(core*cpu_util))
+# print "test_norm_cpu is: ", test_norm_cpu
+
+ self.check_CPU_benchmark(trex_res, minimal_cpu = 10, maximal_cpu = 85)
+
+ #if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > 0.03):
+ # raiseraise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds 3%')
+
+ nat_stats = self.router.get_nat_stats()
+ print(nat_stats)
+
+ self.assert_gt(nat_stats['total_active_trans'], 5000, 'total active translations is not high enough')
+ self.assert_gt(nat_stats['dynamic_active_trans'], 5000, 'total dynamic active translations is not high enough')
+ self.assertEqual(nat_stats['static_active_trans'], 0, "NAT statistics nat_stats['static_active_trans'] should be zero")
+ self.assert_gt(nat_stats['num_of_hits'], 50000, 'total nat hits is not high enough')
+
+ def tearDown(self):
+ self.router.clear_nat_translations()
+ CTRexGeneral_Test.tearDown(self)
+
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateful_tests/trex_nbar_test.py b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
new file mode 100755
index 00000000..6611ac96
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
@@ -0,0 +1,123 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from .tests_exceptions import *
+from interfaces_e import IFType
+from nose.tools import nottest
+from misc_methods import print_r
+
+class CTRexNbar_Test(CTRexGeneral_Test):
+ """This class defines the NBAR testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ super(CTRexNbar_Test, self).__init__(*args, **kwargs)
+ self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
+
+ def setUp(self):
+ super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
+# self.router.kill_nbar_flows()
+ self.router.clear_cft_counters()
+ self.router.clear_nbar_stats()
+
+ def match_classification (self):
+ nbar_benchmark = self.get_benchmark_param("nbar_classification")
+ test_classification = self.router.get_nbar_stats()
+ print("TEST CLASSIFICATION:")
+ print(test_classification)
+ missmatchFlag = False
+ missmatchMsg = "NBAR classification contians a missmatch on the following protocols:"
+ fmt = '\n\t{0:15} | Expected: {1:>3.2f}%, Got: {2:>3.2f}%'
+ noise_level = 0.045
+
+ for cl_intf in self.router.get_if_manager().get_if_list(if_type = IFType.Client):
+ client_intf = cl_intf.get_name()
+
+ for protocol, bench in nbar_benchmark.items():
+ if protocol != 'total':
+ try:
+ bench = float(bench)
+ protocol = protocol.replace('_','-')
+ protocol_test_res = test_classification[client_intf]['percentage'][protocol]
+ deviation = 100 * abs(bench/protocol_test_res - 1) # percents
+ difference = abs(bench - protocol_test_res)
+ if (deviation > 10 and difference > noise_level): # allowing 10% deviation and 'noise_level'% difference
+ missmatchFlag = True
+ missmatchMsg += fmt.format(protocol, bench, protocol_test_res)
+ except KeyError as e:
+ missmatchFlag = True
+ print(e)
+ print("Changes missmatchFlag to True. ", "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf ))
+ missmatchMsg += "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
+ except ZeroDivisionError as e:
+ print("ZeroDivisionError: %s" % protocol)
+ pass
+ if missmatchFlag:
+ self.fail(missmatchMsg)
+
+
+ def test_nbar_simple(self):
+        # test initialization
+ deviation_compare_value = 0.03 # default value of deviation - 3%
+ self.router.configure_basic_interfaces()
+
+ self.router.config_pbr(mode = "config")
+ self.router.config_nbar_pd()
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ print("\nLATEST DUMP:")
+ print(trex_res.get_latest_dump())
+
+ self.check_general_scenario_results(trex_res, check_latency = False) # NBAR can cause latency
+ self.check_CPU_benchmark(trex_res)
+ self.match_classification()
+
+
+    # the name intentionally does not match nose's default pattern; to include this test it should be specified explicitly
+ def NBarLong(self):
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+ self.router.config_nbar_pd()
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 18000, # 5 hours
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res, check_latency = False)
+
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py
new file mode 100755
index 00000000..161856b1
--- /dev/null
+++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py
@@ -0,0 +1,280 @@
+#!/router/bin/python
+from .trex_general_test import CTRexGeneral_Test
+from CPlatform import CStaticRouteConfig, CNatConfig
+from .tests_exceptions import *
+#import sys
+import time
+import copy
+from nose.tools import nottest
+import traceback
+
+class CTRexRx_Test(CTRexGeneral_Test):
+ """This class defines the rx testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ CTRexGeneral_Test.__init__(self, *args, **kwargs)
+ self.unsupported_modes = ['virt_nics'] # TODO: fix
+
+ def setUp(self):
+ CTRexGeneral_Test.setUp(self)
+
+
+ def check_rx_errors(self, trex_res, allow_error_tolerance = True):
+ try:
+ # counters to check
+
+ latency_counters_display = {'m_unsup_prot': 0, 'm_no_magic': 0, 'm_no_id': 0, 'm_seq_error': 0, 'm_length_error': 0, 'm_no_ipv4_option': 0, 'm_tx_pkt_err': 0}
+ rx_counters = {'m_err_drop': 0, 'm_err_aged': 0, 'm_err_no_magic': 0, 'm_err_wrong_pkt_id': 0, 'm_err_fif_seen_twice': 0, 'm_err_open_with_no_fif_pkt': 0, 'm_err_oo_dup': 0, 'm_err_oo_early': 0, 'm_err_oo_late': 0, 'm_err_flow_length_changed': 0}
+
+ # get relevant TRex results
+
+ try:
+ ports_names = trex_res.get_last_value('trex-latecny-v2.data', 'port\-\d+')
+ if not ports_names:
+ raise AbnormalResultError('Could not find ports info in TRex results, path: trex-latecny-v2.data.port-*')
+ for port_name in ports_names:
+ path = 'trex-latecny-v2.data.%s.stats' % port_name
+ port_result = trex_res.get_last_value(path)
+ if not port_result:
+ raise AbnormalResultError('Could not find port stats in TRex results, path: %s' % path)
+ for key in latency_counters_display:
+ latency_counters_display[key] += port_result[key]
+
+                # using -k flag in TRex produces 1 error per port in latency counter m_seq_error; allow it until the issue is resolved. For comparison, use a dict with a reduced m_seq_error number.
+ latency_counters_compare = copy.deepcopy(latency_counters_display)
+ latency_counters_compare['m_seq_error'] = max(0, latency_counters_compare['m_seq_error'] - len(ports_names))
+
+ path = 'rx-check.data.stats'
+ rx_check_results = trex_res.get_last_value(path)
+ if not rx_check_results:
+ raise AbnormalResultError('No TRex results by path: %s' % path)
+ for key in rx_counters:
+ rx_counters[key] = rx_check_results[key]
+
+ path = 'rx-check.data.stats.m_total_rx'
+ total_rx = trex_res.get_last_value(path)
+ if total_rx is None:
+ raise AbnormalResultError('No TRex results by path: %s' % path)
+ elif not total_rx:
+ raise AbnormalResultError('Total rx_check (%s) packets is zero.' % path)
+
+ print('Total packets checked: %s' % total_rx)
+ print('Latency counters: %s' % latency_counters_display)
+ print('rx_check counters: %s' % rx_counters)
+
+ except KeyError as e:
+ self.fail('Expected key in TRex result was not found.\n%s' % traceback.print_exc())
+
+            # The check: in loopback expect 0 problems; otherwise allow errors up to <error_tolerance>% of total_rx
+
+ total_errors = sum(rx_counters.values()) + sum(latency_counters_compare.values())
+ error_tolerance = self.get_benchmark_param('error_tolerance')
+ if not error_tolerance:
+ if not allow_error_tolerance:
+ error_tolerance = 0
+ else:
+ error_tolerance = 0.1
+ error_percentage = total_errors * 100.0 / total_rx
+
+ if total_errors > 0:
+ if error_percentage > error_tolerance:
+ self.fail('Too much errors in rx_check. (~%s%% of traffic)' % error_percentage)
+ else:
+ print('There are errors in rx_check (%f%%), not exceeding allowed limit (%s%%)' % (error_percentage, error_tolerance))
+ else:
+ print('No errors in rx_check.')
+ except Exception as e:
+ print(traceback.print_exc())
+ self.fail('Errors in rx_check: %s' % e)
+
+ def test_rx_check_sfr(self):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = 'config')
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ rx_check = sample_rate,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g_no_bundeling.yaml',
+ l = 1000,
+ k = 10,
+ learn_verify = True,
+ l_pkt_mode = 2)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ #print ("\nLATEST DUMP:")
+ #print trex_res.get_latest_dump()
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+
+ def test_rx_check_http(self):
+ if not self.is_loopback:
+ # TODO: skip as test_rx_check_http_negative will cover it
+ #self.skip('This test is covered by test_rx_check_http_negative')
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ rx_check = sample_rate,
+ d = 100,
+ f = 'cap2/http_simple.yaml',
+ l = 1000,
+ k = 10,
+ learn_verify = True,
+ l_pkt_mode = 2)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+
+ def test_rx_check_sfr_ipv6(self):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = 'config')
+ self.router.config_ipv6_pbr(mode = "config")
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ cfg = '/etc/trex_cfg_mac.yaml',
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ rx_check = sample_rate,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g_no_bundeling.yaml',
+ l = 1000,
+ k = 10,
+ ipv6 = True)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ #print ("\nLATEST DUMP:")
+ #print trex_res.get_latest_dump()
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+
+ def test_rx_check_http_ipv6(self):
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+ self.router.config_ipv6_pbr(mode = "config")
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ cfg = '/etc/trex_cfg_mac.yaml',
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ rx_check = sample_rate,
+ d = 100,
+ f = 'cap2/http_simple.yaml',
+ l = 1000,
+ k = 10,
+ ipv6 = True)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+ #@nottest
+ def test_rx_check_http_negative(self):
+ if self.is_loopback:
+ self.skip('This test uses NAT, not relevant for loopback')
+
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+ core = self.get_benchmark_param('cores')
+ mult = self.get_benchmark_param('multiplier')
+ sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ rx_check = sample_rate,
+ d = 60,
+ f = 'cap2/http_simple.yaml',
+ l = 1000,
+ k = 10,
+ learn_verify = True,
+ l_pkt_mode = 2)
+
+ print('Run for 40 seconds, expect no errors')
+ trex_res = self.trex.sample_x_seconds(40)
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+ self.check_rx_errors(trex_res)
+
+ print('Run until finish, expect errors')
+ old_errors = copy.deepcopy(self.fail_reasons)
+ nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple_mode1')
+ nat_obj = CNatConfig(nat_dict)
+ self.router.config_nat(nat_obj)
+ self.router.config_zbf()
+ trex_res = self.trex.sample_to_run_finish()
+ self.router.config_no_zbf()
+ self.router.config_no_nat(nat_obj)
+ #self.router.clear_nat_translations()
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ self.check_rx_errors(trex_res, allow_error_tolerance = False)
+ if self.fail_reasons == old_errors:
+ self.fail('Expected errors here, got none.')
+ else:
+ print('Got errors as expected.')
+ self.fail_reasons = old_errors
+
+ def tearDown(self):
+ CTRexGeneral_Test.tearDown(self)
+ pass
+
+if __name__ == "__main__":
+ pass
diff --git a/scripts/automation/regression/stateless_tests/__init__.py b/scripts/automation/regression/stateless_tests/__init__.py
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/__init__.py
diff --git a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
new file mode 100755
index 00000000..6940efd3
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
@@ -0,0 +1,75 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+from collections import deque
+from time import time, sleep
+
+class STLBenchmark_Test(CStlGeneral_Test):
+ """Benchark stateless performance"""
+
+ def test_CPU_benchmark(self):
+ critical_test = CTRexScenario.setup_name in ('kiwi02', 'trex08', 'trex09') # temporary patch, this test needs to be fixed
+ timeout = 60 # max time to wait for stabilization
+ stabilize = 5 # ensure stabilization over this period
+ print('')
+
+ for profile_bench in self.get_benchmark_param('profiles'):
+ cpu_utils = deque([0] * stabilize, maxlen = stabilize)
+ bws_per_core = deque([0] * stabilize, maxlen = stabilize)
+ kwargs = profile_bench.get('kwargs', {})
+ print('Testing profile %s, kwargs: %s' % (profile_bench['name'], kwargs))
+ profile = STLProfile.load(os.path.join(CTRexScenario.scripts_path, profile_bench['name']), **kwargs)
+
+ self.stl_trex.reset()
+ self.stl_trex.clear_stats()
+ sleep(1)
+ self.stl_trex.add_streams(profile)
+ mult = '1%' if self.is_virt_nics else '10%'
+ self.stl_trex.start(mult = mult)
+ start_time = time()
+
+ for i in range(timeout + 1):
+ stats = self.stl_trex.get_stats()
+ cpu_utils.append(stats['global']['cpu_util'])
+ bws_per_core.append(stats['global']['bw_per_core'])
+ if i > stabilize and min(cpu_utils) > max(cpu_utils) * 0.95:
+ break
+ sleep(0.5)
+
+ agv_cpu_util = sum(cpu_utils) / stabilize
+ agv_bw_per_core = sum(bws_per_core) / stabilize
+
+ if critical_test and i == timeout and agv_cpu_util > 10:
+ raise Exception('Timeout on waiting for stabilization, last CPU util values: %s' % list(cpu_utils))
+ if stats[0]['opackets'] < 300 or stats[1]['opackets'] < 300:
+ raise Exception('Too few opackets, port0: %s, port1: %s' % (stats[0]['opackets'], stats[1]['opackets']))
+ if stats['global']['queue_full'] > 100000:
+ raise Exception('Too much queue_full: %s' % stats['global']['queue_full'])
+ if not cpu_utils[-1]:
+ raise Exception('CPU util is zero, last values: %s' % list(cpu_utils))
+ print('Done (%ss), CPU util: %4g, bw_per_core: %6sGb/core' % (int(time() - start_time), agv_cpu_util, round(agv_bw_per_core, 2)))
+ # TODO: add check of benchmark based on results from regression
+
+ # report benchmarks
+ if self.GAManager:
+ try:
+ pass
+ #profile_repr = '%s.%s %s' % (CTRexScenario.setup_name,
+ # os.path.basename(profile_bench['name']),
+ # repr(kwargs).replace("'", ''))
+ #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
+ # label = 'bw_per_core', value = int(agv_bw_per_core))
+ # TODO: report expected once acquired
+ #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
+ # label = 'bw_per_core_exp', value = int(expected_norm_cpu))
+ #self.GAManager.emptyAndReportQ()
+ except Exception as e:
+ print('Sending GA failed: %s' % e)
+
+ def tearDown(self):
+ self.stl_trex.reset()
+ self.stl_trex.clear_stats()
+ sleep(1)
+ CStlGeneral_Test.tearDown(self)
+
diff --git a/scripts/automation/regression/stateless_tests/stl_client_test.py b/scripts/automation/regression/stateless_tests/stl_client_test.py
new file mode 100644
index 00000000..36ac0ee1
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_client_test.py
@@ -0,0 +1,350 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+import glob
+
+
+def get_error_in_percentage (golden, value):
+ return abs(golden - value) / float(golden)
+
+def get_stl_profiles ():
+ profiles_path = os.path.join(CTRexScenario.scripts_path, 'stl/')
+ py_profiles = glob.glob(profiles_path + "/*.py")
+ yaml_profiles = glob.glob(profiles_path + "yaml/*.yaml")
+ return py_profiles + yaml_profiles
+
+
+class STLClient_Test(CStlGeneral_Test):
+ """Tests for stateless client"""
+
+ def setUp(self):
+ CStlGeneral_Test.setUp(self)
+
+ if self.is_virt_nics:
+ self.percentage = 5
+ self.pps = 500
+ else:
+ self.percentage = 50
+ self.pps = 50000
+
+ # strict mode is only for 'wire only' connection
+ self.strict = True if self.is_loopback and not self.is_virt_nics else False
+
+ assert 'bi' in CTRexScenario.stl_ports_map
+
+ self.c = CTRexScenario.stl_trex
+
+ self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]
+
+ self.c.connect()
+ self.c.reset(ports = [self.tx_port, self.rx_port])
+
+ self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
+ self.profiles = get_stl_profiles()
+
+
+ @classmethod
+ def tearDownClass(cls):
+ if CTRexScenario.stl_init_error:
+ return
+ # connect back at end of tests
+ if not cls.is_connected():
+ CTRexScenario.stl_trex.connect()
+
+
+ def verify (self, expected, got):
+ if self.strict:
+ assert expected == got
+ else:
+ assert get_error_in_percentage(expected, got) < 0.05
+
+
+ def test_basic_connect_disconnect (self):
+ try:
+ self.c.connect()
+ assert self.c.is_connected(), 'client should be connected'
+ self.c.disconnect()
+ assert not self.c.is_connected(), 'client should be disconnected'
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_basic_single_burst (self):
+
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXSingleBurst(total_pkts = 100,
+ percentage = self.percentage)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ self.verify(100, stats[self.tx_port]['opackets'])
+ self.verify(100, stats[self.rx_port]['ipackets'])
+
+ self.verify(100, stats[self.rx_port]['opackets'])
+ self.verify(100, stats[self.tx_port]['ipackets'])
+
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ #
+ def test_basic_multi_burst (self):
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXMultiBurst(pkts_per_burst = 10,
+ count = 20,
+ percentage = self.percentage)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ self.verify(200, stats[self.tx_port]['opackets'])
+ self.verify(200, stats[self.rx_port]['ipackets'])
+
+ self.verify(200, stats[self.rx_port]['opackets'])
+ self.verify(200, stats[self.tx_port]['ipackets'])
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ #
+ def test_basic_cont (self):
+ pps = self.pps
+ duration = 0.1
+ golden = pps * duration
+
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXCont(pps = pps)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port], duration = duration)
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+            # cont. with duration should be quite precise - 5% error is relaxed enough
+
+ assert get_error_in_percentage(stats[self.tx_port]['opackets'], golden) < 0.05
+ assert get_error_in_percentage(stats[self.rx_port]['ipackets'], golden) < 0.05
+
+ assert get_error_in_percentage(stats[self.rx_port]['opackets'], golden) < 0.05
+ assert get_error_in_percentage(stats[self.tx_port]['ipackets'], golden) < 0.05
+
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_stress_connect_disconnect (self):
+ try:
+ for i in range(0, 100):
+ self.c.connect()
+ assert self.c.is_connected(), 'client should be connected'
+ self.c.disconnect()
+ assert not self.c.is_connected(), 'client should be disconnected'
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+
+ def test_stress_tx (self):
+ try:
+ s1 = STLStream(name = 'stress',
+ packet = self.pkt,
+ mode = STLTXCont(percentage = self.percentage))
+
+ # add both streams to ports
+ self.c.add_streams([s1], ports = [self.tx_port, self.rx_port])
+ for i in range(0, 100):
+
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.pause(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_paused(), 'port should be paused'
+ assert self.c.ports[self.rx_port].is_paused(), 'port should be paused'
+
+ self.c.resume(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.stop(ports = [self.tx_port, self.rx_port])
+
+ assert not self.c.ports[self.tx_port].is_active(), 'port should be idle'
+ assert not self.c.ports[self.rx_port].is_active(), 'port should be idle'
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_all_profiles (self):
+ if self.is_virt_nics or not self.is_loopback:
+ self.skip('skipping profile tests for virtual / non loopback')
+ return
+
+ try:
+
+ for profile in self.profiles:
+
+ print("now testing profile {0}...\n".format(profile))
+
+ p1 = STLProfile.load(profile, port_id = self.tx_port)
+ p2 = STLProfile.load(profile, port_id = self.rx_port)
+
+ # if profile contains custom MAC addrs we need promiscuous mode
+                # but virtual NICs do not support promiscuous mode
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False)
+
+ if p1.has_custom_mac_addr():
+ if not self.is_virt_nics:
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = True)
+ else:
+ print("\n*** profile needs promiscuous mode but running on virtual NICs - skipping... ***\n")
+ continue
+
+ if p1.has_flow_stats():
+ print("\n*** profile needs RX caps - skipping... ***\n")
+ continue
+
+ self.c.add_streams(p1, ports = self.tx_port)
+ self.c.add_streams(p2, ports = self.rx_port)
+
+ self.c.clear_stats()
+
+ self.c.start(ports = [self.tx_port, self.rx_port], mult = "30%")
+ time.sleep(100 / 1000.0)
+
+ if p1.is_pauseable() and p2.is_pauseable():
+ self.c.pause(ports = [self.tx_port, self.rx_port])
+ time.sleep(100 / 1000.0)
+
+ self.c.resume(ports = [self.tx_port, self.rx_port])
+ time.sleep(100 / 1000.0)
+
+ self.c.stop(ports = [self.tx_port, self.rx_port])
+
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats, '{0} - no stats for TX port'.format(profile)
+ assert self.rx_port in stats, '{0} - no stats for RX port'.format(profile)
+
+ self.verify(stats[self.tx_port]['opackets'], stats[self.rx_port]['ipackets'])
+ self.verify(stats[self.rx_port]['opackets'], stats[self.tx_port]['ipackets'])
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ finally:
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False)
+
+
+ # see https://trex-tgn.cisco.com/youtrack/issue/trex-226
+ def test_latency_pause_resume (self):
+
+ try:
+
+ s1 = STLStream(name = 'latency',
+ packet = self.pkt,
+ mode = STLTXCont(percentage = self.percentage),
+ flow_stats = STLFlowLatencyStats(pg_id = 1))
+
+ self.c.add_streams([s1], ports = self.tx_port)
+
+ self.c.clear_stats()
+
+ self.c.start(ports = self.tx_port)
+
+ for i in range(100):
+ self.c.pause()
+ self.c.resume()
+
+ self.c.stop()
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_pcap_remote (self):
+ try:
+ pcap_file = os.path.join(CTRexScenario.scripts_path, 'automation/regression/test_pcaps/pcap_dual_test.erf')
+
+ master = self.tx_port
+ slave = master ^ 0x1
+
+ self.c.reset(ports = [master, slave])
+ self.c.clear_stats()
+ self.c.push_remote(pcap_file,
+ ports = [master],
+ ipg_usec = 100,
+ is_dual = True)
+ self.c.wait_on_traffic(ports = [master])
+
+ stats = self.c.get_stats()
+
+ self.verify(stats[master]['opackets'], 52)
+ self.verify(stats[slave]['opackets'], 48)
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
diff --git a/scripts/automation/regression/stateless_tests/stl_examples_test.py b/scripts/automation/regression/stateless_tests/stl_examples_test.py
new file mode 100755
index 00000000..71fc3287
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_examples_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+import os, sys
+from misc_methods import run_command
+
+
+class STLExamples_Test(CStlGeneral_Test):
+ """This class defines the IMIX testcase of the TRex traffic generator"""
+
+ def explicitSetUp(self):
+        # examples connect on their own
+ if self.is_connected():
+ CTRexScenario.stl_trex.disconnect()
+
+ def explicitTearDown(self):
+ # connect back at end of tests
+ if not self.is_connected():
+ self.stl_trex.connect()
+
+ def test_stl_examples(self):
+ examples_dir = '../trex_control_plane/stl/examples'
+ examples_to_test = [
+ 'stl_imix.py',
+ ]
+
+ for example in examples_to_test:
+ self.explicitSetUp()
+ return_code, stdout, stderr = run_command("sh -c 'cd %s; %s %s -s %s'" % (examples_dir, sys.executable, example, CTRexScenario.configuration.trex['trex_name']))
+ self.explicitTearDown()
+ assert return_code == 0, 'example %s failed.\nstdout: %s\nstderr: %s' % (return_code, stdout, stderr)
+
diff --git a/scripts/automation/regression/stateless_tests/stl_general_test.py b/scripts/automation/regression/stateless_tests/stl_general_test.py
new file mode 100644
index 00000000..590733ba
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_general_test.py
@@ -0,0 +1,113 @@
+import os, sys
+import unittest
+from trex import CTRexScenario
+from stateful_tests.trex_general_test import CTRexGeneral_Test
+from trex_stl_lib.api import *
+import time
+from nose.tools import nottest
+
+class CStlGeneral_Test(CTRexGeneral_Test):
+ """This class defines the general stateless testcase of the TRex traffic generator"""
+
+ def setUp(self):
+ self.stl_trex = CTRexScenario.stl_trex if CTRexScenario.stl_trex else 'mock'
+ CTRexGeneral_Test.setUp(self)
+ # check basic requirements, should be verified at test_connectivity, here only skip test
+ if CTRexScenario.stl_init_error:
+ self.skip(CTRexScenario.stl_init_error)
+
+ def connect(self, timeout = 100):
+        # need delay and retry only because the TRex process might still be starting
+ sys.stdout.write('Connecting')
+ for i in range(timeout):
+ try:
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ self.stl_trex.connect()
+ print('')
+ return True
+ except:
+ time.sleep(0.1)
+ print('')
+ return False
+
+ def map_ports(self, timeout = 100):
+ sys.stdout.write('Mapping ports')
+ for i in range(timeout):
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ CTRexScenario.stl_ports_map = stl_map_ports(self.stl_trex)
+ if self.verify_bidirectional(CTRexScenario.stl_ports_map):
+ print('')
+ return True
+ time.sleep(0.1)
+ print('')
+ return False
+
+ # verify all the ports are bidirectional
+ @staticmethod
+ def verify_bidirectional(mapping_dict):
+ if len(mapping_dict['unknown']):
+ return False
+ if len(mapping_dict['bi']) * 2 == len(mapping_dict['map']):
+ return True
+ return False
+
+ @staticmethod
+ def get_port_count():
+ return CTRexScenario.stl_trex.get_port_count()
+
+ @staticmethod
+ def is_connected():
+ return CTRexScenario.stl_trex.is_connected()
+
+class STLBasic_Test(CStlGeneral_Test):
+ # will run it first explicitly, check connectivity and configure routing
+ @nottest
+ def test_connectivity(self):
+ if not self.is_loopback:
+ try:
+ sys.stdout.flush()
+ sys.stdout.write('Configuring DUT... ')
+ start_time = time.time()
+ if CTRexScenario.router_cfg['forceCleanConfig']:
+ CTRexScenario.router.load_clean_config()
+ CTRexScenario.router.configure_basic_interfaces()
+ CTRexScenario.router.config_pbr(mode = "config")
+ CTRexScenario.router.config_ipv6_pbr(mode = "config")
+ sys.stdout.write('done. (%ss)\n' % int(time.time() - start_time))
+ except Exception as e:
+ print('')
+ CTRexScenario.stl_init_error = 'Could not configure device, err: %s' % e
+ self.fail(CTRexScenario.stl_init_error)
+ return
+
+ try:
+ sys.stdout.write('Starting TRex... ')
+ start_time = time.time()
+ cores = self.configuration.trex.get('trex_cores', 1)
+ if self.is_virt_nics and cores > 1:
+ raise Exception('Number of cores should be 1 with virtual NICs')
+ if not CTRexScenario.no_daemon:
+ self.trex.start_stateless(c = cores)
+ self.stl_trex = STLClient(username = 'TRexRegression',
+ server = self.configuration.trex['trex_name'],
+ verbose_level = CTRexScenario.json_verbose)
+ CTRexScenario.stl_trex = self.stl_trex
+ sys.stdout.write('done. (%ss)\n' % int(time.time() - start_time))
+ except Exception as e:
+ print('')
+ CTRexScenario.stl_init_error = 'Could not start stateless TRex, err: %s' % e
+ self.fail(CTRexScenario.stl_init_error)
+ return
+
+ if not self.connect():
+ CTRexScenario.stl_init_error = 'Client could not connect'
+ self.fail(CTRexScenario.stl_init_error)
+ return
+ print('Connected')
+ if not self.map_ports():
+ CTRexScenario.stl_init_error = 'Client could not map ports'
+ self.fail(CTRexScenario.stl_init_error)
+ return
+ print('Got ports mapping: %s' % CTRexScenario.stl_ports_map)
diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py
new file mode 100644
index 00000000..a556daf3
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py
@@ -0,0 +1,351 @@
+import os
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+
def avg (values):
    """Return the arithmetic mean of *values* as a float.

    *values* must be a non-empty sequence of numbers.
    """
    total = 0.0
    for v in values:
        total += v
    return total / len(values)
+
+# performance report object
class PerformanceReport(object):
    """Averaged measurements of one performance scenario run.

    Provides console reporting, golden-window classification and
    Google-Analytics reporting.
    """

    # classification of a run against the golden Mpps/core window
    GOLDEN_NORMAL = 1
    GOLDEN_FAIL   = 2
    GOLDEN_BETTER = 3

    def __init__ (self,
                  scenario,
                  machine_name,
                  core_count,
                  avg_cpu,
                  avg_gbps,
                  avg_mpps,
                  avg_gbps_per_core,
                  avg_mpps_per_core,
                  ):
        self.scenario          = scenario
        self.machine_name      = machine_name
        self.core_count        = core_count
        self.avg_cpu           = avg_cpu
        self.avg_gbps          = avg_gbps
        self.avg_mpps          = avg_mpps
        self.avg_gbps_per_core = avg_gbps_per_core
        self.avg_mpps_per_core = avg_mpps_per_core

    def show (self):
        """Print the report to stdout."""
        print("\n")
        print("scenario: {0}".format(self.scenario))
        print("machine name: {0}".format(self.machine_name))
        print("DP core count: {0}".format(self.core_count))
        print("average CPU: {0}".format(self.avg_cpu))
        print("average Gbps: {0}".format(self.avg_gbps))
        print("average Mpps: {0}".format(self.avg_mpps))
        print("average pkt size (bytes): {0}".format( (self.avg_gbps * 1000 / 8) / self.avg_mpps))
        print("average Gbps per core (at 100% CPU): {0}".format(self.avg_gbps_per_core))
        print("average Mpps per core (at 100% CPU): {0}".format(self.avg_mpps_per_core))

    def check_golden (self, golden_mpps):
        """Classify avg_mpps_per_core against the golden {'min','max'} window."""
        measured = self.avg_mpps_per_core
        if measured < golden_mpps['min']:
            return self.GOLDEN_FAIL
        elif measured > golden_mpps['max']:
            return self.GOLDEN_BETTER
        else:
            return self.GOLDEN_NORMAL

    def report_to_analytics(self, ga, golden_mpps):
        """Queue this result on the GA manager *ga* and flush the queue."""
        print("\n* Reporting to GA *\n")
        ga.gaAddTestQuery(TestName = self.scenario,
                          TRexMode = 'stl',
                          SetupName = self.machine_name,
                          TestType = 'performance',
                          Mppspc = self.avg_mpps_per_core,
                          ActionNumber = os.getenv("BUILD_ID","n/a"),
                          GoldenMin = golden_mpps['min'],
                          GoldenMax = golden_mpps['max'])

        ga.emptyAndReportQ()
+
+
+class STLPerformance_Test(CStlGeneral_Test):
+ """Tests for stateless client"""
+
    def setUp(self):
        """Connect and reset the shared stateless client before each performance test."""
        CStlGeneral_Test.setUp(self)

        # shared client created during regression init (test_connectivity)
        self.c = CTRexScenario.stl_trex
        self.c.connect()
        self.c.reset()
+
+
+
    def tearDown (self):
        """Delegate teardown to the stateless base class."""
        CStlGeneral_Test.tearDown(self)
+
+
+ def build_perf_profile_vm (self, pkt_size, cache_size = None):
+ size = pkt_size - 4; # HW will add 4 bytes ethernet FCS
+ src_ip = '16.0.0.1'
+ dst_ip = '48.0.0.1'
+
+ base_pkt = Ether()/IP(src=src_ip,dst=dst_ip)/UDP(dport=12,sport=1025)
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1", max_value="10.0.0.255", size=4, step=1,op="inc"),
+ STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ),
+ STLVmFixIpv4(offset = "IP")
+ ],
+ cache_size = cache_size
+ );
+
+ pkt = STLPktBuilder(pkt = base_pkt/pad, vm = vm)
+ return STLStream(packet = pkt, mode = STLTXCont())
+
+
+ def build_perf_profile_syn_attack (self, pkt_size):
+ size = pkt_size - 4; # HW will add 4 bytes ethernet FCS
+
+ # TCP SYN
+ base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ # vm
+ vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src",
+ min_value="16.0.0.0",
+ max_value="18.0.0.254",
+ size=4, op="random"),
+
+ STLVmFlowVar(name="src_port",
+ min_value=1025,
+ max_value=65000,
+ size=2, op="random"),
+
+ STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ),
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+
+ STLVmWrFlowVar(fv_name="src_port",
+ pkt_offset= "TCP.sport") # fix udp len
+
+ ]
+ )
+
+ pkt = STLPktBuilder(pkt = base_pkt,
+ vm = vm)
+
+ return STLStream(packet = pkt,
+ random_seed = 0x1234,# can be remove. will give the same random value any run
+ mode = STLTXCont())
+
+
+
+ # single CPU, VM, no cache, 64 bytes
+ def test_performance_vm_single_cpu (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, single CPU"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64)
+ scenario_cfg['core_count'] = 1
+
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # single CPU, VM, cached, 64 bytes
+ def test_performance_vm_single_cpu_cached (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, single CPU, cache size 1024"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024)
+ scenario_cfg['core_count'] = 1
+
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # single CPU, syn attack, 64 bytes
+ def test_performance_syn_attack_single_cpu (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "syn attack - 64 bytes, single CPU"
+ scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64)
+ scenario_cfg['core_count'] = 1
+
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # two CPUs, VM, no cache, 64 bytes
+ def test_performance_vm_multi_cpus (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, multi CPUs"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64)
+
+ scenario_cfg['core_count'] = setup_cfg['core_count']
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+
+ # multi CPUs, VM, cached, 64 bytes
+ def test_performance_vm_multi_cpus_cached (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, multi CPU, cache size 1024"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024)
+
+
+ scenario_cfg['core_count'] = setup_cfg['core_count']
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # multi CPUs, syn attack, 64 bytes
+ def test_performance_syn_attack_multi_cpus (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "syn attack - 64 bytes, multi CPUs"
+ scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64)
+
+ scenario_cfg['core_count'] = setup_cfg['core_count']
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+
+############################################# test's infra functions ###########################################
+
+ def execute_single_scenario (self, scenario_cfg, iterations = 4):
+ golden = scenario_cfg['mpps_per_core_golden']
+
+
+ for i in range(iterations, -1, -1):
+ report = self.execute_single_scenario_iteration(scenario_cfg)
+ rc = report.check_golden(golden)
+
+ if (rc == PerformanceReport.GOLDEN_NORMAL) or (rc == PerformanceReport.GOLDEN_BETTER):
+ if self.GAManager:
+ report.report_to_analytics(self.GAManager, golden)
+
+ return
+
+ if rc == PerformanceReport.GOLDEN_BETTER:
+ return
+
+ print("\n*** Measured Mpps per core '{0}' is lower than expected golden '{1} - re-running scenario...{2} attempts left".format(report.avg_mpps_per_core, scenario_cfg['mpps_per_core_golden'], i))
+
+ assert 0, "performance failure"
+
+
+
+
    def execute_single_scenario_iteration (self, scenario_cfg):
        """Run one iteration of a performance scenario on port 0 and return a PerformanceReport.

        Starts the scenario's streams with the configured core mask, waits 10s
        for stabilization, samples bps/pps/CPU for 60s, then averages the
        samples into per-core Gbps/Mpps figures.
        """
        print("\nExecuting performance scenario: '{0}'\n".format(scenario_cfg['name']))

        self.c.reset(ports = [0])
        self.c.add_streams(ports = [0], streams = scenario_cfg['streams'])

        # sanity: the setup must have at least core_count DP cores per port
        cores_per_port = self.c.system_info.get('dp_core_count_per_port', 0)
        if cores_per_port < scenario_cfg['core_count']:
            assert 0, "test configuration requires {0} cores but only {1} per port are available".format(scenario_cfg['core_count'], cores_per_port)

        # bitmask selecting the first core_count cores
        core_mask = (2 ** scenario_cfg['core_count']) - 1
        self.c.start(ports = [0], mult = scenario_cfg['mult'], core_mask = [core_mask])

        # stabilize before sampling
        print("Step 1 - waiting for stabilization... (10 seconds)")
        for _ in range(10):
            time.sleep(1)
            sys.stdout.write('.')
            sys.stdout.flush()

        print("\n")

        samples = {'cpu' : [], 'bps': [], 'pps': []}

        # let the server gather samples: 3 rounds of 20 x 1s polls = 60 seconds
        print("Step 2 - Waiting for samples... (60 seconds)")

        for i in range(0, 3):

            # sample bps/pps once per second
            for _ in range(0, 20):
                stats = self.c.get_stats(ports = 0)
                samples['bps'].append(stats[0]['tx_bps'])
                samples['pps'].append(stats[0]['tx_pps'])
                time.sleep(1)
                sys.stdout.write('.')
                sys.stdout.flush()

            # sample CPU per core (server keeps a short history per DP core)
            rc = self.c._transmit('get_utilization')
            if not rc:
                raise Exception(rc)

            data = rc.data()['cpu']
            # keep only cores serving port 0
            data = [s for s in data if s['ports'][0] == 0]

            assert len(data) == scenario_cfg['core_count'] , "sampling info does not match core count"

            for s in data:
                samples['cpu'] += s['history']


        stats = self.c.get_stats(ports = 0)
        self.c.stop(ports = [0])



        # average each sample series, then normalize to per-core at 100% CPU
        avg_values = {k:avg(v) for k, v in samples.items()}
        avg_cpu = avg_values['cpu'] * scenario_cfg['core_count']
        avg_gbps = avg_values['bps'] / 1e9
        avg_mpps = avg_values['pps'] / 1e6

        avg_gbps_per_core = avg_gbps * (100.0 / avg_cpu)
        avg_mpps_per_core = avg_mpps * (100.0 / avg_cpu)

        report = PerformanceReport(scenario          = scenario_cfg['name'],
                                   machine_name      = CTRexScenario.setup_name,
                                   core_count        = scenario_cfg['core_count'],
                                   avg_cpu           = avg_cpu,
                                   avg_gbps          = avg_gbps,
                                   avg_mpps          = avg_mpps,
                                   avg_gbps_per_core = avg_gbps_per_core,
                                   avg_mpps_per_core = avg_mpps_per_core)


        report.show()

        print("")
        golden = scenario_cfg['mpps_per_core_golden']
        print("golden Mpps per core (at 100% CPU): min: {0}, max {1}".format(golden['min'], golden['max']))


        return report
+
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
new file mode 100644
index 00000000..524ad4bf
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -0,0 +1,568 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+
+ERROR_LATENCY_TOO_HIGH = 1
+
+class STLRX_Test(CStlGeneral_Test):
+ """Tests for RX feature"""
+
    def setUp(self):
        """Prepare the RX test: pick rates per NIC driver, reset ports, pre-build packets.

        Skips the test when the RX port does not support flow stats + latency.
        """
        # per-DPDK-driver tuning: rate, burst size and 9k-latency capabilities
        per_driver_params = {
            'rte_vmxnet3_pmd': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
            },
            'rte_ixgbe_pmd': {
                'rate_percent': 30,
                'total_pkts': 1000,
                'rate_latency': 1,
                'latency_9k_enable': True,
                'latency_9k_max_average': 300,
                'latency_9k_max_latency': 400,
            },
            'rte_i40e_pmd': {
                'rate_percent': 80,
                'total_pkts': 1000,
                'rate_latency': 1,
                'latency_9k_enable': True,
                'latency_9k_max_average': 100,
                'latency_9k_max_latency': 250,
            },
            'rte_igb_pmd': {
                'rate_percent': 80,
                'total_pkts': 500,
                'rate_latency': 1,
                'latency_9k_enable': False,
            },
            'rte_em_pmd': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
            },
            'rte_virtio_pmd': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
                'allow_packets_drop_num': 1, # allow 1 pkt drop
            },
        }

        CStlGeneral_Test.setUp(self)
        # a bi-directional port pair is required for TX -> RX verification
        assert 'bi' in CTRexScenario.stl_ports_map

        self.c = CTRexScenario.stl_trex

        self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]

        port_info = self.c.get_port_info(ports = self.rx_port)[0]
        self.speed = port_info['speed']


        # RX feature support check: both flow stats and latency are needed
        cap = port_info['rx']['caps']
        if "flow_stats" not in cap or "latency" not in cap:
            self.skip('port {0} does not support RX'.format(self.rx_port))
        self.cap = cap

        drv_name = port_info['driver']
        # ixgbe is the only driver here without IPv6 flow-stats support
        if drv_name == 'rte_ixgbe_pmd':
            self.ipv6_support = False
        else:
            self.ipv6_support = True
        self.rate_percent = per_driver_params[drv_name]['rate_percent']
        self.total_pkts = per_driver_params[drv_name]['total_pkts']
        self.rate_lat = per_driver_params[drv_name].get('rate_latency', self.rate_percent)
        self.latency_9k_enable = per_driver_params[drv_name]['latency_9k_enable']
        self.latency_9k_max_average = per_driver_params[drv_name].get('latency_9k_max_average')
        self.latency_9k_max_latency = per_driver_params[drv_name].get('latency_9k_max_latency')
        self.allow_drop = per_driver_params[drv_name].get('allow_packets_drop_num', 0)

        self.lat_pps = 1000
        self.drops_expected = False
        self.c.reset(ports = [self.tx_port, self.rx_port])

        vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src",  min_value="10.0.0.1",
                                          max_value="10.0.0.255", size=4, step=1,op="inc"),
                           STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ), # write ip to packet IP.src
                           STLVmFixIpv4(offset = "IP") # fix checksum
                         ]
                         # Latency is bound to one core. We test that this option is not causing trouble
                         ,split_by_field = "ip_src"
                         ,cache_size =255 # Cache is ignored by latency flows. Need to test it is not crashing.
                       );

        # pre-built packets reused by the individual tests
        self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('Your_paylaod_comes_here'))
        self.ipv6pkt = STLPktBuilder(pkt = Ether()/IPv6(dst="2001:0:4137:9350:8000:f12a:b9c8:2815",src="2001:4860:0:2001::68")
                                     /UDP(dport=12,sport=1025)/('Your_paylaod_comes_here'))
        self.large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000))
        self.pkt_9k = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000))
        self.vm_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")
                                    / UDP(dport=12,sport=1025)/('Your_paylaod_comes_here')
                                    , vm = vm)
        self.vm_large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000)
                                          , vm = vm)
        self.vm_9k_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000)
                                       ,vm = vm)
+
+
+ @classmethod
+ def tearDownClass(cls):
+ if CTRexScenario.stl_init_error:
+ return
+ # connect back at end of tests
+ if not cls.is_connected():
+ CTRexScenario.stl_trex.connect()
+
+
+ def __verify_latency (self, latency_stats,max_latency,max_average):
+
+ error=0;
+ err_latency = latency_stats['err_cntrs']
+ latency = latency_stats['latency']
+
+ for key in err_latency :
+ error +=err_latency[key]
+ if error !=0 :
+ pprint.pprint(err_latency)
+ tmp = 'RX pkts ERROR - one of the error is on'
+ print(tmp)
+ assert False, tmp
+
+ if latency['average']> max_average:
+ pprint.pprint(latency_stats)
+ tmp = 'Average latency is too high {0} {1} '.format(latency['average'], max_average)
+ print(tmp)
+ return ERROR_LATENCY_TOO_HIGH
+
+ if latency['total_max']> max_latency:
+ pprint.pprint(latency_stats)
+ tmp = 'Max latency is too high {0} {1} '.format(latency['total_max'], max_latency)
+ print(tmp)
+ return ERROR_LATENCY_TOO_HIGH
+
+ return 0
+
+
+
    def __verify_flow (self, pg_id, total_pkts, pkt_len, stats):
        """Verify TX/RX counters of one flow (*pg_id*) against the expected packet count/size.

        Uses self.allow_drop and self.drops_expected to tolerate known-lossy setups.
        Fails the test via assert on any unexpected mismatch.
        """
        flow_stats = stats['flow_stats'].get(pg_id)
        latency_stats = stats['latency'].get(pg_id)

        if not flow_stats:
            assert False, "no flow stats available"

        tx_pkts  = flow_stats['tx_pkts'].get(self.tx_port, 0)
        tx_bytes = flow_stats['tx_bytes'].get(self.tx_port, 0)
        rx_pkts  = flow_stats['rx_pkts'].get(self.rx_port, 0)
        if latency_stats is not None:
            drops = latency_stats['err_cntrs']['dropped']
            ooo = latency_stats['err_cntrs']['out_of_order']
            dup = latency_stats['err_cntrs']['dup']
            sth = latency_stats['err_cntrs']['seq_too_high']
            stl = latency_stats['err_cntrs']['seq_too_low']
            lat = latency_stats['latency']
            # reorder / duplication / low sequence are never acceptable
            if ooo != 0 or dup != 0 or stl != 0:
                pprint.pprint(latency_stats)
                tmp='Error packets - dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl)
                assert False, tmp

            # drops are tolerated only up to allow_drop, and only when expected
            if (drops > self.allow_drop or sth != 0) and not self.drops_expected:
                pprint.pprint(latency_stats)
                tmp='Error packets - dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl)
                assert False, tmp

        if tx_pkts != total_pkts:
            pprint.pprint(flow_stats)
            tmp = 'TX pkts mismatch - got: {0}, expected: {1}'.format(tx_pkts, total_pkts)
            assert False, tmp

        if tx_bytes != (total_pkts * pkt_len):
            pprint.pprint(flow_stats)
            tmp = 'TX bytes mismatch - got: {0}, expected: {1}'.format(tx_bytes, (total_pkts * pkt_len))
            assert False, tmp

        if abs(total_pkts - rx_pkts) > self.allow_drop and not self.drops_expected:
            pprint.pprint(flow_stats)
            tmp = 'RX pkts mismatch - got: {0}, expected: {1}'.format(rx_pkts, total_pkts)
            assert False, tmp

        # rx byte counters exist only on NICs advertising the capability
        if "rx_bytes" in self.cap:
            rx_bytes = flow_stats['rx_bytes'].get(self.rx_port, 0)
            if abs(rx_bytes / pkt_len - total_pkts ) > self.allow_drop and not self.drops_expected:
                pprint.pprint(flow_stats)
                tmp = 'RX bytes mismatch - got: {0}, expected: {1}'.format(rx_bytes, (total_pkts * pkt_len))
                assert False, tmp
+
+
+ # RX itreation
+ def __rx_iteration (self, exp_list):
+
+ self.c.clear_stats()
+
+ self.c.start(ports = [self.tx_port])
+ self.c.wait_on_traffic(ports = [self.tx_port])
+ stats = self.c.get_stats()
+
+ for exp in exp_list:
+ self.__verify_flow(exp['pg_id'], exp['total_pkts'], exp['pkt_len'], stats)
+
+
+ # one stream on TX --> RX
+ def test_one_stream(self):
+ total_pkts = self.total_pkts
+
+ try:
+ s1 = STLStream(name = 'rx',
+ packet = self.pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = 5),
+ mode = STLTXSingleBurst(total_pkts = total_pkts,
+ percentage = self.rate_lat
+ ))
+
+ # add both streams to ports
+ self.c.add_streams([s1], ports = [self.tx_port])
+
+ print("\ninjecting {0} packets on port {1}\n".format(total_pkts, self.tx_port))
+
+ exp = {'pg_id': 5, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()}
+
+ self.__rx_iteration( [exp] )
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
    def test_multiple_streams(self):
        """Run 127 latency + 126 flow-stat streams in parallel and verify each flow.

        Packet budget and rate are split evenly across all streams.
        """
        if self.is_virt_nics:
            self.skip('Skip this for virtual NICs')

        num_latency_streams = 128
        num_flow_stat_streams = 127
        # split the per-driver packet budget across all streams (at least 1 each)
        total_pkts = int(self.total_pkts / (num_latency_streams + num_flow_stat_streams))
        if total_pkts == 0:
            total_pkts = 1
        percent = float(self.rate_lat) / (num_latency_streams + num_flow_stat_streams)

        try:
            streams = []
            exp = []
            # latency streams: pg_ids 1..127, each with a slightly different burst size
            for pg_id in range(1, num_latency_streams):

                streams.append(STLStream(name = 'rx {0}'.format(pg_id),
                                         packet = self.pkt,
                                         flow_stats = STLFlowLatencyStats(pg_id = pg_id),
                                         mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent)))

                exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})

            # flow-stat (no latency) streams: pg_ids 129..254
            for pg_id in range(num_latency_streams + 1, num_latency_streams + num_flow_stat_streams):

                streams.append(STLStream(name = 'rx {0}'.format(pg_id),
                                         packet = self.pkt,
                                         flow_stats = STLFlowStats(pg_id = pg_id),
                                         mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent)))

                exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})

            # add both streams to ports
            self.c.add_streams(streams, ports = [self.tx_port])

            self.__rx_iteration(exp)


        except STLError as e:
            assert False , '{0}'.format(e)
+
    def test_1_stream_many_iterations (self):
        """Run each packet flavor (plain / field-engine / large / 9k / IPv6) for 10 RX iterations."""
        total_pkts = self.total_pkts

        try:
            streams_data = [
                {'name': 'Flow stat. No latency', 'pkt': self.pkt, 'lat': False},
                {'name': 'Latency, no field engine', 'pkt': self.pkt, 'lat': True},
                {'name': 'Latency, short packet with field engine', 'pkt': self.vm_pkt, 'lat': True},
                {'name': 'Latency, large packet field engine', 'pkt': self.vm_large_pkt, 'lat': True}
            ]
            if self.latency_9k_enable:
                streams_data.append({'name': 'Latency, 9k packet with field engine', 'pkt': self.vm_9k_pkt, 'lat': True})

            if self.ipv6_support:
                streams_data.append({'name': 'IPv6 flow stat. No latency', 'pkt': self.ipv6pkt, 'lat': False})
                streams_data.append({'name': 'IPv6 latency, no field engine', 'pkt': self.ipv6pkt, 'lat': True})

            streams = []
            for data in streams_data:
                # NOTE(review): 'lat' streams burst at rate_percent while non-latency
                # streams are paced at lat_pps - looks inverted; confirm intent
                if data['lat']:
                    flow_stats = STLFlowLatencyStats(pg_id = 5)
                    mode = STLTXSingleBurst(total_pkts = total_pkts, percentage = self.rate_percent)
                else:
                    flow_stats = STLFlowStats(pg_id = 5)
                    mode = STLTXSingleBurst(total_pkts = total_pkts, pps = self.lat_pps)

                s = STLStream(name = data['name'],
                              packet = data['pkt'],
                              flow_stats = flow_stats,
                              mode = mode
                              )
                streams.append(s)

            print("\ninjecting {0} packets on port {1}".format(total_pkts, self.tx_port))
            exp = {'pg_id': 5, 'total_pkts': total_pkts}

            # run each stream alone, 10 iterations per stream
            for stream in streams:
                self.c.add_streams([stream], ports = [self.tx_port])
                print("Stream: {0}".format(stream.name))
                exp['pkt_len'] = stream.get_pkt_len()
                for i in range(0, 10):
                    print("Iteration {0}".format(i))
                    self.__rx_iteration( [exp] )
                self.c.remove_all_streams(ports = [self.tx_port])


        except STLError as e:
            assert False , '{0}'.format(e)
+
+
+
+ def __9k_stream(self,pgid,ports,precet,max_latency,avg_latency,duration,pkt_size):
+ my_pg_id=pgid
+ s_ports=ports;
+ all_ports=list(CTRexScenario.stl_ports_map['map'].keys());
+ if ports == None:
+ s_ports=all_ports
+ assert( type(s_ports)==list)
+
+ stream_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*pkt_size))
+
+ try:
+ # reset all ports
+ self.c.reset(ports = all_ports)
+
+
+ for pid in s_ports:
+ s1 = STLStream(name = 'rx',
+ packet = self.pkt,
+ flow_stats = STLFlowLatencyStats(pg_id = my_pg_id+pid),
+ mode = STLTXCont(pps = 1000))
+
+ s2 = STLStream(name = 'bulk',
+ packet = stream_pkt,
+ mode = STLTXCont(percentage =precet))
+
+
+ # add both streams to ports
+ self.c.add_streams([s1,s2], ports = [pid])
+
+ self.c.clear_stats()
+
+ self.c.start(ports = s_ports,duration = duration)
+ self.c.wait_on_traffic(ports = s_ports,timeout = duration+10,rx_delay_ms = 100)
+ stats = self.c.get_stats()
+
+ for pid in s_ports:
+ latency_stats = stats['latency'].get(my_pg_id+pid)
+ #pprint.pprint(latency_stats)
+ if self.__verify_latency (latency_stats,max_latency,avg_latency) !=0:
+ return (ERROR_LATENCY_TOO_HIGH);
+
+ return 0
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+
+
+
    # check low latency when you have stream of 9K stream
    def test_9k_stream(self):
        """Randomized jumbo-frame latency test: 5 scenarios, each retried up to 5 times."""
        if self.is_virt_nics:
            self.skip('Skip this for virtual NICs')

        if self.latency_9k_enable == False:
            print("SKIP")
            return

        for i in range(0,5):
            print("Iteration {0}".format(i));
            # randomize duration, pg_id base, packet size and port subset
            duration=random.randint(10, 70);
            pgid=random.randint(1, 65000);
            pkt_size=random.randint(1000, 9000);
            all_ports = list(CTRexScenario.stl_ports_map['map'].keys());


            s_port=random.sample(all_ports, random.randint(1, len(all_ports)) )
            s_port=sorted(s_port)
            if self.speed == 40 :
                # the NIC does not support all full rate in case both port works let's filter odd ports
                s_port=list(filter(lambda x: x % 2==0, s_port))
                if len(s_port)==0:
                    s_port=[0];

            # retry up to 5 times before declaring the iteration failed
            error=1;
            for j in range(0,5):
                print(" {4} - duration {0} pgid {1} pkt_size {2} s_port {3} ".format(duration,pgid,pkt_size,s_port,j));
                if self.__9k_stream(pgid,
                                    s_port,90,
                                    self.latency_9k_max_latency,
                                    self.latency_9k_max_average,
                                    duration,
                                    pkt_size)==0:
                    error=0;
                    break;

            if error:
                assert False , "Latency too high"
            else:
                print("===>Iteration {0} PASS {1}".format(i,j));
+
+
+ def check_stats (self,stats,a,b,err):
+ if a != b:
+ tmp = 'ERROR field : {0}, read : {1} != expected : {2} '.format(err,a,b)
+ pprint.pprint(stats)
+ assert False,tmp
+
+
+
    def send_1_burst(self,from_port,is_latency,pkts):
        """Send one 64-byte burst from *from_port* and verify port/global/flow counters.

        is_latency selects a latency-enabled stream (pg_id = 5 + port id).
        Returns 0 on success; mismatches abort via check_stats().
        """
        pid = from_port
        base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)

        # pad to 60 bytes; HW appends 4 bytes FCS, giving a 64-byte wire size
        pad = (60 - len(base_pkt)) * 'x'

        stream_pkt = STLPktBuilder(pkt = base_pkt/pad)

        all_ports=list(CTRexScenario.stl_ports_map['map'].keys());

        # dpid is the peer port expected to receive the burst
        dpid = CTRexScenario.stl_ports_map['map'][pid]

        s_ports =[pid]

        try:
            # reset all ports
            self.c.reset(ports = all_ports)


            for pid in s_ports:
                if is_latency:
                    s1 = STLStream(name = 'rx',
                                   packet = stream_pkt,
                                   flow_stats = STLFlowLatencyStats(pg_id = 5 + pid),
                                   mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000))
                else:
                    s1 = STLStream(name = 'rx',
                                   packet = stream_pkt,
                                   mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000))


                # add both streams to ports
                self.c.add_streams(s1, ports = [pid])

            self.c.clear_stats()

            self.c.start(ports = s_ports)
            self.c.wait_on_traffic(ports = s_ports)

            stats = self.c.get_stats()

            ips = stats[dpid]      # ingress (peer) port counters
            ops = stats[pid]       # egress port counters
            tps = stats['total']   # global counters
            tbytes = pkts*64       # wire bytes, FCS included

            self.check_stats (stats,ops["obytes"], tbytes,"ops[obytes]")
            self.check_stats (stats,ops["opackets"], pkts,"ops[opackets]")

            self.check_stats (stats,ips["ibytes"], tbytes,"ips[ibytes]")
            self.check_stats (stats,ips["ipackets"], pkts,"ips[ipackets]")

            self.check_stats (stats,tps['ibytes'], tbytes,"tps[ibytes]")
            self.check_stats (stats,tps['obytes'], tbytes,"tps[obytes]")
            self.check_stats (stats,tps['ipackets'], pkts,"tps[ipackets]")
            self.check_stats (stats,tps['opackets'], pkts,"tps[opackets]")

            if is_latency:
                # per-flow counters of the latency stream
                ls=stats['flow_stats'][5+ pid]
                self.check_stats (stats,ls['rx_pkts']['total'], pkts,"ls['rx_pkts']['total']")
                self.check_stats (stats,ls['rx_pkts'][dpid], pkts,"ls['rx_pkts'][dpid]")

                self.check_stats (stats,ls['tx_pkts']['total'], pkts,"ls['tx_pkts']['total']")
                self.check_stats (stats,ls['tx_pkts'][pid], pkts,"ls['tx_pkts'][pid]")

                self.check_stats (stats,ls['tx_bytes']['total'], tbytes,"ls['tx_bytes']['total']")
                self.check_stats (stats,ls['tx_bytes'][pid], tbytes,"ls['tx_bytes'][pid]")


            return 0

        except STLError as e:
            assert False , '{0}'.format(e)
+
+
+
+ def test_fcs_stream(self):
+ """ this test send 1 64 byte packet with latency and check that all counters are reported as 64 bytes"""
+
+ if self.is_virt_nics:
+ self.skip('Skip this for virtual NICs')
+
+ all_ports=list(CTRexScenario.stl_ports_map['map'].keys());
+ for port in all_ports:
+ for l in [True,False]:
+ print(" test port {0} latency : {1} ".format(port,l))
+ self.send_1_burst(port,l,100)
+
+
    # this test adds more and more latency streams and re-test with incremental
    def test_incremental_latency_streams (self):
        """Incrementally stack latency streams until drops are expected, then re-test with fewer.

        Alternates small/large packets; once cumulative rate exceeds rate_lat,
        drops become acceptable (drops_expected flag).
        """
        if self.is_virt_nics:
            self.skip('Skip this for virtual NICs')

        total_pkts = self.total_pkts
        percent = 0.5

        try:
            # We run till maximum streams allowed. At some point, expecting drops, because rate is too high.
            # then run with less streams again, to see that system is still working.
            for num_iter in [128, 5]:
                exp = []
                for i in range(1, num_iter):
                    # mix small and large packets
                    if i % 2 != 0:
                        my_pkt = self.pkt
                    else:
                        my_pkt = self.large_pkt
                    s1 = STLStream(name = 'rx',
                                   packet = my_pkt,
                                   flow_stats = STLFlowLatencyStats(pg_id = i),
                                   mode = STLTXSingleBurst(total_pkts = total_pkts,
                                                           percentage = percent
                                                           ))

                    # streams accumulate on the port - i streams active this round
                    self.c.add_streams([s1], ports = [self.tx_port])
                    total_percent = i * percent
                    # past the latency rate budget, drops are legitimate
                    if total_percent > self.rate_lat:
                        self.drops_expected = True
                    else:
                        self.drops_expected = False

                    print("port {0} : {1} streams at {2}% of line rate\n".format(self.tx_port, i, total_percent))

                    exp.append({'pg_id': i, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()})

                    self.__rx_iteration( exp )

                self.c.remove_all_streams(ports = [self.tx_port])

        except STLError as e:
            assert False , '{0}'.format(e)
diff --git a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
new file mode 100755
index 00000000..14ef36f7
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
@@ -0,0 +1,39 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from misc_methods import run_command
+from nose.plugins.attrib import attr
+
@attr('client_package')
class CTRexClientPKG_Test(CStlGeneral_Test):
    """Runs the packaged TRex client examples under both python2 and python3."""

    def setUp(self):
        CStlGeneral_Test.setUp(self)
        # examples connect by their own
        if CTRexScenario.stl_trex.is_connected():
            CTRexScenario.stl_trex.disconnect()
        CStlGeneral_Test.unzip_client_package()

    def tearDown(self):
        # connect back at end of tests
        if not CTRexScenario.stl_trex.is_connected():
            CTRexScenario.stl_trex.connect()
        CStlGeneral_Test.tearDown(self)

    def run_client_package_stl_example(self, python_version):
        """Run the stl_imix example from the unzipped client package with the given interpreter."""
        commands = '; '.join([
            'cd %s' % CTRexScenario.scripts_path,
            'source find_python.sh --%s' % python_version,
            'which $PYTHON',
            'cd trex_client/stl/examples',
            '$PYTHON stl_imix.py -s %s' % self.configuration.trex['trex_name'],
        ])
        return_code, stdout, stderr = run_command("bash -ce '%s'" % commands)
        if return_code:
            self.fail('Error in running stf_example using %s: %s' % (python_version, [return_code, stdout, stderr]))

    def test_client_python2(self):
        self.run_client_package_stl_example(python_version = 'python2')

    def test_client_python3(self):
        self.run_client_package_stl_example(python_version = 'python3')
diff --git a/scripts/automation/regression/test_pcaps/pcap_dual_test.erf b/scripts/automation/regression/test_pcaps/pcap_dual_test.erf
new file mode 100644
index 00000000..26b0b6b4
--- /dev/null
+++ b/scripts/automation/regression/test_pcaps/pcap_dual_test.erf
Binary files differ
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
new file mode 100644
index 00000000..7b96f2f8
--- /dev/null
+++ b/scripts/automation/regression/trex.py
@@ -0,0 +1,457 @@
+#!/router/bin/python
+
+import os
+import sys
+import subprocess
+import misc_methods
+import re
+import signal
+import time
+from CProgressDisp import TimedProgressBar
+from stateful_tests.tests_exceptions import TRexInUseError
+import datetime
+
class CTRexScenario:
    """Process-wide state shared by all regression tests.

    Used as a plain namespace of class-level attributes; never instantiated.
    """
    modes = set() # list of modes of this setup: loopback, virtual etc.
    server_logs = False
    is_test_list = False
    is_init = False
    is_stl_init = False
    trex_crashed = False
    configuration = None
    trex = None                # stateful client (CTRexClient)
    stl_trex = None            # stateless client
    stl_ports_map = None
    stl_init_error = None
    router = None
    router_cfg = None
    daemon_log_lines = 0
    setup_name = None
    setup_dir = None
    router_image = None
    trex_version = None
    scripts_path = None
    benchmark = None
    report_dir = 'reports'     # destination of xunit/info reports
    # logger = None
    test_types = {'functional_tests': [], 'stateful_tests': [], 'stateless_tests': []}
    is_copied = False          # set once the TRex package was installed on the server
    GAManager = None
    no_daemon = False
    debug_image = False
    test = None                # substring filter for selecting tests by name
    json_verbose = False
+
+class CTRexRunner:
+ """This is an instance for generating a CTRexRunner"""
+
+ def __init__ (self, config_dict, yaml):
+ self.trex_config = config_dict#misc_methods.load_config_file(config_file)
+ self.yaml = yaml
+
+
+ def get_config (self):
+ """ get_config() -> dict
+
+ Returns the stored configuration of the TRex server of the CTRexRunner instance as a dictionary
+ """
+ return self.trex_config
+
+ def set_yaml_file (self, yaml_path):
+ """ update_yaml_file (self, yaml_path) -> None
+
+ Defines the yaml file to be used by the TRex.
+ """
+ self.yaml = yaml_path
+
+
+ def generate_run_cmd (self, multiplier, cores, duration, nc = True, export_path="/tmp/trex.txt", **kwargs):
+ """ generate_run_cmd(self, multiplier, duration, export_path) -> str
+
+ Generates a custom running command for the kick-off of the TRex traffic generator.
+ Returns a command (string) to be issued on the trex server
+
+ Parameters
+ ----------
+ multiplier : float
+ Defines the TRex multiplier factor (platform dependant)
+ duration : int
+ Defines the duration of the test
+ export_path : str
+ a full system path to which the results of the trex-run will be logged.
+
+ """
+ fileName, fileExtension = os.path.splitext(self.yaml)
+ if self.yaml == None:
+ raise ValueError('TRex yaml file is not defined')
+ elif fileExtension != '.yaml':
+ raise TypeError('yaml path is not referencing a .yaml file')
+
+ if 'results_file_path' in kwargs:
+ export_path = kwargs['results_file_path']
+
+ trex_cmd_str = './t-rex-64 -c %d -m %f -d %d -f %s '
+
+ if nc:
+ trex_cmd_str = trex_cmd_str + ' --nc '
+
+ trex_cmd = trex_cmd_str % (cores,
+ multiplier,
+ duration,
+ self.yaml)
+ # self.trex_config['trex_latency'])
+
+ for key, value in kwargs.items():
+ tmp_key = key.replace('_','-')
+ dash = ' -' if (len(key)==1) else ' --'
+ if value == True:
+ trex_cmd += (dash + tmp_key)
+ else:
+ trex_cmd += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
+
+ print("\nTRex COMMAND: ", trex_cmd)
+
+ cmd = 'sshpass.exp %s %s root "cd %s; %s > %s"' % (self.trex_config['trex_password'],
+ self.trex_config['trex_name'],
+ self.trex_config['trex_version_path'],
+ trex_cmd,
+ export_path)
+
+ return cmd;
+
+ def generate_fetch_cmd (self, result_file_full_path="/tmp/trex.txt"):
+ """ generate_fetch_cmd(self, result_file_full_path) -> str
+
+ Generates a custom command for which will enable to fetch the resutls of the TRex run.
+ Returns a command (string) to be issued on the trex server.
+
+ Example use: fetch_trex_results() - command that will fetch the content from the default log file- /tmp/trex.txt
+ fetch_trex_results("/tmp/trex_secondary_file.txt") - command that will fetch the content from a custom log file- /tmp/trex_secondary_file.txt
+ """
+ #dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+ script_running_dir = os.path.dirname(os.path.realpath(__file__)) # get the current script working directory so that the sshpass could be accessed.
+ cmd = script_running_dir + '/sshpass.exp %s %s root "cat %s"' % (self.trex_config['trex_password'],
+ self.trex_config['trex_name'],
+ result_file_full_path);
+ return cmd;
+
+
+
+ def run (self, multiplier, cores, duration, **kwargs):
+ """ run(self, multiplier, duration, results_file_path) -> CTRexResults
+
+ Running the TRex server based on the config file.
+ Returns a CTRexResults object containing the results of the run.
+
+ Parameters
+ ----------
+ multiplier : float
+ Defines the TRex multiplier factor (platform dependant)
+ duration : int
+ Defines the duration of the test
+ results_file_path : str
+ a full system path to which the results of the trex-run will be logged and fetched from.
+
+ """
+ tmp_path = None
+ # print kwargs
+ if 'export_path' in kwargs:
+ tmp_path = kwargs['export_path']
+ del kwargs['export_path']
+ cmd = self.generate_run_cmd(multiplier, cores, duration, tmp_path, **kwargs)
+ else:
+ cmd = self.generate_run_cmd(multiplier, cores, duration, **kwargs)
+
+# print 'TRex complete command to be used:'
+# print cmd
+ # print kwargs
+
+ progress_thread = TimedProgressBar(duration)
+ progress_thread.start()
+ interrupted = False
+ try:
+ start_time = time.time()
+ start = datetime.datetime.now()
+ results = subprocess.call(cmd, shell = True, stdout = open(os.devnull, 'wb'))
+ end_time = time.time()
+ fin = datetime.datetime.now()
+ # print "Time difference : ", fin-start
+ runtime_deviation = abs(( (end_time - start_time)/ (duration+15) ) - 1)
+ print("runtime_deviation: %2.0f %%" % ( runtime_deviation*100.0))
+ if ( runtime_deviation > 0.6 ) :
+ # If the run stopped immediately - classify as Trex in use or reachability issue
+ interrupted = True
+ if ((end_time - start_time) < 2):
+ raise TRexInUseError ('TRex run failed since TRex is used by another process, or due to reachability issues')
+ else:
+ CTRexScenario.trex_crashed = True
+ # results = subprocess.Popen(cmd, stdout = open(os.devnull, 'wb'),
+ # shell=True, preexec_fn=os.setsid)
+ except KeyboardInterrupt:
+ print("\nTRex test interrupted by user during traffic generation!!")
+ results.killpg(results.pid, signal.SIGTERM) # Send the kill signal to all the process groups
+ interrupted = True
+ raise RuntimeError
+ finally:
+ progress_thread.join(isPlannedStop = (not interrupted) )
+
+ if results!=0:
+ sys.stderr.write("TRex run failed. Please Contact trex-dev mailer for further details")
+ sys.stderr.flush()
+ return None
+ elif interrupted:
+ sys.stderr.write("TRex run failed due user-interruption.")
+ sys.stderr.flush()
+ return None
+ else:
+
+ if tmp_path:
+ cmd = self.generate_fetch_cmd( tmp_path )#**kwargs)#results_file_path)
+ else:
+ cmd = self.generate_fetch_cmd()
+
+ try:
+ run_log = subprocess.check_output(cmd, shell = True)
+ trex_result = CTRexResult(None, run_log)
+ trex_result.load_file_lines()
+ trex_result.parse()
+
+ return trex_result
+
+ except subprocess.CalledProcessError:
+ sys.stderr.write("TRex result fetching failed. Please Contact trex-dev mailer for further details")
+ sys.stderr.flush()
+ return None
+
class CTRexResult():
    """Parses and stores the textual output of a TRex run.

    Parameters
    ----------
    file : str or None
        Path of a result file to read (used when no buffer is given).
    buffer : str or bytes, optional
        Raw run output; when provided it takes precedence over 'file'.
    """

    # return types of get_status()
    STATUS_OK = 0
    STATUS_ERR_FATAL = 1
    STATUS_ERR_LATENCY = 2
    STATUS_ERR_DROP = 3
    STATUS_ERR_RX_TX_DISTANCE = 4
    # BUG FIX: this constant had a trailing comma, making it the tuple (5,)
    STATUS_ERR_BAD_EXPECTED_MEASUREMENT = 5
    STATUS_ERR_LATENCY_ANY_ERROR = 6

    def __init__ (self, file, buffer = None):
        self.file = file
        self.buffer = buffer
        self.result = {}

    def load_file_lines (self):
        """ load_file_lines(self) -> None

        Loads into self.lines the content of self.buffer (if set) or self.file.
        """
        if self.buffer:
            buf = self.buffer
            if isinstance(buf, bytes):
                # BUG FIX: subprocess.check_output returns bytes on Python 3;
                # decode so the regex parsing below works on str lines.
                buf = buf.decode('utf-8', errors = 'replace')
            self.lines = buf.split("\n")
        else:
            f = open(self.file,'r')
            self.lines = f.readlines()
            f.close()

    def dump (self):
        """ dump(self) -> None

        Prints nicely the content of self.result dictionary to the screen
        """
        for key, value in self.result.items():
            print("{0:20} : \t{1}".format(key, float(value)))

    def update (self, key, val, _str):
        """ update (self, key, val, _str) -> None

        Updates self.result[key] with val scaled by the K/M/G multiplier prefix
        found in _str (e.g. 15 with 'K' becomes 15000.0).
        An existing positive value is only replaced when the new value is at
        least ~97% of it, so the stored value tracks the peak measurement.

        Parameters
        ----------
        key :
            Key of the self.result dictionary of the TRexResult instance
        val : float
            Raw numeric value before scaling
        _str : str
            Textual units/suffix whose first character may be G/M/K
        """
        s = _str.strip()

        # BUG FIX: guard against an empty suffix; s[0] used to raise IndexError
        if s:
            if s[0]=="G":
                val = val*1E9
            elif s[0]=="M":
                val = val*1E6
            elif s[0]=="K":
                val = val*1E3

        if key in self.result:
            if self.result[key] > 0:
                if (val/self.result[key] > 0.97 ):
                    self.result[key]= val
            else:
                self.result[key] = val
        else:
            self.result[key] = val

    def parse (self):
        """ parse(self) -> None

        Parses self.lines (the raw TRex run output) into self.result.
        Lines are matched against a series of regexes; counters seen before the
        'latency daemon has stopped' / rte_config markers go through update(),
        later ones simply overwrite with the latest value.
        """
        stop_read = False
        # keys in this set use the peak-keeping update(); others take the latest value
        d = {
            'total-tx'      : 0,
            'total-rx'      : 0,
            'total-pps'     : 0,
            'total-cps'     : 0,

            'expected-pps'  : 0,
            'expected-cps'  : 0,
            'expected-bps'  : 0,
            'active-flows'  : 0,
            'open-flows'    : 0
        }

        self.error = ""

        # Parse the output of the test, line by line (each line matches another
        # RegEx and as such different rules apply)
        for line in self.lines:
            match = re.match(".*/var/run/.rte_config.*", line)
            if match:
                stop_read = True
                continue

            # check if we need to stop reading
            match = re.match(".*latency daemon has stopped.*", line)
            if match:
                stop_read = True
                continue

            # "<key> : <float><units> <key2> : <int>" - a pair on one line
            match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+[.]\d+)(.*ps)\s+(\w+)\W*([:]|[=])\W*(\d+)", line)
            if match:
                key = misc_methods.mix_string(match.group(1))
                val = float(match.group(4))
                if key in d:
                    if stop_read == False:
                        self.update (key, val, match.group(5))
                    else:
                        self.result[key] = val # update latest
                key2 = misc_methods.mix_string(match.group(6))
                val2 = int(match.group(8))
                self.result[key2] = val2 # always take latest

            # "<key> : <float><suffix>"
            match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+[.]\d+)(.*)", line)
            if match:
                key = misc_methods.mix_string(match.group(1))
                val = float(match.group(4))
                if key in d:
                    if stop_read == False:
                        self.update (key, val, match.group(5))
                    else:
                        self.result[key] = val # update latest
                continue

            # "<key> : <int>"
            match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(\d+)(.*)", line)
            if match:
                key = misc_methods.mix_string(match.group(1))
                val = float(match.group(4))
                self.result[key] = val # update latest
                continue

            # "<key> : OK" - recorded as 0 (valid)
            match = re.match("\W*(\w(\w|[-])+)\W*([:]|[=])\W*(OK)(.*)", line)
            if match:
                key = misc_methods.mix_string(match.group(1))
                val = 0 # valid
                self.result[key] = val # update latest
                continue

            # CPU utilization: keep the maximum seen
            match = re.match("\W*(Cpu Utilization)\W*([:]|[=])\W*(\d+[.]\d+) %(.*)", line)
            if match:
                key = misc_methods.mix_string(match.group(1))
                val = float(match.group(3))
                if key in self.result:
                    if (self.result[key] < val): # update only if larger than previous value
                        self.result[key] = val
                else:
                    self.result[key] = val
                continue

            # rx_check lines: value may be a word (validation case) or an int
            match = re.match(".*(rx_check\s.*)\s+:\s+(\w+)", line)
            if match:
                key = misc_methods.mix_string(match.group(1))
                try:
                    val = int(match.group(2))
                except ValueError: # corresponds with rx_check validation case
                    val = match.group(2)
                finally:
                    self.result[key] = val
                continue

    def get_status (self, drop_expected = False):
        """ get_status(self, drop_expected) -> int

        Classifies the parsed results into one of the STATUS_* codes.
        NOTE(review): assumes parse() populated the referenced keys
        (maximum-latency, total-pkt-drop, total-tx/rx, expected-bps,
        latency-any-error); a missing key raises KeyError.
        """
        if (self.error != ""):
            print(self.error)
            return (self.STATUS_ERR_FATAL)

        d = self.result

        # test for latency
        latency_limit = 5000
        if ( d['maximum-latency'] > latency_limit ):
            self.reason="Abnormal latency measured (higher than %s" % latency_limit
            return self.STATUS_ERR_LATENCY

        # test for drops
        if drop_expected == False:
            if ( d['total-pkt-drop'] > 0 ):
                self.reason=" At least one packet dropped "
                return self.STATUS_ERR_DROP

        # test for rx/tx distance
        rcv_vs_tx = d['total-tx']/d['total-rx']
        if ( (rcv_vs_tx >1.2) or (rcv_vs_tx <0.9) ):
            self.reason="rx and tx should be close"
            return self.STATUS_ERR_RX_TX_DISTANCE

        # expected measurement
        expect_vs_measued=d['total-tx']/d['expected-bps']
        if ( (expect_vs_measued >1.1) or (expect_vs_measued < 0.9) ) :
            print(expect_vs_measued)
            print(d['total-tx'])
            print(d['expected-bps'])
            self.reason="measure is not as expected"
            return self.STATUS_ERR_BAD_EXPECTED_MEASUREMENT

        if ( d['latency-any-error'] !=0 ):
            self.reason=" latency-any-error has error"
            return self.STATUS_ERR_LATENCY_ANY_ERROR

        return self.STATUS_OK
+
def test_TRex_result_parser():
    """Ad-hoc smoke helper: parse a local 'trex.txt' dump and print the result dict."""
    parsed = CTRexResult('trex.txt')
    parsed.load_file_lines()
    parsed.parse()
    print(parsed.result)
+
+
+
+
if __name__ == "__main__":
    # Manual entry point; the parser smoke test is disabled by default.
    #test_TRex_result_parser();
    pass
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
new file mode 100755
index 00000000..daa1abaf
--- /dev/null
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -0,0 +1,437 @@
+#!/usr/bin/env python
+
+__copyright__ = "Copyright 2014"
+
+"""
+Name:
+ trex_unit_test.py
+
+
+Description:
+
+ This script creates the functionality to test the performance of the TRex traffic generator
+ The tested scenario is a TRex TG directly connected to a Cisco router.
+
+::
+
+ Topology:
+
+ ------- --------
+ | | Tx---1gig/10gig----Rx | |
+ | TRex | | router |
+ | | Rx---1gig/10gig----Tx | |
+ ------- --------
+
+"""
+
+import os
+import sys
+import outer_packages
+
+import nose
+from nose.plugins import Plugin
+from nose.selector import Selector
+import CustomLogger
+import misc_methods
+from rednose import RedNose
+import termstyle
+from trex import CTRexScenario
+from trex_stf_lib.trex_client import *
+from trex_stf_lib.trex_exceptions import *
+from trex_stl_lib.api import *
+from trex_stl_lib.utils.GAObjClass import GAmanager_Regression
+import trex
+import socket
+from pprint import pprint
+import time
+from distutils.dir_util import mkpath
+
+# nose overrides
+
# option to select wanted test by name without file, class etc.
def new_Selector_wantMethod(self, method, orig_Selector_wantMethod = Selector.wantMethod):
    # defer to nose's original selection first, then apply the name filter
    result = orig_Selector_wantMethod(self, method)
    name = getattr(method, '__name__', '')
    return result and (not CTRexScenario.test or CTRexScenario.test in name)

Selector.wantMethod = new_Selector_wantMethod
+
def new_Selector_wantFunction(self, function, orig_Selector_wantFunction = Selector.wantFunction):
    # same name filter as new_Selector_wantMethod, but for plain test functions
    result = orig_Selector_wantFunction(self, function)
    name = getattr(function, '__name__', '')
    return result and (not CTRexScenario.test or CTRexScenario.test in name)

Selector.wantFunction = new_Selector_wantFunction
+
# override nose's strange representation of setUpClass errors
def __suite_repr__(self):
    """Render a suite as '<class repr>.<context name>' for readable reports."""
    ctx = self.context
    # inside a class (setUpClass etc.) the context has __module__;
    # outside (setUpModule etc.) fall back to the suite's own class
    owner = ctx if hasattr(ctx, '__module__') else self.__class__
    class_repr = nose.suite._strclass(owner)
    return '%s.%s' % (class_repr, getattr(ctx, '__name__', ctx))

nose.suite.ContextSuite.__repr__ = __suite_repr__
nose.suite.ContextSuite.__str__ = __suite_repr__
+
+# /nose overrides
+
def check_trex_path(trex_path):
    """Return the absolute path when it contains trex_daemon_server, else None."""
    marker = '%s/trex_daemon_server' % trex_path
    if not os.path.isfile(marker):
        return None
    return os.path.abspath(trex_path)
+
def check_setup_path(setup_path):
    """Return the absolute path when it contains a config.yaml, else None."""
    marker = '%s/config.yaml' % setup_path
    if not os.path.isfile(marker):
        return None
    return os.path.abspath(setup_path)
+
+
def get_trex_path():
    """Locate the <trex-core>/scripts folder under test.

    Checks the TREX_UNDER_TEST env var first, then two directories up;
    raises if neither looks like a TRex scripts folder.
    """
    # TREX_UNDER_TEST is env var pointing to <trex-core>/scripts
    path = check_trex_path(os.getenv('TREX_UNDER_TEST'))
    if path:
        return path
    path = check_trex_path(os.path.join(os.pardir, os.pardir))
    if path:
        return path
    raise Exception('Could not determine trex_under_test folder, try setting env.var. TREX_UNDER_TEST')
+
+
def address_to_ip(address):
    """Resolve a hostname to an IP address, retrying on transient DNS failures.

    Tries up to five times, swallowing resolver errors; the final attempt is
    made outside the retry guard so the caller sees the real failure.
    """
    for _ in range(5):
        try:
            return socket.gethostbyname(address)
        except socket.error:
            # BUG FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; only resolver errors retry now.
            continue
    return socket.gethostbyname(address)
+
+
class CTRexTestConfiguringPlugin(Plugin):
    """Nose plugin that wires the regression command-line options into
    CTRexScenario and prepares/tears down the TRex server around a run."""

    def options(self, parser, env = os.environ):
        """Register the regression-specific command-line options with nose."""
        super(CTRexTestConfiguringPlugin, self).options(parser, env)
        parser.add_option('--cfg', '--trex-scenario-config', action='store',
                            dest='config_path',
                            help='Specify path to folder with config.yaml and benchmark.yaml')
        parser.add_option('--skip-clean', '--skip_clean', action='store_true',
                            dest='skip_clean_config',
                            help='Skip the clean configuration replace on the platform.')
        parser.add_option('--load-image', '--load_image', action='store_true', default = False,
                            dest='load_image',
                            help='Install image specified in config file on router.')
        parser.add_option('--log-path', '--log_path', action='store',
                            dest='log_path',
                            help='Specify path for the tests` log to be saved at. Once applied, logs capturing by nose will be disabled.') # Default is CURRENT/WORKING/PATH/trex_log/trex_log.log')
        parser.add_option('--json-verbose', '--json_verbose', action="store_true", default = False,
                            dest="json_verbose",
                            help="Print JSON-RPC commands.")
        parser.add_option('--telnet-verbose', '--telnet_verbose', action="store_true", default = False,
                            dest="telnet_verbose",
                            help="Print telnet commands and responces.")
        parser.add_option('--server-logs', '--server_logs', action="store_true", default = False,
                            dest="server_logs",
                            help="Print server side (TRex and trex_daemon) logs per test.")
        parser.add_option('--kill-running', '--kill_running', action="store_true", default = False,
                            dest="kill_running",
                            help="Kills running TRex process on remote server (useful for regression).")
        parser.add_option('--func', '--functional', action="store_true", default = False,
                            dest="functional",
                            help="Run functional tests.")
        parser.add_option('--stl', '--stateless', action="store_true", default = False,
                            dest="stateless",
                            help="Run stateless tests.")
        parser.add_option('--stf', '--stateful', action="store_true", default = False,
                            dest="stateful",
                            help="Run stateful tests.")
        parser.add_option('--pkg', action="store",
                            dest="pkg",
                            help="Run with given TRex package. Make sure the path available at server machine.")
        parser.add_option('--collect', action="store_true", default = False,
                            dest="collect",
                            help="Alias to --collect-only.")
        parser.add_option('--warmup', action="store_true", default = False,
                            dest="warmup",
                            help="Warm up the system for stateful: run 30 seconds 9k imix test without check of results.")
        parser.add_option('--test-client-package', '--test_client_package', action="store_true", default = False,
                            dest="test_client_package",
                            help="Includes tests of client package.")
        parser.add_option('--long', action="store_true", default = False,
                            dest="long",
                            help="Flag of long tests (stability).")
        parser.add_option('--ga', action="store_true", default = False,
                            dest="ga",
                            help="Flag to send benchmarks to GA.")
        parser.add_option('--no-daemon', action="store_true", default = False,
                            dest="no_daemon",
                            help="Flag that specifies to use running stl server, no need daemons.")
        parser.add_option('--debug-image', action="store_true", default = False,
                            dest="debug_image",
                            help="Flag that specifies to use t-rex-64-debug as TRex executable.")
        parser.add_option('--trex-args', action='store', default = '',
                            dest="trex_args",
                            help="Additional TRex arguments (--no-watchdog etc.).")
        parser.add_option('-t', '--test', action='store', default = '', dest='test',
                            help='Test name to run (without file, class etc.)')


    def configure(self, options, conf):
        """Load config/benchmark yamls and populate CTRexScenario globals.

        Skipped (returns early) for functional-only or collect-only runs,
        which need no server-side setup. Exits the process when the master
        daemon is unreachable.
        """
        self.collect_only   = options.collect_only
        self.functional     = options.functional
        self.stateless      = options.stateless
        self.stateful       = options.stateful
        self.pkg            = options.pkg
        self.json_verbose   = options.json_verbose
        self.telnet_verbose = options.telnet_verbose
        self.no_daemon      = options.no_daemon
        CTRexScenario.test  = options.test
        if self.collect_only or self.functional:
            return
        # config dir comes from either --cfg or SETUP_DIR, never both
        if CTRexScenario.setup_dir and options.config_path:
            raise Exception('Please either define --cfg or use env. variable SETUP_DIR, not both.')
        if not options.config_path and CTRexScenario.setup_dir:
            options.config_path = CTRexScenario.setup_dir
        if not options.config_path:
            raise Exception('Please specify path to config.yaml using --cfg parameter or env. variable SETUP_DIR')
        options.config_path = options.config_path.rstrip('/')
        CTRexScenario.setup_name = os.path.basename(options.config_path)
        self.configuration = misc_methods.load_complete_config_file(os.path.join(options.config_path, 'config.yaml'))
        self.configuration.trex['trex_name'] = address_to_ip(self.configuration.trex['trex_name']) # translate hostname to ip
        self.benchmark     = misc_methods.load_benchmark_config_file(os.path.join(options.config_path, 'benchmark.yaml'))
        self.enabled       = True
        self.modes         = self.configuration.trex.get('modes', [])
        self.kill_running  = options.kill_running
        self.load_image    = options.load_image
        self.clean_config  = False if options.skip_clean_config else True
        self.server_logs   = options.server_logs
        if options.log_path:
            self.loggerPath = options.log_path
        # initialize CTRexScenario global testing class, to be used by all tests
        CTRexScenario.configuration = self.configuration
        CTRexScenario.no_daemon     = options.no_daemon
        CTRexScenario.benchmark     = self.benchmark
        CTRexScenario.modes         = set(self.modes)
        CTRexScenario.server_logs   = self.server_logs
        CTRexScenario.debug_image   = options.debug_image
        CTRexScenario.json_verbose  = self.json_verbose
        if not self.no_daemon:
            CTRexScenario.trex      = CTRexClient(trex_host   = self.configuration.trex['trex_name'],
                                                  verbose     = self.json_verbose,
                                                  debug_image = options.debug_image,
                                                  trex_args   = options.trex_args)
            if not CTRexScenario.trex.check_master_connectivity():
                print('Could not connect to master daemon')
                sys.exit(-1)
        if options.ga and CTRexScenario.setup_name:
            CTRexScenario.GAManager  = GAmanager_Regression(GoogleID         = 'UA-75220362-3',
                                                            AnalyticsUserID  = CTRexScenario.setup_name,
                                                            QueueSize        = 100,
                                                            Timeout          = 3,  # seconds
                                                            UserPermission   = 1,
                                                            BlockingMode     = 0,
                                                            appName          = 'TRex',
                                                            appVer           = CTRexScenario.trex_version)


    def begin (self):
        """Install the package (if --pkg), restart the daemon, kill or detect a
        running TRex, and prepare router config before any test runs."""
        client = CTRexScenario.trex
        if self.pkg and not CTRexScenario.is_copied:
            if client.master_daemon.is_trex_daemon_running() and client.get_trex_cmds() and not self.kill_running:
                print("Can't update TRex, it's running")
                sys.exit(-1)
            print('Updating TRex to %s' % self.pkg)
            if not client.master_daemon.update_trex(self.pkg):
                print('Failed updating TRex')
                sys.exit(-1)
            else:
                print('Updated')
            CTRexScenario.is_copied = True
        if self.functional or self.collect_only:
            return
        if not self.no_daemon:
            print('Restarting TRex daemon server')
            res = client.restart_trex_daemon()
            if not res:
                print('Could not restart TRex daemon server')
                sys.exit(-1)
            print('Restarted.')

            if self.kill_running:
                client.kill_all_trexes()
            else:
                if client.get_trex_cmds():
                    print('TRex is already running')
                    sys.exit(-1)

        if 'loopback' not in self.modes:
            CTRexScenario.router_cfg = dict(config_dict      = self.configuration.router,
                                            forceImageReload = self.load_image,
                                            silent_mode      = not self.telnet_verbose,
                                            forceCleanConfig = self.clean_config,
                                            tftp_config_dict = self.configuration.tftp)
        try:
            CustomLogger.setup_custom_logger('TRexLogger', self.loggerPath)
        except AttributeError:
            # no --log-path given, so self.loggerPath was never set
            CustomLogger.setup_custom_logger('TRexLogger')

    def finalize(self, result):
        """Disconnect/kill TRex clients after the whole run finishes."""
        if self.functional or self.collect_only:
            return
        #CTRexScenario.is_init = False
        if self.stateful:
            CTRexScenario.trex = None
        if self.stateless:
            if self.no_daemon:
                if CTRexScenario.stl_trex and CTRexScenario.stl_trex.is_connected():
                    CTRexScenario.stl_trex.disconnect()
            else:
                CTRexScenario.trex.force_kill(False)
            CTRexScenario.stl_trex = None
+
+
def save_setup_info():
    """Write a short report_<setup>.info file describing the tested setup.

    Best effort: any failure is reported to stdout and swallowed.
    """
    try:
        if not (CTRexScenario.setup_name and CTRexScenario.trex_version):
            return
        parts = ['{0:8}: {1}\n'.format(key, value)
                 for key, value in CTRexScenario.trex_version.items()]
        setup_info = ''.join(parts)
        cfg = CTRexScenario.configuration
        setup_info += 'Server: %s, Modes: %s' % (cfg.trex.get('trex_name'), cfg.trex.get('modes'))
        if cfg.router:
            setup_info += '\nRouter: Model: %s, Image: %s' % (cfg.router.get('model'), CTRexScenario.router_image)
        if CTRexScenario.debug_image:
            setup_info += '\nDebug image: %s' % CTRexScenario.debug_image

        report_path = '%s/report_%s.info' % (CTRexScenario.report_dir, CTRexScenario.setup_name)
        with open(report_path, 'w') as f:
            f.write(setup_info)
    except Exception as err:
        print('Error saving setup info: %s ' % err)
+
+
if __name__ == "__main__":

    # setting defaults. By default we run all the test suite
    specific_tests = False
    CTRexScenario.report_dir = 'reports'
    need_to_copy = False
    setup_dir = os.getenv('SETUP_DIR', '').rstrip('/')
    CTRexScenario.setup_dir = check_setup_path(setup_dir)
    CTRexScenario.scripts_path = get_trex_path()
    if not CTRexScenario.setup_dir:
        # fall back to a setups/<name> folder relative to cwd
        CTRexScenario.setup_dir = check_setup_path(os.path.join('setups', setup_dir))


    nose_argv = ['', '-s', '-v', '--exe', '--rednose', '--detailed-errors']
    test_client_package = False
    if '--test-client-package' in sys.argv:
        test_client_package = True

    if '--collect' in sys.argv:
        sys.argv.append('--collect-only')
    if '--collect-only' in sys.argv: # this is a user trying simply to view the available tests. no need xunit.
        CTRexScenario.is_test_list = True
        xml_arg = ''
    else:
        xml_name = 'unit_test.xml'
        if CTRexScenario.setup_dir:
            CTRexScenario.setup_name = os.path.basename(CTRexScenario.setup_dir)
            xml_name = 'report_%s.xml' % CTRexScenario.setup_name
        xml_arg= '--xunit-file=%s/%s' % (CTRexScenario.report_dir, xml_name)
        mkpath(CTRexScenario.report_dir)

    # pull test-type selectors (functional_tests/... path fragments) out of argv;
    # anything else is forwarded to nose untouched
    sys_args = sys.argv[:]
    for i, arg in enumerate(sys.argv):
        if 'log-path' in arg:
            nose_argv += ['--nologcapture']
        else:
            for tests_type in CTRexScenario.test_types.keys():
                if tests_type in arg:
                    specific_tests = True
                    CTRexScenario.test_types[tests_type].append(arg[arg.find(tests_type):])
                    sys_args.remove(arg)

    if not specific_tests:
        for key in ('--func', '--functional'):
            if key in sys_args:
                CTRexScenario.test_types['functional_tests'].append('functional_tests')
                sys_args.remove(key)
        for key in ('--stf', '--stateful'):
            if key in sys_args:
                CTRexScenario.test_types['stateful_tests'].append('stateful_tests')
                sys_args.remove(key)
        for key in ('--stl', '--stateless'):
            if key in sys_args:
                CTRexScenario.test_types['stateless_tests'].append('stateless_tests')
                sys_args.remove(key)
        # Run all of the tests or just the selected ones
        if not sum([len(x) for x in CTRexScenario.test_types.values()]):
            for key in CTRexScenario.test_types.keys():
                CTRexScenario.test_types[key].append(key)

    nose_argv += sys_args

    addplugins = [RedNose(), CTRexTestConfiguringPlugin()]
    result = True
    try:
        # each suite type runs as its own nose invocation with its own xunit file;
        # 'result' accumulates success across all of them
        if len(CTRexScenario.test_types['functional_tests']):
            additional_args = ['--func'] + CTRexScenario.test_types['functional_tests']
            if xml_arg:
                additional_args += ['--with-xunit', xml_arg.replace('.xml', '_functional.xml')]
            result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins)
        if len(CTRexScenario.test_types['stateful_tests']):
            additional_args = ['--stf']
            if '--warmup' in sys.argv:
                additional_args.append('stateful_tests/trex_imix_test.py:CTRexIMIX_Test.test_warm_up')
            additional_args += CTRexScenario.test_types['stateful_tests']
            if not test_client_package:
                additional_args.extend(['-a', '!client_package'])
            if xml_arg:
                additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateful.xml')]
            result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
        if len(CTRexScenario.test_types['stateless_tests']):
            # a connectivity check always runs first for stateless suites
            additional_args = ['--stl', 'stateless_tests/stl_general_test.py:STLBasic_Test.test_connectivity'] + CTRexScenario.test_types['stateless_tests']
            if not test_client_package:
                additional_args.extend(['-a', '!client_package'])
            if xml_arg:
                additional_args += ['--with-xunit', xml_arg.replace('.xml', '_stateless.xml')]
            result = nose.run(argv = nose_argv + additional_args, addplugins = addplugins) and result
    #except Exception as e:
    #    result = False
    #    print(e)
    finally:
        save_setup_info()

    if not CTRexScenario.is_test_list:
        if result == True:
            print(termstyle.green("""
                     ..::''''::..
                   .;''        ``;.
                  ::    ::  ::    ::
                 ::     ::  ::     ::
                 ::     ::  ::     ::
                 :: .:' ::  :: `:. ::
                 ::  :          :  ::
                  :: `:.      .:' ::
                   `;..``::::''..;'
                     ``::,,,,::''

                   ___  ___   __________
                  / _ \/ _ | / __/ __/ /
                 / ___/ __ |_\ \_\ \/_/
                /_/  /_/ |_/___/___(_)

            """))
            sys.exit(0)
        else:
            print(termstyle.red("""
           /\_/\\
          ( o.o )
           > ^ <

This cat is sad, test failed.
            """))
            sys.exit(-1)
+
+
+
+
+
+
+
+
+
+
+