author     imarom <imarom@cisco.com>    2017-01-22 16:20:45 +0200
committer  imarom <imarom@cisco.com>    2017-01-22 16:20:45 +0200
commit     904eacd9be1230efb7ae0ab7997ec131b588ec8a (patch)
tree       8e4bcd1b1a5f683efdb8f3eeb962acefc3201961 /scripts/automation
parent     d2f1c8451e2e8ffc47b208f68f9b16697d706d60 (diff)
parent     b81cdb6c2d6d118c1c346e7c8dae6a5e747d867d (diff)
Merge branch 'master' into capture
Signed-off-by: imarom <imarom@cisco.com>

Conflicts:
    scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
    scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
    scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
    src/main_dpdk.cpp
Diffstat (limited to 'scripts/automation')
-rwxr-xr-x  scripts/automation/regression/CPlatform.py | 186
-rw-r--r--  scripts/automation/regression/cfg/client_cfg_vlan.yaml | 31
-rw-r--r--  scripts/automation/regression/cfg/client_cfg_vlan_mac.yaml | 26
-rw-r--r--  scripts/automation/regression/functional_tests/cpp_gtests_test.py | 13
-rw-r--r--  scripts/automation/regression/functional_tests/golden/bp_sim_dns_vlans.pcap | bin 0 -> 1842 bytes
-rw-r--r--  scripts/automation/regression/functional_tests/stl_basic_tests.py | 134
-rwxr-xr-x  scripts/automation/regression/interactive_platform.py | 33
-rwxr-xr-x  scripts/automation/regression/outer_packages.py | 8
-rw-r--r--  scripts/automation/regression/setups/kiwi02/benchmark.yaml | 26
-rw-r--r--  scripts/automation/regression/setups/trex07/benchmark.yaml | 29
-rw-r--r--  scripts/automation/regression/setups/trex08/benchmark.yaml | 23
-rw-r--r--  scripts/automation/regression/setups/trex09/benchmark.yaml | 12
-rw-r--r--  scripts/automation/regression/setups/trex11/benchmark.yaml | 52
-rw-r--r--  scripts/automation/regression/setups/trex25/benchmark.yaml | 10
-rw-r--r--  scripts/automation/regression/stateful_tests/trex_client_cfg_test.py | 68
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_general_test.py | 73
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_imix_test.py | 95
-rwxr-xr-x  scripts/automation/regression/stateful_tests/trex_nbar_test.py | 24
-rwxr-xr-x  scripts/automation/regression/stateless_tests/stl_benchmark_test.py | 82
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_performance_test.py | 65
-rw-r--r--  scripts/automation/regression/stateless_tests/stl_rx_test.py | 2
-rw-r--r--  scripts/automation/regression/trex.py | 15
-rw-r--r--  scripts/automation/regression/trex_elk.py | 322
-rwxr-xr-x  scripts/automation/regression/trex_unit_test.py | 230
-rw-r--r--  scripts/automation/trex_control_plane/stf/examples/stf_active_flow.py | 7
-rwxr-xr-x  scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py | 54
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/console/trex_console.py | 21
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py | 93
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py | 5
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/templates/ICMP echo request.trp | 1
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/ICMP.trp | 1
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/TCP.trp | 1
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/UDP.trp | 1
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/ICMP.trp | 1
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/TCP.trp | 1
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/UDP.trp | 1
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/templates/TCP-SYN.trp | 1
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py | 9
-rw-r--r--  scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py | 40
-rwxr-xr-x  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py | 45
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py | 28
-rw-r--r--  scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py | 10
42 files changed, 1591 insertions, 288 deletions
diff --git a/scripts/automation/regression/CPlatform.py b/scripts/automation/regression/CPlatform.py
index 606235a6..332f700a 100755
--- a/scripts/automation/regression/CPlatform.py
+++ b/scripts/automation/regression/CPlatform.py
@@ -19,45 +19,82 @@ class CPlatform(object):
self.needed_image_path = None
self.tftp_cfg = None
self.config_history = { 'basic_if_config' : False, 'tftp_server_config' : False }
+ self.client_vlan = "100"
+ self.server_vlan = "200"
- def configure_basic_interfaces(self, mtu = 9050):
+ def configure_basic_interfaces(self, mtu = 9050, vlan=False):
cache = CCommandCache()
for dual_if in self.if_mngr.get_dual_if_list():
client_if_command_set = []
server_if_command_set = []
+ client_if_command_set_vlan = []
+ server_if_command_set_vlan = []
+
+ client_if_name = dual_if.client_if.get_name()
+ server_if_name = dual_if.server_if.get_name()
+
+ if vlan:
+ client_if_name_vlan = client_if_name + "." + self.client_vlan
+ server_if_name_vlan = server_if_name + "." + self.server_vlan
+ client_if_command_set_vlan.append('encapsulation dot1Q {vlan}'. format(vlan = self.client_vlan));
+ server_if_command_set_vlan.append('encapsulation dot1Q {vlan}'. format(vlan = self.server_vlan));
client_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.client_if.get_src_mac_addr()) )
client_if_command_set.append ('mtu %s' % mtu)
- client_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.client_if.get_ipv4_addr() ))
- client_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.client_if.get_ipv6_addr() ))
- cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+ client_ip_command = 'ip address {ip} 255.255.255.0'.format( ip = dual_if.client_if.get_ipv4_addr() )
+ client_ipv6_command = 'ipv6 address {ip}/64'.format( ip = dual_if.client_if.get_ipv6_addr() )
+ if vlan:
+ client_if_command_set_vlan.append (client_ip_command)
+ client_if_command_set_vlan.append (client_ipv6_command)
+ else:
+ client_if_command_set.append (client_ip_command)
+ client_if_command_set.append (client_ipv6_command)
+
+ cache.add('IF', client_if_command_set, client_if_name)
+ if vlan:
+ cache.add('IF', client_if_command_set_vlan, client_if_name_vlan)
server_if_command_set.append ('mac-address {mac}'.format( mac = dual_if.server_if.get_src_mac_addr()) )
server_if_command_set.append ('mtu %s' % mtu)
- server_if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = dual_if.server_if.get_ipv4_addr() ))
- server_if_command_set.append ('ipv6 address {ip}/64'.format( ip = dual_if.server_if.get_ipv6_addr() ))
- cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
+ server_ip_command = 'ip address {ip} 255.255.255.0'.format( ip = dual_if.server_if.get_ipv4_addr() )
+ server_ipv6_command = 'ipv6 address {ip}/64'.format( ip = dual_if.server_if.get_ipv6_addr() )
+ if vlan:
+ server_if_command_set_vlan.append (server_ip_command)
+ server_if_command_set_vlan.append (server_ipv6_command)
+ else:
+ server_if_command_set.append (server_ip_command)
+ server_if_command_set.append (server_ipv6_command)
+
+ cache.add('IF', server_if_command_set, server_if_name)
+ if vlan:
+ cache.add('IF', server_if_command_set_vlan, server_if_name_vlan)
self.cmd_link.run_single_command(cache)
self.config_history['basic_if_config'] = True
-
-
- def configure_basic_filtered_interfaces(self, intf_list, mtu = 9050):
+ def configure_basic_filtered_interfaces(self, intf_list, mtu = 9050, vlan = False):
cache = CCommandCache()
for intf in intf_list:
if_command_set = []
+ if_command_set_vlan = []
if_command_set.append ('mac-address {mac}'.format( mac = intf.get_src_mac_addr()) )
if_command_set.append ('mtu %s' % mtu)
- if_command_set.append ('ip address {ip} 255.255.255.0'.format( ip = intf.get_ipv4_addr() ))
- if_command_set.append ('ipv6 address {ip}/64'.format( ip = intf.get_ipv6_addr() ))
+ ip_commands = ['ip address {ip} 255.255.255.0'.format( ip = intf.get_ipv4_addr() ),
+ 'ipv6 address {ip}/64'.format( ip = intf.get_ipv6_addr() )]
+ if vlan:
+ if_command_set_vlan.extend(ip_commands)
+ else:
+ if_command_set.extend(ip_commands)
cache.add('IF', if_command_set, intf.get_name())
+ if vlan:
+ if_name = intf.get_name() + '.' + (self.client_vlan if intf.is_client() else self.server_vlan)
+ cache.add('IF', if_command_set_vlan, if_name)
self.cmd_link.run_single_command(cache)
@@ -74,8 +111,9 @@ class CPlatform(object):
if i < 4:
continue
raise Exception('Could not load clean config, response: %s' % res)
+ break
- def config_pbr (self, mode = 'config'):
+ def config_pbr (self, mode = 'config', vlan = False):
idx = 1
unconfig_str = '' if mode=='config' else 'no '
@@ -93,30 +131,30 @@ class CPlatform(object):
if dual_if.is_duplicated():
# define the relevant VRF name
pre_commit_set.add('{mode}ip vrf {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
-
+
# assign VRF to interfaces, config interfaces with relevant route-map
client_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
- client_if_command_set.append ('{mode}ip policy route-map {dup}_{p1}_to_{p2}'.format(
+ client_if_command_set.append ('{mode}ip policy route-map {dup}_{p1}_to_{p2}'.format(
mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
+ dup = dual_if.get_vrf_name(),
p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
server_if_command_set.append ('{mode}ip vrf forwarding {dup}'.format( mode = unconfig_str, dup = dual_if.get_vrf_name()) )
- server_if_command_set.append ('{mode}ip policy route-map {dup}_{p2}_to_{p1}'.format(
+ server_if_command_set.append ('{mode}ip policy route-map {dup}_{p2}_to_{p1}'.format(
mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
+ dup = dual_if.get_vrf_name(),
p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
# config route-map routing
conf_t_command_set.append('{mode}route-map {dup}_{p1}_to_{p2} permit 10'.format(
mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
+ dup = dual_if.get_vrf_name(),
p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
if mode == 'config':
conf_t_command_set.append('set ip next-hop {next_hop}'.format(
next_hop = client_net_next_hop) )
conf_t_command_set.append('{mode}route-map {dup}_{p2}_to_{p1} permit 10'.format(
mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
+ dup = dual_if.get_vrf_name(),
p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
if mode == 'config':
conf_t_command_set.append('set ip next-hop {next_hop}'.format(
@@ -127,21 +165,21 @@ class CPlatform(object):
if dual_if.client_if.get_dest_mac():
conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
- next_hop = server_net_next_hop,
+ dup = dual_if.get_vrf_name(),
+ next_hop = server_net_next_hop,
dest_mac = dual_if.client_if.get_dest_mac()))
if dual_if.server_if.get_dest_mac():
conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
- mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
- next_hop = client_net_next_hop,
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ next_hop = client_net_next_hop,
dest_mac = dual_if.server_if.get_dest_mac()))
else:
# config interfaces with relevant route-map
- client_if_command_set.append ('{mode}ip policy route-map {p1}_to_{p2}'.format(
+ client_if_command_set.append ('{mode}ip policy route-map {p1}_to_{p2}'.format(
mode = unconfig_str,
p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
- server_if_command_set.append ('{mode}ip policy route-map {p2}_to_{p1}'.format(
+ server_if_command_set.append ('{mode}ip policy route-map {p2}_to_{p1}'.format(
mode = unconfig_str,
p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
@@ -164,17 +202,22 @@ class CPlatform(object):
if dual_if.client_if.get_dest_mac():
conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
mode = unconfig_str,
- next_hop = server_net_next_hop,
+ next_hop = server_net_next_hop,
dest_mac = dual_if.client_if.get_dest_mac()))
if dual_if.server_if.get_dest_mac():
conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
mode = unconfig_str,
- next_hop = client_net_next_hop,
+ next_hop = client_net_next_hop,
dest_mac = dual_if.server_if.get_dest_mac()))
# assign generated config list to cache
- cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
- cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+ client_if_name = dual_if.client_if.get_name()
+ server_if_name = dual_if.server_if.get_name()
+ if vlan:
+ client_if_name += "." + self.client_vlan
+ server_if_name += "." + self.server_vlan
+ cache.add('IF', server_if_command_set, server_if_name)
+ cache.add('IF', client_if_command_set, client_if_name)
cache.add('CONF', conf_t_command_set)
idx += 2
@@ -186,12 +229,12 @@ class CPlatform(object):
# deploy the configs (order is important!)
self.cmd_link.run_command( [pre_commit_cache, cache] )
if self.config_history['basic_if_config']:
- # in this case, duplicated interfaces will lose its ip address.
+ # in this case, duplicated interfaces will lose its ip address.
# re-config IPv4 addresses
- self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() )
+ self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if(), vlan = vlan)
- def config_no_pbr (self):
- self.config_pbr(mode = 'unconfig')
+ def config_no_pbr (self, vlan = False):
+ self.config_pbr(mode = 'unconfig', vlan = vlan)
def config_static_routing (self, stat_route_obj, mode = 'config'):
@@ -241,13 +284,13 @@ class CPlatform(object):
conf_t_command_set.append( "{mode}ip route vrf {dup} {next_net} {dest_mask} {next_hop}".format(
mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
+ dup = dual_if.get_vrf_name(),
next_net = client_net,
dest_mask = stat_route_obj.client_mask,
next_hop = client_net_next_hop))
conf_t_command_set.append( "{mode}ip route vrf {dup} {next_net} {dest_mask} {next_hop}".format(
mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
+ dup = dual_if.get_vrf_name(),
next_net = server_net,
dest_mask = stat_route_obj.server_mask,
next_hop = server_net_next_hop))
@@ -256,14 +299,14 @@ class CPlatform(object):
if dual_if.client_if.get_dest_mac():
conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
- next_hop = server_net_next_hop,
+ dup = dual_if.get_vrf_name(),
+ next_hop = server_net_next_hop,
dest_mac = dual_if.client_if.get_dest_mac()))
if dual_if.server_if.get_dest_mac():
conf_t_command_set.append('{mode}arp vrf {dup} {next_hop} {dest_mac} arpa'.format(
- mode = unconfig_str,
- dup = dual_if.get_vrf_name(),
- next_hop = client_net_next_hop,
+ mode = unconfig_str,
+ dup = dual_if.get_vrf_name(),
+ next_hop = client_net_next_hop,
dest_mac = dual_if.server_if.get_dest_mac()))
# assign generated interfaces config list to cache
@@ -286,12 +329,12 @@ class CPlatform(object):
if dual_if.client_if.get_dest_mac():
conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
mode = unconfig_str,
- next_hop = server_net_next_hop,
+ next_hop = server_net_next_hop,
dest_mac = dual_if.client_if.get_dest_mac()))
if dual_if.server_if.get_dest_mac():
conf_t_command_set.append('{mode}arp {next_hop} {dest_mac} arpa'.format(
mode = unconfig_str,
- next_hop = client_net_next_hop,
+ next_hop = client_net_next_hop,
dest_mac = dual_if.server_if.get_dest_mac()))
# bump up to the next client network address
@@ -309,9 +352,9 @@ class CPlatform(object):
# deploy the configs (order is important!)
self.cmd_link.run_command( [pre_commit_cache, cache] )
if self.config_history['basic_if_config']:
- # in this case, duplicated interfaces will lose its ip address.
+ # in this case, duplicated interfaces will lose its ip address.
# re-config IPv4 addresses
- self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if() )
+ self.configure_basic_filtered_interfaces(self.if_mngr.get_duplicated_if())
def config_no_static_routing (self, stat_route_obj = None):
@@ -424,7 +467,7 @@ class CPlatform(object):
def config_zbf (self, mode = 'config'):
cache = CCommandCache()
pre_commit_cache = CCommandCache()
- conf_t_command_set = []
+ conf_t_command_set = []
# toggle all duplicate interfaces down
self.toggle_duplicated_intf(action = 'down')
@@ -460,7 +503,7 @@ class CPlatform(object):
def config_no_zbf (self):
cache = CCommandCache()
- conf_t_command_set = []
+ conf_t_command_set = []
# define security zones and security service policy to be applied on the interfaces
conf_t_command_set.append('no zone-pair security in2out source z_in destination z_out')
@@ -485,7 +528,7 @@ class CPlatform(object):
# self.__toggle_interfaces(dup_ifs)
- def config_ipv6_pbr (self, mode = 'config'):
+ def config_ipv6_pbr (self, mode = 'config', vlan=False):
idx = 1
unconfig_str = '' if mode=='config' else 'no '
cache = CCommandCache()
@@ -496,7 +539,7 @@ class CPlatform(object):
for dual_if in self.if_mngr.get_dual_if_list():
client_if_command_set = []
server_if_command_set = []
-
+
client_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' )
server_net_next_hop = misc_methods.get_single_net_client_addr(dual_if.client_if.get_ipv6_addr(), {'7':1}, ip_type = 'ipv6' )
client_net_next_hop_v4 = misc_methods.get_single_net_client_addr(dual_if.server_if.get_ipv4_addr() )
@@ -510,22 +553,22 @@ class CPlatform(object):
prefix = 'ipv6_' + dual_if.get_vrf_name()
else:
prefix = 'ipv6'
-
+
# config interfaces with relevant route-map
- client_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p1}_to_{p2}'.format(
+ client_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p1}_to_{p2}'.format(
mode = unconfig_str,
- pre = prefix,
+ pre = prefix,
p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
- server_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p2}_to_{p1}'.format(
+ server_if_command_set.append ('{mode}ipv6 policy route-map {pre}_{p2}_to_{p1}'.format(
mode = unconfig_str,
- pre = prefix,
+ pre = prefix,
p1 = 'p'+str(idx), p2 = 'p'+str(idx+1) ) )
# config global arp to interfaces net address and vrf
if dual_if.client_if.get_ipv6_dest_mac():
conf_t_command_set.append('{mode}ipv6 neighbor {next_hop} {intf} {dest_mac}'.format(
mode = unconfig_str,
- next_hop = server_net_next_hop,
+ next_hop = server_net_next_hop,
intf = dual_if.client_if.get_name(),
dest_mac = dual_if.client_if.get_ipv6_dest_mac()))
# For latency packets (which are IPv4), we need to configure also static ARP
@@ -561,17 +604,24 @@ class CPlatform(object):
conf_t_command_set.append('exit')
# assign generated config list to cache
- cache.add('IF', server_if_command_set, dual_if.server_if.get_name())
- cache.add('IF', client_if_command_set, dual_if.client_if.get_name())
+ client_if_name = dual_if.client_if.get_name()
+ server_if_name = dual_if.server_if.get_name()
+ if vlan:
+ client_if_name += "." + self.client_vlan
+ server_if_name += "." + self.server_vlan
+
+ cache.add('IF', server_if_command_set, server_if_name)
+ cache.add('IF', client_if_command_set, client_if_name)
+
idx += 2
cache.add('CONF', conf_t_command_set)
-
+
# deploy the configs (order is important!)
self.cmd_link.run_command( [cache] )
- def config_no_ipv6_pbr (self):
- self.config_ipv6_pbr(mode = 'unconfig')
+ def config_no_ipv6_pbr (self, vlan = False):
+ self.config_ipv6_pbr(mode = 'unconfig', vlan = vlan)
# show methods
def get_cpu_util (self):
@@ -679,7 +729,7 @@ class CPlatform(object):
parsed_info = CShowParser.parse_show_image_version(response)
self.running_image = parsed_info
return parsed_info
-
+
def check_image_existence (self, img_name):
""" check_image_existence(self, img_name) -> boolean
@@ -716,7 +766,7 @@ class CPlatform(object):
# tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_server_config
self.tftp_cfg = device_cfg_obj.get_tftp_info()
cache = CCommandCache()
-
+
command = "ip tftp source-interface {intf}".format( intf = device_cfg_obj.get_mgmt_interface() )
cache.add('CONF', command )
self.cmd_link.run_single_command(cache)
@@ -737,12 +787,12 @@ class CPlatform(object):
"""
if not self.check_image_existence(img_filename): # check if this image isn't already saved in platform
#tmp_tftp_config = external_tftp_config if external_tftp_config is not None else self.tftp_cfg
-
+
if self.config_history['tftp_server_config']: # make sure a TFTP configuration has been loaded
cache = CCommandCache()
if self.running_image is None:
self.get_running_image_details()
-
+
command = "copy tftp://{tftp_ip}/{img_path}/{image} bootflash:".format(
tftp_ip = self.tftp_cfg['ip_address'],
img_path = self.tftp_cfg['images_path'],
@@ -795,7 +845,7 @@ class CPlatform(object):
An image file to compare router running image
Compares image name to router running image, returns match result.
-
+
"""
if self.running_image is None:
self.get_running_image_details()
@@ -839,7 +889,7 @@ class CPlatform(object):
i = 0
sleep_time = 30 # seconds
- try:
+ try:
cache = CCommandCache()
cache.add('EXEC', ['reload','n\r','\r'] )
@@ -861,7 +911,7 @@ class CPlatform(object):
raise TimeoutError('Platform failed to reload after reboot for over {minutes} minutes!'.format(minutes = round(1 + i * sleep_time / 60)))
else:
i += 1
-
+
time.sleep(30)
self.reload_connection(device_cfg_obj)
progress_thread.join()
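
A minimal usage sketch of the vlan flag threaded through CPlatform above; this is illustrative only and assumes an already-initialized CPlatform instance named platform:

    # Sub-interfaces <if>.100 / <if>.200 are created with 'encapsulation dot1Q',
    # and the IPv4/IPv6 addresses move onto the VLAN sub-interfaces.
    platform.configure_basic_interfaces(mtu = 9050, vlan = True)
    platform.config_pbr(mode = 'config', vlan = True)
    platform.config_ipv6_pbr(mode = 'config', vlan = True)

    # ... run traffic ...

    # Removal must also pass vlan = True so the .100/.200 interface names match.
    platform.config_no_pbr(vlan = True)
    platform.config_no_ipv6_pbr(vlan = True)
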
diff --git a/scripts/automation/regression/cfg/client_cfg_vlan.yaml b/scripts/automation/regression/cfg/client_cfg_vlan.yaml
new file mode 100644
index 00000000..db70e4e1
--- /dev/null
+++ b/scripts/automation/regression/cfg/client_cfg_vlan.yaml
@@ -0,0 +1,31 @@
+vlan: true
+#vlan: false
+
+groups:
+
+- ip_start : 16.0.0.1
+ ip_end : 16.0.1.255
+ initiator :
+ next_hop: 1.1.1.1
+ src_ip : 1.1.1.2
+ vlan : 100
+ responder :
+ next_hop: 1.1.2.1
+ src_ip : 1.1.2.2
+ vlan : 200
+
+ count : 1
+
+- ip_start : 17.0.0.1
+ ip_end : 17.0.1.255
+ initiator :
+ next_hop: 1.1.3.1
+ src_ip : 1.1.3.2
+ vlan : 100
+ responder :
+ next_hop: 1.1.4.1
+ src_ip : 1.1.4.2
+ vlan : 200
+
+ count : 1
+
diff --git a/scripts/automation/regression/cfg/client_cfg_vlan_mac.yaml b/scripts/automation/regression/cfg/client_cfg_vlan_mac.yaml
new file mode 100644
index 00000000..d6d24dbb
--- /dev/null
+++ b/scripts/automation/regression/cfg/client_cfg_vlan_mac.yaml
@@ -0,0 +1,26 @@
+vlan: true
+
+groups:
+
+- ip_start : 16.0.0.1
+ ip_end : 16.0.0.4
+ initiator :
+ dst_mac : "00:00:00:01:00:00"
+ vlan : 100
+ responder :
+ dst_mac : "00:00:00:02:00:00"
+ vlan : 200
+
+ count : 1
+
+- ip_start : 16.0.0.5
+ ip_end : 16.0.1.255
+ initiator :
+ dst_mac : "00:00:00:03:00:00"
+ vlan : 300
+ responder :
+ dst_mac : "00:00:00:04:00:00"
+ vlan : 400
+
+ count : 1
+
diff --git a/scripts/automation/regression/functional_tests/cpp_gtests_test.py b/scripts/automation/regression/functional_tests/cpp_gtests_test.py
index 6535da84..a60b715a 100644
--- a/scripts/automation/regression/functional_tests/cpp_gtests_test.py
+++ b/scripts/automation/regression/functional_tests/cpp_gtests_test.py
@@ -4,6 +4,7 @@ import functional_general_test
from trex import CTRexScenario
import os, sys
from subprocess import Popen, STDOUT
+from stl_basic_tests import compare_caps
import shlex
import time
import errno
@@ -44,3 +45,15 @@ class CPP_Test(functional_general_test.CGeneralFunctional_Test):
print('Output:\n%s' % out)
if ret:
raise Exception('Non zero return status of Valgrind gtests (%s)' % ret)
+
+ def test_bp_sim_client_cfg(self):
+ print('')
+ cmd = './bp-sim-64 --pcap -f cap2/dns.yaml --client_cfg automation/regression/cfg/client_cfg_vlan_mac.yaml -o generated/bp_sim_dns_vlans_gen.pcap'
+ ret, out = run_command(os.path.join(CTRexScenario.scripts_path, cmd), cwd = CTRexScenario.scripts_path)
+ print('Output:\n%s' % out)
+ if ret:
+ raise Exception('Non zero return status of Valgrind gtests (%s)' % ret)
+
+ compare_caps(output = os.path.join(CTRexScenario.scripts_path, 'generated/bp_sim_dns_vlans_gen.pcap'),
+ golden = 'functional_tests/golden/bp_sim_dns_vlans.pcap')
+
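
The same check can be reproduced by hand; the sketch below is an assumption built only from the command line and golden file used in the test above (run from the TRex scripts directory, paths illustrative):

    import subprocess
    from stl_basic_tests import compare_caps   # module-level helper added in this merge

    # Generate the pcap with the VLAN/MAC client config, then diff against the golden.
    subprocess.check_call('./bp-sim-64 --pcap -f cap2/dns.yaml '
                          '--client_cfg automation/regression/cfg/client_cfg_vlan_mac.yaml '
                          '-o generated/bp_sim_dns_vlans_gen.pcap', shell = True)

    compare_caps(output = 'generated/bp_sim_dns_vlans_gen.pcap',
                 golden = 'automation/regression/functional_tests/golden/bp_sim_dns_vlans.pcap')
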
diff --git a/scripts/automation/regression/functional_tests/golden/bp_sim_dns_vlans.pcap b/scripts/automation/regression/functional_tests/golden/bp_sim_dns_vlans.pcap
new file mode 100644
index 00000000..3dd4890c
--- /dev/null
+++ b/scripts/automation/regression/functional_tests/golden/bp_sim_dns_vlans.pcap
Binary files differ
diff --git a/scripts/automation/regression/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
index bc5bc4d5..aecbf6d1 100644
--- a/scripts/automation/regression/functional_tests/stl_basic_tests.py
+++ b/scripts/automation/regression/functional_tests/stl_basic_tests.py
@@ -25,6 +25,71 @@ import shlex
from threading import Thread
from collections import defaultdict
+
+def scapy_pkt_show_to_str (scapy_pkt):
+ capture = StringIO()
+ save_stdout = sys.stdout
+ sys.stdout = capture
+ scapy_pkt.show()
+ sys.stdout = save_stdout
+ return capture.getvalue()
+
+
+def compare_caps (output, golden, max_diff_sec = 0.000005):
+ pkts1 = []
+ pkts2 = []
+ pkts_ts_buckets = defaultdict(list)
+
+ for pkt in RawPcapReader(output):
+ ts = pkt[1][0] * 1e6 + pkt[1][1]
+ pkts_ts_buckets[ts].append(pkt)
+ # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
+ #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
+ for ts in sorted(pkts_ts_buckets.keys()):
+ pkts1.extend(sorted(pkts_ts_buckets[ts]))
+ pkts_ts_buckets.clear()
+
+ for pkt in RawPcapReader(golden):
+ ts = pkt[1][0] * 1e6 + pkt[1][1]
+ pkts_ts_buckets[ts].append(pkt)
+ # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
+ #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
+ for ts in sorted(pkts_ts_buckets.keys()):
+ pkts2.extend(sorted(pkts_ts_buckets[ts]))
+
+ assert_equal(len(pkts1), len(pkts2), 'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))
+
+ for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1))):
+ ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
+ ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
+
+ if abs(ts1-ts2) > max_diff_sec: # 5 nsec
+ raise AssertionError("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'".format(output, golden, i, ts1, ts2))
+
+ if pkt1[0] != pkt2[0]:
+ errmsg = "RAW error: output file '{0}', differs from golden '{1}' in cap #{2}".format(output, golden, i)
+ print(errmsg)
+
+ print(format_text("\ndifferent fields for packet #{0}:".format(i), 'underline'))
+
+ scapy_pkt1_info = scapy_pkt_show_to_str(Ether(pkt1[0])).split('\n')
+ scapy_pkt2_info = scapy_pkt_show_to_str(Ether(pkt2[0])).split('\n')
+
+ print(format_text("\nGot:\n", 'bold', 'underline'))
+ for line, ref in zip(scapy_pkt1_info, scapy_pkt2_info):
+ if line != ref:
+ print(format_text(line, 'bold'))
+
+ print(format_text("\nExpected:\n", 'bold', 'underline'))
+ for line, ref in zip(scapy_pkt2_info, scapy_pkt1_info):
+ if line != ref:
+ print(format_text(line, 'bold'))
+
+ print("\n")
+ raise AssertionError(errmsg)
+
+
+
@attr('run_on_trex')
class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
def setUp (self):
@@ -73,69 +138,6 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
raise Exception("cannot find '{0}'".format(name))
- def scapy_pkt_show_to_str (self, scapy_pkt):
- capture = StringIO()
- save_stdout = sys.stdout
- sys.stdout = capture
- scapy_pkt.show()
- sys.stdout = save_stdout
- return capture.getvalue()
-
-
- def compare_caps (self, output, golden, max_diff_sec = 0.01):
- pkts1 = []
- pkts2 = []
- pkts_ts_buckets = defaultdict(list)
-
- for pkt in RawPcapReader(output):
- ts = pkt[1][0] * 1e6 + pkt[1][1]
- pkts_ts_buckets[ts].append(pkt)
- # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
- #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
- for ts in sorted(pkts_ts_buckets.keys()):
- pkts1.extend(sorted(pkts_ts_buckets[ts]))
- pkts_ts_buckets.clear()
-
- for pkt in RawPcapReader(golden):
- ts = pkt[1][0] * 1e6 + pkt[1][1]
- pkts_ts_buckets[ts].append(pkt)
- # don't take last ts bucket, it can be cut in middle and packets inside bucket might be different
- #for ts in sorted(pkts_ts_buckets.keys())[:-1]:
- for ts in sorted(pkts_ts_buckets.keys()):
- pkts2.extend(sorted(pkts_ts_buckets[ts]))
-
- assert_equal(len(pkts1), len(pkts2), 'Lengths of generated pcap (%s) and golden (%s) are different' % (output, golden))
-
- for pkt1, pkt2, i in zip(pkts1, pkts2, range(1, len(pkts1))):
- ts1 = float(pkt1[1][0]) + (float(pkt1[1][1]) / 1e6)
- ts2 = float(pkt2[1][0]) + (float(pkt2[1][1]) / 1e6)
-
- if abs(ts1-ts2) > 0.000005: # 5 nsec
- raise AssertionError("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'".format(output, golden, i, ts1, ts2))
-
- if pkt1[0] != pkt2[0]:
- errmsg = "RAW error: output file '{0}', differs from golden '{1}' in cap #{2}".format(output, golden, i)
- print(errmsg)
-
- print(format_text("\ndifferent fields for packet #{0}:".format(i), 'underline'))
-
- scapy_pkt1_info = self.scapy_pkt_show_to_str(Ether(pkt1[0])).split('\n')
- scapy_pkt2_info = self.scapy_pkt_show_to_str(Ether(pkt2[0])).split('\n')
-
- print(format_text("\nGot:\n", 'bold', 'underline'))
- for line, ref in zip(scapy_pkt1_info, scapy_pkt2_info):
- if line != ref:
- print(format_text(line, 'bold'))
-
- print(format_text("\nExpected:\n", 'bold', 'underline'))
- for line, ref in zip(scapy_pkt2_info, scapy_pkt1_info):
- if line != ref:
- print(format_text(line, 'bold'))
-
- print("\n")
- raise AssertionError(errmsg)
-
-
def run_sim (self, yaml, output, options = "", silent = False, obj = None, tunables = None):
if output:
user_cmd = "-f {0} -o {1} {2} -p {3}".format(yaml, output, options, self.scripts_path)
@@ -169,7 +171,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
tunables = None):
print('Testing profile: %s' % profile)
- output_cap = "a.pcap"
+ output_cap = "generated/a.pcap"
input_file = os.path.join('stl/', profile)
golden_file = os.path.join('exp',os.path.basename(profile).split('.')[0]+'.pcap');
if os.path.exists(output_cap):
@@ -186,7 +188,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
#os.system(s)
if compare:
- self.compare_caps(output_cap, golden_file)
+ compare_caps(output_cap, golden_file)
finally:
if not do_no_remove:
os.unlink(output_cap)
@@ -208,7 +210,7 @@ class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
assert_equal(rc, True, 'Simulation on profile %s (generated) failed.' % profile)
if compare:
- self.compare_caps(output_cap, golden_file)
+ compare_caps(output_cap, golden_file)
finally:
diff --git a/scripts/automation/regression/interactive_platform.py b/scripts/automation/regression/interactive_platform.py
index 10e89910..7a15bb0c 100755
--- a/scripts/automation/regression/interactive_platform.py
+++ b/scripts/automation/regression/interactive_platform.py
@@ -90,16 +90,31 @@ class InteractivePlatform(cmd.Cmd):
self.platform.configure_basic_interfaces()
print(termstyle.green("Basic interfaces configuration applied successfully."))
+ def do_basic_if_config_vlan(self, line):
+ """Apply basic interfaces configuartion with vlan to all platform interfaces"""
+ self.platform.configure_basic_interfaces(vlan = True)
+ print(termstyle.green("Basic VLAN interfaces configuration applied successfully."))
+
def do_pbr(self, line):
"""Apply IPv4 PBR configuration on all interfaces"""
self.platform.config_pbr()
print(termstyle.green("IPv4 PBR configuration applied successfully."))
+ def do_pbr_vlan(self, line):
+ """Apply IPv4 PBR configuration on all VLAN interfaces"""
+ self.platform.config_pbr(vlan = True)
+ print(termstyle.green("IPv4 VLAN PBR configuration applied successfully."))
+
def do_no_pbr(self, line):
"""Removes IPv4 PBR configuration from all interfaces"""
self.platform.config_no_pbr()
print(termstyle.green("IPv4 PBR configuration removed successfully."))
+ def do_no_pbr_vlan(self, line):
+ """Removes IPv4 PBR configuration from all VLAN interfaces"""
+ self.platform.config_no_pbr(vlan = True)
+ print(termstyle.green("IPv4 PBR VLAN configuration removed successfully."))
+
def do_nbar(self, line):
"""Apply NBAR PD configuration on all interfaces"""
self.platform.config_nbar_pd()
@@ -180,11 +195,21 @@ class InteractivePlatform(cmd.Cmd):
self.platform.config_ipv6_pbr()
print(termstyle.green("IPv6 PBR configuration applied successfully."))
+ def do_ipv6_pbr_vlan(self, line):
+ """Apply IPv6 PBR configuration on all vlan interfaces"""
+ self.platform.config_ipv6_pbr(vlan = True)
+ print(termstyle.green("IPv6 VLAN PBR configuration applied successfully."))
+
def do_no_ipv6_pbr(self, line):
"""Removes IPv6 PBR configuration from all interfaces"""
self.platform.config_no_ipv6_pbr()
print(termstyle.green("IPv6 PBR configuration removed successfully."))
+ def do_no_ipv6_pbr_vlan(self, line):
+ """Removes IPv6 PBR configuration from all VLAN interfaces"""
+ self.platform.config_no_ipv6_pbr(vlan = True)
+ print(termstyle.green("IPv6 VLAN PBR configuration removed successfully."))
+
def do_zbf(self, line):
"""Apply Zone-Based policy Firewall configuration on all interfaces"""
self.platform.config_zbf()
@@ -318,6 +343,14 @@ class InteractivePlatform(cmd.Cmd):
self.do_pbr('')
self.do_ipv6_pbr('')
+ def do_all_vlan(self, arg):
+ """Configures bundle of commands to set PBR routing using on vlan interfaces"""
+ self.do_load_clean('')
+ self.do_set_tftp_server('')
+ self.do_basic_if_config_vlan('')
+ self.do_pbr_vlan('')
+ self.do_ipv6_pbr_vlan('')
+
if __name__ == "__main__":
diff --git a/scripts/automation/regression/outer_packages.py b/scripts/automation/regression/outer_packages.py
index 61ddc5cd..b2839dee 100755
--- a/scripts/automation/regression/outer_packages.py
+++ b/scripts/automation/regression/outer_packages.py
@@ -1,7 +1,7 @@
#!/router/bin/python
-
import sys, site
import platform, os
+import pprint
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__)) # alternate use with: os.getcwd()
TREX_PATH = os.getenv('TREX_UNDER_TEST') # path to <trex-core>/scripts directory, env. variable TREX_UNDER_TEST should override it.
@@ -18,8 +18,11 @@ NIGHTLY_MODULES = [ {'name': 'ansi2html'},
{'name': 'rednose-0.4.1'},
{'name': 'progressbar-2.2'},
{'name': 'termstyle'},
+ {'name': 'urllib3'},
+ {'name': 'elasticsearch'},
+ {'name': 'requests'},
{'name': 'pyyaml-3.11', 'py-dep': True},
- {'name': 'nose-1.3.4', 'py-dep': True}
+ {'name': 'nose-1.3.4', 'py-dep': True},
]
@@ -62,6 +65,7 @@ def import_nightly_modules ():
sys.path.append(PATH_STL_API)
sys.path.append(PATH_STF_API)
import_module_list(NIGHTLY_MODULES)
+ #pprint.pprint(sys.path)
import_nightly_modules()
diff --git a/scripts/automation/regression/setups/kiwi02/benchmark.yaml b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
index 4e091d53..1eefccaf 100644
--- a/scripts/automation/regression/setups/kiwi02/benchmark.yaml
+++ b/scripts/automation/regression/setups/kiwi02/benchmark.yaml
@@ -4,6 +4,24 @@
#### common templates ###
+test_short_flow:
+ multiplier : 20000
+ cores : 4
+ bw_per_core : 1000
+
+test_short_flow_high_active:
+ multiplier : 20000
+ cores : 4
+ bw_per_core : 1000
+ active_flows : 4000000
+
+test_short_flow_high_active2:
+ multiplier : 15000
+ cores : 4
+ bw_per_core : 1000
+ active_flows : 4000000
+
+
stat_route_dict: &stat_route_dict
clients_start : 16.0.0.1
servers_start : 48.0.0.1
@@ -254,7 +272,7 @@ test_performance_vm_single_cpu:
cfg:
mult : "90%"
mpps_per_core_golden :
- min: 11.5
+ min: 11.2
max: 13.1
@@ -262,7 +280,7 @@ test_performance_vm_single_cpu_cached:
cfg:
mult : "90%"
mpps_per_core_golden :
- min: 22.0
+ min: 20.5
max: 25.0
@@ -271,7 +289,7 @@ test_performance_syn_attack_single_cpu:
cfg:
mult : "90%"
mpps_per_core_golden :
- min: 9.5
+ min: 9.3
max: 11.5
test_performance_vm_multi_cpus:
@@ -296,6 +314,6 @@ test_performance_syn_attack_multi_cpus:
core_count : 4
mult : "90%"
mpps_per_core_golden :
- min: 8.5
+ min: 8.4
max: 10.5
diff --git a/scripts/automation/regression/setups/trex07/benchmark.yaml b/scripts/automation/regression/setups/trex07/benchmark.yaml
index 26588ba7..7abc2e4d 100644
--- a/scripts/automation/regression/setups/trex07/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex07/benchmark.yaml
@@ -4,6 +4,24 @@
#### common templates ###
+test_short_flow:
+ multiplier : 25000
+ cores : 7
+ bw_per_core : 1000
+
+test_short_flow_high_active:
+ multiplier : 20000
+ cores : 7
+ bw_per_core : 1000
+ active_flows : 4000000
+
+test_short_flow_high_active2:
+ multiplier : 10000
+ cores : 7
+ bw_per_core : 1000
+ active_flows : 4000000
+
+
test_jumbo:
multiplier : 120
cores : 4
@@ -190,7 +208,7 @@ test_performance_vm_single_cpu_cached:
cfg:
mult : "10%"
mpps_per_core_golden :
- min: 16.0
+ min: 20.0
max: 25.0
@@ -199,7 +217,7 @@ test_performance_syn_attack_single_cpu:
cfg:
mult : "90%"
mpps_per_core_golden :
- min: 9.0
+ min: 8.4
max: 14.0
test_performance_vm_multi_cpus:
@@ -216,8 +234,8 @@ test_performance_vm_multi_cpus_cached:
core_count : 7
mult : "35%"
mpps_per_core_golden :
- min: 9.0
- max: 15.0
+ min: 24.5
+ max: 27.0
test_performance_syn_attack_multi_cpus:
cfg:
@@ -225,9 +243,10 @@ test_performance_syn_attack_multi_cpus:
mult : "90%"
mpps_per_core_golden :
min: 8.0
- max: 16.0
+ max: 11.0
test_all_profiles :
mult : "5%"
+ skip : ['udp_rand_len_9k.py', 'udp_inc_len_9k.py']
diff --git a/scripts/automation/regression/setups/trex08/benchmark.yaml b/scripts/automation/regression/setups/trex08/benchmark.yaml
index 70995b21..f409edb8 100644
--- a/scripts/automation/regression/setups/trex08/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex08/benchmark.yaml
@@ -4,6 +4,25 @@
### stateful ###
+test_short_flow:
+ multiplier : 50000
+ cores : 7
+ bw_per_core : 1000
+
+test_short_flow_high_active:
+ multiplier : 40000
+ cores : 7
+ bw_per_core : 1000
+ active_flows : 4000000
+
+test_short_flow_high_active2:
+ multiplier : 30000
+ cores : 7
+ bw_per_core : 1000
+ active_flows : 4000000
+
+
+
test_jumbo:
multiplier : 150
cores : 2
@@ -183,7 +202,7 @@ test_performance_vm_single_cpu:
cfg:
mult : "90%"
mpps_per_core_golden :
- min: 15.1
+ min: 15.5
max: 20.3
@@ -191,7 +210,7 @@ test_performance_vm_single_cpu_cached:
cfg:
mult : "10%"
mpps_per_core_golden :
- min: 29.1
+ min: 28.0
max: 32.0
diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml
index d8623811..79dab3e8 100644
--- a/scripts/automation/regression/setups/trex09/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex09/benchmark.yaml
@@ -195,7 +195,7 @@ test_performance_vm_single_cpu_cached:
cfg:
mult : "90%"
mpps_per_core_golden :
- min: 28.5
+ min: 28.3
max: 31.2
@@ -204,7 +204,7 @@ test_performance_syn_attack_single_cpu:
cfg:
mult : "90%"
mpps_per_core_golden :
- min: 12.9
+ min: 12.5
max: 14.5
test_performance_vm_multi_cpus:
@@ -212,7 +212,7 @@ test_performance_vm_multi_cpus:
core_count : 2
mult : "90%"
mpps_per_core_golden :
- min: 15.2
+ min: 14.4
max: 16.3
@@ -221,14 +221,14 @@ test_performance_vm_multi_cpus_cached:
core_count : 2
mult : "90%"
mpps_per_core_golden :
- min: 26.8
- max: 29.5
+ min: 29.5
+ max: 31.5
test_performance_syn_attack_multi_cpus:
cfg:
core_count : 2
mult : "90%"
mpps_per_core_golden :
- min: 13.0
+ min: 12.5
max: 13.8
diff --git a/scripts/automation/regression/setups/trex11/benchmark.yaml b/scripts/automation/regression/setups/trex11/benchmark.yaml
index 5ebcdd55..87654a35 100644
--- a/scripts/automation/regression/setups/trex11/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex11/benchmark.yaml
@@ -183,4 +183,54 @@ test_CPU_benchmark:
test_all_profiles :
mult : "5%"
skip : ['udp_rand_len_9k.py','udp_inc_len_9k.py'] # due to VIC 9K defect trex-282
-
+
+
+test_performance_vm_single_cpu:
+ cfg:
+ mult : "5%"
+ mpps_per_core_golden :
+ min: 9.5
+ max: 11.5
+
+
+test_performance_vm_single_cpu_cached:
+ cfg:
+ mult : "5%"
+ mpps_per_core_golden :
+ min: 26.5
+ max: 29.0
+
+
+
+test_performance_syn_attack_single_cpu:
+ cfg:
+ mult : "5%"
+ mpps_per_core_golden :
+ min: 8.5
+ max: 10.0
+
+test_performance_vm_multi_cpus:
+ cfg:
+ core_count : 1
+ mult : "5%"
+ mpps_per_core_golden :
+ min: 9.0
+ max: 11.5
+
+
+test_performance_vm_multi_cpus_cached:
+ cfg:
+ core_count : 1
+ mult : "5%"
+ mpps_per_core_golden :
+ min: 26.5
+ max: 29.0
+
+test_performance_syn_attack_multi_cpus:
+ cfg:
+ core_count : 1
+ mult : "5%"
+ mpps_per_core_golden :
+ min: 8.0
+ max: 10.0
+
diff --git a/scripts/automation/regression/setups/trex25/benchmark.yaml b/scripts/automation/regression/setups/trex25/benchmark.yaml
index f8fd0bee..2c677b81 100644
--- a/scripts/automation/regression/setups/trex25/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex25/benchmark.yaml
@@ -79,7 +79,7 @@ test_nat_learning:
nat_opened : 40000
-test_nbar_simple:
+test_nbar_simple: &nbar_simple
multiplier : 6
cores : 1
bw_per_core : 16.645
@@ -100,6 +100,12 @@ test_nbar_simple:
rtsp : 0.04
unknown : 28.52
+test_client_cfg_nbar: &client_cfg_nbar
+ << : *nbar_simple
+
+test_client_cfg_vlan:
+ cores : 1
+ multiplier : 10
test_rx_check_http: &rx_http
multiplier : 8800
@@ -118,7 +124,7 @@ test_rx_check_http_negative:
test_rx_check_sfr: &rx_sfr
- multiplier : 6.8
+ multiplier : 3.2
cores : 1
rx_sample_rate : 16
bw_per_core : 16.063
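
The new test_client_cfg_vlan entry above is read by the matching stateful test (trex_client_cfg_test.py, further down in this diff) through get_benchmark_param; an illustrative lookup for the trex25 setup:

    # inside CTRexClientCfg_Test.test_client_cfg_vlan
    mult = self.get_benchmark_param('multiplier')   # -> 10 on trex25
    core = self.get_benchmark_param('cores')        # -> 1 on trex25
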
diff --git a/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py b/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py
index 852e745d..158f59b9 100644
--- a/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_client_cfg_test.py
@@ -1,5 +1,6 @@
#!/router/bin/python
from .trex_general_test import CTRexGeneral_Test, CTRexScenario
+from .trex_nbar_test import CTRexNbarBase
from CPlatform import CStaticRouteConfig
from .tests_exceptions import *
#import sys
@@ -8,44 +9,71 @@ from nose.tools import nottest
# Testing client cfg ARP resolve. Actually, just need to check that TRex run finished with no errors.
# If resolve will fail, TRex will exit with exit code != 0
-class CTRexClientCfg_Test(CTRexGeneral_Test):
+class CTRexClientCfg_Test(CTRexNbarBase):
"""This class defines the IMIX testcase of the TRex traffic generator"""
def __init__(self, *args, **kwargs):
- # super(CTRexClientCfg_Test, self).__init__()
- CTRexGeneral_Test.__init__(self, *args, **kwargs)
+ CTRexNbarBase.__init__(self, *args, **kwargs)
def setUp(self):
if CTRexScenario.setup_name == 'kiwi02':
self.skip("Can't run currently on kiwi02")
+
super(CTRexClientCfg_Test, self).setUp() # launch super test class setUp process
- pass
- def test_client_cfg(self):
- # test initializtion
+ def test_client_cfg_nbar(self):
if self.is_loopback:
- return
- else:
- self.router.configure_basic_interfaces()
- self.router.config_pbr(mode = "config")
-
- ret = self.trex.start_trex(
- c = 1,
- m = 1,
- d = 10,
- f = 'cap2/dns.yaml',
- v = 3,
+ self.skip('No NBAR on loopback')
+
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+ self.router.config_nbar_pd()
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex (
+ c = core,
+ m = mult,
+ nc = True,
+ p = True,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g.yaml',
client_cfg = 'automation/regression/cfg/client_cfg.yaml',
l = 1000)
trex_res = self.trex.sample_to_run_finish()
-
print("\nLATEST RESULT OBJECT:")
print(trex_res)
+ self.check_general_scenario_results(trex_res, check_latency = False) # no latency with client config
+ self.match_classification()
+
+ def test_client_cfg_vlan(self):
+ if self.is_loopback:
+ self.skip('Not relevant on loopback')
+
+ self.router.configure_basic_interfaces(vlan = True)
+ self.router.config_pbr(mode = "config", vlan = True)
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex (
+ c = core,
+ m = mult,
+ nc = True,
+ p = True,
+ d = 60,
+ f = 'cap2/dns.yaml',
+ limit_ports = 4,
+ client_cfg = 'automation/regression/cfg/client_cfg_vlan.yaml')
- self.check_general_scenario_results(trex_res)
+ trex_res = self.trex.sample_to_run_finish()
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+ self.check_general_scenario_results(trex_res, check_latency = False) # no latency with client config
def tearDown(self):
- CTRexGeneral_Test.tearDown(self)
+ CTRexNbarBase.tearDown(self)
pass
if __name__ == "__main__":
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index f6d2b917..4453fd94 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -28,6 +28,7 @@ from nose.plugins.skip import SkipTest
import trex
from trex import CTRexScenario
import misc_methods
+import pprint
import sys
import os
# from CPlatformUnderTest import *
@@ -60,6 +61,7 @@ class CTRexGeneral_Test(unittest.TestCase):
self.trex_crashed = CTRexScenario.trex_crashed
self.modes = CTRexScenario.modes
self.GAManager = CTRexScenario.GAManager
+ self.elk = CTRexScenario.elk
self.no_daemon = CTRexScenario.no_daemon
self.skipping = False
self.fail_reasons = []
@@ -70,6 +72,21 @@ class CTRexGeneral_Test(unittest.TestCase):
self.is_VM = True if 'VM' in self.modes else False
if not CTRexScenario.is_init:
+ #update elk const object
+ if self.elk:
+ setup = CTRexScenario.elk_info['info']['setup']
+
+ if self.is_loopback :
+ setup['dut'] = 'loopback'
+ else:
+ setup['dut'] = 'router'
+
+ if self.is_VM:
+ setup['baremetal'] = False
+ setup['hypervisor'] = 'ESXi' #TBD
+ else:
+ setup['baremetal'] = True
+
if self.trex and not self.no_daemon: # stateful
CTRexScenario.trex_version = self.trex.get_trex_version()
if not self.is_loopback:
@@ -81,8 +98,12 @@ class CTRexGeneral_Test(unittest.TestCase):
CTRexScenario.router.load_platform_data_from_file(device_cfg)
CTRexScenario.router.launch_connection(device_cfg)
if CTRexScenario.router_cfg['forceImageReload']:
- running_image = CTRexScenario.router.get_running_image_details()['image']
+ image_d = CTRexScenario.router.get_running_image_details();
+ running_image = image_d['image']
print('Current router image: %s' % running_image)
+ if self.elk:
+ setup['dut'] = image_d.get('model','router');
+ print('Current router model : %s' % setup['dut'])
needed_image = device_cfg.get_image_name()
if not CTRexScenario.router.is_image_matches(needed_image):
print('Setting router image: %s' % needed_image)
@@ -107,7 +128,9 @@ class CTRexGeneral_Test(unittest.TestCase):
# raise RuntimeError('CTRexScenario class is not initialized!')
self.router = CTRexScenario.router
-
+ def get_elk_obj (self):
+ obj=trex.copy_elk_info ()
+ return (obj);
# def assert_dict_eq (self, dict, key, val, error=''):
# v1 = int(dict[key]))
@@ -142,9 +165,11 @@ class CTRexGeneral_Test(unittest.TestCase):
def check_CPU_benchmark (self, trex_res, err = 25, minimal_cpu = 10, maximal_cpu = 85):
cpu_util = trex_res.get_avg_steady_state_value('trex-global.data.m_cpu_util_raw')
trex_tx_bps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_bps')
+ trex_tx_pps = trex_res.get_avg_steady_state_value('trex-global.data.m_tx_pps')
expected_norm_cpu = self.get_benchmark_param('bw_per_core')
cores = self.get_benchmark_param('cores')
ports_count = trex_res.get_ports_count()
+ total_dp_cores = cores * (ports_count/2);
if not (cpu_util and ports_count and cores):
print("Can't calculate CPU benchmark, need to divide by zero: cpu util: %s, ports: %s, cores: %s" % (cpu_util, ports_count, cores))
test_norm_cpu = -1
@@ -172,16 +197,42 @@ class CTRexGeneral_Test(unittest.TestCase):
#if calc_error_precent > err and cpu_util > 10:
# self.fail('Excepted bw_per_core ratio: %s, got: %g' % (expected_norm_cpu, round(test_norm_cpu)))
+ trex_tx_gbps = trex_tx_bps/1e9
+ trex_tx_mpps = trex_tx_pps/1e6
+
+ trex_tx_gbps_pc = trex_tx_gbps*100.0/(cpu_util*total_dp_cores);
+ trex_tx_mpps_pc = trex_tx_mpps*100.0/(cpu_util*total_dp_cores)
+
+ trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_pkts")
+ trex_drops = trex_res.get_total_drops()
+ trex_drop_precent = trex_drops *100.0/trex_tx_pckt;
+
# report benchmarks
- if self.GAManager:
- try:
- pass
- #setup_test = '%s.%s' % (CTRexScenario.setup_name, self.get_name())
- #self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core', value = int(test_norm_cpu))
- #self.GAManager.gaAddAction(Event = 'stateful_test', action = setup_test, label = 'bw_per_core_exp', value = int(expected_norm_cpu))
- #self.GAManager.emptyAndReportQ()
- except Exception as e:
- print('Sending GA failed: %s' % e)
+ if self.elk:
+ elk_obj = self.get_elk_obj()
+ print("Reporting to elk")
+ elk_obj['test']={ "name" : self.get_name(),
+ "type" : "stateful",
+ "cores" : total_dp_cores,
+ "cpu%" : cpu_util,
+ "mpps" : (trex_tx_mpps),
+ "streams_count" :1,
+ "mpps_pc" : (trex_tx_mpps_pc),
+ "gbps_pc" : (trex_tx_gbps_pc),
+ "gbps" : (trex_tx_gbps),
+ "kcps" : (trex_res.get_last_value("trex-global.data.m_tx_cps")/1000.0),
+ "avg-pktsize" : round((1000.0*trex_tx_gbps/(8.0*trex_tx_mpps))),
+ "latecny" : { "min" : min(trex_res.get_min_latency().values()),
+ "max" : max(trex_res.get_max_latency().values()),
+ "avr" : max(trex_res.get_avg_latency().values()),
+ "jitter" : max(trex_res.get_jitter_latency().values()),
+ "max-win" : max(trex_res.get_avg_window_latency ().values()),
+ "drop-rate" :trex_drop_precent
+ }
+ };
+ pprint.pprint(elk_obj['test']);
+ self.elk.perf.push_data(elk_obj)
+
def check_results_gt (self, res, name, val):
if res is None:
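
A worked example of the per-core normalization computed in check_CPU_benchmark above (the numbers are made up for illustration):

    cpu_util       = 80.0    # percent, steady-state average
    total_dp_cores = 4       # cores * (ports_count / 2)
    trex_tx_gbps   = 40.0
    trex_tx_mpps   = 30.0

    gbps_pc = trex_tx_gbps * 100.0 / (cpu_util * total_dp_cores)   # 12.5  Gb/s per core at 100% CPU
    mpps_pc = trex_tx_mpps * 100.0 / (cpu_util * total_dp_cores)   # 9.375 Mpps per core at 100% CPU
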
diff --git a/scripts/automation/regression/stateful_tests/trex_imix_test.py b/scripts/automation/regression/stateful_tests/trex_imix_test.py
index f8fe0ed1..5f52fab7 100755
--- a/scripts/automation/regression/stateful_tests/trex_imix_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_imix_test.py
@@ -18,6 +18,99 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
# self.router.clear_counters()
pass
+ def test_short_flow(self):
+ """ short UDP flow with 64B packets, this test with small number of active flows """
+ # test initializtion
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 30,
+ f = 'cap2/cur_flow.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+
+ def test_short_flow_high_active(self):
+ """ short UDP flow with 64B packets, this test with 8M active flows """
+ # test initializtion
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+ active_flows =self.get_benchmark_param('active_flows')
+
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 60,
+ active_flows = active_flows,
+ f = 'cap2/cur_flow.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+
+ def test_short_flow_high_active2(self):
+ """ short UDP flow with 64B packets, this test with 8M active flows """
+ # test initializtion
+ if not self.is_loopback:
+ self.router.configure_basic_interfaces()
+ self.router.config_pbr(mode = "config")
+
+ mult = self.get_benchmark_param('multiplier')
+ core = self.get_benchmark_param('cores')
+ active_flows =self.get_benchmark_param('active_flows')
+
+
+ ret = self.trex.start_trex(
+ c = core,
+ m = mult,
+ p = True,
+ nc = True,
+ d = 60,
+ active_flows = active_flows,
+ f = 'cap2/cur_flow_single.yaml',
+ l = 1000)
+
+ trex_res = self.trex.sample_to_run_finish()
+
+ # trex_res is a CTRexResult instance- and contains the summary of the test results
+ # you may see all the results keys by simply calling here for 'print trex_res.result'
+ print("\nLATEST RESULT OBJECT:")
+ print(trex_res)
+
+ self.check_general_scenario_results(trex_res)
+ self.check_CPU_benchmark(trex_res)
+
def test_routing_imix_64(self):
# test initializtion
if not self.is_loopback:
@@ -112,7 +205,7 @@ class CTRexIMIX_Test(CTRexGeneral_Test):
ret = self.trex.start_trex(
c = core,
m = mult,
- p = True,
+ e = True,
nc = True,
d = 60,
f = 'cap2/imix_fast_1g.yaml',
diff --git a/scripts/automation/regression/stateful_tests/trex_nbar_test.py b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
index 6611ac96..a98da9ac 100755
--- a/scripts/automation/regression/stateful_tests/trex_nbar_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
@@ -5,18 +5,7 @@ from interfaces_e import IFType
from nose.tools import nottest
from misc_methods import print_r
-class CTRexNbar_Test(CTRexGeneral_Test):
- """This class defines the NBAR testcase of the TRex traffic generator"""
- def __init__(self, *args, **kwargs):
- super(CTRexNbar_Test, self).__init__(*args, **kwargs)
- self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
-
- def setUp(self):
- super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
-# self.router.kill_nbar_flows()
- self.router.clear_cft_counters()
- self.router.clear_nbar_stats()
-
+class CTRexNbarBase(CTRexGeneral_Test):
def match_classification (self):
nbar_benchmark = self.get_benchmark_param("nbar_classification")
test_classification = self.router.get_nbar_stats()
@@ -52,6 +41,17 @@ class CTRexNbar_Test(CTRexGeneral_Test):
if missmatchFlag:
self.fail(missmatchMsg)
+class CTRexNbar_Test(CTRexNbarBase):
+ """This class defines the NBAR testcase of the TRex traffic generator"""
+ def __init__(self, *args, **kwargs):
+ super(CTRexNbar_Test, self).__init__(*args, **kwargs)
+ self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
+
+ def setUp(self):
+ super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
+# self.router.kill_nbar_flows()
+ self.router.clear_cft_counters()
+ self.router.clear_nbar_stats()
def test_nbar_simple(self):
# test initializtion
diff --git a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
index 6940efd3..fbc58765 100755
--- a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
@@ -4,6 +4,7 @@ from trex_stl_lib.api import *
import os, sys
from collections import deque
from time import time, sleep
+import pprint
class STLBenchmark_Test(CStlGeneral_Test):
"""Benchark stateless performance"""
@@ -14,9 +15,21 @@ class STLBenchmark_Test(CStlGeneral_Test):
stabilize = 5 # ensure stabilization over this period
print('')
- for profile_bench in self.get_benchmark_param('profiles'):
+ #self.get_benchmark_param('profiles')
+ #profiles=[{'bw_per_core': 1,
+ # 'cpu_util': 1,
+ # 'kwargs': {'packet_len': 64},
+ # 'name': 'stl/udp_for_benchmarks.py'}]
+
+ profiles = self.get_benchmark_param('profiles')
+ dp_cores = self.stl_trex.system_info.get('dp_core_count', 0)
+
+ for profile_bench in profiles:
+
cpu_utils = deque([0] * stabilize, maxlen = stabilize)
- bws_per_core = deque([0] * stabilize, maxlen = stabilize)
+ bps = deque([0] * stabilize, maxlen = stabilize)
+ pps = deque([0] * stabilize, maxlen = stabilize)
+
kwargs = profile_bench.get('kwargs', {})
print('Testing profile %s, kwargs: %s' % (profile_bench['name'], kwargs))
profile = STLProfile.load(os.path.join(CTRexScenario.scripts_path, profile_bench['name']), **kwargs)
@@ -32,13 +45,30 @@ class STLBenchmark_Test(CStlGeneral_Test):
for i in range(timeout + 1):
stats = self.stl_trex.get_stats()
cpu_utils.append(stats['global']['cpu_util'])
- bws_per_core.append(stats['global']['bw_per_core'])
+ bps.append(stats['global']['tx_bps'])
+ pps.append(stats['global']['tx_pps'])
+
if i > stabilize and min(cpu_utils) > max(cpu_utils) * 0.95:
break
sleep(0.5)
agv_cpu_util = sum(cpu_utils) / stabilize
- agv_bw_per_core = sum(bws_per_core) / stabilize
+ agv_pps = sum(pps) / stabilize
+ agv_bps = sum(bps) / stabilize
+
+ if agv_cpu_util == 0.0:
+ agv_cpu_util=1.0;
+
+ agv_mpps = (agv_pps/1e6);
+ agv_gbps = (agv_bps/1e9)
+
+
+ agv_gbps_norm = agv_gbps * 100.0/agv_cpu_util;
+ agv_mpps_norm = agv_mpps * 100.0/agv_cpu_util;
+
+ agv_gbps_norm_pc = agv_gbps_norm/dp_cores;
+ agv_mpps_norm_pc = agv_mpps_norm/dp_cores;
+
if critical_test and i == timeout and agv_cpu_util > 10:
raise Exception('Timeout on waiting for stabilization, last CPU util values: %s' % list(cpu_utils))
@@ -48,24 +78,32 @@ class STLBenchmark_Test(CStlGeneral_Test):
raise Exception('Too much queue_full: %s' % stats['global']['queue_full'])
if not cpu_utils[-1]:
raise Exception('CPU util is zero, last values: %s' % list(cpu_utils))
- print('Done (%ss), CPU util: %4g, bw_per_core: %6sGb/core' % (int(time() - start_time), agv_cpu_util, round(agv_bw_per_core, 2)))
- # TODO: add check of benchmark based on results from regression
-
- # report benchmarks
- if self.GAManager:
- try:
- pass
- #profile_repr = '%s.%s %s' % (CTRexScenario.setup_name,
- # os.path.basename(profile_bench['name']),
- # repr(kwargs).replace("'", ''))
- #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
- # label = 'bw_per_core', value = int(agv_bw_per_core))
- # TODO: report expected once acquired
- #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
- # label = 'bw_per_core_exp', value = int(expected_norm_cpu))
- #self.GAManager.emptyAndReportQ()
- except Exception as e:
- print('Sending GA failed: %s' % e)
+ print('Done (%ss), CPU util: %4g, norm_pps_per_core:%6smpps norm_bw_per_core: %6sGb/core' % (int(time() - start_time), agv_cpu_util, round(agv_mpps_norm_pc,2), round(agv_gbps_norm_pc, 2)))
+
+ # report benchmarks to elk
+ if self.elk:
+ streams=kwargs.get('stream_count',1)
+ elk_obj = self.get_elk_obj()
+ print("\n* Reporting to elk *\n")
+ name=profile_bench['name']
+ elk_obj['test']={ "name" : name,
+ "type" : "stateless-range",
+ "cores" : dp_cores,
+ "cpu%" : agv_cpu_util,
+ "mpps" : (agv_mpps),
+ "streams_count" :streams,
+ "mpps_pc" : (agv_mpps_norm_pc),
+ "gbps_pc" : (agv_gbps_norm_pc),
+ "gbps" : (agv_gbps),
+ "avg-pktsize" : round((1000.0*agv_gbps/(8.0*agv_mpps))),
+ "latecny" : { "min" : -1.0,
+ "max" : -1.0,
+ "avr" : -1.0
+ }
+ };
+ #pprint.pprint(elk_obj);
+ self.elk.perf.push_data(elk_obj)
+
def tearDown(self):
self.stl_trex.reset()
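
For reference, a minimal standalone sketch of the per-core normalization the loop above now reports; the traffic and CPU numbers are hypothetical, only the formulas follow the code:

    tx_bps, tx_pps     = 20e9, 30e6      # hypothetical averages of tx_bps / tx_pps
    cpu_util, dp_cores = 50.0, 4         # hypothetical CPU% and DP core count

    gbps, mpps   = tx_bps / 1e9, tx_pps / 1e6
    gbps_norm_pc = gbps * 100.0 / cpu_util / dp_cores    # 10.0 Gb/sec per core at 100% CPU
    mpps_norm_pc = mpps * 100.0 / cpu_util / dp_cores    # 15.0 Mpps per core at 100% CPU
    avg_pktsize  = round(1000.0 * gbps / (8.0 * mpps))   # ~83 bytes per packet
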
diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py
index 641f0a33..a1f4dd3b 100644
--- a/scripts/automation/regression/stateless_tests/stl_performance_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py
@@ -1,6 +1,7 @@
import os
from .stl_general_test import CStlGeneral_Test, CTRexScenario
from trex_stl_lib.api import *
+import pprint
def avg (values):
return (sum(values) / float(len(values)))
@@ -67,6 +68,42 @@ class PerformanceReport(object):
ga.emptyAndReportQ()
+    def norm_scenario (self):
+ s=self.scenario
+ s='+'.join(s.split(' '));
+ s='+'.join(s.split('-'));
+ s='+'.join(s.split(','));
+ l=s.split('+')
+ lr=[]
+ for obj in l:
+ if len(obj):
+ lr.append(obj);
+ s='-'.join(lr);
+ return(s);
+
+ def report_to_elk(self, elk,elk_obj, golden_mpps):
+ print("\n* Reporting to elk *\n")
+ elk_obj['test']={ "name" : self.norm_senario(),
+ "type" : "stateless",
+ "cores" : self.core_count,
+ "cpu%" : self.avg_cpu,
+ "mpps" : self.avg_mpps,
+ "streams_count" : 1,
+ "mpps_pc" : self.avg_mpps_per_core,
+ "gbps_pc" : self.avg_gbps_per_core,
+ "gbps" : self.avg_gbps,
+ "avg-pktsize" : ((1000.0*self.avg_gbps/(8.0*self.avg_mpps))),
+ "latecny" : { "min" : -1.0,
+ "max" : -1.0,
+ "avr" : -1.0
+ }
+ };
+
+ #pprint.pprint(elk_obj);
+ # push to elk
+ elk.perf.push_data(elk_obj)
+
+
class STLPerformance_Test(CStlGeneral_Test):
"""Tests for stateless client"""
@@ -238,24 +275,25 @@ class STLPerformance_Test(CStlGeneral_Test):
############################################# test's infra functions ###########################################
- def execute_single_scenario (self, scenario_cfg, iterations = 4):
+ def execute_single_scenario (self, scenario_cfg):
golden = scenario_cfg['mpps_per_core_golden']
-
- for i in range(iterations, -1, -1):
- report = self.execute_single_scenario_iteration(scenario_cfg)
- rc = report.check_golden(golden)
+ report = self.execute_single_scenario_iteration(scenario_cfg)
+
+ if self.GAManager:
+ report.report_to_analytics(self.GAManager, golden)
- if (rc == PerformanceReport.GOLDEN_NORMAL) or (rc == PerformanceReport.GOLDEN_BETTER):
- if self.GAManager:
- report.report_to_analytics(self.GAManager, golden)
+ #report to elk
+ if self.elk:
+ elk_obj = self.get_elk_obj()
+ report.report_to_elk(self.elk,elk_obj, golden)
- return
+ rc = report.check_golden(golden)
- if rc == PerformanceReport.GOLDEN_BETTER:
- return
+ if rc == PerformanceReport.GOLDEN_NORMAL or rc == PerformanceReport.GOLDEN_BETTER:
+ return
- print("\n*** Measured Mpps per core '{0}' is lower than expected golden '{1} - re-running scenario...{2} attempts left".format(report.avg_mpps_per_core, scenario_cfg['mpps_per_core_golden'], i))
+ print("\n*** Measured Mpps per core '{0}' is lower than expected golden '{1}'".format(report.avg_mpps_per_core, scenario_cfg['mpps_per_core_golden']))
assert 0, "performance failure"
@@ -296,7 +334,8 @@ class STLPerformance_Test(CStlGeneral_Test):
# sample bps/pps
for _ in range(0, 20):
stats = self.c.get_stats(ports = 0)
- if stats['global'][ 'queue_full']>10000:
+ max_queue_full = 100000 if self.is_VM else 10000
+ if stats['global'][ 'queue_full'] > max_queue_full:
assert 0, "Queue is full need to tune the multiplier"
# CPU results are not valid cannot use them
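
As an aside, the scenario-normalization helper added to PerformanceReport above collapses spaces, dashes and commas into single dashes; a condensed, behavior-equivalent illustration with a made-up label:

    label  = 'VM - 64 bytes, syn attack'                    # hypothetical scenario string
    tokens = [t for t in label.replace('-', ' ').replace(',', ' ').split(' ') if t]
    print('-'.join(tokens))                                 # VM-64-bytes-syn-attack
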
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
index 4dad712f..8812ac48 100644
--- a/scripts/automation/regression/stateless_tests/stl_rx_test.py
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -52,7 +52,7 @@ class STLRX_Test(CStlGeneral_Test):
'allow_packets_drop_num': 1, # allow 1 pkt drop
},
- 'librte_pmd_mlx5': {
+ 'net_mlx5': {
'rate_percent': 80,
'total_pkts': 1000,
'rate_latency': 1,
diff --git a/scripts/automation/regression/trex.py b/scripts/automation/regression/trex.py
index 416a6e3b..4f5eba60 100644
--- a/scripts/automation/regression/trex.py
+++ b/scripts/automation/regression/trex.py
@@ -10,6 +10,7 @@ import time
from CProgressDisp import TimedProgressBar
from stateful_tests.tests_exceptions import TRexInUseError
import datetime
+import copy
class CTRexScenario:
modes = set() # list of modes of this setup: loopback, virtual etc.
@@ -41,6 +42,20 @@ class CTRexScenario:
debug_image = False
test = None
json_verbose = False
+ elk = None
+ elk_info = None
+
+def copy_elk_info ():
+ assert(CTRexScenario.elk_info)
+ d = copy.deepcopy(CTRexScenario.elk_info);
+
+    timestamp = datetime.datetime.now() - datetime.timedelta(hours=2); # compensate for the Jerusalem time zone; Kibana has no option to change the display timezone
+ d['timestamp']=timestamp.strftime("%Y-%m-%d %H:%M:%S")
+ return(d)
+
+
+
+
class CTRexRunner:
"""This is an instance for generating a CTRexRunner"""
diff --git a/scripts/automation/regression/trex_elk.py b/scripts/automation/regression/trex_elk.py
new file mode 100644
index 00000000..a5ef7a88
--- /dev/null
+++ b/scripts/automation/regression/trex_elk.py
@@ -0,0 +1,322 @@
+import os
+import outer_packages
+import json
+import pprint
+from elasticsearch import Elasticsearch
+from pprint import pprint
+from elasticsearch import helpers
+import random
+import datetime
+
+# one object example for perf
+def create_one_object (build_id):
+ d={};
+
+ sim_date=datetime.datetime.now()-datetime.timedelta(hours=random.randint(0,24*30));
+ info = {};
+
+
+ img={}
+ img['sha'] = random.choice(["v2.11","v2.10","v2.12","v2.13","v2.14"])
+ img['build_time'] = sim_date.strftime("%Y-%m-%d %H:%M:%S")
+ img['version'] = random.choice(["v2.11","v2.10","v2.12","v2.13","v2.14"])
+ img['formal'] = False
+
+ setup={}
+
+    setup['distro']='Ubuntu14.03'
+ setup['kernel']='2.6.12'
+ setup['baremetal']=True
+ setup['hypervisor']='None'
+ setup['name']='trex07'
+ setup['cpu-sockets']=2
+ setup['cores']=16
+ setup['cpu-speed']=3.5
+
+ setup['dut'] ='loopback'
+ setup['drv-name']='mlx5'
+ setup['nic-ports']=2
+ setup['total-nic-ports']=2
+ setup['nic-speed'] ="40GbE"
+
+
+
+ info['image'] = img
+ info['setup'] = setup
+
+ d['info'] =info;
+
+ d['timestamp']=sim_date.strftime("%Y-%m-%d %H:%M:%S")
+ d['build_id']=str("build-%d" %(build_id))
+ d['test']={ "name" : "test1",
+ "type" : "stateless",
+ "cores" : random.randint(1,10),
+ "cpu%" : random.randint(60,99),
+ "mpps" : random.randint(9,32),
+ "mpps_pc" : random.randint(9,32),
+ "gbps_pc" : random.randint(9,32),
+ "gbps" : random.randint(9,32),
+ "avg-pktsize" : random.randint(60,1500),
+ "latecny" : { "min" : random.randint(1,10),
+ "max" : random.randint(100,120),
+ "avr" : random.randint(1,60)
+ }
+ };
+
+
+ return(d)
+
+
+class EsHelper(object):
+
+ def __init__ (self, es,
+ alias,
+ index_name,
+ mapping):
+ self.es = es
+ self.alias = alias
+ self.index_name = index_name
+ self.mapping = mapping
+ self.setting = { "index.mapper.dynamic":"false"};
+
+ def delete (self):
+ es=self.es;
+ es.indices.delete(index=self.alias, ignore=[400, 404]);
+
+ def is_exists (self):
+ es=self.es;
+ return es.indices.exists(index=self.alias, ignore=[400, 404])
+
+    def create_first_time (self):
+ es=self.es;
+ index_name=self.index_name
+ es.indices.create(index=index_name, ignore=[],body = {
+ "aliases": { self.alias : {} },
+ "mappings" : { "data": self.mapping },
+ "settings" : self.setting
+ });
+
+ def update(self):
+ es=self.es;
+ es.indices.put_mapping(index=self.alias, doc_type="data",body=self.mapping);
+ es.indices.rollover(alias=self.alias,body={
+ "conditions": {
+ "max_age": "30d",
+ "max_docs": 100000
+ },
+ "mappings" : { "data": self.mapping },
+ "settings" : self.setting
+ }
+ );
+
+ def open(self):
+ if not self.is_exists():
+            self.create_first_time ()
+ else:
+ self.update()
+
+ def close(self):
+ pass;
+
+ def push_data(self,data):
+ es=self.es;
+ es.index(index=self.alias,doc_type="data", body=data);
+
+
+
+
+def create_reg_object (build_id):
+ d={};
+
+ sim_date=datetime.datetime.now()-datetime.timedelta(hours=random.randint(0,24*30));
+ info = {};
+
+
+ img={}
+ img['sha'] = random.choice(["v2.11","v2.10","v2.12","v2.13","v2.14"])
+ img['build_time'] = sim_date.strftime("%Y-%m-%d %H:%M:%S")
+ img['version'] = random.choice(["v2.11","v2.10","v2.12","v2.13","v2.14"])
+ img['formal'] = False
+
+ setup={}
+
+    setup['distro']='Ubuntu14.03'
+ setup['kernel']='2.6.12'
+ setup['baremetal']=True
+ setup['hypervisor']='None'
+ setup['name']='trex07'
+ setup['cpu-sockets']=2
+ setup['cores']=16
+ setup['cpu-speed']=3.5
+
+ setup['dut'] ='loopback'
+ setup['drv-name']='mlx5'
+ setup['nic-ports']=2
+ setup['total-nic-ports']=2
+ setup['nic-speed'] ="40GbE"
+
+
+
+ info['image'] = img
+ info['setup'] = setup
+
+ d['info'] =info;
+
+ d['timestamp']=sim_date.strftime("%Y-%m-%d %H:%M:%S")
+ d['build_id']=str("build-%d" %(build_id))
+ d['test']= { "name" : "stateful_tests.trex_imix_test.CTRexIMIX_Test.test_routing_imix" ,
+ "type" : "stateless",
+ "duration_sec" : random.uniform(1,10),
+ "result" : random.choice(["PASS","SKIP","FAIL"]),
+ "stdout" : """
+ LATEST RESULT OBJECT:
+ Total ARP received : 16 pkts
+ maximum-latency : 300 usec
+ average-latency : 277 usec
+ latency-any-error : ERROR
+ """
+ };
+
+ return(d)
+
+
+
+# how to add new keyword
+# you can add a new field but you can't remove old field
+class TRexEs(object):
+
+ def __init__ (self, host,
+ port,
+ ):
+ self.es = Elasticsearch([{"host": host, "port": port}])
+ es=self.es;
+ res=es.info()
+ es_version=res["version"]["number"];
+ l=es_version.split('.');
+ if not(len(l)==3 and int(l[0])>=5):
+ print("NOT valid ES version should be at least 5.0.x",es_version);
+ raise RuntimeError
+
+ setup_info = { # constant per setup
+ "properties": {
+
+ "image" : {
+ "properties": {
+ "sha" : { "type": "keyword" }, # git sha
+ "build_time" : { "type": "date", # build time
+ "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"},
+ "version" : { "type": "keyword" }, # version name like 'v2.12'
+ "formal" : { "type": "boolean" }, # true for formal release
+ }
+ },
+
+ "setup" : {
+ "properties": {
+ "distro" : { "type": "keyword" }, # 'ubuntu'
+ "kernel" : { "type": "keyword" }, # 2.3.19
+ "baremetal" : { "type": "boolean" }, # true or false for
+ "hypervisor" : { "type": "keyword" }, # kvm,esxi , none
+ "name" : { "type": "keyword" }, # setup name , e.g. kiwi02
+ "cpu-sockets" : { "type": "long" }, # number of socket
+ "cores" : { "type": "long" }, # total cores
+ "cpu-speed" : { "type": "double" }, # 3.5 in ghz
+ "dut" : { "type": "keyword" }, # asr1k, loopback
+ "drv-name" : { "type": "keyword" }, # vic, mlx5,599,xl710,x710
+ "nic-ports" : { "type": "long" }, #2,1,4
+ "total-nic-ports" : { "type": "long" }, #8
+ "nic-speed" : { "type": "keyword" }, #40Gb
+ }
+ }
+ }
+ }
+
+
+ perf_mapping = {
+ "dynamic": "strict",
+ "properties": {
+
+ "scenario" : { "type": "keyword" },
+ "build_id" : { "type": "keyword" },
+ "timestamp" : { "type": "date",
+ "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"},
+
+ "info" : setup_info,
+
+ "test" : {
+ "properties": {
+ "name" : { "type": "keyword" }, # name of the test
+ "type" : { "type": "keyword" }, # stateless,stateful, other
+ "cores" : { "type": "long" },
+ "cpu%" : { "type": "double" },
+ "mpps" : { "type": "double" },
+ "streams_count" : { "type": "long" },
+ "mpps_pc" : { "type": "double" },
+ "gbps_pc" : { "type": "double" },
+ "gbps" : { "type": "double" },
+ "avg-pktsize" : { "type": "long" },
+ "kcps" : { "type": "double" },
+ "latecny" : {
+ "properties": {
+ "min" : { "type": "double" },
+ "max" : { "type": "double" },
+ "avr" : { "type": "double" },
+ "max-win" : { "type": "double" },
+ "drop-rate" : { "type": "double" },
+ "jitter" : { "type": "double" },
+ }
+ }
+
+ }
+ }
+ }
+ }
+
+ self.perf = EsHelper(es=es,
+ alias="perf",
+ index_name='trex_perf-000001',
+ mapping=perf_mapping)
+
+
+
+ reg_mapping = {
+ "dynamic": "strict",
+ "properties": {
+
+ "scenario" : { "type": "keyword" },
+ "build_id" : { "type": "keyword" },
+ "timestamp" : { "type": "date",
+ "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"},
+
+ "info" : setup_info,
+
+ "test" : {
+ "properties": {
+ "name" : { "type" : "text" }, # name of the test
+ "name_key" : { "type" : "keyword" }, # name of the test
+ "name_full" : { "type" : "keyword" }, # full name of the test
+ "type" : { "type" : "keyword" }, # stateless,stateful, other
+ "duration_sec" : { "type": "double" }, # sec
+ "result" : { "type" : "keyword" }, # PASS,FAIL,SKIP
+ "stdout" : { "type" : "text" }, # output in case of faliue
+ }
+ }
+ }
+ }
+
+
+ self.reg = EsHelper(es=es,
+ alias="reg",
+ index_name='trex_reg-000001',
+ mapping=reg_mapping)
+
+
+ self.perf.open();
+ self.reg.open();
+
+
+
+
+
+
+
+
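
A minimal usage sketch for the new module; the host and port are the ones the regression plugin below connects to, and the two create_*_object() helpers generate the simulated documents defined above:

    import trex_elk

    es = trex_elk.TRexEs('sceasr-b20', 9200)          # raises if the cluster is older than 5.0.x
    es.perf.push_data(trex_elk.create_one_object(1))  # indexed under the 'perf' alias
    es.reg.push_data(trex_elk.create_reg_object(1))   # indexed under the 'reg' alias
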
diff --git a/scripts/automation/regression/trex_unit_test.py b/scripts/automation/regression/trex_unit_test.py
index 34d2c430..39984c7d 100755
--- a/scripts/automation/regression/trex_unit_test.py
+++ b/scripts/automation/regression/trex_unit_test.py
@@ -27,10 +27,13 @@ Description:
import os
import sys
import outer_packages
-
+import datetime
import nose
from nose.plugins import Plugin
+from nose.plugins.xunit import escape_cdata
from nose.selector import Selector
+from nose.exc import SkipTest
+from nose.pyversion import force_unicode, format_exception
import CustomLogger
import misc_methods
from rednose import RedNose
@@ -40,11 +43,27 @@ from trex_stf_lib.trex_client import *
from trex_stf_lib.trex_exceptions import *
from trex_stl_lib.api import *
from trex_stl_lib.utils.GAObjClass import GAmanager_Regression
+import trex_elk
import trex
import socket
from pprint import pprint
import time
from distutils.dir_util import mkpath
+import re
+from io import StringIO
+
+
+
+TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
+
+def id_split(idval):
+ m = TEST_ID.match(idval)
+ if m:
+ name, fargs = m.groups()
+ head, tail = name.rsplit(".", 1)
+ return [head, tail+fargs]
+ else:
+ return idval.rsplit(".", 1)
# nose overrides
@@ -105,7 +124,167 @@ def address_to_ip(address):
return socket.gethostbyname(address)
+class TRexTee(object):
+ def __init__(self, encoding, *args):
+ self._encoding = encoding
+ self._streams = args
+
+ def write(self, data):
+ data = force_unicode(data, self._encoding)
+ for s in self._streams:
+ s.write(data)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def flush(self):
+ for s in self._streams:
+ s.flush()
+
+ def isatty(self):
+ return False
+
+
class CTRexTestConfiguringPlugin(Plugin):
+ encoding = 'UTF-8'
+
+ def __init__(self):
+ super(CTRexTestConfiguringPlugin, self).__init__()
+ self._capture_stack = []
+ self._currentStdout = None
+ self._currentStderr = None
+
+ def _timeTaken(self):
+ if hasattr(self, '_timer'):
+ taken = time.time() - self._timer
+ else:
+ # test died before it ran (probably error in setup())
+ # or success/failure added before test started probably
+ # due to custom TestResult munging
+ taken = 0.0
+ return taken
+
+ def _startCapture(self):
+ self._capture_stack.append((sys.stdout, sys.stderr))
+ self._currentStdout = StringIO()
+ self._currentStderr = StringIO()
+ sys.stdout = TRexTee(self.encoding, self._currentStdout, sys.stdout)
+ sys.stderr = TRexTee(self.encoding, self._currentStderr, sys.stderr)
+
+ def startContext(self, context):
+ self._startCapture()
+
+ def stopContext(self, context):
+ self._endCapture()
+
+ def beforeTest(self, test):
+ self._timer = time.time()
+ self._startCapture()
+
+ def _endCapture(self):
+ if self._capture_stack:
+ sys.stdout, sys.stderr = self._capture_stack.pop()
+
+ def afterTest(self, test):
+ self._endCapture()
+ self._currentStdout = None
+ self._currentStderr = None
+
+ def _getCapturedStdout(self):
+ if self._currentStdout:
+ value = self._currentStdout.getvalue()
+ if value:
+ return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(
+ value)
+ return ''
+
+ def _getCapturedStderr(self):
+ if self._currentStderr:
+ value = self._currentStderr.getvalue()
+ if value:
+ return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(
+ value)
+ return ''
+
+ def addError(self, test, err, capt=None):
+ elk = CTRexScenario.elk
+ if elk:
+ taken = self._timeTaken()
+ id = test.id()
+ err_msg=self._getCapturedStdout()+self._getCapturedStderr();
+ name=id_split(id)[-1]
+
+ elk_obj = trex.copy_elk_info ()
+ elk_obj['test']={
+ "name" : name,
+ "name_key" : name,
+ "name_full" : id,
+ "type" : self.get_operation_mode (),
+ "duration_sec" : taken,
+ "result" : "ERROR",
+ "stdout" : err_msg,
+ };
+ #pprint(elk_obj['test']);
+ elk.reg.push_data(elk_obj)
+
+
+
+ def addFailure(self, test, err, capt=None, tb_info=None):
+ elk = CTRexScenario.elk
+ if elk:
+ taken = self._timeTaken()
+ tb = format_exception(err, self.encoding)
+ id = test.id()
+ err_msg=self._getCapturedStdout()+self._getCapturedStderr();
+ name=id_split(id)[-1]
+
+ elk_obj = trex.copy_elk_info ()
+ elk_obj['test']={
+ "name" : name,
+ "name_key" : name,
+ "name_full" : id,
+ "type" : self.get_operation_mode (),
+ "duration_sec" : taken,
+ "result" : "FAILURE",
+ "stdout" : err_msg,
+ };
+ #pprint(elk_obj['test']);
+ elk.reg.push_data(elk_obj)
+
+
+
+ def addSuccess(self, test, capt=None):
+ elk = CTRexScenario.elk
+ if elk:
+ taken = self._timeTaken()
+ id = test.id()
+ name=id_split(id)[-1]
+ elk_obj = trex.copy_elk_info ()
+ elk_obj['test']={
+ "name" : name,
+ "name_key" : name,
+ "name_full" : id,
+ "type" : self.get_operation_mode (),
+ "duration_sec" : taken,
+ "result" : "PASS",
+ "stdout" : "",
+ };
+ #pprint(elk_obj['test']);
+ elk.reg.push_data(elk_obj)
+
+
+
+ def get_operation_mode (self):
+ if self.stateful:
+ return('stateful');
+ return('stateless');
+
+
+
+
+##### option/configure
+
def options(self, parser, env = os.environ):
super(CTRexTestConfiguringPlugin, self).options(parser, env)
parser.add_option('--cfg', '--trex-scenario-config', action='store',
@@ -229,6 +408,52 @@ class CTRexTestConfiguringPlugin(Plugin):
appName = 'TRex',
appVer = CTRexScenario.trex_version)
+ CTRexScenario.elk = trex_elk.TRexEs('sceasr-b20',9200);
+ self.set_cont_elk_info ()
+
+ def set_cont_elk_info (self):
+ elk_info={}
+ timestamp = datetime.datetime.now() - datetime.timedelta(hours=2); # need to update this
+ info = {};
+
+
+ img={}
+ img['sha'] = "v2.14" #TBD
+ img['build_time'] = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+ img['version'] = "v2.14" #TBD need to fix
+ img['formal'] = False
+
+ setup={}
+
+        setup['distro']='None' #TBD 'Ubuntu14.03'
+ setup['kernel']='None' #TBD '2.6.12'
+ setup['baremetal']=True #TBD
+ setup['hypervisor']='None' #TBD
+ setup['name']=CTRexScenario.setup_name
+
+ setup['cpu-sockets']=0 #TBD 2
+ setup['cores']=0 #TBD 16
+ setup['cpu-speed']=-1 #TBD 3.5
+
+ setup['dut'] ='None' #TBD 'loopback'
+ setup['drv-name']='None' #TBD 'mlx5'
+ setup['nic-ports']=0 #TBD 2
+ setup['total-nic-ports']=0 #TBD 2
+ setup['nic-speed'] = "None" #"40GbE" TBD
+
+
+
+ info['image'] = img
+ info['setup'] = setup
+
+ elk_info['info'] =info;
+
+ elk_info['timestamp']=timestamp.strftime("%Y-%m-%d %H:%M:%S") # need to update it
+ elk_info['build_id']=os.environ.get('BUILD_ID')
+ elk_info['scenario']=os.environ.get('SCENARIO')
+
+ CTRexScenario.elk_info = elk_info
+
def begin (self):
client = CTRexScenario.trex
@@ -274,6 +499,9 @@ class CTRexTestConfiguringPlugin(Plugin):
CustomLogger.setup_custom_logger('TRexLogger')
def finalize(self, result):
+ while self._capture_stack:
+ self._endCapture()
+
if self.functional or self.collect_only:
return
#CTRexScenario.is_init = False
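
A condensed, standalone restatement of the stdout tee the plugin installs above (the encoding handling is dropped for brevity and the sample output string is made up):

    import sys
    from io import StringIO

    class Tee(object):
        """Duplicate writes to several streams, like TRexTee minus the encoding step."""
        def __init__(self, *streams):
            self._streams = streams
        def write(self, data):
            for s in self._streams:
                s.write(data)
        def flush(self):
            for s in self._streams:
                s.flush()

    captured = StringIO()
    sys.stdout = Tee(captured, sys.__stdout__)   # test output is shown *and* captured
    print('hello from a test')
    sys.stdout = sys.__stdout__                  # afterTest()/finalize() pop the real stream back
    assert 'hello from a test' in captured.getvalue()
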
diff --git a/scripts/automation/trex_control_plane/stf/examples/stf_active_flow.py b/scripts/automation/trex_control_plane/stf/examples/stf_active_flow.py
index 0a72c9ac..8560a5db 100644
--- a/scripts/automation/trex_control_plane/stf/examples/stf_active_flow.py
+++ b/scripts/automation/trex_control_plane/stf/examples/stf_active_flow.py
@@ -14,8 +14,8 @@ def minimal_stateful_test(server,csv_file,a_active_flows):
trex_client.start_trex(
c = 7,
m = 30000,
-# f = 'cap2/cur_flow_single.yaml',
- f = 'cap2/cur_flow.yaml',
+ f = 'cap2/cur_flow_single.yaml',
+# f = 'cap2/cur_flow.yaml',
d = 30,
l = 1000,
p=True,
@@ -39,6 +39,7 @@ def minimal_stateful_test(server,csv_file,a_active_flows):
print("WARNING QUEU WAS FULL");
tuple=(active_flows[-5],cpu_utl[-5],pps[-5],queue_full[-1])
+ print(tuple)
file_writer = csv.writer(test_file)
file_writer.writerow(tuple);
@@ -58,7 +59,7 @@ if __name__ == '__main__':
max_flows = 8000000;
min_flows = 100;
active_flow = min_flows;
- num_point = 10
+ num_point = 40
factor = math.exp(math.log(max_flows/min_flows,math.e)/num_point);
for i in range(num_point+1):
print("<<=====================>>",i,math.floor(active_flow))
diff --git a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
index 0977d2eb..4e7deb93 100755
--- a/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
+++ b/scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_client.py
@@ -154,8 +154,7 @@ class CTRexClient(object):
raise ValueError('d parameter must be integer, specifying how long TRex run.')
trex_cmd_options.update( {'f' : f, 'd' : d} )
- if not trex_cmd_options.get('l'):
- self.result_obj.latency_checked = False
+ self.result_obj.latency_checked = 'l' in trex_cmd_options
if 'k' in trex_cmd_options:
timeout += int(trex_cmd_options['k']) # during 'k' seconds TRex stays in 'Starting' state
@@ -1117,6 +1116,7 @@ class CTRexResult(object):
"Done warmup? {arg}\n".format( arg = self.is_done_warmup() ) +
"Expected tx rate: {arg}\n".format( arg = self.get_expected_tx_rate() ) +
"Current tx rate: {arg}\n".format( arg = self.get_current_tx_rate() ) +
+ "Minimum latency: {arg}\n".format( arg = self.get_min_latency() ) +
"Maximum latency: {arg}\n".format( arg = self.get_max_latency() ) +
"Average latency: {arg}\n".format( arg = self.get_avg_latency() ) +
"Average window latency: {arg}\n".format( arg = self.get_avg_window_latency() ) +
@@ -1163,6 +1163,36 @@ class CTRexResult(object):
"""
return self._max_latency
+ def get_min_latency (self):
+ """
+ Fetches the minimum latency measured on each of the interfaces
+
+ :parameters:
+ None
+
+ :return:
+           dictionary containing the minimum latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ """
+ return self._min_latency
+
+
+
+ def get_jitter_latency (self):
+ """
+ Fetches the jitter latency measured on each of the interfaces from the start of TRex run
+
+ :parameters:
+ None
+
+ :return:
+           dictionary containing the latency jitter, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+           The `all` key represents the average jitter over all interfaces
+
+ """
+ return self._jitter_latency
+
def get_avg_latency (self):
"""
Fetches the average latency measured on each of the interfaces from the start of TRex run
@@ -1398,8 +1428,11 @@ class CTRexResult(object):
latency_per_port = self.get_last_value("trex-latency-v2.data", "port-")
self._max_latency = self.__get_filtered_max_latency(latency_per_port, self.filtered_latency_amount)
+ self._min_latency = self.__get_filtered_min_latency(latency_per_port)
avg_latency = self.get_last_value("trex-latency.data", "avg-")
self._avg_latency = CTRexResult.__avg_all_and_rename_keys(avg_latency)
+ jitter_latency = self.get_last_value("trex-latency.data", "jitter-")
+ self._jitter_latency = CTRexResult.__avg_all_and_rename_keys(jitter_latency)
avg_win_latency_list = self.get_value_list("trex-latency.data", "avg-")
self._avg_window_latency = CTRexResult.__calc_latency_win_stats(avg_win_latency_list)
@@ -1425,7 +1458,9 @@ class CTRexResult(object):
self._expected_tx_rate = None
self._current_tx_rate = None
self._max_latency = None
+ self._min_latency = None
self._avg_latency = None
+ self._jitter_latency = None
self._avg_window_latency = None
self._total_drops = None
self._drop_rate = None
@@ -1488,6 +1523,21 @@ class CTRexResult(object):
return res
@staticmethod
+ def __get_filtered_min_latency(src_dict):
+ result = {}
+ if src_dict:
+ for port, data in src_dict.items():
+ if not port.startswith('port-'):
+ continue
+ res = data['hist']['min_usec']
+ min_port = 'min-%s' % port[5:]
+ result[min_port] = int(res)
+
+ return(result);
+
+
+
+ @staticmethod
def __get_filtered_max_latency (src_dict, filtered_latency_amount = 0.001):
result = {}
if src_dict:
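
For clarity, the shape of the data the new __get_filtered_min_latency() consumes and produces; the microsecond values are invented, the key layout follows the code above:

    latency_per_port = {
        'port-0'   : {'hist': {'min_usec': 7}},
        'port-1'   : {'hist': {'min_usec': 9}},
        'unrelated': {},                         # skipped: no 'port-' prefix
    }
    result = {'min-%s' % k[5:]: int(v['hist']['min_usec'])
              for k, v in latency_per_port.items() if k.startswith('port-')}
    print(result)                                # {'min-0': 7, 'min-1': 9}
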
diff --git a/scripts/automation/trex_control_plane/stl/console/trex_console.py b/scripts/automation/trex_control_plane/stl/console/trex_console.py
index 270ef31c..83f36820 100755
--- a/scripts/automation/trex_control_plane/stl/console/trex_console.py
+++ b/scripts/automation/trex_control_plane/stl/console/trex_console.py
@@ -859,10 +859,10 @@ def show_intro (logger, c):
# find out which NICs the server has
port_types = {}
for port in x['ports']:
- if 'supp_speeds' in port:
+ if 'supp_speeds' in port and port['supp_speeds']:
speed = max(port['supp_speeds']) // 1000
else:
- speed = port['speed']
+ speed = c.ports[port['index']].get_speed_gbps()
key = (speed, port.get('description', port['driver']))
if key not in port_types:
port_types[key] = 0
@@ -927,17 +927,16 @@ def main():
if options.readonly:
logger.log(format_text("\nRead only mode - only few commands will be available", 'bold'))
- show_intro(logger, stateless_client)
-
-
- # a script mode
- if options.batch:
- cont = run_script_file(options.batch[0], stateless_client)
- if not cont:
- return
-
# console
try:
+ show_intro(logger, stateless_client)
+
+ # a script mode
+ if options.batch:
+ cont = run_script_file(options.batch[0], stateless_client)
+ if not cont:
+ return
+
console = TRexConsole(stateless_client, options.verbose)
logger.prompt_redraw = console.prompt_redraw
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
index 654b98f7..7b19896b 100755
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_service.py
@@ -264,6 +264,34 @@ class Scapy_service_api():
"""
pass
+ def get_templates(self,client_v_handler):
+ """ get_templates(self,client_v_handler)
+
+ Returns an array of templates, which normally can be used for creating packet
+
+ Parameters
+ ----------
+
+ Returns
+ -------
+ array of templates
+ """
+ pass
+
+ def get_template(self,client_v_handler,template):
+ """ get_template(self,client_v_handler,template)
+
+ Returns a template, which normally can be used for creating packet
+
+ Parameters
+ ----------
+
+ Returns
+ -------
+ base64 of template content
+ """
+ pass
+
def is_python(version):
return version == sys.version_info[0]
@@ -853,7 +881,7 @@ class Scapy_service(Scapy_service_api):
for instruction_def in instructions_def:
instruction_id = instruction_def['id']
instruction_class = self._vm_instructions[instruction_id]
- parameters = {k: self._sanitize_value(k, v) for (k, v) in instruction_def['parameters'].iteritems()}
+ parameters = {k: self._sanitize_value(k, v) for (k, v) in instruction_def['parameters'].items()}
instructions.append(instruction_class(**parameters))
fe_parameters = field_engine_model_descriptor['global_parameters']
@@ -933,7 +961,7 @@ class Scapy_service(Scapy_service_api):
else:
return pkt_class()
-
+
def _get_payload_classes(self, pkt_class):
# tries to find, which subclasses allowed.
# this can take long time, since it tries to build packets with all subclasses(O(N))
@@ -950,6 +978,61 @@ class Scapy_service(Scapy_service_api):
pass
return allowed_subclasses
+
+
+ def _get_templates(self):
+        # os.walk("templates") yields (root, subdirs, files) for every directory it visits:
+        #   root    - the directory currently being walked
+        #   subdirs - sub-directories of root
+        #   files   - non-directory entries directly under root
+        # Paths are built with os.path.join(root, file) rather than string concatenation,
+        # so the code works with whatever separator the OS uses.
+
+ templates = []
+ for root, subdirs, files in os.walk("templates"):
+ for file in files:
+ if file.endswith('.trp'):
+ try:
+ f = os.path.join(root, file)
+ o = open(f)
+ c = json.loads(o.read())
+ o.close()
+ id = f.replace("templates" + os.path.sep, "", 1)
+ id = id.split(os.path.sep)
+ id[-1] = id[-1].replace(".trp", "", 1)
+ id = "/".join(id)
+ t = {
+ "id": id,
+ "meta": {
+ "name": c["metadata"]["caption"],
+ "description": ""
+ }
+ }
+ templates.append(t)
+ except:
+ pass
+ return templates
+
+ def _get_template(self,template):
+ id = template["id"]
+ f2 = "templates" + os.path.sep + os.path.sep.join(id.split("/")) + ".trp"
+ for c in r'[]\;,><&*:%=+@!#^()|?^':
+ id = id.replace(c,'')
+ id = id.replace("..", "")
+ id = id.split("/")
+ f = "templates" + os.path.sep + os.path.sep.join(id) + ".trp"
+ if f != f2:
+ return ""
+ with open(f, 'r') as content_file:
+ content = base64.b64encode(content_file.read())
+ return content
+
+
def _get_fields_definition(self, pkt_class, fieldsDef):
# fieldsDef - array of field definitions(or empty array)
fields = []
@@ -1010,6 +1093,12 @@ class Scapy_service(Scapy_service_api):
return protocolDef['payload']
return [c.__name__ for c in self._get_payload_classes(pkt_class)]
+ def get_templates(self,client_v_handler):
+ return self._get_templates()
+
+ def get_template(self,client_v_handler,template):
+ return self._get_template(template)
+
#input in string encoded base64
def check_update_of_dbs(self,client_v_handler,db_md5,field_md5):
if not (self._verify_version_handler(client_v_handler)):
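
To make the id scheme of the new template API concrete, a sketch of how one of the .trp files added below maps to the id/meta entry returned by get_templates(); it assumes the working directory is the scapy_server directory:

    import json, os

    path = os.path.join('templates', 'IPv4', 'TCP.trp')
    with open(path) as f:
        meta = json.load(f)['metadata']

    entry = {'id'  : 'IPv4/TCP',   # path relative to 'templates', '/'-separated, '.trp' stripped
             'meta': {'name': meta['caption'], 'description': ''}}

get_template({'id': 'IPv4/TCP'}) then returns the file content base64-encoded, after rejecting ids that contain path tricks such as '..'.
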
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py
index 6489b36a..0788229a 100755
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/scapy_zmq_server.py
@@ -103,7 +103,10 @@ class Scapy_server():
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REP)
self.socket.bind("tcp://*:"+str(port))
- self.IP_address = socket.gethostbyname(socket.gethostname())
+ try:
+ self.IP_address = socket.gethostbyname(socket.gethostname())
+ except:
+ self.IP_address = '0.0.0.0'
self.logger = logging.getLogger('scapy_logger')
self.logger.setLevel(logging.INFO)
console_h = logging.StreamHandler(sys.__stdout__)
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/ICMP echo request.trp b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/ICMP echo request.trp
new file mode 100644
index 00000000..f8988a5f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/ICMP echo request.trp
@@ -0,0 +1 @@
+{"fileType":"trex-packet-editor","version":"1.0.0","metadata":{"caption":"ICMP echo request"},"packet":[{"id":"Ether","fields":[]},{"id":"IP","fields":[]},{"id":"ICMP","fields":[{"id":"type","value":"8"}]}],"fePrarameters":{"cache_size":"1000"},"feInstructions":[]} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/ICMP.trp b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/ICMP.trp
new file mode 100644
index 00000000..4ab1a1ae
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/ICMP.trp
@@ -0,0 +1 @@
+{"fileType":"trex-packet-editor","version":"1.0.0","metadata":{"caption":"ICMP echo request"},"packet":[{"id":"Ether","fields":[{"id":"type","value":"2048"}]},{"id":"IP","fields":[]},{"id":"ICMP","fields":[]}],"fePrarameters":{"cache_size":"1000"},"feInstructions":[]} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/TCP.trp b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/TCP.trp
new file mode 100644
index 00000000..6c94592c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/TCP.trp
@@ -0,0 +1 @@
+{"fileType":"trex-packet-editor","version":"1.0.0","metadata":{"caption":"ICMP echo request"},"packet":[{"id":"Ether","fields":[{"id":"type","value":"2048"}]},{"id":"IP","fields":[]},{"id":"TCP","fields":[]}],"fePrarameters":{"cache_size":"1000"},"feInstructions":[]} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/UDP.trp b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/UDP.trp
new file mode 100644
index 00000000..bef92993
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv4/UDP.trp
@@ -0,0 +1 @@
+{"fileType":"trex-packet-editor","version":"1.0.0","metadata":{"caption":"ICMP echo request"},"packet":[{"id":"Ether","fields":[{"id":"type","value":"2048"}]},{"id":"IP","fields":[]},{"id":"UDP","fields":[]}],"fePrarameters":{"cache_size":"1000"},"feInstructions":[]} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/ICMP.trp b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/ICMP.trp
new file mode 100644
index 00000000..c0387a0a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/ICMP.trp
@@ -0,0 +1 @@
+{"fileType":"trex-packet-editor","version":"1.0.0","metadata":{"caption":"ICMP echo request"},"packet":[{"id":"Ether","fields":[{"id":"type","value":"34525"}]},{"id":"IPv6","fields":[]},{"id":"ICMPv6ND_Redirect","fields":[]}],"fePrarameters":{"cache_size":"1000"},"feInstructions":[]} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/TCP.trp b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/TCP.trp
new file mode 100644
index 00000000..1cb9576f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/TCP.trp
@@ -0,0 +1 @@
+{"fileType":"trex-packet-editor","version":"1.0.0","metadata":{"caption":"ICMP echo request"},"packet":[{"id":"Ether","fields":[{"id":"type","value":"34525"}]},{"id":"IPv6","fields":[]},{"id":"TCP","fields":[]}],"fePrarameters":{"cache_size":"1000"},"feInstructions":[]} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/UDP.trp b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/UDP.trp
new file mode 100644
index 00000000..da96ae89
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/IPv6/UDP.trp
@@ -0,0 +1 @@
+{"fileType":"trex-packet-editor","version":"1.0.0","metadata":{"caption":"ICMP echo request"},"packet":[{"id":"Ether","fields":[{"id":"type","value":"34525"}]},{"id":"IPv6","fields":[]},{"id":"UDP","fields":[]}],"fePrarameters":{"cache_size":"1000"},"feInstructions":[]} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/TCP-SYN.trp b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/TCP-SYN.trp
new file mode 100644
index 00000000..8d7668cc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/templates/TCP-SYN.trp
@@ -0,0 +1 @@
+{"fileType":"trex-packet-editor","version":"1.0.0","metadata":{"caption":"TCP-SYN"},"packet":[{"id":"Ether","fields":[]},{"id":"IP","fields":[]},{"id":"TCP","fields":[{"id":"flags","value":""}]}],"fePrarameters":{"cache_size":"1000"},"feInstructions":[]} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
index e48880e8..9836c794 100644
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/basetest.py
@@ -88,3 +88,12 @@ def adapt_json_protocol_fields(protocols_array):
# change structure for easier
if protocol.get("fields"):
protocol["fields"] = fields_to_map(protocol["fields"])
+
+def get_templates():
+ return pass_result(service.get_templates(v_handler))
+
+
+
+def get_template(t):
+ return pass_result(service.get_template(v_handler, t))
+
diff --git a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
index e1094a79..1ece5d1e 100644
--- a/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
+++ b/scripts/automation/trex_control_plane/stl/services/scapy_server/unit_tests/test_scapy_service.py
@@ -289,3 +289,43 @@ def test_generate_vm_instructions():
ttl_instruction = res['field_engine']['instructions']['instructions'][2]
assert(ttl_instruction['min_value'] == 32)
assert(ttl_instruction['max_value'] == 64)
+
+
+def test_get_templates():
+ tt = get_templates()
+ assert(tt[0]['id'])
+ assert(tt[7]["meta"]['name'])
+ try:
+ assert(tt[9]['id'])
+ except:
+ pass
+
+
+def test_get_template():
+ tt = get_templates()
+ t = tt[0]
+ res = get_template(t)
+ res2 = base64.b64decode(res)
+ obj = json.loads(res2)
+ assert(obj['packet'][0]['id'] == 'Ether')
+ assert(obj['packet'][1]['id'] == 'IP')
+ assert(obj['packet'][2]['id'] == 'ICMP')
+
+
+def test_get_template2():
+ tt = get_templates()
+ t = tt[7]
+ res = get_template(t)
+ res2 = base64.b64decode(res)
+ obj = json.loads(res2)
+ assert(obj['packet'][0]['id'] == 'Ether')
+ assert(obj['packet'][1]['id'] == 'IPv6')
+ assert(obj['packet'][2]['id'] == 'UDP')
+
+
+def test_get_template3():
+ tt = get_templates()
+ t = tt[7]
+ t["id"] = "../../" + t["id"]
+ res = get_template(t)
+ assert(res == '')
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
index c82d77fb..f7432107 100755
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_client.py
@@ -452,16 +452,16 @@ class CCommLink(object):
if not self.virtual:
return self.rpc_link.disconnect()
- def transmit(self, method_name, params = None, api_class = 'core'):
+ def transmit(self, method_name, params = None, api_class = 'core', retry = 0):
if self.virtual:
self._prompt_virtual_tx_msg()
_, msg = self.rpc_link.create_jsonrpc_v2(method_name, params, api_class)
print(msg)
return
else:
- return self.rpc_link.invoke_rpc_method(method_name, params, api_class)
+ return self.rpc_link.invoke_rpc_method(method_name, params, api_class, retry = retry)
- def transmit_batch(self, batch_list):
+ def transmit_batch(self, batch_list, retry = 0):
if self.virtual:
self._prompt_virtual_tx_msg()
print([msg
@@ -472,7 +472,7 @@ class CCommLink(object):
for command in batch_list:
batch.add(command.method, command.params, command.api_class)
# invoke the batch
- return batch.invoke()
+ return batch.invoke(retry = retry)
def _prompt_virtual_tx_msg(self):
print("Transmitting virtually over tcp://{server}:{port}".format(server=self.server,
@@ -2322,7 +2322,7 @@ class STLClient(object):
@__api_check(True)
- def stop (self, ports = None, rx_delay_ms = 10):
+ def stop (self, ports = None, rx_delay_ms = None):
"""
Stop port(s)
@@ -2356,6 +2356,12 @@ class STLClient(object):
if not rc:
raise STLError(rc)
+ if rx_delay_ms is None:
+ if self.ports[ports[0]].is_virtual(): # assume all ports have same type
+ rx_delay_ms = 100
+ else:
+ rx_delay_ms = 10
+
# remove any RX filters
rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
if not rc:
@@ -2827,7 +2833,7 @@ class STLClient(object):
@__api_check(True)
- def wait_on_traffic (self, ports = None, timeout = None, rx_delay_ms = 10):
+ def wait_on_traffic (self, ports = None, timeout = None, rx_delay_ms = None):
"""
.. _wait_on_traffic:
@@ -2871,6 +2877,12 @@ class STLClient(object):
if timer.has_expired():
raise STLTimeoutError(timeout)
+ if rx_delay_ms is None:
+ if self.ports[ports[0]].is_virtual(): # assume all ports have same type
+ rx_delay_ms = 100
+ else:
+ rx_delay_ms = 10
+
# remove any RX filters
rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
if not rc:
@@ -2965,7 +2977,7 @@ class STLClient(object):
:parameters:
ports - which ports to resolve
- retires - how many times to retry on each port (intervals of 100 milliseconds)
+ retries - how many times to retry on each port (intervals of 100 milliseconds)
verbose - log for each request the response
:raises:
+ :exe:'STLError'
@@ -3835,7 +3847,7 @@ class STLClient(object):
parsing_opts.SUPPORTED,
)
- opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
+ opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports())
if not opts:
return opts
@@ -3845,8 +3857,9 @@ class STLClient(object):
opts.flow_ctrl = parsing_opts.FLOW_CTRL_DICT.get(opts.flow_ctrl)
# if no attributes - fall back to printing the status
- if not list(filter(lambda x:x is not None, [opts.prom, opts.link, opts.led, opts.flow_ctrl, opts.supp])):
- self.show_stats_line("--ps --port {0}".format(' '.join(str(port) for port in opts.ports)))
+ if not list(filter(lambda opt:opt[0] not in ('all_ports', 'ports') and opt[1] is not None, opts._get_kwargs())):
+ ports = opts.ports if opts.ports else self.get_all_ports()
+ self.show_stats_line("--ps --port {0}".format(' '.join(str(port) for port in ports)))
return
if opts.supp:
@@ -3859,11 +3872,13 @@ class STLClient(object):
print(' Flow control: %s' % info['fc_supported'])
print('')
else:
- self.set_port_attr(opts.ports,
- opts.prom,
- opts.link,
- opts.led,
- opts.flow_ctrl)
+ if not opts.ports:
+ raise STLError('No acquired ports!')
+ self.set_port_attr(opts.ports,
+ opts.prom,
+ opts.link,
+ opts.led,
+ opts.flow_ctrl)
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
index ff07b59a..db216532 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
@@ -32,7 +32,7 @@ class BatchMessage(object):
id, msg = self.rpc_client.create_jsonrpc_v2(method_name, params, api_class, encode = False)
self.batch_list.append(msg)
- def invoke(self, block = False, chunk_size = 500000):
+ def invoke(self, block = False, chunk_size = 500000, retry = 0):
if not self.rpc_client.connected:
return RC_ERR("Not connected to server")
@@ -54,7 +54,7 @@ class BatchMessage(object):
return response_batch
else:
batch_json = json.dumps(self.batch_list)
- return self.rpc_client.send_msg(batch_json)
+ return self.rpc_client.send_msg(batch_json, retry = retry)
# JSON RPC v2.0 client
@@ -127,16 +127,16 @@ class JsonRpcClient(object):
return id, msg
- def invoke_rpc_method (self, method_name, params = None, api_class = 'core'):
+ def invoke_rpc_method (self, method_name, params = None, api_class = 'core', retry = 0):
if not self.connected:
return RC_ERR("Not connected to server")
id, msg = self.create_jsonrpc_v2(method_name, params, api_class)
- return self.send_msg(msg)
+ return self.send_msg(msg, retry = retry)
- def send_msg (self, msg):
+ def send_msg (self, msg, retry = 0):
# print before
if self.logger.check_verbose(self.logger.VERBOSE_HIGH):
self.verbose_msg("Sending Request To Server:\n\n" + self.pretty_json(msg) + "\n")
@@ -145,9 +145,9 @@ class JsonRpcClient(object):
buffer = msg.encode()
if self.zipper.check_threshold(buffer):
- response = self.send_raw_msg(self.zipper.compress(buffer))
+ response = self.send_raw_msg(self.zipper.compress(buffer), retry = retry)
else:
- response = self.send_raw_msg(buffer)
+ response = self.send_raw_msg(buffer, retry = retry)
if not response:
return response
@@ -175,16 +175,16 @@ class JsonRpcClient(object):
# low level send of string message
- def send_raw_msg (self, msg):
+ def send_raw_msg (self, msg, retry = 0):
- tries = 0
+ retry_left = retry
while True:
try:
self.socket.send(msg)
break
except zmq.Again:
- tries += 1
- if tries > 0:
+ retry_left -= 1
+ if retry_left < 0:
self.disconnect()
return RC_ERR("*** [RPC] - Failed to send message to server")
@@ -193,14 +193,14 @@ class JsonRpcClient(object):
self.reconnect()
raise e
- tries = 0
+ retry_left = retry
while True:
try:
response = self.socket.recv()
break
except zmq.Again:
- tries += 1
- if tries > 0:
+ retry_left -= 1
+ if retry_left < 0:
self.disconnect()
return RC_ERR("*** [RPC] - Failed to get server response from {0}".format(self.transport))
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
index 654514cb..31d752af 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_port.py
@@ -267,7 +267,7 @@ class Port(object):
def add_streams (self, streams_list):
# listify
- streams_list = streams_list if isinstance(streams_list, list) else [streams_list]
+ streams_list = listify(streams_list)
lookup = {}
@@ -341,7 +341,7 @@ class Port(object):
def remove_streams (self, stream_id_list):
# single element to list
- stream_id_list = stream_id_list if isinstance(stream_id_list, list) else [stream_id_list]
+ stream_id_list = listify(stream_id_list)
# verify existance
if not all([stream_id in self.streams for stream_id in stream_id_list]):
@@ -736,7 +736,7 @@ class Port(object):
"slave_handler": slave_handler,
"min_ipg_usec": min_ipg_usec if min_ipg_usec else 0}
- rc = self.transmit("push_remote", params)
+ rc = self.transmit("push_remote", params, retry = 4)
if rc.bad():
return self.err(rc.err())
@@ -908,6 +908,10 @@ class Port(object):
def get_layer_cfg (self):
return self.__attr['layer_cfg']
+
+ def is_virtual(self):
+ return self.info.get('is_virtual')
+
def is_l3_mode (self):
return self.get_layer_cfg()['ipv4']['state'] != 'none'
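
The listify() helper the two hunks above switch to is not part of this diff; it is assumed to be the usual one-liner from the stl utils, behavior-equivalent to the inline expression it replaces:

    def listify(x):
        return x if isinstance(x, list) else [x]

    assert listify(5)      == [5]
    assert listify([5, 6]) == [5, 6]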