Diffstat (limited to 'resources/libraries')
-rw-r--r--  resources/libraries/python/ContainerUtils.py          239
-rw-r--r--  resources/libraries/python/CoreDumpUtil.py              6
-rw-r--r--  resources/libraries/python/DUTSetup.py                  4
-rw-r--r--  resources/libraries/python/IPsecUtil.py               157
-rw-r--r--  resources/libraries/python/Trace.py                     6
-rw-r--r--  resources/libraries/python/VPPUtil.py                  34
-rw-r--r--  resources/libraries/python/VppConfigGenerator.py       12
-rw-r--r--  resources/libraries/python/VppCounters.py               2
-rw-r--r--  resources/libraries/python/topology.py                 30
-rw-r--r--  resources/libraries/robot/crypto/ipsec.robot           16
-rw-r--r--  resources/libraries/robot/shared/container.robot       81
-rw-r--r--  resources/libraries/robot/shared/default.robot         40
-rw-r--r--  resources/libraries/robot/shared/interfaces.robot      88
-rw-r--r--  resources/libraries/robot/shared/test_teardown.robot    1
14 files changed, 629 insertions, 87 deletions
diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py
index 74add98359..d10360f79e 100644
--- a/resources/libraries/python/ContainerUtils.py
+++ b/resources/libraries/python/ContainerUtils.py
@@ -15,11 +15,14 @@
from collections import OrderedDict, Counter
from io import open
+from re import search
from string import Template
+from time import sleep
from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
@@ -153,6 +156,12 @@ class ContainerManager:
self.engine.container = self.containers[container]
self.engine.restart_vpp()
+ def verify_vpp_in_all_containers(self):
+ """Verify that VPP is installed and running in all containers."""
+ for container in self.containers:
+ self.engine.container = self.containers[container]
+ self.engine.verify_vpp()
+
def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
"""Configure VPP in all containers.
@@ -208,6 +217,17 @@ class ContainerManager:
mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
guest_dir=guest_dir, **kwargs
)
+ elif chain_topology == u"chain_vswitch":
+ self._configure_vpp_chain_vswitch(
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir, **kwargs)
+ elif chain_topology == u"chain_ipsec":
+ idx_match = search(r"\d+$", self.engine.container.name)
+ if idx_match:
+ idx = int(idx_match.group())
+ self._configure_vpp_chain_ipsec(
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir, nf_instance=idx, **kwargs)
else:
raise RuntimeError(
f"Container topology {chain_topology} not implemented"
@@ -299,6 +319,134 @@ class ContainerManager:
vif1_mac=vif1_mac, vif2_mac=vif2_mac
)
+ def _configure_vpp_chain_vswitch(self, **kwargs):
+ """Configure VPP as vswitch in container.
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ dut = self.engine.container.name.split(u"_")[0]
+ if dut == u"DUT1":
+ if1_pci = Topology.get_interface_pci_addr(
+ self.engine.container.node, kwargs[u"dut1_if2"])
+ if2_pci = Topology.get_interface_pci_addr(
+ self.engine.container.node, kwargs[u"dut1_if1"])
+ if_red_name = Topology.get_interface_name(
+ self.engine.container.node, kwargs[u"dut1_if2"])
+ if_black_name = Topology.get_interface_name(
+ self.engine.container.node, kwargs[u"dut1_if1"])
+ tg_if_ip4 = kwargs[u"tg_if2_ip4"]
+ tg_if_mac = kwargs[u"tg_if2_mac"]
+ else:
+ tg_if_ip4 = kwargs[u"tg_if1_ip4"]
+ tg_if_mac = kwargs[u"tg_if1_mac"]
+ if1_pci = Topology.get_interface_pci_addr(
+ self.engine.container.node, kwargs[u"dut2_if1"])
+ if2_pci = Topology.get_interface_pci_addr(
+ self.engine.container.node, kwargs[u"dut2_if2"])
+ if_red_name = Topology.get_interface_name(
+ self.engine.container.node, kwargs[u"dut2_if1"])
+ if_black_name = Topology.get_interface_name(
+ self.engine.container.node, kwargs[u"dut2_if2"])
+
+ n_instances = int(kwargs[u"n_instances"])
+ rxq = 1
+ if u"rxq" in kwargs:
+ rxq = int(kwargs[u"rxq"])
+ buffers = 215040
+ if u"buffers" in kwargs:
+ buffers = int(kwargs[u"buffers"])
+ nodes = kwargs[u"nodes"]
+ cpuset_cpus = CpuUtils.get_affinity_nf(
+ nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
+ nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
+ )
+ self.engine.create_vpp_startup_config_vswitch(
+ cpuset_cpus, rxq, buffers, if1_pci, if2_pci
+ )
+
+ instances = []
+ for i in range(1, n_instances + 1):
+ instances.append(
+ f"create interface memif id {i} socket-id 1 master\n"
+ f"set interface state memif1/{i} up\n"
+ f"set interface l2 bridge memif1/{i} 1\n"
+ f"create interface memif id {i} socket-id 2 master\n"
+ f"set interface state memif2/{i} up\n"
+ f"set interface l2 bridge memif2/{i} 2\n"
+ f"set ip arp memif2/{i} {tg_if_ip4} {tg_if_mac} "
+ f"static\n\n"
+ )
+
+ self.engine.create_vpp_exec_config(
+ u"memif_create_chain_vswitch_ipsec.exec",
+ socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
+ socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
+ if_red_name=if_red_name,
+ if_black_name=if_black_name,
+ instances=u"\n\n".join(instances))
+
+
+ def _configure_vpp_chain_ipsec(self, **kwargs):
+ """Configure VPP in container with memifs.
+
+ :param kwargs: Named parameters.
+ :param kwargs: dict
+ """
+ nf_nodes = int(kwargs[u"nf_nodes"])
+ nf_instance = int(kwargs[u"nf_instance"])
+ nodes = kwargs[u"nodes"]
+ dut = self.engine.container.name.split(u"_")[0]
+ cpuset_cpus = CpuUtils.get_affinity_nf(
+ nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
+ nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
+ self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
+ local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]
+
+ if dut == u"DUT1":
+ tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
+ tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
+ remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
+ tg_if_ip4 = kwargs[u"tg_if1_ip4"]
+ tg_if_mac = kwargs[u"tg_if1_mac"]
+ raddr_ip4 = kwargs[u"laddr_ip4"]
+ l_mac1 = 17
+ l_mac2 = 18
+ r_mac = 1
+ else:
+ tnl_local_ip = f"{local_ip_base}.{nf_instance}"
+ tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
+ remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
+ tg_if_ip4 = kwargs[u"tg_if2_ip4"]
+ tg_if_mac = kwargs[u"tg_if2_mac"]
+ raddr_ip4 = kwargs[u"raddr_ip4"]
+ l_mac1 = 1
+ l_mac2 = 2
+ r_mac = 17
+
+ self.engine.create_vpp_exec_config(
+ u"memif_create_chain_ipsec.exec",
+ socket1=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-1",
+ socket2=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-2",
+ mid1=nf_instance,
+ mid2=nf_instance,
+ sid1=u"1",
+ sid2=u"2",
+ mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
+ mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
+ tg_if2_ip4=tg_if_ip4,
+ tg_if2_mac=tg_if_mac,
+ raddr_ip4=raddr_ip4,
+ tnl_local_ip=tnl_local_ip,
+ tnl_remote_ip=tnl_remote_ip,
+ tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
+ remote_ip=f"{remote_ip_base}.{nf_instance}"
+ )
+ self.engine.execute(
+ f"cat {kwargs['guest_dir']}/ipsec_create_tunnel_cnf_"
+ f"{dut}_{nf_instance}.config >> /tmp/running.exec"
+ )
+
def _configure_vpp_pipeline_ip4(self, **kwargs):
"""Configure VPP in pipeline topology with ip4.
@@ -448,15 +596,13 @@ class ContainerEngine:
self.container.node,
SocketType.PAPI,
self.container.name,
- f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
- f"api.sock"
+ f"/tmp/vpp_sockets/{self.container.name}/api.sock"
)
topo_instance.add_new_socket(
self.container.node,
SocketType.STATS,
self.container.name,
- f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
- f"stats.sock"
+ f"/tmp/vpp_sockets/{self.container.name}/stats.sock"
)
def restart_vpp(self):
@@ -464,13 +610,37 @@ class ContainerEngine:
self.execute(u"supervisorctl restart vpp")
self.execute(u"cat /tmp/supervisord.log")
- def create_base_vpp_startup_config(self):
+ # TODO Rewrite .execute to accept retries parameter and get rid of this
+ # function.
+ def verify_vpp(self, retries=120, retry_wait=1):
+ """Verify that VPP is installed and running inside container.
+
+ :param retries: Check for VPP this number of times. Default: 120.
+ :param retry_wait: Wait for this number of seconds between retries.
+ :type retries: int
+ :type retry_wait: int
+ """
+ cmd = (u"vppctl show pci 2>&1 | "
+ u"fgrep -v 'Connection refused' | "
+ u"fgrep -v 'No such file or directory'")
+
+ for _ in range(retries + 1):
+ try:
+ self.execute(cmd)
+ break
+ except RuntimeError:
+ sleep(retry_wait)
+ else:
+ msg = f"VPP did not come up in container: {self.container.name}"
+ raise RuntimeError(msg)
+
+ def create_base_vpp_startup_config(self, cpuset_cpus=None):
"""Create base startup configuration of VPP on container.
+ :param cpuset_cpus: List of CPU cores to allocate.
+ :type cpuset_cpus: list
:returns: Base VPP startup configuration.
:rtype: VppConfigGenerator
"""
- cpuset_cpus = self.container.cpuset_cpus
+ if cpuset_cpus is None:
+ cpuset_cpus = self.container.cpuset_cpus
# Create config instance
vpp_config = VppConfigGenerator()
@@ -519,8 +689,7 @@ class ContainerEngine:
# Apply configuration
self.execute(u"mkdir -p /etc/vpp/")
self.execute(
- f'echo "{vpp_config.get_config_str()}" | '
- f'tee /etc/vpp/startup.conf'
+ f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
)
def create_vpp_startup_config_func_dev(self):
@@ -540,8 +709,58 @@ class ContainerEngine:
# Apply configuration
self.execute(u"mkdir -p /etc/vpp/")
self.execute(
- f'echo "{vpp_config.get_config_str()}" | '
- f'tee /etc/vpp/startup.conf'
+ f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
+ )
+
+ def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, buffers,
+ *devices):
+ """Create startup configuration of VPP vswitch.
+
+ :param cpuset_cpus: CPU list to run on.
+ :param rxq: Number of interface RX queues.
+ :param buffers: Number of buffers per numa.
+ :param devices: List of PCI devices to add.
+ :type cpuset_cpus: list
+ :type rxq: int
+ :type buffers: int
+ :type devices: list
+ """
+ vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
+ vpp_config.add_dpdk_dev(*devices)
+ vpp_config.add_dpdk_log_level(u"debug")
+ vpp_config.add_plugin(u"disable", u"default")
+ vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
+ vpp_config.add_plugin(u"enable", u"memif_plugin.so")
+ vpp_config.add_dpdk_no_tx_checksum_offload()
+ vpp_config.add_buffers_per_numa(buffers)
+ vpp_config.add_dpdk_dev_default_rxq(rxq)
+
+ # Apply configuration
+ self.execute(u"mkdir -p /etc/vpp/")
+ self.execute(
+ f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
+ )
+
+ def create_vpp_startup_config_ipsec(self, cpuset_cpus):
+ """Create startup configuration of VPP with IPsec on container.
+
+ :param cpuset_cpus: CPU list to run on.
+ :type cpuset_cpus: list
+ """
+ vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
+ vpp_config.add_plugin(u"disable", u"default")
+ vpp_config.add_plugin(u"enable", u"memif_plugin.so")
+ vpp_config.add_plugin(u"enable", u"crypto_ia32_plugin.so")
+ vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
+ vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
+ vpp_config.add_heapsize(u"4G")
+ vpp_config.add_ip_heap_size(u"4G")
+ vpp_config.add_statseg_size(u"4G")
+
+ # Apply configuration
+ self.execute(u"mkdir -p /etc/vpp/")
+ self.execute(
+ f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
)
def create_vpp_exec_config(self, template_file, **kwargs):
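For reference, ContainerEngine.verify_vpp() above leans on Python's for/else idiom: the else branch only runs when the loop was never broken, i.e. when every retry failed. A minimal standalone sketch of the same pattern, assuming only the standard library and a hypothetical check() callable in place of self.execute(cmd):

    from time import sleep

    def wait_until_ok(check, retries=120, retry_wait=1):
        """Call check() until it stops raising RuntimeError or retries run out."""
        for _ in range(retries + 1):
            try:
                check()
                break
            except RuntimeError:
                sleep(retry_wait)
        else:
            # Loop completed without break, so every attempt failed.
            raise RuntimeError(u"check() did not succeed in time")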
diff --git a/resources/libraries/python/CoreDumpUtil.py b/resources/libraries/python/CoreDumpUtil.py
index 3d40ffa25d..57f0c1d896 100644
--- a/resources/libraries/python/CoreDumpUtil.py
+++ b/resources/libraries/python/CoreDumpUtil.py
@@ -112,7 +112,7 @@ class CoreDumpUtil:
:param node: DUT Node in the topology.
:type node: dict
"""
- if node['type'] == NodeType.DUT and self.is_core_limit_enabled():
+ if node[u"type"] == NodeType.DUT and self.is_core_limit_enabled():
vpp_pid = DUTSetup.get_vpp_pid(node)
self.enable_coredump_limit(node, vpp_pid)
@@ -150,6 +150,6 @@ class CoreDumpUtil:
if disable_on_success:
self.set_core_limit_disabled()
except RuntimeError:
- # If compress was not successful ignore error and skip further
- # processing.
+ # If compress was not successful ignore error and skip
+ # further processing.
continue
diff --git a/resources/libraries/python/DUTSetup.py b/resources/libraries/python/DUTSetup.py
index 1a8899f6f6..00b553f833 100644
--- a/resources/libraries/python/DUTSetup.py
+++ b/resources/libraries/python/DUTSetup.py
@@ -132,6 +132,8 @@ class DUTSetup:
:type node: dict
:type service: str
"""
+ DUTSetup.get_service_logs(node, service)
+
command = f"supervisorctl stop {service}" \
if DUTSetup.running_in_container(node) \
else f"service {service} stop"
@@ -141,8 +143,6 @@ class DUTSetup:
node, command, timeout=180, sudo=True, message=message
)
- DUTSetup.get_service_logs(node, service)
-
@staticmethod
def stop_service_on_all_duts(nodes, service):
"""Stop the named service on all DUTs.
diff --git a/resources/libraries/python/IPsecUtil.py b/resources/libraries/python/IPsecUtil.py
index 8f464d5a05..9c3e5c712e 100644
--- a/resources/libraries/python/IPsecUtil.py
+++ b/resources/libraries/python/IPsecUtil.py
@@ -26,6 +26,7 @@ from resources.libraries.python.IPUtil import IPUtil
from resources.libraries.python.InterfaceUtil import InterfaceUtil, \
InterfaceStatusFlags
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
+from resources.libraries.python.ssh import scp_node
from resources.libraries.python.topology import Topology
from resources.libraries.python.VatExecutor import VatExecutor
@@ -1149,6 +1150,158 @@ class IPsecUtil:
papi_exec.get_replies(err_msg)
@staticmethod
+ def _create_ipsec_script_files(dut, instances):
+ """Create script files for configuring IPsec in containers
+
+ :param dut: DUT node on which to create the script files
+ :param instances: number of containers on DUT node
+ :type dut: string
+ :type instances: int
+ """
+ scripts = []
+ for cnf in range(0, instances):
+ script_filename = (
+ f"/tmp/ipsec_create_tunnel_cnf_{dut}_{cnf + 1}.config"
+ )
+ scripts.append(open(script_filename, 'w'))
+ return scripts
+
+ @staticmethod
+ def _close_and_copy_ipsec_script_files(
+ dut, nodes, instances, scripts):
+ """Close created scripts and copy them to containers
+
+ :param dut: DUT node on which to create the script files
+ :param nodes: VPP nodes
+ :param instances: number of containers on DUT node
+ :param scripts: dictionary holding the script files
+ :type dut: string
+ :type nodes: dict
+ :type instances: int
+ :type scripts: dict
+ """
+ for cnf in range(0, instances):
+ scripts[cnf].close()
+ script_filename = (
+ f"/tmp/ipsec_create_tunnel_cnf_{dut}_{cnf + 1}.config"
+ )
+ scp_node(nodes[dut], script_filename, script_filename)
+
+
+ @staticmethod
+ def vpp_ipsec_create_tunnel_interfaces_in_containers(
+ nodes, if1_ip_addr, if2_ip_addr, if1_key, if2_key, n_tunnels,
+ crypto_alg, integ_alg, raddr_ip1, raddr_ip2, raddr_range,
+ n_instances):
+ """Create multiple IPsec tunnel interfaces between two VPP nodes.
+
+ :param nodes: VPP nodes to create tunnel interfaces.
+ :param if1_ip_addr: VPP node 1 interface IP4 address.
+ :param if2_ip_addr: VPP node 2 interface IP4 address.
+ :param if1_key: VPP node 1 interface key from topology file.
+ :param if2_key: VPP node 2 interface key from topology file.
+ :param n_tunnels: Number of tunnel interfaces to create.
+ :param crypto_alg: The encryption algorithm name.
+ :param integ_alg: The integrity algorithm name.
+ :param raddr_ip1: Policy selector remote IPv4 start address for the
+ first tunnel in direction node1->node2.
+ :param raddr_ip2: Policy selector remote IPv4 start address for the
+ first tunnel in direction node2->node1.
+ :param raddr_range: Mask specifying range of Policy selector Remote
+ IPv4 addresses. Valid values are from 1 to 32.
+ :param n_instances: Number of containers.
+ :type nodes: dict
+ :type if1_ip_addr: str
+ :type if2_ip_addr: str
+ :type if1_key: str
+ :type if2_key: str
+ :type n_tunnels: int
+ :type crypto_alg: CryptoAlg
+ :type integ_alg: IntegAlg
+ :type raddr_ip1: string
+ :type raddr_ip2: string
+ :type raddr_range: int
+ :type n_instances: int
+ """
+ spi_1 = 100000
+ spi_2 = 200000
+ addr_incr = 1 << (32 - raddr_range)
+
+ dut1_scripts = IPsecUtil._create_ipsec_script_files(
+ u"DUT1", n_instances)
+ dut2_scripts = IPsecUtil._create_ipsec_script_files(
+ u"DUT2", n_instances)
+
+ for cnf in range(0, n_instances):
+ dut1_scripts[cnf].write(
+ u"create loopback interface\n"
+ u"set interface state loop0 up\n\n"
+ )
+ dut2_scripts[cnf].write(
+ f"ip route add {if1_ip_addr}/8 via "
+ f"{ip_address(if2_ip_addr) + cnf + 100} memif1/{cnf + 1}\n\n"
+ )
+
+ for tnl in range(0, n_tunnels):
+ tnl_incr = tnl * addr_incr
+ cnf = tnl % n_instances
+ i = tnl // n_instances
+ ckey = gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg)).hex()
+ integ = u""
+ if integ_alg:
+ ikey = gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg)).hex()
+ integ = (
+ f"integ-alg {integ_alg.alg_name} "
+ f"local-integ-key {ikey} "
+ f"remote-integ-key {ikey} "
+ )
+
+ # Configure tunnel end point(s) on left side
+ dut1_scripts[cnf].write(
+ u"set interface ip address loop0 "
+ f"{ip_address(if1_ip_addr) + tnl_incr}/32\n"
+ f"create ipsec tunnel "
+ f"local-ip {ip_address(if1_ip_addr) + tnl_incr} "
+ f"local-spi {spi_1 + tnl} "
+ f"remote-ip {ip_address(if2_ip_addr) + cnf} "
+ f"remote-spi {spi_2 + tnl} "
+ f"crypto-alg {crypto_alg.alg_name} "
+ f"local-crypto-key {ckey} "
+ f"remote-crypto-key {ckey} "
+ f"instance {i} "
+ f"salt 0x0 "
+ f"{integ} \n"
+ f"set interface unnumbered ipip{i} use loop0\n"
+ f"set interface state ipip{i} up\n"
+ f"ip route add {ip_address(raddr_ip2)+tnl}/32 via ipip{i}\n\n"
+ )
+
+ # Configure tunnel end point(s) on right side
+ dut2_scripts[cnf].write(
+ f"set ip arp memif1/{cnf + 1} "
+ f"{ip_address(if1_ip_addr) + tnl_incr} "
+ f"02:02:00:00:{17:02X}:{cnf:02X} static\n"
+ f"create ipsec tunnel local-ip {ip_address(if2_ip_addr) + cnf} "
+ f"local-spi {spi_2 + tnl} "
+ f"remote-ip {ip_address(if1_ip_addr) + tnl_incr} "
+ f"remote-spi {spi_1 + tnl} "
+ f"crypto-alg {crypto_alg.alg_name} "
+ f"local-crypto-key {ckey} "
+ f"remote-crypto-key {ckey} "
+ f"instance {i} "
+ f"salt 0x0 "
+ f"{integ}\n"
+ f"set interface unnumbered ipip{i} use memif1/{cnf + 1}\n"
+ f"set interface state ipip{i} up\n"
+ f"ip route add {ip_address(raddr_ip1) + tnl}/32 via ipip{i}\n\n"
+ )
+
+ IPsecUtil._close_and_copy_ipsec_script_files(
+ u"DUT1", nodes, n_instances, dut1_scripts)
+ IPsecUtil._close_and_copy_ipsec_script_files(
+ u"DUT2", nodes, n_instances, dut2_scripts)
+
+ @staticmethod
def vpp_ipsec_add_multiple_tunnels(
nodes, interface1, interface2, n_tunnels, crypto_alg, integ_alg,
tunnel_ip1, tunnel_ip2, raddr_ip1, raddr_ip2, raddr_range):
@@ -1166,8 +1319,8 @@ class IPsecUtil:
first tunnel in direction node1->node2.
:param raddr_ip2: Policy selector remote IPv4 start address for the
first tunnel in direction node2->node1.
- :param raddr_range: Mask specifying range of Policy selector Remote IPv4
- addresses. Valid values are from 1 to 32.
+ :param raddr_range: Mask specifying range of Policy selector Remote
+ IPv4 addresses. Valid values are from 1 to 32.
:type nodes: dict
:type interface1: str or int
:type interface2: str or int
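The per-tunnel values written into the scripts above are plain arithmetic over the starting addresses. A small sketch with made-up inputs (if1_ip_addr, raddr_range and n_instances are illustrative, not taken from the patch) showing how the tunnel-local IP, the serving container index and the static MAC strings are derived:

    from ipaddress import ip_address

    if1_ip_addr = u"10.0.0.1"
    raddr_range = 8
    n_instances = 4

    addr_incr = 1 << (32 - raddr_range)  # address step between tunnels

    for tnl in range(3):
        cnf = tnl % n_instances   # container serving this tunnel
        i = tnl // n_instances    # ipsec tunnel instance inside that container
        tnl_local_ip = ip_address(if1_ip_addr) + tnl * addr_incr
        # 17 decimal renders as 0x11, cnf is zero-based: 02:02:00:00:11:00, ...
        mac = f"02:02:00:00:{17:02X}:{cnf:02X}"
        print(tnl, cnf, i, tnl_local_ip, mac)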
diff --git a/resources/libraries/python/Trace.py b/resources/libraries/python/Trace.py
index c88150f72c..1fb645b36a 100644
--- a/resources/libraries/python/Trace.py
+++ b/resources/libraries/python/Trace.py
@@ -33,7 +33,8 @@ class Trace:
for node in nodes.values():
if node[u"type"] == NodeType.DUT:
- PapiSocketExecutor.run_cli_cmd(node, f"show trace {maximum}")
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+ node, f"show trace {maximum}")
@staticmethod
def clear_packet_trace_on_all_duts(nodes):
@@ -44,4 +45,5 @@ class Trace:
"""
for node in nodes.values():
if node[u"type"] == NodeType.DUT:
- PapiSocketExecutor.run_cli_cmd(node, u"clear trace")
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+ node, u"clear trace")
diff --git a/resources/libraries/python/VPPUtil.py b/resources/libraries/python/VPPUtil.py
index d3ab54766c..7dabb4fc61 100644
--- a/resources/libraries/python/VPPUtil.py
+++ b/resources/libraries/python/VPPUtil.py
@@ -19,7 +19,7 @@ from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import exec_cmd_no_error
-from resources.libraries.python.topology import NodeType
+from resources.libraries.python.topology import Topology, SocketType, NodeType
class VPPUtil:
@@ -55,13 +55,20 @@ class VPPUtil:
exec_cmd_no_error(node, command, timeout=30, sudo=True)
@staticmethod
- def restart_vpp_service(node):
+ def restart_vpp_service(node, node_key=None):
"""Restart VPP service on the specified topology node.
:param node: Topology node.
+ :param node_key: Topology node key.
:type node: dict
+ :type node_key: str
"""
DUTSetup.restart_service(node, Constants.VPP_UNIT)
+ if node_key:
+ Topology.add_new_socket(
+ node, SocketType.PAPI, node_key, Constants.SOCKSVR_PATH)
+ Topology.add_new_socket(
+ node, SocketType.STATS, node_key, Constants.SOCKSTAT_PATH)
@staticmethod
def restart_vpp_service_on_all_duts(nodes):
@@ -70,18 +77,23 @@ class VPPUtil:
:param nodes: Topology nodes.
:type nodes: dict
"""
- for node in nodes.values():
+ for node_key, node in nodes.items():
if node[u"type"] == NodeType.DUT:
- VPPUtil.restart_vpp_service(node)
+ VPPUtil.restart_vpp_service(node, node_key)
@staticmethod
- def stop_vpp_service(node):
+ def stop_vpp_service(node, node_key=None):
"""Stop VPP service on the specified topology node.
:param node: Topology node.
+ :param node_key: Topology node key.
:type node: dict
+ :type node_key: str
"""
DUTSetup.stop_service(node, Constants.VPP_UNIT)
+ if node_key:
+ Topology.del_node_socket_id(node, SocketType.PAPI, node_key)
+ Topology.del_node_socket_id(node, SocketType.STATS, node_key)
@staticmethod
def stop_vpp_service_on_all_duts(nodes):
@@ -90,9 +102,9 @@ class VPPUtil:
:param nodes: Topology nodes.
:type nodes: dict
"""
- for node in nodes.values():
+ for node_key, node in nodes.items():
if node[u"type"] == NodeType.DUT:
- VPPUtil.stop_vpp_service(node)
+ VPPUtil.stop_vpp_service(node, node_key)
@staticmethod
def verify_vpp_installed(node):
@@ -227,7 +239,7 @@ class VPPUtil:
for cmd in cmds:
try:
- PapiSocketExecutor.run_cli_cmd(node, cmd)
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(node, cmd)
except AssertionError:
if fail_on_error:
raise
@@ -253,7 +265,8 @@ class VPPUtil:
:param node: Topology node.
:type node: dict
"""
- PapiSocketExecutor.run_cli_cmd(node, "elog trace api cli barrier")
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+ node, u"elog trace api cli barrier")
@staticmethod
def vpp_enable_elog_traces_on_all_duts(nodes):
@@ -273,7 +286,8 @@ class VPPUtil:
:param node: Topology node.
:type node: dict
"""
- PapiSocketExecutor.run_cli_cmd(node, u"show event-logger")
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(
+ node, u"show event-logger")
@staticmethod
def show_event_logger_on_all_duts(nodes):
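The node_key plumbing added to restart_vpp_service() and stop_vpp_service() keeps the per-node socket registry in step with what run_cli_cmd_on_all_sockets() later iterates over. A hedged usage sketch, assuming a CSIT topology already loaded into a nodes dict keyed by u"DUT1":

    from resources.libraries.python.PapiExecutor import PapiSocketExecutor
    from resources.libraries.python.VPPUtil import VPPUtil

    node = nodes[u"DUT1"]  # nodes comes from the loaded topology

    # Restart VPP and (re)register its PAPI/STATS sockets under the node key.
    VPPUtil.restart_vpp_service(node, node_key=u"DUT1")

    # The CLI command now runs on every PAPI socket registered for the node,
    # e.g. the default VPP instance plus any container sockets added later.
    PapiSocketExecutor.run_cli_cmd_on_all_sockets(node, u"show version")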
diff --git a/resources/libraries/python/VppConfigGenerator.py b/resources/libraries/python/VppConfigGenerator.py
index 88fbb317c4..02f7cf725d 100644
--- a/resources/libraries/python/VppConfigGenerator.py
+++ b/resources/libraries/python/VppConfigGenerator.py
@@ -55,8 +55,8 @@ class VppConfigGenerator:
"""Initialize library."""
# VPP Node to apply configuration on
self._node = u""
- # VPP Hostname
- self._hostname = u""
+ # Topology node key
+ self._node_key = u""
# VPP Configuration
self._nodeconfig = dict()
# Serialized VPP Configuration
@@ -70,11 +70,13 @@ class VppConfigGenerator:
# VPP Startup config backup location
self._vpp_startup_conf_backup = None
- def set_node(self, node):
+ def set_node(self, node, node_key=None):
"""Set DUT node.
:param node: Node to store configuration on.
+ :param node_key: Topology node key.
:type node: dict
+ :type node_key: str
:raises RuntimeError: If Node type is not DUT.
"""
if node[u"type"] != NodeType.DUT:
@@ -82,7 +84,7 @@ class VppConfigGenerator:
u"Startup config can only be applied to DUTnode."
)
self._node = node
- self._hostname = Topology.get_node_hostname(node)
+ self._node_key = node_key
def set_vpp_logfile(self, logfile):
"""Set VPP logfile location.
@@ -612,7 +614,7 @@ class VppConfigGenerator:
"""
self.write_config(filename=filename)
- VPPUtil.restart_vpp_service(self._node)
+ VPPUtil.restart_vpp_service(self._node, self._node_key)
if verify_vpp:
VPPUtil.verify_vpp(self._node)
diff --git a/resources/libraries/python/VppCounters.py b/resources/libraries/python/VppCounters.py
index bb8a8d2c28..e6bb51ef4e 100644
--- a/resources/libraries/python/VppCounters.py
+++ b/resources/libraries/python/VppCounters.py
@@ -144,7 +144,7 @@ class VppCounters:
:param node: Node to run command on.
:type node: dict
"""
- PapiSocketExecutor.run_cli_cmd(
+ PapiSocketExecutor.run_cli_cmd_on_all_sockets(
node, u"show memory verbose api-segment stats-segment main-heap"
)
diff --git a/resources/libraries/python/topology.py b/resources/libraries/python/topology.py
index 92ade4a7a3..ed87edfa7e 100644
--- a/resources/libraries/python/topology.py
+++ b/resources/libraries/python/topology.py
@@ -41,12 +41,8 @@ def load_topo_from_yaml():
return safe_load(work_file.read())[u"nodes"]
-
class NodeType:
"""Defines node types used in topology dictionaries."""
- # TODO: Two letter initialisms are well-known, but too short for pylint.
- # Candidates: TG -> TGN, VM -> VNF.
-
# Device Under Test (this node has VPP running on it)
DUT = u"DUT"
# Traffic Generator (this node has traffic generator on it)
@@ -95,8 +91,8 @@ class Topology:
does not rely on the data retrieved from nodes, this allows to call most of
the methods without having filled active topology with internal nodes data.
"""
-
- def add_node_item(self, node, value, path):
+ @staticmethod
+ def add_node_item(node, value, path):
"""Add item to topology node.
:param node: Topology node.
@@ -114,7 +110,7 @@ class Topology:
elif isinstance(node[path[0]], str):
node[path[0]] = dict() if node[path[0]] == u"" \
else {node[path[0]]: u""}
- self.add_node_item(node[path[0]], value, path[1:])
+ Topology.add_node_item(node[path[0]], value, path[1:])
@staticmethod
def add_new_port(node, ptype):
@@ -1065,12 +1061,13 @@ class Topology:
except KeyError:
return None
- def add_new_socket(self, node, socket_type, socket_id, socket_path):
+ @staticmethod
+ def add_new_socket(node, socket_type, socket_id, socket_path):
"""Add socket file of specific SocketType and ID to node.
:param node: Node to add socket on.
:param socket_type: Socket type.
- :param socket_id: Socket id.
+ :param socket_id: Socket id, currently equal to the unique node key.
:param socket_path: Socket absolute path.
:type node: dict
:type socket_type: SocketType
@@ -1078,7 +1075,20 @@ class Topology:
:type socket_path: str
"""
path = [u"sockets", socket_type, socket_id]
- self.add_node_item(node, socket_path, path)
+ Topology.add_node_item(node, socket_path, path)
+
+ @staticmethod
+ def del_node_socket_id(node, socket_type, socket_id):
+ """Delete socket of specific SocketType and ID from node.
+
+ :param node: Node to delete socket from.
+ :param socket_type: Socket type.
+ :param socket_id: Socket id, currently equal to the unique node key.
+ :type node: dict
+ :type socket_type: SocketType
+ :type socket_id: str
+ """
+ node[u"sockets"][socket_type].pop(socket_id)
@staticmethod
def get_node_sockets(node, socket_type=None):
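Topology.add_node_item() creates the nested path on demand, which is what lets add_new_socket() register sockets on a node dict that has no u"sockets" key yet; del_node_socket_id() is its inverse. A minimal self-contained re-implementation of the same idea, using a plain dict instead of a real topology node:

    def add_node_item(node, value, path):
        """Create nested dicts along path and store value at the leaf."""
        if len(path) == 1:
            node[path[0]] = value
            return
        if path[0] not in node:
            node[path[0]] = dict()
        elif isinstance(node[path[0]], str):
            node[path[0]] = dict() if node[path[0]] == u"" \
                else {node[path[0]]: u""}
        add_node_item(node[path[0]], value, path[1:])

    node = dict()
    add_node_item(
        node, u"/tmp/vpp_sockets/DUT1_1/api.sock",
        [u"sockets", u"PAPI", u"DUT1_1"]
    )
    # {'sockets': {'PAPI': {'DUT1_1': '/tmp/vpp_sockets/DUT1_1/api.sock'}}}
    print(node)

    # Removal mirrors del_node_socket_id: pop the socket id from the registry.
    node[u"sockets"][u"PAPI"].pop(u"DUT1_1")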
diff --git a/resources/libraries/robot/crypto/ipsec.robot b/resources/libraries/robot/crypto/ipsec.robot
index 33702a43ae..20529dc2e2 100644
--- a/resources/libraries/robot/crypto/ipsec.robot
+++ b/resources/libraries/robot/crypto/ipsec.robot
@@ -197,3 +197,19 @@
| | ... | interface=${dut1_if1}
| | Vpp Route Add | ${dut2} | ${raddr_ip4} | 8 | gateway=${tg_if2_ip4}
| | ... | interface=${dut2_if2}
+
+| Initialize IPSec in 3-node circular container topology
+| | [Documentation]
+| | ... | Set UP state on VPP interfaces in path on DUT1 in 3-node circular
+| | ... | topology. Get the interface MAC addresses and set up ARP on the VPP
+| | ... | interface towards TG. Set up IPv4 address with /24 prefix on the
+| | ... | DUT1-TG link. Set routing for encrypted traffic on DUT1 with prefix
+| | ... | /8 and next hop of the TG interface IPv4 address.
+| |
+| | Set interfaces in path up on DUT | DUT1
+| | VPP Interface Set IP Address | ${dut1} | ${dut1_if1}
+| | ... | ${dut1_if1_ip4} | 24
+| | VPP Add IP Neighbor | ${dut1} | ${dut1_if1} | ${tg_if1_ip4} | ${tg_if1_mac}
+| | Vpp Route Add | ${dut1} | ${laddr_ip4} | 8 | gateway=${tg_if1_ip4}
+| | ... | interface=${dut1_if1}
diff --git a/resources/libraries/robot/shared/container.robot b/resources/libraries/robot/shared/container.robot
index 0b5cd3ee2b..4d82cf7086 100644
--- a/resources/libraries/robot/shared/container.robot
+++ b/resources/libraries/robot/shared/container.robot
@@ -214,9 +214,12 @@
| | ... | nodes=${nf_nodes}
| | ... | ELSE IF | '${container_chain_topology}' == 'chain_ipsec'
| | ... | ${group}.Configure VPP In All Containers | ${container_chain_topology}
+| | ... | tg_if1_ip4=${tg_if1_ip4} | tg_if1_mac=${tg_if1_mac}
| | ... | tg_if2_ip4=${tg_if2_ip4} | tg_if2_mac=${tg_if2_mac}
+| | ... | dut1_if1_ip4=${dut1_if1_ip4} | dut1_if2_ip4=${dut1_if2_ip4}
| | ... | dut2_if1_ip4=${dut2_if1_ip4} | dut2_if2_ip4=${dut2_if2_ip4}
-| | ... | raddr_ip4=${raddr_ip4} | nodes=${nodes} | nf_nodes=${nf_nodes}
+| | ... | raddr_ip4=${raddr_ip4} | laddr_ip4=${laddr_ip4}
+| | ... | nodes=${nodes} | nf_nodes=${nf_nodes}
| | ... | ELSE IF | '${container_chain_topology}' == 'pipeline_ip4'
| | ... | ${group}.Configure VPP In All Containers | ${container_chain_topology}
| | ... | tg_if1_mac=${tg_if1_mac} | tg_if2_mac=${tg_if2_mac}
@@ -239,6 +242,13 @@
| |
| | Run Keyword | ${group}.Destroy all containers
+| Verify VPP in all '${group}' containers
+| | [Documentation] | Verify that VPP is running inside containers in a
+| | ... | specific container group on all DUT nodes. Retries 120 times with
+| | ... | one second between retries.
+| |
+| | Run Keyword | ${group}.Verify VPP in all containers
+
| Start containers for test
| | [Documentation]
| | ... | Start containers for test.
@@ -275,3 +285,72 @@
| | Start VPP in all '${container_group}' containers
| | Append To List | ${container_groups} | ${container_group}
| | Save VPP PIDs
+
+# TODO: Remove the vswitch startup.conf and read the host configuration instead.
+| Start vswitch in container on DUT
+| | [Documentation]
+| | ... | Configure and start vswitch in container.
+| |
+| | ... | *Arguments:*
+| | ... | - dut - DUT node on which to install vswitch. Type: string
+| | ... | - phy_cores - Number of physical cores to use. Type: integer
+| | ... | - rx_queues - Number of RX queues. Type: integer
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Start vswitch in container on DUT \| DUT1 \| 1 \| 1 \|
+| |
+| | [Arguments] | ${dut} | ${phy_cores} | ${rx_queues}=${None}
+| |
+| | Set Test Variable | ${container_group} | VSWITCH
+| | Import Library | resources.libraries.python.ContainerUtils.ContainerManager
+| | ... | engine=${container_engine} | WITH NAME | VSWITCH
+| | Construct container on DUT | ${dut}
+| | ... | nf_chains=${1} | nf_nodes=${1} | nf_chain=${1}
+| | ... | nf_node=${1} | auto_scale=${False} | pinning=${False}
+| | Acquire all '${container_group}' containers
+| | Create all '${container_group}' containers
+| | ${cpu_count_int} | Convert to Integer | ${phy_cores}
+| | ${thr_count_int} | Convert to Integer | ${phy_cores}
+| | ${smt_used}= | Is SMT enabled | ${nodes['${dut}']['cpuinfo']}
+| | ${thr_count_int}= | Run keyword if | ${smt_used}
+| | ... | Evaluate | int(${cpu_count_int}*2)
+| | ... | ELSE | Set variable | ${thr_count_int}
+| | ${rxq_count_int}= | Run keyword if | ${rx_queues}
+| | ... | Set variable | ${rx_queues}
+| | ... | ELSE | Evaluate | int(${thr_count_int}/2)
+| | ${rxq_count_int}= | Run keyword if | ${rxq_count_int} == 0
+| | ... | Set variable | ${1}
+| | ... | ELSE | Set variable | ${rxq_count_int}
+| | VSWITCH.Configure VPP in all containers | chain_vswitch
+| | ... | rxq=${rxq_count_int} | n_instances=${n_instances}
+| | ... | buffers=${215040} | node=${dut}
+| | ... | dut1_if1=${dut1_if1} | dut1_if2=${dut1_if2}
+| | ... | dut2_if1=${dut2_if1} | dut2_if2=${dut2_if2}
+| | ... | dut2_if2_ip4=${dut2_if2_ip4}
+| | ... | tg_if1_ip4=${tg_if1_ip4} | tg_if1_mac=${tg_if1_mac}
+| | ... | tg_if2_ip4=${tg_if2_ip4} | tg_if2_mac=${tg_if2_mac}
+| | ... | nodes=${nodes}
+| | Start VPP in all '${container_group}' containers
+| | Verify VPP in all '${container_group}' containers
+
+| Start vswitch in container
+| | [Documentation]
+| | ... | Configure and start vswitch in container on all DUTs.
+| |
+| | ... | *Arguments:*
+| | ... | - phy_cores - Number of physical cores to use. Type: integer
+| | ... | - rx_queues - Number of RX queues. Type: integer
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Start vswitch in container \| 1 \| 1 \|
+| |
+| | [Arguments] | ${phy_cores} | ${rx_queues}=${None}
+| |
+| | FOR | ${dut} | IN | @{duts}
+| | | Run Keyword | Start vswitch in container on DUT
+| | | ... | ${dut} | ${phy_cores} | ${rx_queues}
+| | END
+| | Append To List | ${container_groups} | ${container_group}
+| | Save VPP PIDs
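The thread and queue arithmetic in "Start vswitch in container on DUT" is easy to misread in Robot syntax; an equivalent sketch in plain Python (phy_cores, smt_used and rx_queues mirror the keyword arguments):

    def vswitch_rxq_count(phy_cores, smt_used, rx_queues=None):
        """Threads double with SMT; rxq defaults to half the threads, min 1."""
        thr_count = phy_cores * 2 if smt_used else phy_cores
        rxq_count = rx_queues if rx_queues else thr_count // 2
        return rxq_count if rxq_count else 1

    print(vswitch_rxq_count(1, True))     # -> 1
    print(vswitch_rxq_count(2, False))    # -> 1
    print(vswitch_rxq_count(4, True, 2))  # -> 2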
diff --git a/resources/libraries/robot/shared/default.robot b/resources/libraries/robot/shared/default.robot
index ab8a409219..3b8f2804c1 100644
--- a/resources/libraries/robot/shared/default.robot
+++ b/resources/libraries/robot/shared/default.robot
@@ -119,7 +119,7 @@
| | FOR | ${dut} | IN | @{duts}
| | | Import Library | resources.libraries.python.VppConfigGenerator
| | | ... | WITH NAME | ${dut}
-| | | Run keyword | ${dut}.Set Node | ${nodes['${dut}']}
+| | | Run keyword | ${dut}.Set Node | ${nodes['${dut}']} | node_key=${dut}
| | | Run keyword | ${dut}.Add Unix Log
| | | Run keyword | ${dut}.Add Unix CLI Listen
| | | Run keyword | ${dut}.Add Unix Nodaemon
@@ -315,14 +315,30 @@
| |
| | FOR | ${dut} | IN | @{duts}
| | | Run keyword | ${dut}.Apply Config
-| | | Add New Socket | ${nodes['${dut}']} | PAPI | ${dut} | ${SOCKSVR_PATH}
-| | | Add New Socket | ${nodes['${dut}']} | STATS | ${dut} | ${SOCKSTAT_PATH}
| | END
| | Save VPP PIDs
| | Enable Coredump Limit VPP on All DUTs | ${nodes}
| | Update All Interface Data On All Nodes | ${nodes} | skip_tg=${True}
| | Run keyword If | ${with_trace} | VPP Enable Traces On All Duts | ${nodes}
+| Apply startup configuration on VPP DUT
+| | [Documentation] | Write VPP startup configuration and restart VPP DUT.
+| |
+| | ... | *Arguments:*
+| | ... | - dut - DUT on which to apply the configuration. Type: string
+| | ... | - with_trace - Enable packet trace after VPP restart. Type: boolean
+| |
+| | [Arguments] | ${dut} | ${with_trace}=${False}
+| |
+| | Run keyword | ${dut}.Apply Config
+| | Save VPP PIDs on DUT | ${dut}
+| | Enable Coredump Limit VPP on DUT | ${nodes['${dut}']}
+| | ${dutnode}= | Copy Dictionary | ${nodes}
+| | Keep In Dictionary | ${dutnode} | ${dut}
+| | Update All Interface Data On All Nodes | ${dutnode} | skip_tg=${True}
+| | Run keyword If | ${with_trace} | VPP Enable Traces On Dut
+| | ... | ${nodes['${dut}']}
+
| Save VPP PIDs
| | [Documentation] | Get PIDs of VPP processes from all DUTs in topology and\
| | ... | set it as a test variable. The PIDs are stored as dictionary items\
@@ -337,6 +353,24 @@
| | END
| | Set Test Variable | ${setup_vpp_pids}
+| Save VPP PIDs on DUT
+| | [Documentation] | Get PID of VPP processes from DUT and\
+| | ... | set it as a test variable. The PID is stored as dictionary item\
+| | ... | where the key is the host and the value is the PID.
+| |
+| | [Arguments] | ${dut}
+| |
+| | ${vpp_pids}= | Get VPP PID | ${nodes['${dut}']}
+| | Run Keyword If | ${vpp_pids} is None | FAIL
+| | ... | No VPP PID found on node ${nodes['${dut}']['host']}
+| | ${status} | ${message}= | Run Keyword And Ignore Error
+| | ... | Variable Should Exist | ${setup_vpp_pids}
+| | ${setup_vpp_pids}= | Run Keyword If | '${status}' == 'FAIL'
+| | ... | Create Dictionary | ${nodes['${dut}']['host']}=${vpp_pids}
+| | ... | ELSE | Set To Dictionary | ${setup_vpp_pids}
+| | ... | ${nodes['${dut}']['host']}=${vpp_pids}
+| | Set Test Variable | ${setup_vpp_pids}
+
| Verify VPP PID in Teardown
| | [Documentation] | Check if the VPP PIDs on all DUTs are the same at the end\
| | ... | of test as they were at the beginning. If they are not, only a message\
diff --git a/resources/libraries/robot/shared/interfaces.robot b/resources/libraries/robot/shared/interfaces.robot
index 3d2b70d065..128c3cb568 100644
--- a/resources/libraries/robot/shared/interfaces.robot
+++ b/resources/libraries/robot/shared/interfaces.robot
@@ -21,47 +21,61 @@
| | ... | *Set UP state on VPP interfaces in path on all DUT nodes and set
| | ... | maximal MTU.*
| |
+| | FOR | ${dut} | IN | @{duts}
+| | | Set interfaces in path up on DUT | ${dut}
+| | END
+| | All VPP Interfaces Ready Wait | ${nodes} | retries=${300}
+
+| Set interfaces in path up on DUT
+| | [Documentation]
+| | ... | *Set UP state on VPP interfaces in path on specified DUT node and
+| | ... | set maximal MTU.*
+| |
+| | ... | *Arguments:*
+| | ... | - dut - DUT node on which to set the interfaces up.
+| | ... | Type: string
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Set interfaces in path up on DUT \| DUT1 \|
+| |
+| | [Arguments] | ${dut}
# TODO: Rework KW to set all interfaces in path UP and set MTU (including
# software interfaces. Run KW at the start phase of VPP setup to split
# from other "functional" configuration. This will allow modularity of this
# library
-| | FOR | ${dut} | IN | @{duts}
-| | | ${if1_status} | ${value}= | Run Keyword And Ignore Error
-| | | ... | Variable Should Exist | ${${dut}_if1}
-| | | Run Keyword If | '${if1_status}' == 'PASS'
-| | | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if1} | up
-| | | ... | ELSE
-| | | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if1_1} | up
-| | | Run Keyword Unless | '${if1_status}' == 'PASS'
-| | | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if1_2} | up
-| | | ${if2_status} | ${value}= | Run Keyword And Ignore Error
-| | | ... | Variable Should Exist | ${${dut}_if2}
-| | | Run Keyword If | '${if2_status}' == 'PASS'
-| | | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if2} | up
-| | | ... | ELSE
-| | | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if2_1} | up
-| | | Run Keyword Unless | '${if2_status}' == 'PASS'
-| | | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if2_2} | up
-| | END
-| | FOR | ${dut} | IN | @{duts}
-| | | ${if1_status} | ${value}= | Run Keyword And Ignore Error
-| | | ... | Variable Should Exist | ${${dut}_if1}
-| | | Run Keyword If | '${if1_status}' == 'PASS'
-| | | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if1}
-| | | ... | ELSE
-| | | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if1_1}
-| | | Run Keyword Unless | '${if1_status}' == 'PASS'
-| | | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if1_2}
-| | | ${if2_status} | ${value}= | Run Keyword And Ignore Error
-| | | ... | Variable Should Exist | ${${dut}_if2}
-| | | Run Keyword If | '${if2_status}' == 'PASS'
-| | | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if2}
-| | | ... | ELSE
-| | | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if2_1}
-| | | Run Keyword Unless | '${if2_status}' == 'PASS'
-| | | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if2_2}
-| | END
-| | All VPP Interfaces Ready Wait | ${nodes} | retries=${300}
+| | ${if1_status} | ${value}= | Run Keyword And Ignore Error
+| | ... | Variable Should Exist | ${${dut}_if1}
+| | Run Keyword If | '${if1_status}' == 'PASS'
+| | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if1} | up
+| | ... | ELSE
+| | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if1_1} | up
+| | Run Keyword Unless | '${if1_status}' == 'PASS'
+| | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if1_2} | up
+| | ${if2_status} | ${value}= | Run Keyword And Ignore Error
+| | ... | Variable Should Exist | ${${dut}_if2}
+| | Run Keyword If | '${if2_status}' == 'PASS'
+| | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if2} | up
+| | ... | ELSE
+| | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if2_1} | up
+| | Run Keyword Unless | '${if2_status}' == 'PASS'
+| | ... | Set Interface State | ${nodes['${dut}']} | ${${dut}_if2_2} | up
+| | ${if1_status} | ${value}= | Run Keyword And Ignore Error
+| | ... | Variable Should Exist | ${${dut}_if1}
+| | Run Keyword If | '${if1_status}' == 'PASS'
+| | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if1}
+| | ... | ELSE
+| | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if1_1}
+| | Run Keyword Unless | '${if1_status}' == 'PASS'
+| | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if1_2}
+| | ${if2_status} | ${value}= | Run Keyword And Ignore Error
+| | ... | Variable Should Exist | ${${dut}_if2}
+| | Run Keyword If | '${if2_status}' == 'PASS'
+| | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if2}
+| | ... | ELSE
+| | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if2_1}
+| | Run Keyword Unless | '${if2_status}' == 'PASS'
+| | ... | VPP Set Interface MTU | ${nodes['${dut}']} | ${${dut}_if2_2}
| Set single interfaces in path up
| | [Documentation]
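The refactored keyword keeps the existing fallback: a DUT side is described either by a single ${dut_ifX} variable or by a ${dut_ifX_1}/${dut_ifX_2} pair, and whichever exists gets configured. A small Python sketch of that selection logic (the dicts are illustrative, not real topology data):

    def interfaces_in_path(variables, key):
        """Return the single interface, or both halves of a dual-link side."""
        if key in variables:
            return [variables[key]]
        return [variables[f"{key}_1"], variables[f"{key}_2"]]

    print(interfaces_in_path({u"DUT1_if1": u"port0"}, u"DUT1_if1"))
    print(interfaces_in_path(
        {u"DUT1_if1_1": u"port0", u"DUT1_if1_2": u"port1"}, u"DUT1_if1"
    ))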
diff --git a/resources/libraries/robot/shared/test_teardown.robot b/resources/libraries/robot/shared/test_teardown.robot
index 1e7d011fca..96d3cd825b 100644
--- a/resources/libraries/robot/shared/test_teardown.robot
+++ b/resources/libraries/robot/shared/test_teardown.robot
@@ -33,7 +33,6 @@
| |
| | Remove All Added Ports On All DUTs From Topology | ${nodes}
| | Show PAPI History On All DUTs | ${nodes}
-| | Show Log On All DUTs | ${nodes}
| | Run Keyword If Test Failed
| | ... | Get Core Files on All Nodes | ${nodes}
| | Run Keyword If Test Failed