Diffstat (limited to 'test')
-rw-r--r--  test/asf/asfframework.py          4
-rw-r--r--  test/test_vm_vpp_interfaces.py  359
-rw-r--r--  test/vm_test_config.py           10
-rw-r--r--  test/vpp_qemu_utils.py           63
-rw-r--r--  test/vpp_running.py               8
5 files changed, 305 insertions, 139 deletions
diff --git a/test/asf/asfframework.py b/test/asf/asfframework.py
index 5bcbfccd269..24880044cec 100644
--- a/test/asf/asfframework.py
+++ b/test/asf/asfframework.py
@@ -583,6 +583,10 @@ class VppAsfTestCase(CPUInterface, unittest.TestCase):
return "%s/api.sock" % cls.tempdir
@classmethod
+ def get_memif_sock_path(cls):
+ return "%s/memif.sock" % cls.tempdir
+
+ @classmethod
def get_api_segment_prefix(cls):
return os.path.basename(cls.tempdir) # Only used for VAPI
diff --git a/test/test_vm_vpp_interfaces.py b/test/test_vm_vpp_interfaces.py
index 917b95006c4..0c90325e35a 100644
--- a/test/test_vm_vpp_interfaces.py
+++ b/test/test_vm_vpp_interfaces.py
@@ -9,6 +9,7 @@ from vpp_qemu_utils import (
set_interface_mtu,
disable_interface_gso,
add_namespace_route,
+ libmemif_test_app,
)
from vpp_iperf import start_iperf, stop_iperf
from framework import VppTestCase
@@ -21,7 +22,7 @@ from vm_test_config import test_config
#
# Tests for:
-# - tapv2, tunv2 & af_packet_v2 & v3 interfaces.
+# - tapv2, tunv2, af_packet_v2/v3 & memif interfaces.
# - reads test config from the file vm_test_config.py
# - Uses iPerf to send TCP/IP streams to VPP
# - VPP ingress interface runs the iperf client
@@ -79,7 +80,11 @@ def create_test(test_name, test, ip_version, mtu):
vpp_interfaces=self.vpp_interfaces,
linux_interfaces=self.linux_interfaces,
)
- # Start the Iperf server in dual stack mode & run iperf client
+ if "memif" in self.if_types:
+ self.logger.debug("Starting libmemif test_app for memif test")
+ self.memif_process = libmemif_test_app(
+ memif_sock_path=self.get_memif_sock_path(), logger=self.logger
+ )
if result is True:
start_iperf(ip_version=6, server_only=True, logger=self.logger)
self.assertTrue(
@@ -156,8 +161,11 @@ class TestVPPInterfacesQemu(VppTestCase):
3. Cross-Connect interfaces in VPP using L2 or L3.
"""
super(TestVPPInterfacesQemu, self).setUp()
- client_if_type = test["client_if_type"]
- server_if_type = test["server_if_type"]
+ # Need to support multiple interface types as the memif interface
+ # in VPP is connected to the iPerf client & server by x-connecting
+ # to a tap interface in their respective namespaces.
+ client_if_types = test["client_if_type"].split(",")
+ server_if_types = test["server_if_type"].split(",")
client_if_version = test["client_if_version"]
server_if_version = test["server_if_version"]
x_connect_mode = test["x_connect_mode"]
@@ -188,6 +196,11 @@ class TestVPPInterfacesQemu(VppTestCase):
)
vpp_server_nexthop = str(ip_interface(vpp_server_prefix).ip)
create_namespace([client_namespace, server_namespace])
+ # IPerf client & server ingress/egress interface indexes in VPP
+ self.tap_interfaces = []
+ self.memif_interfaces = []
+ self.ingress_if_idxes = []
+ self.egress_if_idxes = []
self.vpp_interfaces = []
self.linux_interfaces = []
enable_client_if_gso = test.get("client_if_gso", 0)
@@ -197,155 +210,197 @@ class TestVPPInterfacesQemu(VppTestCase):
enable_client_if_checksum_offload = test.get("client_if_checksum_offload", 0)
enable_server_if_checksum_offload = test.get("server_if_checksum_offload", 0)
## Handle client interface types
- if client_if_type == "af_packet":
- create_host_interface(
- af_packet_config["iprf_client_interface_on_linux"],
- af_packet_config["iprf_client_interface_on_vpp"],
- client_namespace,
- layer2["client_ip4_prefix"]
- if x_connect_mode == "L2"
- else layer3["client_ip4_prefix"],
- layer2["client_ip6_prefix"]
- if x_connect_mode == "L2"
- else layer3["client_ip6_prefix"],
- )
- self.ingress_if_idx = self.create_af_packet(
- version=client_if_version,
- host_if_name=af_packet_config["iprf_client_interface_on_vpp"],
- enable_gso=enable_client_if_gso,
- )
- self.vpp_interfaces.append(self.ingress_if_idx)
- self.linux_interfaces.append(
- ["", af_packet_config["iprf_client_interface_on_vpp"]]
- )
- self.linux_interfaces.append(
- [client_namespace, af_packet_config["iprf_client_interface_on_linux"]]
- )
- if enable_client_if_gso == 0:
- disable_interface_gso(
- "", af_packet_config["iprf_client_interface_on_vpp"]
+ for client_if_type in client_if_types:
+ if client_if_type == "af_packet":
+ create_host_interface(
+ af_packet_config["iprf_client_interface_on_linux"],
+ af_packet_config["iprf_client_interface_on_vpp"],
+ client_namespace,
+ layer2["client_ip4_prefix"]
+ if x_connect_mode == "L2"
+ else layer3["client_ip4_prefix"],
+ layer2["client_ip6_prefix"]
+ if x_connect_mode == "L2"
+ else layer3["client_ip6_prefix"],
)
- disable_interface_gso(
- client_namespace, af_packet_config["iprf_client_interface_on_linux"]
+ self.ingress_if_idx = self.create_af_packet(
+ version=client_if_version,
+ host_if_name=af_packet_config["iprf_client_interface_on_vpp"],
+ enable_gso=enable_client_if_gso,
)
- elif client_if_type == "tap" or client_if_type == "tun":
- self.ingress_if_idx = self.create_tap_tun(
- id=101,
- host_namespace=client_namespace,
- ip_version=ip_version,
- host_ip4_prefix=layer2["client_ip4_prefix"]
- if x_connect_mode == "L2"
- else layer3["client_ip4_prefix"],
- host_ip6_prefix=layer2["client_ip6_prefix"]
- if x_connect_mode == "L2"
- else layer3["client_ip6_prefix"],
- host_ip4_gw=vpp_client_nexthop
- if x_connect_mode == "L3" and ip_version == 4
- else None,
- host_ip6_gw=vpp_client_nexthop
- if x_connect_mode == "L3" and ip_version == 6
- else None,
- int_type=client_if_type,
- host_if_name=f"{client_if_type}0",
- enable_gso=enable_client_if_gso,
- enable_gro=enable_client_if_gro,
- enable_checksum_offload=enable_client_if_checksum_offload,
- )
- self.vpp_interfaces.append(self.ingress_if_idx)
- self.linux_interfaces.append([client_namespace, f"{client_if_type}0"])
- # Seeing TCP timeouts if tx=on & rx=on Linux tap & tun interfaces
- disable_interface_gso(client_namespace, f"{client_if_type}0")
- else:
- print(
- f"Unsupported client interface type: {client_if_type} "
- f"for test - ID={test['id']}"
- )
- sys.exit(1)
-
- if server_if_type == "af_packet":
- create_host_interface(
- af_packet_config["iprf_server_interface_on_linux"],
- af_packet_config["iprf_server_interface_on_vpp"],
- server_namespace,
- server_ip4_prefix,
- server_ip6_prefix,
- )
- self.egress_if_idx = self.create_af_packet(
- version=server_if_version,
- host_if_name=af_packet_config["iprf_server_interface_on_vpp"],
- enable_gso=enable_server_if_gso,
- )
- self.vpp_interfaces.append(self.egress_if_idx)
- self.linux_interfaces.append(
- ["", af_packet_config["iprf_server_interface_on_vpp"]]
- )
- self.linux_interfaces.append(
- [server_namespace, af_packet_config["iprf_server_interface_on_linux"]]
- )
- if enable_server_if_gso == 0:
- disable_interface_gso(
- "", af_packet_config["iprf_server_interface_on_vpp"]
+ self.ingress_if_idxes.append(self.ingress_if_idx)
+ self.vpp_interfaces.append(self.ingress_if_idx)
+ self.linux_interfaces.append(
+ ["", af_packet_config["iprf_client_interface_on_vpp"]]
)
- disable_interface_gso(
- server_namespace, af_packet_config["iprf_server_interface_on_linux"]
+ self.linux_interfaces.append(
+ [
+ client_namespace,
+ af_packet_config["iprf_client_interface_on_linux"],
+ ]
)
- elif server_if_type == "tap" or server_if_type == "tun":
- self.egress_if_idx = self.create_tap_tun(
- id=102,
- host_namespace=server_namespace,
- ip_version=ip_version,
- host_ip4_prefix=layer2["server_ip4_prefix"]
- if x_connect_mode == "L2"
- else layer3["server_ip4_prefix"],
- host_ip6_prefix=layer2["server_ip6_prefix"]
- if x_connect_mode == "L2"
- else layer3["server_ip6_prefix"],
- int_type=server_if_type,
- host_if_name=f"{server_if_type}0",
- enable_gso=enable_server_if_gso,
- enable_gro=enable_server_if_gro,
- enable_checksum_offload=enable_server_if_checksum_offload,
- )
- self.vpp_interfaces.append(self.egress_if_idx)
- self.linux_interfaces.append([server_namespace, f"{server_if_type}0"])
- # Seeing TCP timeouts if tx=on & rx=on Linux tap & tun interfaces
- disable_interface_gso(server_namespace, f"{server_if_type}0")
- else:
- print(
- f"Unsupported server interface type: {server_if_type} "
- f"for test - ID={test['id']}"
- )
- sys.exit(1)
-
- if x_connect_mode == "L2":
- self.l2_connect_interfaces(1, self.ingress_if_idx, self.egress_if_idx)
- elif x_connect_mode == "L3":
- # L3 connect client & server side
- vrf_id = layer3["ip4_vrf"] if ip_version == 4 else layer3["ip6_vrf"]
- self.l3_connect_interfaces(
- ip_version,
- vrf_id,
- (self.ingress_if_idx, vpp_client_prefix),
- (self.egress_if_idx, vpp_server_prefix),
- )
- # Setup namespace routing
- if ip_version == 4:
- add_namespace_route(client_namespace, "0.0.0.0/0", vpp_client_nexthop)
- add_namespace_route(server_namespace, "0.0.0.0/0", vpp_server_nexthop)
+ if enable_client_if_gso == 0:
+ disable_interface_gso(
+ "", af_packet_config["iprf_client_interface_on_vpp"]
+ )
+ disable_interface_gso(
+ client_namespace,
+ af_packet_config["iprf_client_interface_on_linux"],
+ )
+ elif client_if_type == "tap" or client_if_type == "tun":
+ self.ingress_if_idx = self.create_tap_tun(
+ id=101,
+ host_namespace=client_namespace,
+ ip_version=ip_version,
+ host_ip4_prefix=layer2["client_ip4_prefix"]
+ if x_connect_mode == "L2"
+ else layer3["client_ip4_prefix"],
+ host_ip6_prefix=layer2["client_ip6_prefix"]
+ if x_connect_mode == "L2"
+ else layer3["client_ip6_prefix"],
+ host_ip4_gw=vpp_client_nexthop
+ if x_connect_mode == "L3" and ip_version == 4
+ else None,
+ host_ip6_gw=vpp_client_nexthop
+ if x_connect_mode == "L3" and ip_version == 6
+ else None,
+ int_type=client_if_type,
+ host_if_name=f"{client_if_type}0",
+ enable_gso=enable_client_if_gso,
+ enable_gro=enable_client_if_gro,
+ enable_checksum_offload=enable_client_if_checksum_offload,
+ )
+ self.tap_interfaces.append(self.ingress_if_idx)
+ self.ingress_if_idxes.append(self.ingress_if_idx)
+ self.vpp_interfaces.append(self.ingress_if_idx)
+ self.linux_interfaces.append([client_namespace, f"{client_if_type}0"])
+ # Seeing TCP timeouts if tx=on & rx=on Linux tap & tun interfaces
+ disable_interface_gso(client_namespace, f"{client_if_type}0")
+ elif client_if_type == "memif":
+ self.ingress_if_idx = self.create_memif(
+ memif_id=0, mode=0 if x_connect_mode == "L2" else 1
+ )
+ self.memif_interfaces.append(self.ingress_if_idx)
+ self.ingress_if_idxes.append(self.ingress_if_idx)
+ self.vpp_interfaces.append(self.ingress_if_idx)
+ else:
+ print(
+ f"Unsupported client interface type: {client_if_type} "
+ f"for test - ID={test['id']}"
+ )
+ sys.exit(1)
+ for server_if_type in server_if_types:
+ if server_if_type == "af_packet":
+ create_host_interface(
+ af_packet_config["iprf_server_interface_on_linux"],
+ af_packet_config["iprf_server_interface_on_vpp"],
+ server_namespace,
+ server_ip4_prefix,
+ server_ip6_prefix,
+ )
+ self.egress_if_idx = self.create_af_packet(
+ version=server_if_version,
+ host_if_name=af_packet_config["iprf_server_interface_on_vpp"],
+ enable_gso=enable_server_if_gso,
+ )
+ self.egress_if_idxes.append(self.egress_if_idx)
+ self.vpp_interfaces.append(self.egress_if_idx)
+ self.linux_interfaces.append(
+ ["", af_packet_config["iprf_server_interface_on_vpp"]]
+ )
+ self.linux_interfaces.append(
+ [
+ server_namespace,
+ af_packet_config["iprf_server_interface_on_linux"],
+ ]
+ )
+ if enable_server_if_gso == 0:
+ disable_interface_gso(
+ "", af_packet_config["iprf_server_interface_on_vpp"]
+ )
+ disable_interface_gso(
+ server_namespace,
+ af_packet_config["iprf_server_interface_on_linux"],
+ )
+ elif server_if_type == "tap" or server_if_type == "tun":
+ self.egress_if_idx = self.create_tap_tun(
+ id=102,
+ host_namespace=server_namespace,
+ ip_version=ip_version,
+ host_ip4_prefix=layer2["server_ip4_prefix"]
+ if x_connect_mode == "L2"
+ else layer3["server_ip4_prefix"],
+ host_ip6_prefix=layer2["server_ip6_prefix"]
+ if x_connect_mode == "L2"
+ else layer3["server_ip6_prefix"],
+ int_type=server_if_type,
+ host_if_name=f"{server_if_type}0",
+ enable_gso=enable_server_if_gso,
+ enable_gro=enable_server_if_gro,
+ enable_checksum_offload=enable_server_if_checksum_offload,
+ )
+ self.tap_interfaces.append(self.egress_if_idx)
+ self.egress_if_idxes.append(self.egress_if_idx)
+ self.vpp_interfaces.append(self.egress_if_idx)
+ self.linux_interfaces.append([server_namespace, f"{server_if_type}0"])
+ # Seeing TCP timeouts if tx=on & rx=on Linux tap & tun interfaces
+ disable_interface_gso(server_namespace, f"{server_if_type}0")
+ elif server_if_type == "memif":
+ self.egress_if_idx = self.create_memif(
+ memif_id=1, mode=0 if x_connect_mode == "L2" else 1
+ )
+ self.memif_interfaces.append(self.egress_if_idx)
+ self.egress_if_idxes.append(self.egress_if_idx)
+ self.vpp_interfaces.append(self.egress_if_idx)
else:
- add_namespace_route(client_namespace, "::/0", vpp_client_nexthop)
- add_namespace_route(server_namespace, "::/0", vpp_server_nexthop)
+ print(
+ f"Unsupported server interface type: {server_if_type} "
+ f"for test - ID={test['id']}"
+ )
+ sys.exit(1)
+ self.if_types = set(client_if_types).union(set(server_if_types))
+ # for memif testing: tapv2, memif & libmemif_app are connected
+ if "memif" not in self.if_types:
+ if x_connect_mode == "L2":
+ self.l2_connect_interfaces(1, self.ingress_if_idx, self.egress_if_idx)
+ elif x_connect_mode == "L3":
+ # L3 connect client & server side
+ vrf_id = layer3["ip4_vrf"] if ip_version == 4 else layer3["ip6_vrf"]
+ self.l3_connect_interfaces(
+ ip_version,
+ vrf_id,
+ (self.ingress_if_idx, vpp_client_prefix),
+ (self.egress_if_idx, vpp_server_prefix),
+ )
+ # Setup namespace routing
+ if ip_version == 4:
+ add_namespace_route(
+ client_namespace, "0.0.0.0/0", vpp_client_nexthop
+ )
+ add_namespace_route(
+ server_namespace, "0.0.0.0/0", vpp_server_nexthop
+ )
+ else:
+ add_namespace_route(client_namespace, "::/0", vpp_client_nexthop)
+ add_namespace_route(server_namespace, "::/0", vpp_server_nexthop)
+ else:
+            # x-connect the ingress tap & memif pair and the egress tap & memif pair
+ if x_connect_mode == "L2":
+ self.l2_connect_interfaces(1, *self.ingress_if_idxes)
+ self.l2_connect_interfaces(2, *self.egress_if_idxes)
# Wait for Linux IPv6 stack to become ready
if ip_version == 6:
time.sleep(2)
def tearDown(self):
try:
- self.vapi.tap_delete_v2(self.ingress_if_idx)
+ for interface_if_idx in self.tap_interfaces:
+ self.vapi.tap_delete_v2(sw_if_index=interface_if_idx)
except Exception:
pass
try:
- self.vapi.tap_delete_v2(self.egress_if_idx)
+ for interface_if_idx in self.memif_interfaces:
+ self.vapi.memif_delete(sw_if_index=interface_if_idx)
except Exception:
pass
try:
@@ -384,6 +439,11 @@ class TestVPPInterfacesQemu(VppTestCase):
except Exception:
pass
try:
+ self.vapi.bridge_domain_add_del_v2(bd_id=1, is_add=0)
+ self.vapi.bridge_domain_add_del_v2(bd_id=2, is_add=0)
+ except Exception:
+ pass
+ try:
delete_namespace(
[
client_namespace,
@@ -396,6 +456,12 @@ class TestVPPInterfacesQemu(VppTestCase):
stop_iperf()
except Exception:
pass
+ try:
+ if self.memif_process:
+ self.memif_process.terminate()
+ self.memif_process.join()
+ except Exception:
+ pass
def create_af_packet(self, version, host_if_name, enable_gso=0):
"""Create an af_packetv3 interface in VPP.
@@ -535,6 +601,21 @@ class TestVPPInterfacesQemu(VppTestCase):
self.vapi.sw_interface_set_flags(sw_if_index=sw_if_index, flags=1)
return sw_if_index
+ def create_memif(self, memif_id, mode):
+ """Create memif interface in VPP.
+
+ Parameters:
+ memif_id: A unique ID for the memif interface
+ mode: 0 = ethernet, 1 = ip, 2 = punt/inject
+ """
+ # create memif interface with role=0 (i.e. master)
+ result = self.vapi.memif_create_v2(
+ role=0, mode=mode, id=memif_id, buffer_size=9216
+ )
+ sw_if_index = result.sw_if_index
+ self.vapi.sw_interface_set_flags(sw_if_index=sw_if_index, flags=1)
+ return sw_if_index
+
def dump_bridge_domain_details(self, bd_id):
return self.vapi.bridge_domain_dump(bd_id=bd_id)
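
For the memif tests the L2 wiring above differs from the single x-connect used by the other interface types: the ingress tap & memif pair and the egress tap & memif pair each land in their own bridge domain, with the external libmemif test_app carrying traffic between the memif interfaces. A minimal standalone sketch of that grouping follows; the sw_if_index values are made up for illustration and no VPP calls are made:

    # Illustrative only -- hypothetical sw_if_index values, no VPP calls.
    ingress_if_idxes = [1, 3]   # e.g. client-side tap and memif
    egress_if_idxes = [2, 4]    # e.g. server-side tap and memif

    bridge_domains = {
        1: ingress_if_idxes,    # mirrors l2_connect_interfaces(1, *self.ingress_if_idxes)
        2: egress_if_idxes,     # mirrors l2_connect_interfaces(2, *self.egress_if_idxes)
    }
    for bd_id, members in bridge_domains.items():
        print(f"bridge-domain {bd_id}: members {members}")
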
diff --git a/test/vm_test_config.py b/test/vm_test_config.py
index 7c8aa4caeef..60db4d1f18b 100644
--- a/test/vm_test_config.py
+++ b/test/vm_test_config.py
@@ -318,5 +318,15 @@ test_config = {
"server_if_checksum_offload": 0,
"x_connect_mode": "L3",
},
+ {
+ "id": 27,
+ "client_if_type": "tap,memif",
+ "client_if_version": 2,
+ "client_if_checksum_offload": 0,
+ "server_if_type": "tap,memif",
+ "server_if_version": 2,
+ "server_if_checksum_offload": 0,
+ "x_connect_mode": "L2",
+ },
],
}
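
Test 27 above is the first entry whose client_if_type/server_if_type fields carry two comma-separated interface types. A quick self-contained sketch of how such an entry can be picked out and split, with an inline dict standing in for the real test_config:

    # Stand-in for test/vm_test_config.py's test_config; illustrative only.
    test_config = {
        "tests": [
            {"id": 1, "client_if_type": "tap", "server_if_type": "tap"},
            {"id": 27, "client_if_type": "tap,memif", "server_if_type": "tap,memif"},
        ]
    }

    for test in test_config["tests"]:
        client_if_types = test["client_if_type"].split(",")
        server_if_types = test["server_if_type"].split(",")
        if "memif" in set(client_if_types) | set(server_if_types):
            print(f"test {test['id']} exercises memif:", client_if_types, server_if_types)
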
diff --git a/test/vpp_qemu_utils.py b/test/vpp_qemu_utils.py
index 3a8fdc8daf5..03b8632b15f 100644
--- a/test/vpp_qemu_utils.py
+++ b/test/vpp_qemu_utils.py
@@ -4,6 +4,8 @@
import subprocess
import sys
+import os
+import multiprocessing as mp
def can_create_namespaces(namespace="vpp_chk_4212"):
@@ -243,3 +245,64 @@ def list_namespace(ns):
subprocess.run(["ip", "netns", "exec", ns, "ip", "addr"])
except subprocess.CalledProcessError as e:
raise Exception("Error listing namespace IP:", e.output)
+
+
+def libmemif_test_app(memif_sock_path, logger):
+ """Build & run the libmemif test_app for memif interface testing."""
+ test_dir = os.path.dirname(os.path.realpath(__file__))
+ ws_root = os.path.dirname(test_dir)
+ libmemif_app = os.path.join(
+ ws_root, "extras", "libmemif", "build", "examples", "test_app"
+ )
+
+ def build_libmemif_app():
+ if not os.path.exists(libmemif_app):
+ print(f"Building app:{libmemif_app} for memif interface testing")
+ libmemif_app_dir = os.path.join(ws_root, "extras", "libmemif", "build")
+ if not os.path.exists(libmemif_app_dir):
+ os.makedirs(libmemif_app_dir)
+ os.chdir(libmemif_app_dir)
+ try:
+ p = subprocess.run(["cmake", ".."], capture_output=True)
+ logger.debug(p.stdout)
+ if p.returncode != 0:
+ print(f"libmemif app:{libmemif_app} cmake error:{p.stderr}")
+ sys.exit(1)
+ p = subprocess.run(["make"], capture_output=True)
+ logger.debug(p.stdout)
+ if p.returncode != 0:
+ print(f"Error building libmemif app:{p.stderr}")
+ sys.exit(1)
+ except subprocess.CalledProcessError as e:
+ raise Exception("Error building libmemif_test_app:", e.output)
+
+ def start_libmemif_app():
+ """Restart once if the initial run fails."""
+ max_tries = 2
+ run = 0
+ if not os.path.exists(libmemif_app):
+ raise Exception(
+ f"Error could not locate the libmemif test app:{libmemif_app}"
+ )
+ args = [libmemif_app, "-b", "9216", "-s", memif_sock_path]
+ while run < max_tries:
+ try:
+ process = subprocess.run(args, capture_output=True)
+ logger.debug(process.stdout)
+ if process.returncode != 0:
+ msg = f"Error starting libmemif app:{libmemif_app}"
+ logger.error(msg)
+ raise Exception(msg)
+ except Exception:
+ msg = f"re-starting libmemif app:{libmemif_app}"
+ logger.error(msg)
+ continue
+ else:
+ break
+ finally:
+ run += 1
+
+ build_libmemif_app()
+ process = mp.Process(target=start_libmemif_app)
+ process.start()
+ return process
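
libmemif_test_app() hands the multiprocessing.Process back to the caller so it can be stopped later (the test's tearDown terminates and joins it). A small self-contained sketch of that life cycle, with a dummy worker standing in for the blocking test_app run:

    import multiprocessing as mp
    import time

    def dummy_memif_app():
        # Stand-in for the blocking subprocess.run() of the libmemif test_app.
        time.sleep(60)

    if __name__ == "__main__":
        process = mp.Process(target=dummy_memif_app)
        process.start()
        # ... iperf traffic would run while the helper is up ...
        process.terminate()   # mirrors TestVPPInterfacesQemu.tearDown()
        process.join()
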
diff --git a/test/vpp_running.py b/test/vpp_running.py
index f31a1327b4a..43377b65c6a 100644
--- a/test/vpp_running.py
+++ b/test/vpp_running.py
@@ -26,6 +26,7 @@ def use_running(cls):
RunningVPP.get_set_vpp_sock_files()
cls.get_stats_sock_path = RunningVPP.get_stats_sock_path
cls.get_api_sock_path = RunningVPP.get_api_sock_path
+ cls.get_memif_sock_path = RunningVPP.get_memif_sock_path
cls.run_vpp = RunningVPP.run_vpp
cls.quit_vpp = RunningVPP.quit_vpp
cls.vpp = RunningVPP
@@ -36,6 +37,7 @@ def use_running(cls):
class RunningVPP:
api_sock = "" # api_sock file path
stats_sock = "" # stats sock_file path
+ memif_sock = "" # memif sock path
socket_dir = "" # running VPP's socket directory
pid = None # running VPP's pid
returncode = None # indicates to the framework that VPP is running
@@ -49,6 +51,10 @@ class RunningVPP:
return cls.api_sock
@classmethod
+ def get_memif_sock_path(cls):
+ return cls.memif_sock
+
+ @classmethod
def run_vpp(cls):
"""VPP is already running -- skip this action."""
pass
@@ -112,6 +118,8 @@ class RunningVPP:
cls.api_sock = os.path.abspath(sock_file)
elif "stats.sock" in sock_file:
cls.stats_sock = os.path.abspath(sock_file)
+ elif "memif.sock" in sock_file:
+ cls.memif_sock = os.path.abspath(sock_file)
if not cls.api_sock:
print(
f"Error: Could not find a valid api.sock file "