path: root/resources/libraries/python
Diffstat (limited to 'resources/libraries/python')
-rw-r--r--   resources/libraries/python/CpuUtils.py                56
-rw-r--r--   resources/libraries/python/Iperf3.py                 347
-rw-r--r--   resources/libraries/python/QemuManager.py             42
-rw-r--r--   resources/libraries/python/QemuUtils.py               95
-rw-r--r--   resources/libraries/python/Tap.py                     74
-rw-r--r--   resources/libraries/python/VhostUser.py               24
-rw-r--r--   resources/libraries/python/autogen/Regenerator.py     51
-rw-r--r--   resources/libraries/python/autogen/Testcase.py        20
8 files changed, 673 insertions, 36 deletions
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index e4fff010f1..f261f9421e 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -392,3 +392,57 @@ class CpuUtils:
smt_used=False)
return master_thread_id[0], latency_thread_id[0], cpu_node, threads
+
+ @staticmethod
+ def get_affinity_iperf(
+ node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+ """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface.
+ :param cpu_skip_cnt: Amount of CPU cores to skip.
+ :param cpu_cnt: CPU threads count.
+ :type node: dict
+ :type pf_key: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+        :returns: CPU range to which iPerf3 threads will be pinned.
+        :rtype: str
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+
+ return CpuUtils.cpu_range_per_node_str(
+ node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=False)
+
+ @staticmethod
+ def get_affinity_vhost(
+ node, pf_key, skip_cnt=0, cpu_cnt=1):
+ """Get affinity for vhost. Result will be used to pin vhost threads.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface.
+ :param skip_cnt: Amount of CPU cores to skip.
+ :param cpu_cnt: CPU threads count.
+ :type node: dict
+ :type pf_key: str
+ :type skip_cnt: int
+ :type cpu_cnt: int
+        :returns: List of CPUs allocated to the vhost process.
+        :rtype: list
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+ if smt_used:
+ cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+ return CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=smt_used)
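
For context, a minimal sketch of how these new affinity helpers are meant to be consumed; the node dicts, interface key and counts below are illustrative assumptions, not values taken from this change:

    # Hypothetical usage of the new CpuUtils helpers.
    iperf_cpus = CpuUtils.get_affinity_iperf(
        tg_node, u"port1", cpu_skip_cnt=2, cpu_cnt=4
    )  # range string such as u"4-7", picked on the interface NUMA node
    vhost_cpus = CpuUtils.get_affinity_vhost(
        dut_node, u"port1", skip_cnt=2, cpu_cnt=4
    )  # list of CPU ids sliced from the same NUMA node, SMT aware
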
diff --git a/resources/libraries/python/Iperf3.py b/resources/libraries/python/Iperf3.py
new file mode 100644
index 0000000000..ed186f0757
--- /dev/null
+++ b/resources/libraries/python/Iperf3.py
@@ -0,0 +1,347 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""iPerf3 utilities library."""
+
+import json
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.IPUtil import IPUtil
+from resources.libraries.python.Namespaces import Namespaces
+from resources.libraries.python.OptionString import OptionString
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
+
+
+class Iperf3:
+ """iPerf3 traffic generator utilities."""
+
+ def __init__(self):
+ """Initialize iPerf3 class."""
+ # Computed affinity for iPerf server.
+ self._s_affinity = None
+ # Computed affinity for iPerf client.
+ self._c_affinity = None
+
+ def initialize_iperf_server(
+ self, node, pf_key, interface, bind, bind_gw, bind_mask,
+ namespace=None, cpu_skip_cnt=0, cpu_cnt=1, instances=1):
+ """iPerf3 initialization.
+
+ :param node: Topology node running iPerf3 server.
+        :param pf_key: First TG's interface (to compute NUMA location).
+ :param interface: Name of TG bind interface.
+ :param bind: Bind to host, one of node's addresses.
+ :param bind_gw: Bind gateway (required for default route).
+ :param bind_mask: Bind address mask.
+ :param namespace: Name of TG namespace to execute.
+ :param cpu_skip_cnt: Amount of CPU cores to skip.
+ :param cpu_cnt: iPerf3 main thread count.
+ :param instances: Number of simultaneous iPerf3 instances.
+ :type node: dict
+ :type pf_key: str
+ :type interface: str
+ :type bind: str
+ :type bind_gw: str
+ :type bind_mask: str
+ :type namespace: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+ :type instances: int
+ """
+ if Iperf3.is_iperf_running(node):
+ Iperf3.teardown_iperf(node)
+
+ if namespace:
+ IPUtil.set_linux_interface_ip(
+ node, interface=interface, ip_addr=bind, prefix=bind_mask,
+ namespace=namespace)
+ IPUtil.set_linux_interface_up(
+ node, interface=interface, namespace=namespace)
+ Namespaces.add_default_route_to_namespace(
+ node, namespace=namespace, default_route=bind_gw)
+
+ # Compute affinity for iPerf server.
+ self._s_affinity = CpuUtils.get_affinity_iperf(
+ node, pf_key, cpu_skip_cnt=cpu_skip_cnt,
+ cpu_cnt=cpu_cnt * instances)
+ # Compute affinity for iPerf client.
+ self._c_affinity = CpuUtils.get_affinity_iperf(
+ node, pf_key, cpu_skip_cnt=cpu_skip_cnt + cpu_cnt * instances,
+ cpu_cnt=cpu_cnt * instances)
+
+ for i in range(0, instances):
+ Iperf3.start_iperf_server(
+ node, namespace=namespace, port=5201 + i,
+ affinity=self._s_affinity)
+
+ @staticmethod
+ def start_iperf_server(
+ node, namespace=None, port=5201, affinity=None):
+        """Start iPerf3 server instance as a daemon.
+
+ :param node: Topology node running iPerf3 server.
+ :param namespace: Name of TG namespace to execute.
+ :param port: The server port for the server to listen on.
+ :param affinity: iPerf3 server affinity.
+ :type node: dict
+ :type namespace: str
+ :type port: int
+ :type affinity: str
+ """
+ cmd = IPerf3Server.iperf3_cmdline(
+ namespace=namespace, port=port, affinity=affinity)
+ exec_cmd_no_error(
+ node, cmd, sudo=True, message=u"Failed to start iPerf3 server!")
+
+ @staticmethod
+ def is_iperf_running(node):
+ """Check if iPerf3 is running using pgrep.
+
+ :param node: Topology node running iPerf3.
+ :type node: dict
+ :returns: True if iPerf3 is running otherwise False.
+ :rtype: bool
+ """
+ ret, _, _ = exec_cmd(node, u"pgrep iperf3", sudo=True)
+ return bool(int(ret) == 0)
+
+ @staticmethod
+ def teardown_iperf(node):
+ """iPerf3 teardown.
+
+ :param node: Topology node running iPerf3.
+ :type node: dict
+ """
+ pidfile = u"/tmp/iperf3_server.pid"
+ logfile = u"/tmp/iperf3.log"
+
+ exec_cmd_no_error(
+ node,
+ f"sh -c 'if [ -f {pidfile} ]; then "
+ f"pkill iperf3; "
+ f"cat {logfile}; "
+ f"rm {logfile}; "
+ f"fi'",
+ sudo=True, message=u"iPerf3 kill failed!")
+
+ def iperf_client_start_remote_exec(
+ self, node, duration, rate, frame_size, async_call=False,
+ warmup_time=0, traffic_directions=1, namespace=None, udp=False,
+ host=None, bind=None, affinity=None):
+        """Execute iPerf3 client script on remote node over SSH to start
+        running traffic.
+
+ :param node: Topology node running iPerf3.
+ :param duration: Time expressed in seconds for how long to send traffic.
+ :param rate: Traffic rate.
+ :param frame_size: L2 frame size to send (without padding and IPG).
+ :param async_call: If enabled then don't wait for all incoming traffic.
+ :param warmup_time: Warmup time period.
+ :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
+ Default: 1
+ :param namespace: Namespace to execute iPerf3 client on.
+ :param udp: UDP traffic.
+ :param host: Client connecting to an iPerf server running on host.
+ :param bind: Client bind IP address.
+ :param affinity: iPerf3 client affinity.
+ :type node: dict
+ :type duration: float
+ :type rate: str
+ :type frame_size: str
+ :type async_call: bool
+ :type warmup_time: float
+ :type traffic_directions: int
+ :type namespace: str
+ :type udp: bool
+ :type host: str
+ :type bind: str
+ :type affinity: str
+ :returns: List of iPerf3 PIDs.
+ :rtype: list
+ """
+ if not isinstance(duration, (float, int)):
+ duration = float(duration)
+ if not isinstance(warmup_time, (float, int)):
+ warmup_time = float(warmup_time)
+ if not affinity:
+ affinity = self._c_affinity
+
+ kwargs = dict()
+ if namespace:
+ kwargs[u"namespace"] = namespace
+ kwargs[u"host"] = host
+ kwargs[u"bind"] = bind
+ kwargs[u"udp"] = udp
+ if affinity:
+ kwargs[u"affinity"] = affinity
+ kwargs[u"duration"] = duration
+ kwargs[u"rate"] = rate
+ kwargs[u"frame_size"] = frame_size
+ kwargs[u"warmup_time"] = warmup_time
+ kwargs[u"traffic_directions"] = traffic_directions
+ kwargs[u"async_call"] = async_call
+
+ cmd = IPerf3Client.iperf3_cmdline(**kwargs)
+
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, timeout=int(duration) + 30,
+ message=u"iPerf3 runtime error!")
+
+ if async_call:
+ return stdout.split()
+ return json.loads(stdout)
+
+ @staticmethod
+ def iperf_client_stop_remote_exec(node, pids):
+ """Stop iPerf3 client execution.
+
+        :param node: Topology node running the iPerf3 client.
+        :param pids: PID or list of PIDs of iPerf3 client processes.
+        :type node: dict
+        :type pids: str or list
+ """
+ if not isinstance(pids, list):
+ pids = [pids]
+
+ for pid in pids:
+ exec_cmd_no_error(
+ node, f"kill {pid}", sudo=True, message=u"Kill iPerf3 failed!")
+
+
+class IPerf3Server:
+ """iPerf3 server utilities."""
+
+ @staticmethod
+ def iperf3_cmdline(**kwargs):
+ """Get iPerf3 server command line.
+
+ :param kwargs: List of iPerf3 server parameters.
+ :type kwargs: dict
+ :returns: iPerf3 server command line.
+ :rtype: OptionString
+ """
+ cmd = OptionString()
+ if kwargs['namespace']:
+ cmd.add(f"ip netns exec {kwargs['namespace']}")
+ cmd.add(f"iperf3")
+
+ cmd_options = OptionString(prefix=u"--")
+ # Run iPerf in server mode. (This will only allow one iperf connection
+ # at a time)
+ cmd_options.add(
+ u"server")
+
+ # Run the server in background as a daemon.
+ cmd_options.add_if_from_dict(
+ u"daemon", u"daemon", kwargs, True)
+
+ # Write a file with the process ID, most useful when running as a
+ # daemon.
+ cmd_options.add_with_value_from_dict(
+ u"pidfile", u"pidfile", kwargs, f"/tmp/iperf3_server.pid")
+
+ # Send output to a log file.
+ cmd_options.add_with_value_from_dict(
+ u"logfile", u"logfile", kwargs, f"/tmp/iperf3.log")
+
+ # The server port for the server to listen on and the client to
+ # connect to. This should be the same in both client and server.
+ # Default is 5201.
+ cmd_options.add_with_value_from_dict(
+ u"port", u"port", kwargs, 5201)
+
+ # Set the CPU affinity, if possible (Linux and FreeBSD only).
+ cmd_options.add_with_value_from_dict(
+ u"affinity", u"affinity", kwargs)
+
+ # Output in JSON format.
+ cmd_options.add_if_from_dict(
+ u"json", u"json", kwargs, True)
+
+ # Give more detailed output.
+ cmd_options.add_if_from_dict(
+ u"verbose", u"verbose", kwargs, True)
+
+ return cmd.extend(cmd_options)
+
+
+class IPerf3Client:
+ """iPerf3 client utilities."""
+
+ @staticmethod
+ def iperf3_cmdline(**kwargs):
+ """Get iperf_client driver command line.
+
+ :param kwargs: List of iperf_client driver parameters.
+ :type kwargs: dict
+ :returns: iperf_client driver command line.
+ :rtype: OptionString
+ """
+ cmd = OptionString()
+ cmd.add(u"python3")
+ dirname = f"{Constants.REMOTE_FW_DIR}/resources/tools/iperf"
+ cmd.add(f"'{dirname}/iperf_client.py'")
+
+ cmd_options = OptionString(prefix=u"--")
+ # Namespace to execute iPerf3 client on.
+ cmd_options.add_with_value_from_dict(
+ u"namespace", u"namespace", kwargs)
+
+ # Client connecting to an iPerf3 server running on host.
+ cmd_options.add_with_value_from_dict(
+ u"host", u"host", kwargs)
+
+ # Client bind IP address.
+ cmd_options.add_with_value_from_dict(
+ u"bind", u"bind", kwargs)
+
+ # Use UDP rather than TCP.
+ cmd_options.add_if_from_dict(
+ u"udp", u"udp", kwargs, False)
+
+ # Set the CPU affinity, if possible.
+ cmd_options.add_with_value_from_dict(
+ u"affinity", u"affinity", kwargs)
+
+ # Time expressed in seconds for how long to send traffic.
+ cmd_options.add_with_value_from_dict(
+ u"duration", u"duration", kwargs)
+
+ # Send bi- (2) or uni- (1) directional traffic.
+ cmd_options.add_with_value_from_dict(
+ u"traffic_directions", u"traffic_directions", kwargs, 1)
+
+ # Traffic warm-up time in seconds, (0=disable).
+ cmd_options.add_with_value_from_dict(
+ u"warmup_time", u"warmup_time", kwargs, 5.0)
+
+ # L2 frame size to send (without padding and IPG).
+ cmd_options.add_with_value_from_dict(
+ u"frame_size", u"frame_size", kwargs)
+
+ # Traffic rate expressed with units.
+ cmd_options.add_with_value_from_dict(
+ u"rate", u"rate", kwargs)
+
+ # If enabled then don't wait for all incoming traffic.
+ cmd_options.add_if_from_dict(
+ u"async_start", u"async_call", kwargs, False)
+
+ # Number of iPerf3 client parallel instances.
+ cmd_options.add_with_value_from_dict(
+ u"instances", u"instances", kwargs, 1)
+
+ # Number of iPerf3 client parallel flows.
+ cmd_options.add_with_value_from_dict(
+ u"parallel", u"parallel", kwargs, 8)
+
+ return cmd.extend(cmd_options)
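
A rough end-to-end sketch of the intended server/client flow of the new Iperf3 class; the addresses, namespaces, interface name and rate below are assumed example values only:

    # Hypothetical usage; see initialize_iperf_server() and
    # iperf_client_start_remote_exec() above.
    iperf = Iperf3()
    iperf.initialize_iperf_server(
        tg_node, pf_key=u"port1", interface=u"ens4", bind=u"192.168.10.1",
        bind_gw=u"192.168.10.254", bind_mask=u"24", namespace=u"nmsp1",
        cpu_skip_cnt=2, cpu_cnt=1, instances=1
    )
    results = iperf.iperf_client_start_remote_exec(
        tg_node, duration=30.0, rate=u"10Gbps", frame_size=u"128000",
        namespace=u"nmsp2", udp=False, host=u"192.168.10.1",
        bind=u"192.168.10.2"
    )
    # With async_call=True the call returns a list of PIDs instead of the
    # parsed JSON results; those PIDs can later be passed to
    # Iperf3.iperf_client_stop_remote_exec().
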
diff --git a/resources/libraries/python/QemuManager.py b/resources/libraries/python/QemuManager.py
index 6436f69aec..766372ad9c 100644
--- a/resources/libraries/python/QemuManager.py
+++ b/resources/libraries/python/QemuManager.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -51,8 +51,9 @@ class QemuManager:
nf_nodes = int(kwargs[u"nf_nodes"])
queues = kwargs[u"rxq_count_int"] if kwargs[u"auto_scale"] else 1
vs_dtc = kwargs[u"vs_dtc"]
- nf_dtc = kwargs[u"vs_dtc"] if kwargs[u"auto_scale"] \
- else kwargs[u"nf_dtc"]
+ nf_dtc = kwargs[u"nf_dtc"]
+ if kwargs[u"auto_scale"] and not kwargs[u"fixed_auto_scale"]:
+ nf_dtc = kwargs[u"vs_dtc"]
nf_dtcr = kwargs[u"nf_dtcr"] \
if isinstance(kwargs[u"nf_dtcr"], int) else 2
@@ -419,3 +420,38 @@ class QemuManager:
pci=Topology.get_interface_pci_addr(
self.nodes[kwargs[u"node"]], kwargs[u"if1"])
)
+
+ def _c_iperf3(self, **kwargs):
+ """Instantiate one VM with iperf3 configuration.
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ qemu_id = kwargs[u"qemu_id"]
+ name = kwargs[u"name"]
+ virtio_feature_mask = kwargs[u"virtio_feature_mask"] \
+ if u"virtio_feature_mask" in kwargs else None
+
+ self.machines[name] = QemuUtils(
+ node=self.nodes[kwargs[u"node"]],
+ qemu_id=qemu_id,
+ smp=len(self.machines_affinity[name]),
+ mem=4096,
+ vnf=kwargs[u"vnf"],
+ img=Constants.QEMU_VM_KERNEL
+ )
+ self.machines[name].add_default_params()
+ self.machines[name].add_kernelvm_params()
+ self.machines[name].configure_kernelvm_vnf(
+ queues=kwargs[u"queues"],
+ jumbo_frames=kwargs[u"jumbo"]
+ )
+ self.machines[name].add_net_user()
+ self.machines[name].add_vhost_user_if(
+ f"/run/vpp/sock-{qemu_id}-1",
+ server=False,
+ jumbo_frames=kwargs[u"jumbo"],
+ queues=kwargs[u"queues"],
+ queue_size=kwargs[u"perf_qemu_qsz"],
+ virtio_feature_mask=virtio_feature_mask
+ )
diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index c215dfd96f..51fba6105e 100644
--- a/resources/libraries/python/QemuUtils.py
+++ b/resources/libraries/python/QemuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -80,8 +80,8 @@ class QemuUtils:
u"type": NodeType.VM,
u"port": 10021 + qemu_id,
u"serial": 4555 + qemu_id,
- u"username": 'cisco',
- u"password": 'cisco',
+ u"username": 'testuser',
+ u"password": 'Csit1234',
u"interfaces": {},
}
if node[u"port"] != 22:
@@ -142,10 +142,10 @@ class QemuUtils:
self._params.add_with_value(u"numa", u"node,memdev=mem")
self._params.add_with_value(u"balloon", u"none")
- def add_net_user(self):
+ def add_net_user(self, net="10.0.2.0/24"):
"""Set managment port forwarding."""
self._params.add_with_value(
- u"netdev", f"user,id=mgmt,net=172.16.255.0/24,"
+ u"netdev", f"user,id=mgmt,net={net},"
f"hostfwd=tcp::{self._vm_info[u'port']}-:22"
)
self._params.add_with_value(
@@ -158,7 +158,9 @@ class QemuUtils:
u"chardev", f"socket,path={self._temp.get(u'qga')},"
f"server,nowait,id=qga0"
)
- self._params.add_with_value(u"device", u"isa-serial,chardev=qga0")
+ self._params.add_with_value(
+ u"device", u"isa-serial,chardev=qga0"
+ )
self._params.add_with_value(
u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait"
)
@@ -167,8 +169,11 @@ class QemuUtils:
"""Set serial to file redirect."""
self._params.add_with_value(
u"chardev", f"socket,host=127.0.0.1,"
- f"port={self._vm_info[u'serial']},id=gnc0,server,nowait")
- self._params.add_with_value(u"device", u"isa-serial,chardev=gnc0")
+ f"port={self._vm_info[u'serial']},id=gnc0,server,nowait"
+ )
+ self._params.add_with_value(
+ u"device", u"isa-serial,chardev=gnc0"
+ )
self._params.add_with_value(
u"serial", f"file:{self._temp.get(u'log')}"
)
@@ -210,8 +215,12 @@ class QemuUtils:
self._params.add_with_value(
u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot"
)
- self._params.add_with_value(u"kernel", f"{self._opt.get(u'img')}")
- self._params.add_with_value(u"initrd", f"{self._opt.get(u'initrd')}")
+ self._params.add_with_value(
+ u"kernel", f"{self._opt.get(u'img')}"
+ )
+ self._params.add_with_value(
+ u"initrd", f"{self._opt.get(u'initrd')}"
+ )
self._params.add_with_value(
u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
f"root=virtioroot console={self._opt.get(u'console')} "
@@ -250,19 +259,19 @@ class QemuUtils:
f"{self._nic_id:02x}"
queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
if queue_size else u""
- if virtio_feature_mask is None:
- gso = False
- csum = False
- else:
- gso = VirtioFeatureMask.is_feature_enabled(
- virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_GSO)
- csum = VirtioFeatureMask.is_feature_enabled(
- virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_CSUM)
+ gso = VirtioFeatureMask.is_feature_enabled(
+ virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_GSO)
+ csum = VirtioFeatureMask.is_feature_enabled(
+ virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_CSUM)
+
self._params.add_with_value(
u"device", f"virtio-net-pci,netdev=vhost{self._nic_id},mac={mac},"
f"addr={self._nic_id+5}.0,mq=on,vectors={2 * queues + 2},"
- f"csum={u'on' if csum else u'off'},gso={u'on' if gso else u'off'},"
- f"guest_tso4=off,guest_tso6=off,guest_ecn=off,"
+ f"csum={u'on' if csum else u'off'},"
+ f"gso={u'on' if gso else u'off'},"
+ f"guest_tso4={u'on' if gso else u'off'},"
+ f"guest_tso6={u'on' if gso else u'off'},"
+ f"guest_ecn={u'on' if gso else u'off'},"
f"{queue_size}"
)
@@ -402,17 +411,20 @@ class QemuUtils:
self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
+ def create_kernelvm_config_iperf3(self):
+ """Create QEMU iperf3 command line."""
+ self._opt[u"vnf_bin"] = f"mkdir /run/sshd; /usr/sbin/sshd -D -d"
+
def create_kernelvm_init(self, **kwargs):
"""Create QEMU init script.
:param kwargs: Key-value pairs to replace content of init startup file.
:type kwargs: dict
"""
- template = f"{Constants.RESOURCES_TPL}/vm/init.sh"
init = self._temp.get(u"ini")
exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)
- with open(template, u"rt") as src_file:
+ with open(kwargs[u"template"], u"rt") as src_file:
src = Template(src_file.read())
exec_cmd_no_error(
self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
@@ -428,13 +440,32 @@ class QemuUtils:
"""
if u"vpp" in self._opt.get(u"vnf"):
self.create_kernelvm_config_vpp(**kwargs)
- self.create_kernelvm_init(vnf_bin=self._opt.get(u"vnf_bin"))
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
elif u"testpmd_io" in self._opt.get(u"vnf"):
self.create_kernelvm_config_testpmd_io(**kwargs)
- self.create_kernelvm_init(vnf_bin=self._opt.get(u"vnf_bin"))
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
elif u"testpmd_mac" in self._opt.get(u"vnf"):
self.create_kernelvm_config_testpmd_mac(**kwargs)
- self.create_kernelvm_init(vnf_bin=self._opt.get(u"vnf_bin"))
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
+ elif u"iperf3" in self._opt.get(u"vnf"):
+ qemu_id = self._opt.get(u'qemu_id') % 2
+ self.create_kernelvm_config_iperf3()
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init_iperf3.sh",
+ vnf_bin=self._opt.get(u"vnf_bin"),
+ ip_address_l=u"2.2.2.2/30" if qemu_id else u"1.1.1.1/30",
+ ip_address_r=u"2.2.2.1" if qemu_id else u"1.1.1.2",
+ ip_route_r=u"1.1.1.0/30" if qemu_id else u"2.2.2.0/30"
+ )
else:
raise RuntimeError(u"QEMU: Unsupported VNF!")
@@ -639,6 +670,20 @@ class QemuUtils:
f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
)
+ def _wait_iperf3(self, retries=60):
+ """Wait until QEMU with iPerf3 is booted.
+
+ :param retries: Number of retries.
+ :type retries: int
+ """
+ grep = u"Server listening on 0.0.0.0 port 22."
+ cmd = f"fgrep '{grep}' {self._temp.get(u'log')}"
+ message = f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+ exec_cmd_no_error(
+ self._node, cmd=cmd, sudo=True, message=message, retries=retries,
+ include_reason=True
+ )
+
def _update_vm_interfaces(self):
"""Update interface names in VM node dict."""
# Send guest-network-get-interfaces command via QGA, output example:
diff --git a/resources/libraries/python/Tap.py b/resources/libraries/python/Tap.py
index 041a774c0b..c729d602b1 100644
--- a/resources/libraries/python/Tap.py
+++ b/resources/libraries/python/Tap.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,6 +13,8 @@
"""Tap utilities library."""
+from enum import IntEnum
+
from robot.api import logger
from resources.libraries.python.Constants import Constants
@@ -22,21 +24,43 @@ from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import Topology
+class TapFeaturesFlags(IntEnum):
+ """TAP Features Flags."""
+ TAP_API_FLAG_GSO = 1
+ TAP_API_FLAG_CSUM_OFFLOAD = 2
+ TAP_API_FLAG_PERSIST = 4
+ TAP_API_FLAG_ATTACH = 8
+ TAP_API_FLAG_TUN = 16
+ TAP_API_FLAG_GRO_COALESCE = 32
+ TAP_API_FLAG_PACKED = 64
+ TAP_API_FLAG_IN_ORDER = 128
+
+
class Tap:
"""Tap utilities."""
@staticmethod
- def add_tap_interface(node, tap_name, mac=None, num_rx_queues=1):
+ def add_tap_interface(
+ node, tap_name, mac=None, host_namespace=None, num_rx_queues=1,
+ rxq_size=0, txq_size=0, tap_feature_mask=0):
"""Add tap interface with name and optionally with MAC.
:param node: Node to add tap on.
:param tap_name: Tap interface name for linux tap.
:param mac: Optional MAC address for VPP tap.
+        :param host_namespace: Host namespace to place the Linux tap interface
+            into.
:param num_rx_queues: Number of RX queues.
+ :param rxq_size: Size of RXQ (0 = Default API; 256 = Default VPP).
+ :param txq_size: Size of TXQ (0 = Default API; 256 = Default VPP).
+ :param tap_feature_mask: Mask of tap features to be enabled.
:type node: dict
:type tap_name: str
:type mac: str
+ :type host_namespace: str
:type num_rx_queues: int
+ :type rxq_size: int
+ :type txq_size: int
+ :type tap_feature_mask: int
        :returns: Interface index.
:rtype: int
"""
@@ -46,16 +70,20 @@ class Tap:
use_random_mac=bool(mac is None),
mac_address=L2Util.mac_to_bin(mac) if mac else None,
num_rx_queues=int(num_rx_queues),
+ tx_ring_sz=int(txq_size),
+ rx_ring_sz=int(rxq_size),
host_mtu_set=False,
host_mac_addr_set=False,
host_ip4_prefix_set=False,
host_ip6_prefix_set=False,
host_ip4_gw_set=False,
host_ip6_gw_set=False,
- host_namespace_set=False,
+ host_namespace_set=bool(host_namespace),
+ host_namespace=host_namespace,
host_if_name_set=True,
host_if_name=tap_name,
- host_bridge_set=False
+ host_bridge_set=False,
+ tap_flags=tap_feature_mask
)
err_msg = f"Failed to create tap interface {tap_name} " \
f"on host {node[u'host']}"
@@ -152,3 +180,41 @@ class Tap:
logger.debug(f"TAP data:\n{data}")
return data
+
+
+class TapFeatureMask:
+    """Tap feature utilities."""
+
+ @staticmethod
+ def create_tap_feature_mask(**kwargs):
+        """Create tap feature mask with feature bits set according to kwargs.
+
+        :param kwargs: Key-value pairs of feature names and their states.
+        :type kwargs: dict
+        :returns: Tap feature mask with the requested feature bits set.
+        :rtype: int
+        """
+ tap_feature_mask = 0
+
+ if u"all" in kwargs and kwargs[u"all"] is True:
+ for tap_feature_flag in TapFeaturesFlags:
+ tap_feature_mask |= 1 << (tap_feature_flag.value - 1)
+ else:
+ for feature_name, enabled in kwargs.items():
+ tap_feature_name = u"TAP_API_FLAG_" + feature_name.upper()
+ if tap_feature_name not in TapFeaturesFlags.__members__:
+ raise ValueError(u"Unsupported tap feature flag name")
+ if enabled:
+ tap_feature_mask |= \
+ 1 << (TapFeaturesFlags[tap_feature_name].value - 1)
+
+ return tap_feature_mask
+
+ @staticmethod
+ def is_feature_enabled(tap_feature_mask, tap_feature_flag):
+        """Check if a concrete tap feature is enabled within tap_feature_mask.
+
+        :param tap_feature_mask: Mask of enabled tap features.
+        :param tap_feature_flag: Checked tap feature.
+        :type tap_feature_mask: int
+        :type tap_feature_flag: TapFeaturesFlags
+        :returns: True if the feature is enabled in the mask, False otherwise.
+        :rtype: bool
+        """
+ feature_flag_bit = 1 << tap_feature_flag.value
+ return (tap_feature_mask & feature_flag_bit) > 0
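
An illustrative sketch of combining the new TapFeatureMask helper with the extended add_tap_interface() keyword; the node dict, interface and namespace names are assumptions:

    # Hypothetical usage: enable GSO and checksum offload on a VPP tap
    # placed into a Linux namespace.
    mask = TapFeatureMask.create_tap_feature_mask(gso=True, csum_offload=True)
    sw_if_index = Tap.add_tap_interface(
        dut_node, u"tap0", host_namespace=u"nmsp1", num_rx_queues=2,
        rxq_size=1024, txq_size=1024, tap_feature_mask=mask
    )
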
diff --git a/resources/libraries/python/VhostUser.py b/resources/libraries/python/VhostUser.py
index 22528b2b7f..c6b9185e14 100644
--- a/resources/libraries/python/VhostUser.py
+++ b/resources/libraries/python/VhostUser.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -17,9 +17,11 @@ from enum import IntEnum
from robot.api import logger
+from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.InterfaceUtil import InterfaceUtil
+from resources.libraries.python.ssh import exec_cmd_no_error
class VirtioFeaturesFlags(IntEnum):
@@ -151,6 +153,26 @@ class VhostUser:
logger.debug(f"Vhost-user details:\n{details}")
return details
+ @staticmethod
+ def vhost_user_affinity(node, pf_key, skip_cnt=0):
+ """Set vhost-user affinity for the given node.
+
+ :param node: Topology node.
+ :param pf_key: Interface key to compute numa location.
+ :param skip_cnt: Skip first "skip_cnt" CPUs.
+ :type node: dict
+ :type pf_key: str
+ :type skip_cnt: int
+ """
+ pids, _ = exec_cmd_no_error(
+ node, f"grep -h vhost /proc/*/comm | uniq | xargs pidof")
+
+ affinity = CpuUtils.get_affinity_vhost(
+ node, pf_key, skip_cnt=skip_cnt, cpu_cnt=len(pids.split(" ")))
+
+ for cpu, pid in zip(affinity, pids.split(" ")):
+ exec_cmd_no_error(node, f"taskset -pc {cpu} {pid}", sudo=True)
+
class VirtioFeatureMask:
"""Virtio features utilities"""
diff --git a/resources/libraries/python/autogen/Regenerator.py b/resources/libraries/python/autogen/Regenerator.py
index 6d35d1d13f..dd2672bd7c 100644
--- a/resources/libraries/python/autogen/Regenerator.py
+++ b/resources/libraries/python/autogen/Regenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -177,6 +177,20 @@ def add_tcp_testcases(testcase, file_out, tc_kwargs_list):
file_out.write(testcase.generate(**kwargs))
+def add_iperf3_testcases(testcase, file_out, tc_kwargs_list):
+ """Add iperf3 testcases to file.
+
+ :param testcase: Testcase class.
+ :param file_out: File to write testcases to.
+ :param tc_kwargs_list: Key-value pairs used to construct testcases.
+ :type testcase: Testcase
+ :type file_out: file
+    :type tc_kwargs_list: list of dict
+ """
+ for kwargs in tc_kwargs_list:
+ file_out.write(testcase.generate(**kwargs))
+
+
def write_default_files(in_filename, in_prolog, kwargs_list):
"""Using given filename and prolog, write all generated suites.
@@ -434,6 +448,34 @@ def write_tcp_files(in_filename, in_prolog, kwargs_list):
add_tcp_testcases(testcase, file_out, kwargs_list)
+def write_iperf3_files(in_filename, in_prolog, kwargs_list):
+ """Using given filename and prolog, write all generated iperf3 suites.
+
+ :param in_filename: Template filename to derive real filenames from.
+ :param in_prolog: Template content to derive real content from.
+ :param kwargs_list: List of kwargs for add_default_testcase.
+ :type in_filename: str
+ :type in_prolog: str
+ :type kwargs_list: list of dict
+ """
+ _, suite_id, suite_tag = get_iface_and_suite_ids(in_filename)
+ testcase = Testcase.iperf3(suite_id)
+ out_filename = replace_defensively(
+ in_filename, u"10ge2p1x710",
+ Constants.NIC_NAME_TO_CODE[u"Intel-X710"], 1,
+ u"File name should contain NIC code once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ in_prolog, u"Intel-X710", u"Intel-X710", 2,
+ u"NIC name should appear twice (tag and variable).",
+ in_filename
+ )
+ check_suite_tag(suite_tag, out_prolog)
+ with open(out_filename, u"wt") as file_out:
+ file_out.write(out_prolog)
+ add_iperf3_testcases(testcase, file_out, kwargs_list)
+
+
class Regenerator:
"""Class containing file generating methods."""
@@ -485,6 +527,11 @@ class Regenerator:
hs_quic_kwargs_list = [
{u"frame_size": 1280, u"phy_cores": 1},
]
+ iperf3_kwargs_list = [
+ {u"frame_size": 128000, u"phy_cores": 1},
+ {u"frame_size": 128000, u"phy_cores": 2},
+ {u"frame_size": 128000, u"phy_cores": 4}
+ ]
for in_filename in glob(pattern):
if not self.quiet:
@@ -514,6 +561,8 @@ class Regenerator:
hs_quic_kwargs_list if u"quic" in in_filename \
else hs_bps_kwargs_list
write_tcp_files(in_filename, in_prolog, hoststack_kwargs_list)
+ elif in_filename.endswith(u"-iperf3.robot"):
+ write_iperf3_files(in_filename, in_prolog, iperf3_kwargs_list)
else:
raise RuntimeError(
f"Error in {in_filename}: non-primary suite type found."
diff --git a/resources/libraries/python/autogen/Testcase.py b/resources/libraries/python/autogen/Testcase.py
index 3ffbc4594a..173c5919af 100644
--- a/resources/libraries/python/autogen/Testcase.py
+++ b/resources/libraries/python/autogen/Testcase.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -111,3 +111,21 @@ class Testcase:
| | [Tags] | ${{cores_str}}C
| | phy_cores=${{cores_num}}
'''
return cls(template_string)
+
+ @classmethod
+ def iperf3(cls, suite_id):
+ """Factory method for creating "iperf3" testcase objects.
+
+ Testcase name will contain core count, but not frame size.
+
+ :param suite_id: Part of suite name to distinguish from other suites.
+ :type suite_id: str
+ :returns: Instance for generating testcase text of this type.
+ :rtype: Testcase
+ """
+ template_string = f'''
+| 128KB-${{cores_str}}c-{suite_id}
+| | [Tags] | 128KB | ${{cores_str}}C
+| | frame_size=${{frame_num}} | phy_cores=${{cores_num}}
+'''
+ return cls(template_string)
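
For illustration, the new factory would be consumed by add_iperf3_testcases() in Regenerator.py roughly as follows; the suite id is an assumed example:

    # Hypothetical usage of the new Testcase.iperf3() factory.
    tc = Testcase.iperf3(u"eth-2vhostvr1024-1vm-iperf3")
    text = tc.generate(frame_size=128000, phy_cores=1)
    # 'text' is the rendered testcase block that write_iperf3_files()
    # appends to the generated -iperf3.robot suite.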