Diffstat (limited to 'resources')
-rw-r--r--  resources/libraries/python/CpuUtils.py | 56
-rw-r--r--  resources/libraries/python/Iperf3.py | 347
-rw-r--r--  resources/libraries/python/QemuManager.py | 42
-rw-r--r--  resources/libraries/python/QemuUtils.py | 95
-rw-r--r--  resources/libraries/python/Tap.py | 74
-rw-r--r--  resources/libraries/python/VhostUser.py | 24
-rw-r--r--  resources/libraries/python/autogen/Regenerator.py | 51
-rw-r--r--  resources/libraries/python/autogen/Testcase.py | 20
-rw-r--r--  resources/libraries/robot/performance/performance_actions.robot | 11
-rw-r--r--  resources/libraries/robot/performance/performance_utils.robot | 159
-rw-r--r--  resources/libraries/robot/shared/default.robot | 3
-rw-r--r--  resources/libraries/robot/shared/interfaces.robot | 95
-rw-r--r--  resources/libraries/robot/shared/test_teardown.robot | 11
-rw-r--r--  resources/libraries/robot/shared/vm.robot | 60
-rw-r--r--  resources/templates/vm/init_iperf3.sh | 27
-rw-r--r--  resources/tools/iperf/iperf_client.py | 182
16 files changed, 1192 insertions, 65 deletions
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index e4fff010f1..f261f9421e 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -392,3 +392,57 @@ class CpuUtils:
smt_used=False)
return master_thread_id[0], latency_thread_id[0], cpu_node, threads
+
+ @staticmethod
+ def get_affinity_iperf(
+ node, pf_key, cpu_skip_cnt=0, cpu_cnt=1):
+ """Get affinity for iPerf3. Result will be used to pin iPerf3 threads.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface key, used to determine the NUMA node.
+ :param cpu_skip_cnt: Number of CPU cores to skip.
+ :param cpu_cnt: Number of CPU threads to allocate.
+ :type node: dict
+ :type pf_key: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+ :returns: Range of CPUs allocated to iPerf3, as a string.
+ :rtype: str
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+
+ return CpuUtils.cpu_range_per_node_str(
+ node, cpu_node, skip_cnt=cpu_skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=False)
+
+ @staticmethod
+ def get_affinity_vhost(
+ node, pf_key, skip_cnt=0, cpu_cnt=1):
+ """Get affinity for vhost. Result will be used to pin vhost threads.
+
+ :param node: Topology node.
+ :param pf_key: Topology interface key, used to determine the NUMA node.
+ :param skip_cnt: Number of CPU cores to skip.
+ :param cpu_cnt: Number of CPU threads to allocate.
+ :type node: dict
+ :type pf_key: str
+ :type skip_cnt: int
+ :type cpu_cnt: int
+ :returns: List of CPUs allocated to the vhost process.
+ :rtype: list
+ """
+ if pf_key:
+ cpu_node = Topology.get_interface_numa_node(node, pf_key)
+ else:
+ cpu_node = 0
+
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
+ if smt_used:
+ cpu_cnt = cpu_cnt // CpuUtils.NR_OF_THREADS
+
+ return CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node=cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=smt_used)
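
For orientation, a minimal usage sketch of the two helpers above. Here tg_node and dut_node stand for topology node dictionaries and "port1" for an interface key from the topology file, so the snippet is illustrative rather than standalone-runnable and assumes a CSIT checkout on PYTHONPATH:

# Placeholders: tg_node / dut_node are topology node dicts, "port1" an
# interface key from the topology file.
from resources.libraries.python.CpuUtils import CpuUtils

# Range string such as "2-5", later passed to iperf3 --affinity.
iperf_affinity = CpuUtils.get_affinity_iperf(
    tg_node, u"port1", cpu_skip_cnt=2, cpu_cnt=4)

# List of CPU ids on the interface's NUMA node, later used with taskset
# to pin vhost threads.
vhost_cpus = CpuUtils.get_affinity_vhost(
    dut_node, u"port1", skip_cnt=2, cpu_cnt=2)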
diff --git a/resources/libraries/python/Iperf3.py b/resources/libraries/python/Iperf3.py
new file mode 100644
index 0000000000..ed186f0757
--- /dev/null
+++ b/resources/libraries/python/Iperf3.py
@@ -0,0 +1,347 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""iPerf3 utilities library."""
+
+import json
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.IPUtil import IPUtil
+from resources.libraries.python.Namespaces import Namespaces
+from resources.libraries.python.OptionString import OptionString
+from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
+
+
+class Iperf3:
+ """iPerf3 traffic generator utilities."""
+
+ def __init__(self):
+ """Initialize iPerf3 class."""
+ # Computed affinity for iPerf server.
+ self._s_affinity = None
+ # Computed affinity for iPerf client.
+ self._c_affinity = None
+
+ def initialize_iperf_server(
+ self, node, pf_key, interface, bind, bind_gw, bind_mask,
+ namespace=None, cpu_skip_cnt=0, cpu_cnt=1, instances=1):
+ """iPerf3 initialization.
+
+ :param node: Topology node running iPerf3 server.
+ :param pf_key: First TG interface key (used to compute NUMA location).
+ :param interface: Name of TG bind interface.
+ :param bind: Bind to host, one of node's addresses.
+ :param bind_gw: Bind gateway (required for default route).
+ :param bind_mask: Bind address mask.
+ :param namespace: Name of TG namespace to execute.
+ :param cpu_skip_cnt: Number of CPU cores to skip.
+ :param cpu_cnt: iPerf3 main thread count.
+ :param instances: Number of simultaneous iPerf3 instances.
+ :type node: dict
+ :type pf_key: str
+ :type interface: str
+ :type bind: str
+ :type bind_gw: str
+ :type bind_mask: str
+ :type namespace: str
+ :type cpu_skip_cnt: int
+ :type cpu_cnt: int
+ :type instances: int
+ """
+ if Iperf3.is_iperf_running(node):
+ Iperf3.teardown_iperf(node)
+
+ if namespace:
+ IPUtil.set_linux_interface_ip(
+ node, interface=interface, ip_addr=bind, prefix=bind_mask,
+ namespace=namespace)
+ IPUtil.set_linux_interface_up(
+ node, interface=interface, namespace=namespace)
+ Namespaces.add_default_route_to_namespace(
+ node, namespace=namespace, default_route=bind_gw)
+
+ # Compute affinity for iPerf server.
+ self._s_affinity = CpuUtils.get_affinity_iperf(
+ node, pf_key, cpu_skip_cnt=cpu_skip_cnt,
+ cpu_cnt=cpu_cnt * instances)
+ # Compute affinity for iPerf client.
+ self._c_affinity = CpuUtils.get_affinity_iperf(
+ node, pf_key, cpu_skip_cnt=cpu_skip_cnt + cpu_cnt * instances,
+ cpu_cnt=cpu_cnt * instances)
+
+ for i in range(0, instances):
+ Iperf3.start_iperf_server(
+ node, namespace=namespace, port=5201 + i,
+ affinity=self._s_affinity)
+
+ @staticmethod
+ def start_iperf_server(
+ node, namespace=None, port=5201, affinity=None):
+ """Start iPerf3 server instance as a deamon.
+
+ :param node: Topology node running iPerf3 server.
+ :param namespace: Name of TG namespace to execute.
+ :param port: Port for the iPerf3 server to listen on.
+ :param affinity: iPerf3 server affinity.
+ :type node: dict
+ :type namespace: str
+ :type port: int
+ :type affinity: str
+ """
+ cmd = IPerf3Server.iperf3_cmdline(
+ namespace=namespace, port=port, affinity=affinity)
+ exec_cmd_no_error(
+ node, cmd, sudo=True, message=u"Failed to start iPerf3 server!")
+
+ @staticmethod
+ def is_iperf_running(node):
+ """Check if iPerf3 is running using pgrep.
+
+ :param node: Topology node running iPerf3.
+ :type node: dict
+ :returns: True if iPerf3 is running, otherwise False.
+ :rtype: bool
+ """
+ ret, _, _ = exec_cmd(node, u"pgrep iperf3", sudo=True)
+ return bool(int(ret) == 0)
+
+ @staticmethod
+ def teardown_iperf(node):
+ """iPerf3 teardown.
+
+ :param node: Topology node running iPerf3.
+ :type node: dict
+ """
+ pidfile = u"/tmp/iperf3_server.pid"
+ logfile = u"/tmp/iperf3.log"
+
+ exec_cmd_no_error(
+ node,
+ f"sh -c 'if [ -f {pidfile} ]; then "
+ f"pkill iperf3; "
+ f"cat {logfile}; "
+ f"rm {logfile}; "
+ f"fi'",
+ sudo=True, message=u"iPerf3 kill failed!")
+
+ def iperf_client_start_remote_exec(
+ self, node, duration, rate, frame_size, async_call=False,
+ warmup_time=0, traffic_directions=1, namespace=None, udp=False,
+ host=None, bind=None, affinity=None):
+ """Execute iPerf3 client script on remote node over ssh to start running
+ traffic.
+
+ :param node: Topology node running iPerf3.
+ :param duration: Time expressed in seconds for how long to send traffic.
+ :param rate: Traffic rate.
+ :param frame_size: L2 frame size to send (without padding and IPG).
+ :param async_call: If enabled then don't wait for all incoming traffic.
+ :param warmup_time: Warmup time period.
+ :param traffic_directions: Traffic is bi- (2) or uni- (1) directional.
+ Default: 1
+ :param namespace: Namespace to execute iPerf3 client on.
+ :param udp: UDP traffic.
+ :param host: Client connecting to an iPerf server running on host.
+ :param bind: Client bind IP address.
+ :param affinity: iPerf3 client affinity.
+ :type node: dict
+ :type duration: float
+ :type rate: str
+ :type frame_size: str
+ :type async_call: bool
+ :type warmup_time: float
+ :type traffic_directions: int
+ :type namespace: str
+ :type udp: bool
+ :type host: str
+ :type bind: str
+ :type affinity: str
+ :returns: List of iPerf3 PIDs.
+ :rtype: list
+ """
+ if not isinstance(duration, (float, int)):
+ duration = float(duration)
+ if not isinstance(warmup_time, (float, int)):
+ warmup_time = float(warmup_time)
+ if not affinity:
+ affinity = self._c_affinity
+
+ kwargs = dict()
+ if namespace:
+ kwargs[u"namespace"] = namespace
+ kwargs[u"host"] = host
+ kwargs[u"bind"] = bind
+ kwargs[u"udp"] = udp
+ if affinity:
+ kwargs[u"affinity"] = affinity
+ kwargs[u"duration"] = duration
+ kwargs[u"rate"] = rate
+ kwargs[u"frame_size"] = frame_size
+ kwargs[u"warmup_time"] = warmup_time
+ kwargs[u"traffic_directions"] = traffic_directions
+ kwargs[u"async_call"] = async_call
+
+ cmd = IPerf3Client.iperf3_cmdline(**kwargs)
+
+ stdout, _ = exec_cmd_no_error(
+ node, cmd, timeout=int(duration) + 30,
+ message=u"iPerf3 runtime error!")
+
+ if async_call:
+ return stdout.split()
+ return json.loads(stdout)
+
+ @staticmethod
+ def iperf_client_stop_remote_exec(node, pids):
+ """Stop iPerf3 client execution.
+
+ :param pids: PID or List of PIDs of iPerf3 client.
+ :type pids: str or list
+ """
+ if not isinstance(pids, list):
+ pids = [pids]
+
+ for pid in pids:
+ exec_cmd_no_error(
+ node, f"kill {pid}", sudo=True, message=u"Kill iPerf3 failed!")
+
+
+class IPerf3Server:
+ """iPerf3 server utilities."""
+
+ @staticmethod
+ def iperf3_cmdline(**kwargs):
+ """Get iPerf3 server command line.
+
+ :param kwargs: List of iPerf3 server parameters.
+ :type kwargs: dict
+ :returns: iPerf3 server command line.
+ :rtype: OptionString
+ """
+ cmd = OptionString()
+ if kwargs['namespace']:
+ cmd.add(f"ip netns exec {kwargs['namespace']}")
+ cmd.add(f"iperf3")
+
+ cmd_options = OptionString(prefix=u"--")
+ # Run iPerf in server mode. (This will only allow one iperf connection
+ # at a time)
+ cmd_options.add(
+ u"server")
+
+ # Run the server in background as a daemon.
+ cmd_options.add_if_from_dict(
+ u"daemon", u"daemon", kwargs, True)
+
+ # Write a file with the process ID, most useful when running as a
+ # daemon.
+ cmd_options.add_with_value_from_dict(
+ u"pidfile", u"pidfile", kwargs, f"/tmp/iperf3_server.pid")
+
+ # Send output to a log file.
+ cmd_options.add_with_value_from_dict(
+ u"logfile", u"logfile", kwargs, f"/tmp/iperf3.log")
+
+ # The server port for the server to listen on and the client to
+ # connect to. This should be the same in both client and server.
+ # Default is 5201.
+ cmd_options.add_with_value_from_dict(
+ u"port", u"port", kwargs, 5201)
+
+ # Set the CPU affinity, if possible (Linux and FreeBSD only).
+ cmd_options.add_with_value_from_dict(
+ u"affinity", u"affinity", kwargs)
+
+ # Output in JSON format.
+ cmd_options.add_if_from_dict(
+ u"json", u"json", kwargs, True)
+
+ # Give more detailed output.
+ cmd_options.add_if_from_dict(
+ u"verbose", u"verbose", kwargs, True)
+
+ return cmd.extend(cmd_options)
+
+
+class IPerf3Client:
+ """iPerf3 client utilities."""
+
+ @staticmethod
+ def iperf3_cmdline(**kwargs):
+ """Get iperf_client driver command line.
+
+ :param kwargs: List of iperf_client driver parameters.
+ :type kwargs: dict
+ :returns: iperf_client driver command line.
+ :rtype: OptionString
+ """
+ cmd = OptionString()
+ cmd.add(u"python3")
+ dirname = f"{Constants.REMOTE_FW_DIR}/resources/tools/iperf"
+ cmd.add(f"'{dirname}/iperf_client.py'")
+
+ cmd_options = OptionString(prefix=u"--")
+ # Namespace to execute iPerf3 client on.
+ cmd_options.add_with_value_from_dict(
+ u"namespace", u"namespace", kwargs)
+
+ # Client connecting to an iPerf3 server running on host.
+ cmd_options.add_with_value_from_dict(
+ u"host", u"host", kwargs)
+
+ # Client bind IP address.
+ cmd_options.add_with_value_from_dict(
+ u"bind", u"bind", kwargs)
+
+ # Use UDP rather than TCP.
+ cmd_options.add_if_from_dict(
+ u"udp", u"udp", kwargs, False)
+
+ # Set the CPU affinity, if possible.
+ cmd_options.add_with_value_from_dict(
+ u"affinity", u"affinity", kwargs)
+
+ # Time expressed in seconds for how long to send traffic.
+ cmd_options.add_with_value_from_dict(
+ u"duration", u"duration", kwargs)
+
+ # Send bi- (2) or uni- (1) directional traffic.
+ cmd_options.add_with_value_from_dict(
+ u"traffic_directions", u"traffic_directions", kwargs, 1)
+
+ # Traffic warm-up time in seconds, (0=disable).
+ cmd_options.add_with_value_from_dict(
+ u"warmup_time", u"warmup_time", kwargs, 5.0)
+
+ # L2 frame size to send (without padding and IPG).
+ cmd_options.add_with_value_from_dict(
+ u"frame_size", u"frame_size", kwargs)
+
+ # Traffic rate expressed with units.
+ cmd_options.add_with_value_from_dict(
+ u"rate", u"rate", kwargs)
+
+ # If enabled then don't wait for all incoming traffic.
+ cmd_options.add_if_from_dict(
+ u"async_start", u"async_call", kwargs, False)
+
+ # Number of iPerf3 client parallel instances.
+ cmd_options.add_with_value_from_dict(
+ u"instances", u"instances", kwargs, 1)
+
+ # Number of iPerf3 client parallel flows.
+ cmd_options.add_with_value_from_dict(
+ u"parallel", u"parallel", kwargs, 8)
+
+ return cmd.extend(cmd_options)
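
The class above is driven from the Robot keywords further below; a condensed sketch of that call flow in plain Python. The node dict, interface key and addresses are placeholders (the addresses mirror the init_iperf3.sh template added by this change):

# Sketch only: tg_node is a topology node dict; pf_key, interface,
# namespace and addresses are illustrative values.
from resources.libraries.python.Iperf3 import Iperf3

iperf = Iperf3()
iperf.initialize_iperf_server(
    tg_node, pf_key=u"port1", interface=u"ens4",
    bind=u"1.1.1.1", bind_gw=u"1.1.1.2", bind_mask=u"30",
    namespace=u"tg_ns", cpu_skip_cnt=2, cpu_cnt=1, instances=1)

# Blocking run; returns the parsed "end" section of iPerf3's JSON report.
results = iperf.iperf_client_start_remote_exec(
    tg_node, duration=30.0, rate=None, frame_size=None,
    namespace=u"tg_ns", host=u"1.1.1.1", bind=u"2.2.2.2")
rx_bps = results[u"sum_received"][u"bits_per_second"]

iperf.teardown_iperf(tg_node)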
diff --git a/resources/libraries/python/QemuManager.py b/resources/libraries/python/QemuManager.py
index 6436f69aec..766372ad9c 100644
--- a/resources/libraries/python/QemuManager.py
+++ b/resources/libraries/python/QemuManager.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -51,8 +51,9 @@ class QemuManager:
nf_nodes = int(kwargs[u"nf_nodes"])
queues = kwargs[u"rxq_count_int"] if kwargs[u"auto_scale"] else 1
vs_dtc = kwargs[u"vs_dtc"]
- nf_dtc = kwargs[u"vs_dtc"] if kwargs[u"auto_scale"] \
- else kwargs[u"nf_dtc"]
+ nf_dtc = kwargs[u"nf_dtc"]
+ if kwargs[u"auto_scale"] and not kwargs[u"fixed_auto_scale"]:
+ nf_dtc = kwargs[u"vs_dtc"]
nf_dtcr = kwargs[u"nf_dtcr"] \
if isinstance(kwargs[u"nf_dtcr"], int) else 2
@@ -419,3 +420,38 @@ class QemuManager:
pci=Topology.get_interface_pci_addr(
self.nodes[kwargs[u"node"]], kwargs[u"if1"])
)
+
+ def _c_iperf3(self, **kwargs):
+ """Instantiate one VM with iperf3 configuration.
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ qemu_id = kwargs[u"qemu_id"]
+ name = kwargs[u"name"]
+ virtio_feature_mask = kwargs[u"virtio_feature_mask"] \
+ if u"virtio_feature_mask" in kwargs else None
+
+ self.machines[name] = QemuUtils(
+ node=self.nodes[kwargs[u"node"]],
+ qemu_id=qemu_id,
+ smp=len(self.machines_affinity[name]),
+ mem=4096,
+ vnf=kwargs[u"vnf"],
+ img=Constants.QEMU_VM_KERNEL
+ )
+ self.machines[name].add_default_params()
+ self.machines[name].add_kernelvm_params()
+ self.machines[name].configure_kernelvm_vnf(
+ queues=kwargs[u"queues"],
+ jumbo_frames=kwargs[u"jumbo"]
+ )
+ self.machines[name].add_net_user()
+ self.machines[name].add_vhost_user_if(
+ f"/run/vpp/sock-{qemu_id}-1",
+ server=False,
+ jumbo_frames=kwargs[u"jumbo"],
+ queues=kwargs[u"queues"],
+ queue_size=kwargs[u"perf_qemu_qsz"],
+ virtio_feature_mask=virtio_feature_mask
+ )
diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index c215dfd96f..51fba6105e 100644
--- a/resources/libraries/python/QemuUtils.py
+++ b/resources/libraries/python/QemuUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -80,8 +80,8 @@ class QemuUtils:
u"type": NodeType.VM,
u"port": 10021 + qemu_id,
u"serial": 4555 + qemu_id,
- u"username": 'cisco',
- u"password": 'cisco',
+ u"username": 'testuser',
+ u"password": 'Csit1234',
u"interfaces": {},
}
if node[u"port"] != 22:
@@ -142,10 +142,10 @@ class QemuUtils:
self._params.add_with_value(u"numa", u"node,memdev=mem")
self._params.add_with_value(u"balloon", u"none")
- def add_net_user(self):
+ def add_net_user(self, net="10.0.2.0/24"):
"""Set managment port forwarding."""
self._params.add_with_value(
- u"netdev", f"user,id=mgmt,net=172.16.255.0/24,"
+ u"netdev", f"user,id=mgmt,net={net},"
f"hostfwd=tcp::{self._vm_info[u'port']}-:22"
)
self._params.add_with_value(
@@ -158,7 +158,9 @@ class QemuUtils:
u"chardev", f"socket,path={self._temp.get(u'qga')},"
f"server,nowait,id=qga0"
)
- self._params.add_with_value(u"device", u"isa-serial,chardev=qga0")
+ self._params.add_with_value(
+ u"device", u"isa-serial,chardev=qga0"
+ )
self._params.add_with_value(
u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait"
)
@@ -167,8 +169,11 @@ class QemuUtils:
"""Set serial to file redirect."""
self._params.add_with_value(
u"chardev", f"socket,host=127.0.0.1,"
- f"port={self._vm_info[u'serial']},id=gnc0,server,nowait")
- self._params.add_with_value(u"device", u"isa-serial,chardev=gnc0")
+ f"port={self._vm_info[u'serial']},id=gnc0,server,nowait"
+ )
+ self._params.add_with_value(
+ u"device", u"isa-serial,chardev=gnc0"
+ )
self._params.add_with_value(
u"serial", f"file:{self._temp.get(u'log')}"
)
@@ -210,8 +215,12 @@ class QemuUtils:
self._params.add_with_value(
u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot"
)
- self._params.add_with_value(u"kernel", f"{self._opt.get(u'img')}")
- self._params.add_with_value(u"initrd", f"{self._opt.get(u'initrd')}")
+ self._params.add_with_value(
+ u"kernel", f"{self._opt.get(u'img')}"
+ )
+ self._params.add_with_value(
+ u"initrd", f"{self._opt.get(u'initrd')}"
+ )
self._params.add_with_value(
u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
f"root=virtioroot console={self._opt.get(u'console')} "
@@ -250,19 +259,19 @@ class QemuUtils:
f"{self._nic_id:02x}"
queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
if queue_size else u""
- if virtio_feature_mask is None:
- gso = False
- csum = False
- else:
- gso = VirtioFeatureMask.is_feature_enabled(
- virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_GSO)
- csum = VirtioFeatureMask.is_feature_enabled(
- virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_CSUM)
+ gso = VirtioFeatureMask.is_feature_enabled(
+ virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_GSO)
+ csum = VirtioFeatureMask.is_feature_enabled(
+ virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_CSUM)
+
self._params.add_with_value(
u"device", f"virtio-net-pci,netdev=vhost{self._nic_id},mac={mac},"
f"addr={self._nic_id+5}.0,mq=on,vectors={2 * queues + 2},"
- f"csum={u'on' if csum else u'off'},gso={u'on' if gso else u'off'},"
- f"guest_tso4=off,guest_tso6=off,guest_ecn=off,"
+ f"csum={u'on' if csum else u'off'},"
+ f"gso={u'on' if gso else u'off'},"
+ f"guest_tso4={u'on' if gso else u'off'},"
+ f"guest_tso6={u'on' if gso else u'off'},"
+ f"guest_ecn={u'on' if gso else u'off'},"
f"{queue_size}"
)
@@ -402,17 +411,20 @@ class QemuUtils:
self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
+ def create_kernelvm_config_iperf3(self):
+ """Create QEMU iperf3 command line."""
+ self._opt[u"vnf_bin"] = f"mkdir /run/sshd; /usr/sbin/sshd -D -d"
+
def create_kernelvm_init(self, **kwargs):
"""Create QEMU init script.
:param kwargs: Key-value pairs to replace content of init startup file.
:type kwargs: dict
"""
- template = f"{Constants.RESOURCES_TPL}/vm/init.sh"
init = self._temp.get(u"ini")
exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)
- with open(template, u"rt") as src_file:
+ with open(kwargs[u"template"], u"rt") as src_file:
src = Template(src_file.read())
exec_cmd_no_error(
self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
@@ -428,13 +440,32 @@ class QemuUtils:
"""
if u"vpp" in self._opt.get(u"vnf"):
self.create_kernelvm_config_vpp(**kwargs)
- self.create_kernelvm_init(vnf_bin=self._opt.get(u"vnf_bin"))
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
elif u"testpmd_io" in self._opt.get(u"vnf"):
self.create_kernelvm_config_testpmd_io(**kwargs)
- self.create_kernelvm_init(vnf_bin=self._opt.get(u"vnf_bin"))
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
elif u"testpmd_mac" in self._opt.get(u"vnf"):
self.create_kernelvm_config_testpmd_mac(**kwargs)
- self.create_kernelvm_init(vnf_bin=self._opt.get(u"vnf_bin"))
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
+ vnf_bin=self._opt.get(u"vnf_bin")
+ )
+ elif u"iperf3" in self._opt.get(u"vnf"):
+ qemu_id = self._opt.get(u'qemu_id') % 2
+ self.create_kernelvm_config_iperf3()
+ self.create_kernelvm_init(
+ template=f"{Constants.RESOURCES_TPL}/vm/init_iperf3.sh",
+ vnf_bin=self._opt.get(u"vnf_bin"),
+ ip_address_l=u"2.2.2.2/30" if qemu_id else u"1.1.1.1/30",
+ ip_address_r=u"2.2.2.1" if qemu_id else u"1.1.1.2",
+ ip_route_r=u"1.1.1.0/30" if qemu_id else u"2.2.2.0/30"
+ )
else:
raise RuntimeError(u"QEMU: Unsupported VNF!")
@@ -639,6 +670,20 @@ class QemuUtils:
f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
)
+ def _wait_iperf3(self, retries=60):
+ """Wait until QEMU with iPerf3 is booted.
+
+ :param retries: Number of retries.
+ :type retries: int
+ """
+ grep = u"Server listening on 0.0.0.0 port 22."
+ cmd = f"fgrep '{grep}' {self._temp.get(u'log')}"
+ message = f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
+ exec_cmd_no_error(
+ self._node, cmd=cmd, sudo=True, message=message, retries=retries,
+ include_reason=True
+ )
+
def _update_vm_interfaces(self):
"""Update interface names in VM node dict."""
# Send guest-network-get-interfaces command via QGA, output example:
diff --git a/resources/libraries/python/Tap.py b/resources/libraries/python/Tap.py
index 041a774c0b..c729d602b1 100644
--- a/resources/libraries/python/Tap.py
+++ b/resources/libraries/python/Tap.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -13,6 +13,8 @@
"""Tap utilities library."""
+from enum import IntEnum
+
from robot.api import logger
from resources.libraries.python.Constants import Constants
@@ -22,21 +24,43 @@ from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import Topology
+class TapFeaturesFlags(IntEnum):
+ """TAP Features Flags."""
+ TAP_API_FLAG_GSO = 1
+ TAP_API_FLAG_CSUM_OFFLOAD = 2
+ TAP_API_FLAG_PERSIST = 4
+ TAP_API_FLAG_ATTACH = 8
+ TAP_API_FLAG_TUN = 16
+ TAP_API_FLAG_GRO_COALESCE = 32
+ TAP_API_FLAG_PACKED = 64
+ TAP_API_FLAG_IN_ORDER = 128
+
+
class Tap:
"""Tap utilities."""
@staticmethod
- def add_tap_interface(node, tap_name, mac=None, num_rx_queues=1):
+ def add_tap_interface(
+ node, tap_name, mac=None, host_namespace=None, num_rx_queues=1,
+ rxq_size=0, txq_size=0, tap_feature_mask=0):
"""Add tap interface with name and optionally with MAC.
:param node: Node to add tap on.
:param tap_name: Tap interface name for linux tap.
:param mac: Optional MAC address for VPP tap.
+ :param host_namespace: Linux namespace for the host side of the tap.
:param num_rx_queues: Number of RX queues.
+ :param rxq_size: Size of RXQ (0 = Default API; 256 = Default VPP).
+ :param txq_size: Size of TXQ (0 = Default API; 256 = Default VPP).
+ :param tap_feature_mask: Mask of tap features to be enabled.
:type node: dict
:type tap_name: str
:type mac: str
+ :type host_namespace: str
:type num_rx_queues: int
+ :type rxq_size: int
+ :type txq_size: int
+ :type tap_feature_mask: int
+ :returns: Interface index.
:rtype: int
"""
@@ -46,16 +70,20 @@ class Tap:
use_random_mac=bool(mac is None),
mac_address=L2Util.mac_to_bin(mac) if mac else None,
num_rx_queues=int(num_rx_queues),
+ tx_ring_sz=int(txq_size),
+ rx_ring_sz=int(rxq_size),
host_mtu_set=False,
host_mac_addr_set=False,
host_ip4_prefix_set=False,
host_ip6_prefix_set=False,
host_ip4_gw_set=False,
host_ip6_gw_set=False,
- host_namespace_set=False,
+ host_namespace_set=bool(host_namespace),
+ host_namespace=host_namespace,
host_if_name_set=True,
host_if_name=tap_name,
- host_bridge_set=False
+ host_bridge_set=False,
+ tap_flags=tap_feature_mask
)
err_msg = f"Failed to create tap interface {tap_name} " \
f"on host {node[u'host']}"
@@ -152,3 +180,41 @@ class Tap:
logger.debug(f"TAP data:\n{data}")
return data
+
+
+class TapFeatureMask:
+ """Tap features utilities"""
+
+ @staticmethod
+ def create_tap_feature_mask(**kwargs):
+ """Create tap feature mask with feature bits set according to kwargs.
+ :param kwargs: Key-value pairs of feature names and it's state
+ :type kwargs: dict
+ """
+ tap_feature_mask = 0
+
+ if u"all" in kwargs and kwargs[u"all"] is True:
+ for tap_feature_flag in TapFeaturesFlags:
+ tap_feature_mask |= 1 << (tap_feature_flag.value - 1)
+ else:
+ for feature_name, enabled in kwargs.items():
+ tap_feature_name = u"TAP_API_FLAG_" + feature_name.upper()
+ if tap_feature_name not in TapFeaturesFlags.__members__:
+ raise ValueError(u"Unsupported tap feature flag name")
+ if enabled:
+ tap_feature_mask |= \
+ 1 << (TapFeaturesFlags[tap_feature_name].value - 1)
+
+ return tap_feature_mask
+
+ @staticmethod
+ def is_feature_enabled(tap_feature_mask, tap_feature_flag):
+ """Checks if concrete tap feature is enabled within
+ tap_feature_mask
+ :param tap_feature_mask: Mask of enabled tap features
+ :param tap_feature_flag: Checked tap feature
+ :type tap_feature_mask: int
+ :type tap_feature_flag: TapFeaturesFlags
+ """
+ feature_flag_bit = 1 << (tap_feature_flag.value - 1)
+ return (tap_feature_mask & feature_flag_bit) > 0
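
A small usage sketch for the mask helpers above (assuming the CSIT repository is on PYTHONPATH); only the GSO and CSUM_OFFLOAD bits are exercised by the keywords in this change:

from resources.libraries.python.Tap import TapFeatureMask, TapFeaturesFlags

# gso maps to TAP_API_FLAG_GSO (bit 0) and csum_offload to
# TAP_API_FLAG_CSUM_OFFLOAD (bit 1), so the resulting mask is 0b11 == 3.
mask = TapFeatureMask.create_tap_feature_mask(gso=True, csum_offload=True)
assert TapFeatureMask.is_feature_enabled(
    mask, TapFeaturesFlags.TAP_API_FLAG_GSO)

# The mask is then handed to Tap.add_tap_interface(..., tap_feature_mask=mask).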
diff --git a/resources/libraries/python/VhostUser.py b/resources/libraries/python/VhostUser.py
index 22528b2b7f..c6b9185e14 100644
--- a/resources/libraries/python/VhostUser.py
+++ b/resources/libraries/python/VhostUser.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -17,9 +17,11 @@ from enum import IntEnum
from robot.api import logger
+from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.InterfaceUtil import InterfaceUtil
+from resources.libraries.python.ssh import exec_cmd_no_error
class VirtioFeaturesFlags(IntEnum):
@@ -151,6 +153,26 @@ class VhostUser:
logger.debug(f"Vhost-user details:\n{details}")
return details
+ @staticmethod
+ def vhost_user_affinity(node, pf_key, skip_cnt=0):
+ """Set vhost-user affinity for the given node.
+
+ :param node: Topology node.
+ :param pf_key: Interface key to compute numa location.
+ :param skip_cnt: Skip first "skip_cnt" CPUs.
+ :type node: dict
+ :type pf_key: str
+ :type skip_cnt: int
+ """
+ pids, _ = exec_cmd_no_error(
+ node, f"grep -h vhost /proc/*/comm | uniq | xargs pidof")
+
+ affinity = CpuUtils.get_affinity_vhost(
+ node, pf_key, skip_cnt=skip_cnt, cpu_cnt=len(pids.split(" ")))
+
+ for cpu, pid in zip(affinity, pids.split(" ")):
+ exec_cmd_no_error(node, f"taskset -pc {cpu} {pid}", sudo=True)
+
class VirtioFeatureMask:
"""Virtio features utilities"""
diff --git a/resources/libraries/python/autogen/Regenerator.py b/resources/libraries/python/autogen/Regenerator.py
index 6d35d1d13f..dd2672bd7c 100644
--- a/resources/libraries/python/autogen/Regenerator.py
+++ b/resources/libraries/python/autogen/Regenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -177,6 +177,20 @@ def add_tcp_testcases(testcase, file_out, tc_kwargs_list):
file_out.write(testcase.generate(**kwargs))
+def add_iperf3_testcases(testcase, file_out, tc_kwargs_list):
+ """Add iperf3 testcases to file.
+
+ :param testcase: Testcase class.
+ :param file_out: File to write testcases to.
+ :param tc_kwargs_list: List of kwargs used to construct testcases.
+ :type testcase: Testcase
+ :type file_out: file
+ :type tc_kwargs_list: list of dict
+ """
+ for kwargs in tc_kwargs_list:
+ file_out.write(testcase.generate(**kwargs))
+
+
def write_default_files(in_filename, in_prolog, kwargs_list):
"""Using given filename and prolog, write all generated suites.
@@ -434,6 +448,34 @@ def write_tcp_files(in_filename, in_prolog, kwargs_list):
add_tcp_testcases(testcase, file_out, kwargs_list)
+def write_iperf3_files(in_filename, in_prolog, kwargs_list):
+ """Using given filename and prolog, write all generated iperf3 suites.
+
+ :param in_filename: Template filename to derive real filenames from.
+ :param in_prolog: Template content to derive real content from.
+ :param kwargs_list: List of kwargs for add_default_testcase.
+ :type in_filename: str
+ :type in_prolog: str
+ :type kwargs_list: list of dict
+ """
+ _, suite_id, suite_tag = get_iface_and_suite_ids(in_filename)
+ testcase = Testcase.iperf3(suite_id)
+ out_filename = replace_defensively(
+ in_filename, u"10ge2p1x710",
+ Constants.NIC_NAME_TO_CODE[u"Intel-X710"], 1,
+ u"File name should contain NIC code once.", in_filename
+ )
+ out_prolog = replace_defensively(
+ in_prolog, u"Intel-X710", u"Intel-X710", 2,
+ u"NIC name should appear twice (tag and variable).",
+ in_filename
+ )
+ check_suite_tag(suite_tag, out_prolog)
+ with open(out_filename, u"wt") as file_out:
+ file_out.write(out_prolog)
+ add_iperf3_testcases(testcase, file_out, kwargs_list)
+
+
class Regenerator:
"""Class containing file generating methods."""
@@ -485,6 +527,11 @@ class Regenerator:
hs_quic_kwargs_list = [
{u"frame_size": 1280, u"phy_cores": 1},
]
+ iperf3_kwargs_list = [
+ {u"frame_size": 128000, u"phy_cores": 1},
+ {u"frame_size": 128000, u"phy_cores": 2},
+ {u"frame_size": 128000, u"phy_cores": 4}
+ ]
for in_filename in glob(pattern):
if not self.quiet:
@@ -514,6 +561,8 @@ class Regenerator:
hs_quic_kwargs_list if u"quic" in in_filename \
else hs_bps_kwargs_list
write_tcp_files(in_filename, in_prolog, hoststack_kwargs_list)
+ elif in_filename.endswith(u"-iperf3.robot"):
+ write_iperf3_files(in_filename, in_prolog, iperf3_kwargs_list)
else:
raise RuntimeError(
f"Error in {in_filename}: non-primary suite type found."
diff --git a/resources/libraries/python/autogen/Testcase.py b/resources/libraries/python/autogen/Testcase.py
index 3ffbc4594a..173c5919af 100644
--- a/resources/libraries/python/autogen/Testcase.py
+++ b/resources/libraries/python/autogen/Testcase.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -111,3 +111,21 @@ class Testcase:
| | [Tags] | ${{cores_str}}C\n| | phy_cores=${{cores_num}}
'''
return cls(template_string)
+
+ @classmethod
+ def iperf3(cls, suite_id):
+ """Factory method for creating "iperf3" testcase objects.
+
+ Testcase name will contain the core count and a fixed 128KB frame size.
+
+ :param suite_id: Part of suite name to distinguish from other suites.
+ :type suite_id: str
+ :returns: Instance for generating testcase text of this type.
+ :rtype: Testcase
+ """
+ template_string = f'''
+| 128KB-${{cores_str}}c-{suite_id}
+| | [Tags] | 128KB | ${{cores_str}}C
+| | frame_size=${{frame_num}} | phy_cores=${{cores_num}}
+'''
+ return cls(template_string)
diff --git a/resources/libraries/robot/performance/performance_actions.robot b/resources/libraries/robot/performance/performance_actions.robot
index 2cf954a5b3..c28b2ebdc8 100644
--- a/resources/libraries/robot/performance/performance_actions.robot
+++ b/resources/libraries/robot/performance/performance_actions.robot
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -35,6 +35,15 @@
| |
| | Clear and show runtime counters with running traffic
+| Additional Statistics Action For clear-show-runtime-with-iperf3
+| | [Documentation]
+| | ... | Additional Statistics Action for clear and show runtime counters with
+| | ... | iPerf3 running traffic.
+| |
+| | ... | See documentation of the called keyword for required test variables.
+| |
+| | Clear and show runtime counters with running iperf3
+
| Additional Statistics Action For noop
| | [Documentation]
| | ... | Additional Statistics Action for no operation.
diff --git a/resources/libraries/robot/performance/performance_utils.robot b/resources/libraries/robot/performance/performance_utils.robot
index 5cf6dba84c..d45adc6cf9 100644
--- a/resources/libraries/robot/performance/performance_utils.robot
+++ b/resources/libraries/robot/performance/performance_utils.robot
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -17,6 +17,7 @@
| Library | resources.libraries.python.NodePath
| Library | resources.libraries.python.PerfUtil
| Library | resources.libraries.python.InterfaceUtil
+| Library | resources.libraries.python.Iperf3
| Library | resources.libraries.python.TrafficGenerator
| Library | resources.libraries.python.TrafficGenerator.OptimizedSearch
| Library | resources.libraries.python.TrafficGenerator.TGDropRateSearchImpl
@@ -445,6 +446,162 @@
| | END
| | Return From Keyword | ${results}
+| Clear and show runtime counters with running iperf3
+| | [Documentation]
+| | ... | Start iPerf3 client traffic, then clear runtime counters on all
+| | ... | DUTs. Wait for the specified amount of time and capture runtime
+| | ... | counters on all DUTs. Finally stop the traffic.
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Clear and show runtime counters with running iperf3 \|
+| |
+| | ${runtime_duration} = | Get Runtime Duration
+| | ${pids}= | iPerf Client Start Remote Exec
+| | | ... | ${nodes['${iperf_client_node}']}
+| | | ... | duration=${-1}
+| | | ... | rate=${None}
+| | | ... | frame_size=${None}
+| | | ... | async_call=True
+| | | ... | warmup_time=0
+| | | ... | traffic_directions=${1}
+| | | ... | namespace=${iperf_client_namespace}
+| | | ... | udp=${iperf_client_udp}
+| | | ... | host=${iperf_server_bind}
+| | | ... | bind=${iperf_client_bind}
+| | | ... | affinity=${iperf_client_affinity}
+| | FOR | ${action} | IN | @{pre_run_stats}
+| | | Run Keyword | Additional Statistics Action For ${action}
+| | END
+| | Sleep | ${runtime_duration}
+| | FOR | ${action} | IN | @{post_run_stats}
+| | | Run Keyword | Additional Statistics Action For ${action}
+| | END
+| | iPerf Client Stop Remote Exec | ${nodes['${iperf_client_node}']} | ${pids}
+
+| Traffic should pass with maximum rate on iPerf3
+| | [Documentation]
+| | ... | Send traffic at maximum rate on iPerf3.
+| |
+| | ... | *Arguments:*
+| | ... | - trial_duration - Duration of single trial [s].
+| | ... | Type: float
+| | ... | - trial_multiplicity - How many trials in this measurement.
+| | ... | Type: integer
+| | ... | - traffic_directions - Bi- (2) or uni- (1) directional traffic;
+| | ... | Type: integer
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Traffic should pass with maximum rate on iPerf3 \| \${1} \| \
+| | ... | \| \${10.0} \| \${2} \|
+| |
+| | [Arguments] | ${trial_duration}=${trial_duration}
+| | ... | ${trial_multiplicity}=${trial_multiplicity}
+| | ... | ${traffic_directions}=${1}
+| |
+| | ${results}= | Send iPerf3 traffic at specified rate
+| | ... | ${trial_duration} | ${None} | ${None}
+| | ... | ${trial_multiplicity} | ${traffic_directions}
+| | Set Test Message | ${\n}iPerf3 trial results
+| | Set Test Message | in Gbits per second: ${results}
+| | ... | append=yes
+
+| Send iPerf3 traffic at specified rate
+| | [Documentation]
+| | ... | Perform a warmup, show runtime counters during it.
+| | ... | Then send traffic at specified rate, possibly multiple trials.
+| | ... | Show various DUT stats, optionally also packet trace.
+| | ... | Return list of measured receive rates.
+| |
+| | ... | *Arguments:*
+| | ... | - trial_duration - Duration of single trial [s].
+| | ... | Type: float
+| | ... | - rate - Target aggregate transmit rate [bps].
+| | ... | Type: float
+| | ... | - frame_size - L2 Frame Size [B].
+| | ... | Type: integer or string
+| | ... | - trial_multiplicity - How many trials in this measurement.
+| | ... | Type: integer
+| | ... | - traffic_directions - Bi- (2) or uni- (1) directional traffic.
+| | ... | Type: integer
+| | ... | - extended_debug - True to enable extended debug.
+| | ... | Type: boolean
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Send iPerf3 traffic at specified rate \| \${1.0} \| ${4000000.0} \
+| | ... | \| \${64} \| \${10} \| \${1} \| ${False} \|
+| |
+| | [Arguments] | ${trial_duration} | ${rate} | ${frame_size}
+| | ... | ${trial_multiplicity}=${trial_multiplicity}
+| | ... | ${traffic_directions}=${1} | ${extended_debug}=${extended_debug}
+| |
+| | Set Test Variable | ${extended_debug}
+| | Set Test Variable | ${rate}
+| | Set Test Variable | ${traffic_directions}
+| |
+| | ${smt_used}= | Is SMT enabled | ${nodes['${iperf_server_node}']['cpuinfo']}
+| | ${vm_status} | ${value}= | Run Keyword And Ignore Error
+| | ... | Get Library Instance | vnf_manager
+| | ${vth}= | Evaluate | (${thr_count_int} + 1)
+| | ${cpu_skip_cnt}= | Set Variable If | '${vm_status}' == 'PASS'
+| | ... | ${CPU_CNT_SYSTEM}
+| | ... | ${${CPU_CNT_SYSTEM} + ${CPU_CNT_MAIN} + ${cpu_count_int} + ${vth}}
+| |
+| | Initialize iPerf Server
+| | ... | ${nodes['${iperf_server_node}']}
+| | ... | pf_key=${iperf_server_pf_key}
+| | ... | interface=${iperf_server_interface}
+| | ... | bind=${iperf_server_bind}
+| | ... | bind_gw=${iperf_server_bind_gw}
+| | ... | bind_mask=${iperf_server_bind_mask}
+| | ... | namespace=${iperf_server_namespace}
+| | ... | cpu_skip_cnt=${cpu_skip_cnt}
+| | Run Keyword If | '${iperf_client_namespace}' != '${None}'
+| | ... | Set Linux Interface IP
+| | ... | ${nodes['${iperf_client_node}']}
+| | ... | interface=${iperf_client_interface}
+| | ... | ip_addr=${iperf_client_bind}
+| | ... | prefix=${iperf_client_bind_mask}
+| | ... | namespace=${iperf_client_namespace}
+| | Run Keyword If | '${iperf_client_namespace}' != '${None}'
+| | ... | Add Default Route To Namespace
+| | ... | ${nodes['${iperf_client_node}']}
+| | ... | namespace=${iperf_client_namespace}
+| | ... | default_route=${iperf_client_bind_gw}
+| | ${pre_stats}= | Create List
+| | ... | clear-show-runtime-with-iperf3
+| | ... | vpp-clear-stats | vpp-enable-packettrace | vpp-enable-elog
+| | FOR | ${action} | IN | @{pre_stats}
+| | | Run Keyword | Additional Statistics Action For ${action}
+| | END
+| | ${results} = | Create List
+| | FOR | ${i} | IN RANGE | ${trial_multiplicity}
+| | | ${rr} = | iPerf Client Start Remote Exec
+| | | ... | ${nodes['${iperf_client_node}']}
+| | | ... | duration=${trial_duration}
+| | | ... | rate=${rate}
+| | | ... | frame_size=${frame_size}
+| | | ... | async_call=False
+| | | ... | warmup_time=0
+| | | ... | traffic_directions=${traffic_directions}
+| | | ... | namespace=${iperf_client_namespace}
+| | | ... | udp=${iperf_client_udp}
+| | | ... | host=${iperf_server_bind}
+| | | ... | bind=${iperf_client_bind}
+| | | ... | affinity=${iperf_client_affinity}
+| | | ${conv} = | Convert To Number | ${rr['sum_received']['bits_per_second']}
+| | | ${conv} = | Evaluate | ${conv} / ${1000} / ${1000} / ${1000}
+| | | ${conv} = | Evaluate | "{:.3f}".format(${conv})
+| | | Append To List
+| | | ... | ${results} | ${conv}
+| | END
+| | FOR | ${action} | IN | @{post_stats}
+| | | Run Keyword | Additional Statistics Action For ${action}
+| | END
+| | Return From Keyword | ${results}
+
| Start Traffic on Background
| | [Documentation]
| | ... | Start traffic at specified rate then return control to Robot.
diff --git a/resources/libraries/robot/shared/default.robot b/resources/libraries/robot/shared/default.robot
index e44141d8ad..1ae21e1049 100644
--- a/resources/libraries/robot/shared/default.robot
+++ b/resources/libraries/robot/shared/default.robot
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -34,6 +34,7 @@
| Library | resources.libraries.python.PapiHistory
| Library | resources.libraries.python.SchedUtils
| Library | resources.libraries.python.Tap
+| Library | resources.libraries.python.Tap.TapFeatureMask
| Library | resources.libraries.python.TestConfig
| Library | resources.libraries.python.TGSetup
| Library | resources.libraries.python.topology.Topology
diff --git a/resources/libraries/robot/shared/interfaces.robot b/resources/libraries/robot/shared/interfaces.robot
index 27908c3cf8..a9b6440173 100644
--- a/resources/libraries/robot/shared/interfaces.robot
+++ b/resources/libraries/robot/shared/interfaces.robot
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -44,10 +44,17 @@
| | ... | *Set UP state on VPP interfaces in path on all DUT nodes and set
| | ... | maximal MTU.*
| |
+| | ... | *Arguments:*
+| | ... | - validate - Validate interfaces are up.
+| | ... | Type: boolean
+| |
+| | [Arguments] | ${validate}=${True}
+| |
| | FOR | ${dut} | IN | @{duts}
| | | Set interfaces in path up on node | ${dut}
| | END
-| | All VPP Interfaces Ready Wait | ${nodes} | retries=${60}
+| | Run Keyword If | ${validate}
+| | ... | All VPP Interfaces Ready Wait | ${nodes} | retries=${60}
| Set interfaces in path up on node
| | [Documentation]
@@ -109,6 +116,18 @@
| |
| | Run Keyword | Pre-initialize layer ${driver} on all DUTs
+| Pre-initialize layer tap on all DUTs
+| | [Documentation]
+| | ... | Pre-initialize tap driver. Currently no operation.
+| |
+| | No operation
+
+| Pre-initialize layer vhost on all DUTs
+| | [Documentation]
+| | ... | Pre-initialize vhost driver. Currently no operation.
+| |
+| | No operation
+
| Pre-initialize layer vfio-pci on all DUTs
| | [Documentation]
| | ... | Pre-initialize vfio-pci driver by adding related sections to startup
@@ -179,18 +198,20 @@
| | ... | *Arguments:*
| | ... | - driver - NIC driver used in test [vfio-pci|avf|rdma-core].
| | ... | Type: string
+| | ... | - validate - Validate interfaces are up.
+| | ... | Type: boolean
| |
| | ... | *Example:*
| |
| | ... | \| Initialize layer driver \| vfio-pci \|
| |
-| | [Arguments] | ${driver}
+| | [Arguments] | ${driver} | ${validate}=${True}
| |
| | FOR | ${dut} | IN | @{duts}
| | | Initialize layer driver on node | ${dut} | ${driver}
| | END
| | Set Test Variable | ${int} | vf
-| | Set interfaces in path up
+| | Set interfaces in path up | validate=${validate}
| Initialize layer driver on node
| | [Documentation]
@@ -236,6 +257,72 @@
| | | Run Keyword | Initialize layer ${driver} on node | ${dut} | ${pf}
| | END
+| Initialize layer tap on node
+| | [Documentation]
+| | ... | Initialize tap interfaces on DUT.
+| |
+| | ... | *Arguments:*
+| | ... | - dut - DUT node.
+| | ... | Type: string
+| | ... | - pf - TAP ID (logical port).
+| | ... | Type: integer
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Initialize layer tap on node \| DUT1 \| 0 \|
+| |
+| | [Arguments] | ${dut} | ${pf}
+| |
+| | Create Namespace
+| | ... | ${nodes['${dut}']} | tap${${pf}-1}_namespace
+| | ${tap_feature_mask}= | Create Tap feature mask | gso=${enable_gso}
+| | ${_tap}=
+| | ... | And Add Tap Interface | ${nodes['${dut}']} | tap${${pf}-1}
+| | ... | host_namespace=tap${${pf}-1}_namespace
+| | ... | num_rx_queues=${rxq_count_int}
+| | ... | rxq_size=${nic_rxq_size} | txq_size=${nic_txq_size}
+| | ... | tap_feature_mask=${tap_feature_mask}
+| | ${_mac}=
+| | ... | Get Interface MAC | ${nodes['${dut}']} | tap${pf}
+| | ${_tap}= | Create List | ${_tap}
+| | ${_mac}= | Create List | ${_mac}
+| | Vhost User Affinity
+| | ... | ${nodes['${dut}']} | ${${dut}_pf${pf}}[0]
+| | ... | skip_cnt=${${CPU_CNT_MAIN}+${CPU_CNT_SYSTEM}+${cpu_count_int}}
+| | Set Test Variable
+| | ... | ${${dut}_vf${pf}} | ${_tap}
+| | Set Test Variable
+| | ... | ${${dut}_vf${pf}_mac} | ${_mac}
+
+| Initialize layer vhost on node
+| | [Documentation]
+| | ... | Initialize vhost interfaces on DUT.
+| |
+| | ... | *Arguments:*
+| | ... | - dut - DUT node.
+| | ... | Type: string
+| | ... | - pf - VHOST ID (logical port).
+| | ... | Type: integer
+| |
+| | ... | *Example:*
+| |
+| | ... | \| Initialize layer vhost on node \| DUT1 \| 0 \|
+| |
+| | [Arguments] | ${dut} | ${pf}
+| |
+| | ${virtio_feature_mask}= | Create Virtio feature mask | gso=${enable_gso}
+| | ${vhost}= | Vpp Create Vhost User Interface
+| | ... | ${nodes['${dut}']} | /var/run/vpp/sock-${pf}-1
+| | ... | is_server=${True} | virtio_feature_mask=${virtio_feature_mask}
+| | ${_mac}=
+| | ... | Get Interface MAC | ${nodes['${dut}']} | vhost${pf}
+| | ${_vhost}= | Create List | ${vhost}
+| | ${_mac}= | Create List | ${_mac}
+| | Set Test Variable
+| | ... | ${${dut}_vf${pf}} | ${_vhost}
+| | Set Test Variable
+| | ... | ${${dut}_vf${pf}_mac} | ${_mac}
+
| Initialize layer vfio-pci on node
| | [Documentation]
| | ... | Initialize vfio-pci interfaces on DUT on NIC PF.
diff --git a/resources/libraries/robot/shared/test_teardown.robot b/resources/libraries/robot/shared/test_teardown.robot
index a2f35ed76c..18be67cfc5 100644
--- a/resources/libraries/robot/shared/test_teardown.robot
+++ b/resources/libraries/robot/shared/test_teardown.robot
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -104,6 +104,13 @@
| | | ... | Show Geneve Tunnel Data | ${nodes['${dut}']}
| | END
+| Additional Test Tear Down Action For iPerf3
+| | [Documentation]
+| | ... | Additional teardown for test which uses iPerf3 server.
+| |
+| | Run Keyword And Ignore Error
+| | ... | Teardown iPerf | ${nodes['${iperf_server_node}']}
+
| Additional Test Tear Down Action For ipsec_sa
| | [Documentation]
| | ... | Additional teardown for tests which uses IPSec security association.
@@ -200,4 +207,4 @@
| |
| | ${vnf_status} | ${value}= | Run Keyword And Ignore Error
| | ... | Keyword Should Exist | vnf_manager.Kill All VMs
-| | Run Keyword If | '${vnf_status}' == 'PASS' | vnf_manager.Kill All VMs
+| | Run Keyword If | '${vnf_status}' == 'PASS' | vnf_manager.Kill All VMs
\ No newline at end of file
diff --git a/resources/libraries/robot/shared/vm.robot b/resources/libraries/robot/shared/vm.robot
index c33ca5fea9..eb6acb371f 100644
--- a/resources/libraries/robot/shared/vm.robot
+++ b/resources/libraries/robot/shared/vm.robot
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -12,9 +12,9 @@
# limitations under the License.
*** Settings ***
-| Documentation | Keywords related to vm lifecycle management
-...
| Library | resources.libraries.python.InterfaceUtil
+|
+| Documentation | Keywords related to vm lifecycle management
*** Keywords ***
| Configure chains of NFs connected via vhost-user
@@ -32,6 +32,7 @@
| | ... | Type: boolean
| | ... | - auto_scale - Whether to use same amount of RXQs for memif interface
| | ... | in containers as vswitch, otherwise use single RXQ. Type: boolean
+| | ... | - fixed_auto_scale - Enable fixed auto_scale (nf_dtc). Type: boolean
| | ... | - vnf - Network function as a payload. Type: string
| | ... | - pinning - Whether to pin QEMU VMs to specific cores
| |
@@ -42,18 +43,23 @@
| |
| | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${jumbo}=${False}
| | ... | ${perf_qemu_qsz}=${1024} | ${use_tuned_cfs}=${False}
-| | ... | ${auto_scale}=${True} | ${vnf}=vpp | ${pinning}=${True}
+| | ... | ${auto_scale}=${True} | ${fixed_auto_scale}=${False} | ${vnf}=vpp
+| | ... | ${pinning}=${True}
| |
+| | ${enable_gso} = | Get Variable Value | ${enable_gso} | ${False}
+| | ${enable_csum} = | Get Variable Value | ${enable_csum} | ${False}
+| | ${virtio_feature_mask}= | Create Virtio feature mask
+| | ... | gso=${enable_gso} | csum=${enable_csum}
| | Import Library | resources.libraries.python.QemuManager | ${nodes}
| | ... | WITH NAME | vnf_manager
| | Run Keyword | vnf_manager.Construct VMs on all nodes
| | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes} | jumbo=${jumbo}
| | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${use_tuned_cfs}
-| | ... | auto_scale=${auto_scale} | vnf=${vnf}
-| | ... | tg_pf1_mac=${TG_pf1_mac}[0] | tg_pf2_mac=${TG_pf2_mac}[0]
+| | ... | auto_scale=${auto_scale} | fixed_auto_scale=${fixed_auto_scale}
+| | ... | vnf=${vnf} | tg_pf1_mac=${TG_pf1_mac}[0] | tg_pf2_mac=${TG_pf2_mac}[0]
| | ... | vs_dtc=${cpu_count_int} | nf_dtc=${nf_dtc} | nf_dtcr=${nf_dtcr}
-| | ... | rxq_count_int=${rxq_count_int} | enable_csum=${False}
-| | ... | enable_gso=${False}
+| | ... | rxq_count_int=${rxq_count_int}
+| | ... | virtio_feature_mask=${virtio_feature_mask}
| | ${cpu_wt}= | Run Keyword | vnf_manager.Start All VMs | pinning=${pinning}
| | ${cpu_alloc_str}= | Catenate | SEPARATOR=, | ${cpu_alloc_str} | ${cpu_wt}
| | Set Test Variable | ${cpu_alloc_str}
@@ -76,8 +82,10 @@
| | ... | Type: boolean
| | ... | - auto_scale - Whether to use same amount of RXQs for memif interface
| | ... | in containers as vswitch, otherwise use single RXQ. Type: boolean
+| | ... | - fixed_auto_scale - Enable override of auto_scale. Type: boolean
| | ... | - vnf - Network function as a payload. Type: string
| | ... | - pinning - Whether to pin QEMU VMs to specific cores
+| | ... | - validate - Validate interfaces are up. Type: boolean
| |
| | ... | *Example:*
| |
@@ -87,9 +95,14 @@
| |
| | [Arguments] | ${node} | ${nf_chains}=${1} | ${nf_nodes}=${1}
| | ... | ${jumbo}=${False} | ${perf_qemu_qsz}=${1024}
-| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${True} | ${vnf}=vpp
-| | ... | ${pinning}=${True}
+| | ... | ${use_tuned_cfs}=${False} | ${auto_scale}=${True}
+| | ... | ${fixed_auto_scale}=${False} | ${vnf}=vpp | ${pinning}=${True}
+| | ... | ${validate}=${True}
| |
+| | ${enable_gso}= | Get Variable Value | ${enable_gso} | ${False}
+| | ${enable_csum}= | Get Variable Value | ${enable_csum} | ${False}
+| | ${virtio_feature_mask}= | Create Virtio feature mask
+| | ... | gso=${enable_gso} | csum=${enable_csum}
| | Import Library | resources.libraries.python.QemuManager | ${nodes}
| | ... | WITH NAME | vnf_manager
| | Run Keyword | vnf_manager.Initialize
@@ -97,15 +110,16 @@
| | ... | node=${node}
| | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes} | jumbo=${jumbo}
| | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${use_tuned_cfs}
-| | ... | auto_scale=${auto_scale} | vnf=${vnf}
-| | ... | tg_pf1_mac=${TG_pf1_mac}[0] | tg_pf2_mac=${TG_pf2_mac}[0]
+| | ... | auto_scale=${auto_scale} | fixed_auto_scale=${fixed_auto_scale}
+| | ... | vnf=${vnf} | tg_pf1_mac=${TG_pf1_mac}[0] | tg_pf2_mac=${TG_pf2_mac}[0]
| | ... | vs_dtc=${cpu_count_int} | nf_dtc=${nf_dtc} | nf_dtcr=${nf_dtcr}
-| | ... | rxq_count_int=${rxq_count_int} | enable_csum=${False}
-| | ... | enable_gso=${False}
+| | ... | rxq_count_int=${rxq_count_int}
+| | ... | virtio_feature_mask=${virtio_feature_mask}
| | ${cpu_wt}= | Run Keyword | vnf_manager.Start All VMs | pinning=${pinning}
| | ${cpu_alloc_str}= | Catenate | SEPARATOR=, | ${cpu_alloc_str} | ${cpu_wt}
| | Set Test Variable | ${cpu_alloc_str}
-| | All VPP Interfaces Ready Wait | ${nodes} | retries=${300}
+| | Run Keyword If | ${validate}
+| | ... | All VPP Interfaces Ready Wait | ${nodes} | retries=${300}
| | VPP round robin RX placement on all DUTs | ${nodes} | prefix=Virtual
| Configure chains of NFs connected via passtrough
@@ -123,6 +137,7 @@
| | ... | Type: boolean
| | ... | - auto_scale - Whether to use same amount of RXQs for memif interface
| | ... | in containers as vswitch, otherwise use single RXQ. Type: boolean
+| | ... | - fixed_auto_scale - Enable override of auto_scale. Type: boolean
| | ... | - vnf - Network function as a payload. Type: string
| | ... | - pinning - Whether to pin QEMU VMs to specific cores
| |
@@ -133,18 +148,23 @@
| |
| | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${jumbo}=${False}
| | ... | ${perf_qemu_qsz}=${1024} | ${use_tuned_cfs}=${False}
-| | ... | ${auto_scale}=${True} | ${vnf}=vpp | ${pinning}=${True}
+| | ... | ${auto_scale}=${True} | ${fixed_auto_scale}=${False} | ${vnf}=vpp
+| | ... | ${pinning}=${True}
| |
+| | ${enable_gso} = | Get Variable Value | ${enable_gso} | ${False}
+| | ${enable_csum} = | Get Variable Value | ${enable_csum} | ${False}
+| | ${virtio_feature_mask}= | Create Virtio feature mask
+| | ... | gso=${enable_gso} | csum=${enable_csum}
| | Import Library | resources.libraries.python.QemuManager | ${nodes}
| | ... | WITH NAME | vnf_manager
| | Run Keyword | vnf_manager.Construct VMs on all nodes
| | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes} | jumbo=${jumbo}
| | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${use_tuned_cfs}
-| | ... | auto_scale=${auto_scale} | vnf=${vnf}
-| | ... | tg_pf1_mac=${TG_pf1_mac}[0] | tg_pf2_mac=${TG_pf2_mac}[0]
+| | ... | auto_scale=${auto_scale} | fixed_auto_scale=${fixed_auto_scale}
+| | ... | vnf=${vnf} | tg_pf1_mac=${TG_pf1_mac}[0] | tg_pf2_mac=${TG_pf2_mac}[0]
| | ... | vs_dtc=${cpu_count_int} | nf_dtc=${nf_dtc} | nf_dtcr=${nf_dtcr}
-| | ... | rxq_count_int=${rxq_count_int} | enable_csum=${False}
-| | ... | enable_gso=${False}
+| | ... | rxq_count_int=${rxq_count_int}
+| | ... | virtio_feature_mask=${virtio_feature_mask}
| | ... | if1=${DUT1_${int}1}[0] | if2=${DUT1_${int}2}[0]
| | ${cpu_wt}= | Run Keyword | vnf_manager.Start All VMs | pinning=${pinning}
| | ${cpu_alloc_str}= | Catenate | SEPARATOR=, | ${cpu_alloc_str} | ${cpu_wt}
diff --git a/resources/templates/vm/init_iperf3.sh b/resources/templates/vm/init_iperf3.sh
new file mode 100644
index 0000000000..959406ff4c
--- /dev/null
+++ b/resources/templates/vm/init_iperf3.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+mkdir /dev/pts
+mkdir /dev/hugepages
+mount -t devpts -o "rw,noexec,nosuid,gid=5,mode=0620" devpts /dev/pts || true
+mount -t tmpfs -o "rw,noexec,nosuid,size=10%,mode=0755" tmpfs /run
+cp /tmp/openvpp-testing/resources/tools/iperf/iperf_client.py /run
+mount -t tmpfs -o "rw,noexec,nosuid,size=10%,mode=0755" tmpfs /tmp
+mkdir -p /tmp/openvpp-testing/resources/tools/iperf/
+mv /run/iperf_client.py /tmp/openvpp-testing/resources/tools/iperf/
+mount -t hugetlbfs -o "rw,relatime,pagesize=2M" hugetlbfs /dev/hugepages
+echo Y > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
+
+# Qemu virtio-net-pci mgmt
+ip address add dev ens3 10.0.2.15/24
+ip link set dev ens3 up
+ip route add default via 10.0.2.2
+
+# Qemu virtio-net-pci vhost1
+ip address add dev ens6 ${ip_address_l}
+ip link set dev ens6 up
+ip route add ${ip_route_r} via ${ip_address_r}
+
+# Payload
+${vnf_bin}
+
+# Safenet
+poweroff -f
\ No newline at end of file
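
The ${...} placeholders in this template are filled by QemuUtils.create_kernelvm_init() via string.Template; a sketch of that substitution for the even-numbered VM, with values taken from configure_kernelvm_vnf above and a path relative to a CSIT checkout:

from string import Template

with open(u"resources/templates/vm/init_iperf3.sh", u"rt") as src_file:
    src = Template(src_file.read())

# Same safe_substitute mechanism as create_kernelvm_init().
print(src.safe_substitute(
    vnf_bin=u"mkdir /run/sshd; /usr/sbin/sshd -D -d",
    ip_address_l=u"1.1.1.1/30", ip_address_r=u"1.1.1.2",
    ip_route_r=u"2.2.2.0/30"))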
diff --git a/resources/tools/iperf/iperf_client.py b/resources/tools/iperf/iperf_client.py
new file mode 100644
index 0000000000..9d9ed9b197
--- /dev/null
+++ b/resources/tools/iperf/iperf_client.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python3
+
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module gets a bandwith limit together with other parameters, reads
+the iPerf3 configuration and sends the traffic. At the end, it measures
+the packet loss and latency.
+"""
+
+import argparse
+import json
+import sys
+import time
+import subprocess
+
+def simple_burst(args):
+ """Send traffic and measure throughput.
+
+ :param args: Named arguments from command line.
+ :type args: dict
+ """
+ if1_process = []
+ if1_results = []
+ cmd = None
+
+ if args.rate and args.frame_size:
+ iperf_frame_size = int(args.frame_size) - 18
+ iperf_rate = float(args.rate)
+ bandwidth = \
+ int(args.frame_size) * float(iperf_rate) / args.instances
+
+ if args.warmup_time > 0:
+ try:
+ for i in range(0, args.instances):
+ cmd = u"exec sudo "
+ cmd += f"ip netns exec {args.namespace} " if args.namespace else u""
+ cmd += f"iperf3 "
+ cmd += f"--client {args.host} "
+ cmd += f"--bind {args.bind} "
+ if args.rate and args.frame_size:
+ cmd += f"--bandwidth {bandwidth} "
+ cmd += f"--length {iperf_frame_size} "
+ cmd += f"--port {5201 + i} "
+ cmd += f"--parallel {args.parallel} "
+ cmd += f"--time {args.warmup_time} "
+ if args.affinity:
+ cmd += f"--affinity {args.affinity} "
+ if args.udp:
+ cmd += f"--udp "
+ cmd += f"--zerocopy "
+ cmd += f"--json"
+ if1_process.append(
+ subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE)
+ )
+ finally:
+ for i in range(0, args.instances):
+ if1, _ = if1_process[i].communicate(
+ timeout=args.warmup_time + 60)
+
+ if1_process = []
+ if1_results = []
+ cmd = None
+
+ try:
+ if args.async_start:
+ args.duration += 999
+ for i in range(0, args.instances):
+ cmd = u"exec sudo "
+ cmd += f"ip netns exec {args.namespace} " if args.namespace else u""
+ cmd += f"iperf3 "
+ cmd += f"--client {args.host} "
+ cmd += f"--bind {args.bind} "
+ if args.rate and args.frame_size:
+ cmd += f"--bandwidth {bandwidth} "
+ cmd += f"--length {iperf_frame_size} "
+ cmd += f"--port {5201 + i} "
+ cmd += f"--parallel {args.parallel} "
+ cmd += f"--time {args.duration} "
+ if args.affinity:
+ cmd += f"--affinity {args.affinity} "
+ if args.udp:
+ cmd += f"--udp "
+ cmd += f"--zerocopy "
+ cmd += f"--json"
+ if1_process.append(
+ subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE)
+ )
+ finally:
+ if args.async_start:
+ for i in range(0, args.instances):
+ print(if1_process[i].pid)
+ else:
+ for i in range(0, args.instances):
+ if1, _ = if1_process[i].communicate(timeout=args.duration + 60)
+ if1_results.append(json.loads(if1))
+ if1_results[i][u"end"][u"command"] = cmd
+ print(f"{json.dumps(if1_results[i]['end'], indent = 4)}")
+
+
+def main():
+ """Main function for the traffic generator using iPerf3.
+
+ It verifies the given command line arguments and runs "simple_burst"
+ function.
+ """
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ u"--namespace", required=False, type=str,
+ help=u"Port netns name to run iPerf client on."
+ )
+ parser.add_argument(
+ u"--host", required=True, type=str,
+ help=u"Run in client mode, connecting to an iPerf server host."
+ )
+ parser.add_argument(
+ u"--bind", required=True, type=str,
+ help=u"Client bind IP address."
+ )
+ parser.add_argument(
+ u"--udp", action=u"store_true", default=False,
+ help=u"UDP test."
+ )
+ parser.add_argument(
+ u"--affinity", required=False, type=str,
+ help=u"Set the CPU affinity, if possible."
+ )
+ parser.add_argument(
+ u"--duration", required=True, type=float,
+ help=u"Duration of traffic run in seconds (-1=infinite)."
+ )
+ parser.add_argument(
+ u"--frame_size", required=False,
+ help=u"Size of a Frame without padding and IPG."
+ )
+ parser.add_argument(
+ u"--rate", required=False,
+ help=u"Traffic rate with included units (pps)."
+ )
+ parser.add_argument(
+ u"--traffic_directions", default=1, type=int,
+ help=u"Send bi- (2) or uni- (1) directional traffic."
+ )
+ parser.add_argument(
+ u"--warmup_time", type=float, default=5.0,
+ help=u"Traffic warm-up time in seconds, (0=disable)."
+ )
+ parser.add_argument(
+ u"--async_start", action=u"store_true", default=False,
+ help=u"Non-blocking call of the script."
+ )
+ parser.add_argument(
+ u"--instances", default=1, type=int,
+ help=u"The number of simultaneous client instances."
+ )
+ parser.add_argument(
+ u"--parallel", default=8, type=int,
+ help=u"The number of simultaneous client streams."
+ )
+
+ args = parser.parse_args()
+
+ # Currently limiting to uni- directional traffic.
+ if args.traffic_directions != 1:
+ print(f"Currently only uni- directional traffic is supported!")
+ sys.exit(1)
+
+ simple_burst(args)
+
+
+if __name__ == u"__main__":
+ main()
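
For reference, a hedged sketch of driving this script directly; normally it is wrapped by IPerf3Client.iperf3_cmdline above. It assumes sudo and iperf3 are available, an iPerf3 server is already listening on the target, and the path and addresses are illustrative:

import json
import shlex
import subprocess

cmd = (
    u"python3 resources/tools/iperf/iperf_client.py "
    u"--host 1.1.1.1 --bind 2.2.2.2 --duration 10.0 --warmup_time 0"
)
out = subprocess.run(
    shlex.split(cmd), check=True, capture_output=True, text=True
).stdout
# With the default single instance the script prints one JSON object:
# the "end" section of the iPerf3 report.
end = json.loads(out)
print(end[u"sum_received"][u"bits_per_second"])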