# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/performance/performance_setup.robot
| Library | resources.libraries.python.InterfaceUtil
| Library | resources.libraries.python.NodePath
| ...
| Force Tags | 3_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | NDRCHK
| ... | NIC_Intel-X520-DA2 | ETH | L2XCFWD | BASE | L2XCBASE
| ...
| Suite Setup | Set up 3-node performance topology with DUT's NIC model
| ... | L2 | Intel-X520-DA2
| Suite Teardown | Tear down 3-node performance topology
| ...
| Test Setup | Set up performance test
| Test Teardown | Tear down performance ndrchk test
| ...
| Documentation | *Reference NDR throughput L2XC verify test cases*
| ...
| ... | *[Top] Network Topologies:* TG-DUT1-DUT2-TG 3-node circular topology
| ... | with single links between nodes.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 for L2 cross connect.
| ... | *[Cfg] DUT configuration:* DUT1 and DUT2 are configured with L2 cross-
| ... | connect. DUT1 and DUT2 tested with 2p10GE NIC X520 Niantic by Intel.
| ... | *[Ver] TG verification:* In short performance tests, TG verifies
| ... | DUTs' throughput at ref-NDR (reference Non Drop Rate) with zero packet
| ... | loss tolerance. Ref-NDR value is periodically updated according to
| ... | formula: ref-NDR = 0.9x NDR, where NDR is found in RFC2544 long
| ... | performance tests for the same DUT configuration. Test packets are
| ... | generated by TG on links to DUTs. TG traffic profile contains two L3
| ... | flow-groups (flow-group per direction, 253 flows per flow-group) with
| ... | all packets containing Ethernet header, IPv4 header with IP protocol=61
| ... | and static payload. MAC addresses match the MAC addresses of the
| ... | TG node interfaces.
| ... | *[Ref] Applicable standard specifications:* RFC2544.

*** Variables ***
# Traffic profile:
| ${traffic_profile} | trex-sl-3n-ethip4-ip4src254
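
# The reference rates used in the test cases below can be sanity-checked with
# 10GbE link arithmetic: on the wire each frame occupies its frame size plus
# 20B of overhead (preamble, start-of-frame delimiter, inter-frame gap), so
# the quoted 1518B and 9000B rates equal the 10GE line rates for those frame
# sizes, while the 64B rates follow the ref-NDR = 0.9x NDR formula described
# in the suite documentation above. An illustrative sketch using Robot's
# Evaluate (variable names here are illustrative, not suite variables):
# | ${rate_1518}= | Evaluate | 10**10 // ((1518 + 20) * 8) |   # -> 812743
# | ${rate_9000}= | Evaluate | 10**10 // ((9000 + 20) * 8) |   # -> 138580
# | ${rate_64}= | Evaluate | 10**10 // ((64 + 20) * 8) |       # -> 14880952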

*** Test Cases ***
| tc01-64B-1t1c-eth-l2xcbase-ndrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 1 thread, 1 phy core, \
| | ... | 1 receive queue per NIC port. [Ver] Verify ref-NDR for 64 Byte
| | ... | frames using single trial throughput test at 2x 5.5mpps.
| | [Tags] | 64B | 1T1C | STHREAD
| | ${framesize}= | Set Variable | ${64}
| | ${rate}= | Set Variable | 5.5mpps
| | Given Add '1' worker threads and '1' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
| | And Add no multi seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | And Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with no loss | ${perf_trial_duration} | ${rate}
| | ... | ${framesize} | ${traffic_profile}

| tc02-1518B-1t1c-eth-l2xcbase-ndrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 1 thread, 1 phy core, \
| | ... | 1 receive queue per NIC port. [Ver] Verify ref-NDR for 1518 Byte
| | ... | frames using single trial throughput test at 2x 812743pps.
| | [Tags] | 1518B | 1T1C | STHREAD
| | ${framesize}= | Set Variable | ${1518}
| | ${rate}= | Set Variable | 812743pps
| | Given Add '1' worker threads and '1' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
| | And Add no multi seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | And Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with no loss | ${perf_trial_duration} | ${rate}
| | ... | ${framesize} | ${traffic_profile}

| tc03-9000B-1t1c-eth-l2xcbase-ndrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 1 thread, 1 phy core, \
| | ... | 1 receive queue per NIC port. [Ver] Verify ref-NDR for 9000 Byte
| | ... | frames using single trial throughput test at 2x 138580pps.
| | [Tags] | 9000B | 1T1C | STHREAD
| | ${framesize}= | Set Variable | ${9000}
| | ${rate}= | Set Variable | 138580pps
| | Given Add '1' worker threads and '1' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
| | And Apply startup configuration on all VPP DUTs
| | And Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with no loss | ${perf_trial_duration} | ${rate}
| | ... | ${framesize} | ${traffic_profile}

| tc04-64B-2t2c-eth-l2xcbase-ndrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 2 threads, 2 phy cores, \
| | ... | 1 receive queue per NIC port. [Ver] Verify ref-NDR for 64 Byte
| | ... | frames using single trial throughput test at 2x 10.4mpps.
| | [Tags] | 64B | 2T2C | MTHREAD
| | ${framesize}= | Set Variable | ${64}
| | ${rate}= | Set Variable | 10.4mpps
| | Given Add '2' worker threads and '1' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
| | And Add no multi seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | And Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with no loss | ${perf_trial_duration} | ${rate}
| | ... | ${framesize} | ${traffic_profile}

| tc05-1518B-2t2c-eth-l2xcbase-ndrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 2 threads, 2 phy cores, \
| | ... | 1 receive queue per NIC port. [Ver] Verify ref-NDR for 1518 Byte
| | ... | frames using single trial throughput test at 2x 812743pps.
| | [Tags] | 1518B | 2T2C | MTHREAD
| | ${framesize}= | Set Variable | ${1518}
| | ${rate}= | Set Variable | 812743pps
| | Given Add '2' worker threads and '1' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
| | And Add no multi seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | And Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with no loss | ${perf_trial_duration} | ${rate}
| | ... | ${framesize} | ${traffic_profile}

| tc06-9000B-2t2c-eth-l2xcbase-ndrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 2 threads, 2 phy cores, \
| | ... | 1 receive queue per NIC port. [Ver] Verify ref-NDR for 9000 Byte
| | ... | frames using single trial throughput test at 2x 138580pps.
| | [Tags] | 9000B | 2T2C | MTHREAD
| | ${framesize}= | Set Variable | ${9000}
| | ${rate}= | Set Variable | 138580pps
| | Given Add '2' worker threads and '1' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
| | And Apply startup configuration on all VPP DUTs
| | And Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with no loss | ${perf_trial_duration} | ${rate}
| | ... | ${framesize} | ${traffic_profile}

| tc07-64B-4t4c-eth-l2xcbase-ndrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 4 threads, 4 phy cores, \
| | ... | 2 receive queues per NIC port. [Ver] Verify ref-NDR for 64 Byte
| | ... | frames using single trial throughput test at 2x 10.4mpps.
| | [Tags] | 64B | 4T4C | MTHREAD
| | ${framesize}= | Set Variable | ${64}
| | ${rate}= | Set Variable | 10.4mpps
| | Given Add '4' worker threads and '2' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
| | And Add no multi seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | And Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with no loss | ${perf_trial_duration} | ${rate}
| | ... | ${framesize} | ${traffic_profile}

| tc08-1518B-4t4c-eth-l2xcbase-ndrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 4 threads, 4 phy cores, \
| | ... | 2 receive queues per NIC port. [Ver] Verify ref-NDR for 1518 Byte
| | ... | frames using single trial throughput test at 2x 812743pps.
| | [Tags] | 1518B | 4T4C | MTHREAD
| | ${framesize}= | Set Variable | ${1518}
| | ${rate}= | Set Variable | 812743pps
| | Given Add '4' worker threads and '2' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
| | And Add no multi seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | And Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with no loss | ${perf_trial_duration} | ${rate}
| | ... | ${framesize} | ${traffic_profile}

| tc09-9000B-4t4c-eth-l2xcbase-ndrchk
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC config with 4 threads, 4 phy cores, \
| | ... | 2 receive queues per NIC port. [Ver] Verify ref-NDR for 9000 Byte
| | ... | frames using single trial throughput test at 2x 138580pps.
| | [Tags] | 9000B | 4T4C | MTHREAD
| | ${framesize}= | Set Variable | ${9000}
| | ${rate}= | Set Variable | 138580pps
| | Given Add '4' worker threads and '2' rxqueues in 3-node single-link circular topology
| | And Add PCI devices to DUTs in 3-node single link topology
| | And Apply startup configuration on all VPP DUTs
| | And Initialize L2 xconnect in 3-node circular topology
| | Then Traffic should pass with no loss | ${perf_trial_duration} | ${rate}
| | ... | ${framesize} | ${traffic_profile}

            + qemu_id,
            u"serial": 4555 + qemu_id,
            u"username": 'testuser',
            u"password": 'Csit1234',
            u"interfaces": {},
        }
        if node[u"port"] != 22:
            self._vm_info[u"host_port"] = node[u"port"]
            self._vm_info[u"host_username"] = node[u"username"]
            self._vm_info[u"host_password"] = node[u"password"]
        # Input Options.
        self._opt[u"qemu_id"] = qemu_id
        self._opt[u"mem"] = int(mem)
        self._opt[u"smp"] = int(smp)
        self._opt[u"img"] = img
        self._opt[u"vnf"] = vnf
        self._opt[u"page_size"] = page_size

        # Temporary files.
        self._temp = dict()
        self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log"
        self._temp[u"pidfile"] = f"/run/qemu_{qemu_id}.pid"
        if img == Constants.QEMU_VM_IMAGE:
            self._temp[u"qmp"] = f"/run/qmp_{qemu_id}.sock"
            self._temp[u"qga"] = f"/run/qga_{qemu_id}.sock"
        elif img == Constants.QEMU_VM_KERNEL:
            self._opt[u"img"], _ = exec_cmd_no_error(
                node, f"ls -1 {Constants.QEMU_VM_KERNEL}* | tail -1",
                message=u"Qemu Kernel VM image not found!"
            )
            self._temp[u"ini"] = f"/etc/vm_init_{qemu_id}.conf"
            self._opt[u"initrd"], _ = exec_cmd_no_error(
                node, f"ls -1 {Constants.QEMU_VM_KERNEL_INITRD}* | tail -1",
                message=u"Qemu Kernel initrd image not found!"
            )
        else:
            raise RuntimeError(f"QEMU: Unknown VM image option: {img}")
        # Computed parameters for QEMU command line.
        self._params = OptionString(prefix=u"-")

    def add_default_params(self):
        """Set default QEMU command line parameters."""
        mem_path = f"/dev/hugepages1G" \
            if self._opt[u"page_size"] == u"1G" else u"/dev/hugepages"
        self._params.add(u"daemonize")
        self._params.add(u"nodefaults")
        self._params.add_with_value(
            u"name", f"vnf{self._opt.get(u'qemu_id')},debug-threads=on"
        )
        self._params.add(u"no-user-config")
        self._params.add(u"nographic")
        self._params.add(u"enable-kvm")
        self._params.add_with_value(u"pidfile", self._temp.get(u"pidfile"))
        self._params.add_with_value(u"cpu", u"host")
        self._params.add_with_value(u"machine", self._opt.get(u"machine_args"))
        self._params.add_with_value(
            u"smp", f"{self._opt.get(u'smp')},sockets=1,"
            f"cores={self._opt.get(u'smp')},threads=1"
        )
        self._params.add_with_value(
            u"object", f"memory-backend-file,id=mem,"
            f"size={self._opt.get(u'mem')}M,"
            f"mem-path={mem_path},share=on"
        )
        self._params.add_with_value(u"m", f"{self._opt.get(u'mem')}M")
        self._params.add_with_value(u"numa", u"node,memdev=mem")

    def add_net_user(self, net="10.0.2.0/24"):
        """Set management port forwarding."""
        self._params.add_with_value(
            u"netdev", f"user,id=mgmt,net={net},"
            f"hostfwd=tcp::{self._vm_info[u'port']}-:22"
        )
        self._params.add_with_value(
            u"device", f"virtio-net,netdev=mgmt"
        )

    def add_qmp_qga(self):
        """Set QMP, QGA management."""
        self._params.add_with_value(
            u"chardev", f"socket,path={self._temp.get(u'qga')},"
            f"server,nowait,id=qga0"
        )
        self._params.add_with_value(
            u"device", u"isa-serial,chardev=qga0"
        )
        self._params.add_with_value(
            u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait"
        )

    def add_serial(self):
        """Set serial to file redirect."""
        self._params.add_with_value(
            u"chardev", f"socket,host=127.0.0.1,"
            f"port={self._vm_info[u'serial']},id=gnc0,server,nowait"
        )
        self._params.add_with_value(
            u"device", u"isa-serial,chardev=gnc0"
        )
        self._params.add_with_value(
            u"serial", f"file:{self._temp.get(u'log')}"
        )

    def add_drive_cdrom(self, drive_file, index=None):
        """Set CD-ROM drive.

        :param drive_file: Path to drive image.
        :param index: Drive index.
        :type drive_file: str
        :type index: int
        """
        index = f"index={index}," if index else u""
        self._params.add_with_value(
            u"drive", f"file={drive_file},{index}media=cdrom"
        )
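    # Illustrative usage (hypothetical values, not from this file): with
    # qemu_id=1, mem=2048, smp=3 and the default 2M page size,
    # add_default_params() contributes options such as
    #   -name vnf1,debug-threads=on -smp 3,sockets=1,cores=3,threads=1
    #   -object memory-backend-file,id=mem,size=2048M,mem-path=/dev/hugepages,share=on
    #   -m 2048M -numa node,memdev=mem
    # to the computed QEMU command line.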
    def add_drive(self, drive_file, drive_format):
        """Set drive with custom format.

        :param drive_file: Path to drive image.
        :param drive_format: Drive image format.
        :type drive_file: str
        :type drive_format: str
        """
        self._params.add_with_value(
            u"drive", f"file={drive_file},format={drive_format},"
            u"cache=none,if=virtio,file.locking=off"
        )

    def add_kernelvm_params(self):
        """Set KernelVM QEMU parameters."""
        hugepages = 3 if self._opt[u"page_size"] == u"1G" else 512
        self._params.add_with_value(
            u"serial", f"file:{self._temp.get(u'log')}"
        )
        self._params.add_with_value(
            u"fsdev", u"local,id=root9p,path=/,security_model=none"
        )
        self._params.add_with_value(
            u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot"
        )
        self._params.add_with_value(
            u"kernel", f"{self._opt.get(u'img')}"
        )
        self._params.add_with_value(
            u"initrd", f"{self._opt.get(u'initrd')}"
        )
        self._params.add_with_value(
            u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
            f"root=virtioroot console={self._opt.get(u'console')} "
            f"tsc=reliable hugepages={hugepages} "
            f"hugepagesz={self._opt.get(u'page_size')} "
            f"init={self._temp.get(u'ini')} fastboot'"
        )

    def add_vhost_user_if(
            self, socket, server=True, jumbo_frames=False, queue_size=None,
            queues=1, virtio_feature_mask=None):
        """Add Vhost-user interface.

        :param socket: Path of the unix socket.
        :param server: If True the socket shall be a listening socket.
        :param jumbo_frames: Set True if jumbo frames are used in the test.
        :param queue_size: Vring queue size.
        :param queues: Number of queues.
        :param virtio_feature_mask: Mask of virtio features to be enabled.
        :type socket: str
        :type server: bool
        :type jumbo_frames: bool
        :type queue_size: int
        :type queues: int
        :type virtio_feature_mask: int
        """
        self._nic_id += 1
        if jumbo_frames:
            logger.debug(u"Jumbo frames temporarily disabled!")
        self._params.add_with_value(
            u"chardev", f"socket,id=char{self._nic_id},"
            f"path={socket}{u',server=on' if server is True else u''}"
        )
        self._params.add_with_value(
            u"netdev", f"vhost-user,id=vhost{self._nic_id},"
            f"chardev=char{self._nic_id},queues={queues}"
        )
        mac = f"52:54:00:00:{self._opt.get(u'qemu_id'):02x}:" \
            f"{self._nic_id:02x}"
        queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
            if queue_size else u""
        gso = VirtioFeatureMask.is_feature_enabled(
            virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_GSO)
        csum = VirtioFeatureMask.is_feature_enabled(
            virtio_feature_mask, VirtioFeaturesFlags.VIRTIO_NET_F_API_CSUM)

        self._params.add_with_value(
            u"device", f"virtio-net-pci,netdev=vhost{self._nic_id},mac={mac},"
            f"addr={self._nic_id+5}.0,mq=on,vectors={2 * queues + 2},"
            f"csum={u'on' if csum else u'off'},"
            f"gso={u'on' if gso else u'off'},"
            f"guest_tso4={u'on' if gso else u'off'},"
            f"guest_tso6={u'on' if gso else u'off'},"
            f"guest_ecn={u'on' if gso else u'off'},"
            f"{queue_size}"
        )

        # Add interface MAC and socket to the node dict.
        if_data = {u"mac_address": mac, u"socket": socket}
        if_name = f"vhost{self._nic_id}"
        self._vm_info[u"interfaces"][if_name] = if_data
        # Add socket to temporary file list.
        self._temp[if_name] = socket

    def add_vfio_pci_if(self, pci):
        """Add VFIO PCI interface.

        :param pci: PCI address of interface.
        :type pci: str
        """
        self._nic_id += 1
        self._params.add_with_value(
            u"device", f"vfio-pci,host={pci},addr={self._nic_id+5}.0"
        )
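    # Illustrative usage (hypothetical values): for qemu_id=1, the second
    # add_vhost_user_if() call uses nic_id=2, so the device gets
    # mac=52:54:00:00:01:02 and PCI addr=7.0 (nic_id + 5); with queues=2 the
    # virtio-net-pci device requests vectors=2*2+2=6 MSI-X vectors.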
    def create_kernelvm_config_vpp(self, **kwargs):
        """Create QEMU VPP config files.

        :param kwargs: Key-value pairs to replace content of VPP
            configuration file.
        :type kwargs: dict
        """
        startup = f"/etc/vpp/vm_startup_{self._opt.get(u'qemu_id')}.conf"
        running = f"/etc/vpp/vm_running_{self._opt.get(u'qemu_id')}.exec"

        self._temp[u"startup"] = startup
        self._temp[u"running"] = running
        self._opt[u"vnf_bin"] = f"/usr/bin/vpp -c {startup}"

        # Create VPP startup configuration.
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self._node)
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_exec(running)
        vpp_config.add_socksvr()
        vpp_config.add_main_heap_size(u"512M")
        vpp_config.add_main_heap_page_size(self._opt[u"page_size"])
        vpp_config.add_default_hugepage_size(self._opt[u"page_size"])
        vpp_config.add_statseg_size(u"512M")
        vpp_config.add_statseg_page_size(self._opt[u"page_size"])
        vpp_config.add_statseg_per_node_counters(u"on")
        vpp_config.add_buffers_per_numa(107520)
        vpp_config.add_cpu_main_core(u"0")
        if self._opt.get(u"smp") > 1:
            vpp_config.add_cpu_corelist_workers(f"1-{self._opt.get(u'smp')-1}")
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"ping_plugin.so")
        if "2vfpt" in self._opt.get(u'vnf'):
            vpp_config.add_plugin(u"enable", u"avf_plugin.so")
        if "vhost" in self._opt.get(u'vnf'):
            vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
            vpp_config.add_dpdk_dev(u"0000:00:06.0", u"0000:00:07.0")
            vpp_config.add_dpdk_dev_default_rxq(kwargs[u"queues"])
            vpp_config.add_dpdk_log_level(u"debug")
            if not kwargs[u"jumbo_frames"]:
                vpp_config.add_dpdk_no_multi_seg()
                vpp_config.add_dpdk_no_tx_checksum_offload()
        if "ipsec" in self._opt.get(u'vnf'):
            vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
            vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
            vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
        if "nat" in self._opt.get(u'vnf'):
            vpp_config.add_nat(value=u"endpoint-dependent")
            vpp_config.add_plugin(u"enable", u"nat_plugin.so")
        vpp_config.write_config(startup)

        # Create VPP running configuration.
        template = f"{Constants.RESOURCES_TPL}/vm/{self._opt.get(u'vnf')}.exec"
        exec_cmd_no_error(self._node, f"rm -f {running}", sudo=True)

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            exec_cmd_no_error(
                self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
                f"sudo tee {running}"
            )

    def create_kernelvm_config_testpmd_io(self, **kwargs):
        """Create QEMU testpmd-io command line.

        :param kwargs: Key-value pairs to construct command line parameters.
        :type kwargs: dict
        """
        pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
        testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
            eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
            eal_driver=False,
            eal_pci_whitelist0=u"0000:00:06.0",
            eal_pci_whitelist1=u"0000:00:07.0",
            eal_in_memory=True,
            pmd_num_mbufs=32768,
            pmd_fwd_mode=u"io",
            pmd_nb_ports=u"2",
            pmd_portmask=u"0x3",
            pmd_max_pkt_len=pmd_max_pkt_len,
            pmd_mbuf_size=u"16384",
            pmd_rxq=kwargs[u"queues"],
            pmd_txq=kwargs[u"queues"],
            pmd_tx_offloads='0x0',
            pmd_nb_cores=str(self._opt.get(u"smp") - 1)
        )

        self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
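    # Illustrative usage (hypothetical value): with smp=3, testpmd-io runs
    # with eal_corelist="0-2" and pmd_nb_cores="2"; core 0 is left for the
    # testpmd main thread and the remaining smp-1 cores forward packets.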
    def create_kernelvm_config_testpmd_mac(self, **kwargs):
        """Create QEMU testpmd-mac command line.

        :param kwargs: Key-value pairs to construct command line parameters.
        :type kwargs: dict
        """
        pmd_max_pkt_len = u"9200" if kwargs[u"jumbo_frames"] else u"1518"
        testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
            eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
            eal_driver=False,
            eal_pci_whitelist0=u"0000:00:06.0",
            eal_pci_whitelist1=u"0000:00:07.0",
            eal_in_memory=True,
            pmd_num_mbufs=32768,
            pmd_fwd_mode=u"mac",
            pmd_nb_ports=u"2",
            pmd_portmask=u"0x3",
            pmd_max_pkt_len=pmd_max_pkt_len,
            pmd_mbuf_size=u"16384",
            pmd_eth_peer_0=f"0,{kwargs[u'vif1_mac']}",
            pmd_eth_peer_1=f"1,{kwargs[u'vif2_mac']}",
            pmd_rxq=kwargs[u"queues"],
            pmd_txq=kwargs[u"queues"],
            pmd_tx_offloads=u"0x0",
            pmd_nb_cores=str(self._opt.get(u"smp") - 1)
        )

        self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"

    def create_kernelvm_config_iperf3(self):
        """Create QEMU iperf3 command line."""
        self._opt[u"vnf_bin"] = f"mkdir /run/sshd; /usr/sbin/sshd -D -d"

    def create_kernelvm_init(self, **kwargs):
        """Create QEMU init script.

        :param kwargs: Key-value pairs to replace content of init startup
            file.
        :type kwargs: dict
        """
        init = self._temp.get(u"ini")
        exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)

        with open(kwargs[u"template"], u"rt") as src_file:
            src = Template(src_file.read())
            exec_cmd_no_error(
                self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
                f"sudo tee {init}"
            )
            exec_cmd_no_error(self._node, f"chmod +x {init}", sudo=True)

    def configure_kernelvm_vnf(self, **kwargs):
        """Create KernelVM VNF configurations.

        :param kwargs: Key-value pairs for templating configs.
        :type kwargs: dict
        """
        if u"vpp" in self._opt.get(u"vnf"):
            self.create_kernelvm_config_vpp(**kwargs)
            self.create_kernelvm_init(
                template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
                vnf_bin=self._opt.get(u"vnf_bin")
            )
        elif u"testpmd_io" in self._opt.get(u"vnf"):
            self.create_kernelvm_config_testpmd_io(**kwargs)
            self.create_kernelvm_init(
                template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
                vnf_bin=self._opt.get(u"vnf_bin")
            )
        elif u"testpmd_mac" in self._opt.get(u"vnf"):
            self.create_kernelvm_config_testpmd_mac(**kwargs)
            self.create_kernelvm_init(
                template=f"{Constants.RESOURCES_TPL}/vm/init.sh",
                vnf_bin=self._opt.get(u"vnf_bin")
            )
        elif u"iperf3" in self._opt.get(u"vnf"):
            qemu_id = self._opt.get(u'qemu_id') % 2
            self.create_kernelvm_config_iperf3()
            self.create_kernelvm_init(
                template=f"{Constants.RESOURCES_TPL}/vm/init_iperf3.sh",
                vnf_bin=self._opt.get(u"vnf_bin"),
                ip_address_l=u"2.2.2.2/30" if qemu_id else u"1.1.1.1/30",
                ip_address_r=u"2.2.2.1" if qemu_id else u"1.1.1.2",
                ip_route_r=u"1.1.1.0/30" if qemu_id else u"2.2.2.0/30"
            )
        else:
            raise RuntimeError(u"QEMU: Unsupported VNF!")

    def get_qemu_pids(self):
        """Get QEMU CPU pids.

        :returns: List of QEMU CPU pids.
        :rtype: list of str
        """
        command = f"grep -rwl 'CPU' /proc/$(sudo cat " \
            f"{self._temp.get(u'pidfile')})/task/*/comm "
        command += r"| xargs dirname | sed -e 's/\/.*\///g' | uniq"

        stdout, _ = exec_cmd_no_error(self._node, command)
        return stdout.splitlines()
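    # Example of what the get_qemu_pids() pipeline does (hypothetical PID
    # 4242): QEMU vCPU threads are typically named "CPU 0/KVM" etc., so
    #   grep -rwl 'CPU' /proc/4242/task/*/comm  -> /proc/4242/task/4250/comm
    #   | xargs dirname                         -> /proc/4242/task/4250
    #   | sed -e 's/\/.*\///g' | uniq           -> 4250 (one id per vCPU)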
    def qemu_set_affinity(self, *host_cpus):
        """Set qemu affinity by getting thread PIDs via QMP and taskset to
        list of CPU cores. Function tries to execute 3 times to avoid race
        condition in getting thread PIDs.

        :param host_cpus: List of CPU cores.
        :type host_cpus: list
        """
        for _ in range(3):
            try:
                qemu_cpus = self.get_qemu_pids()

                if len(qemu_cpus) != len(host_cpus):
                    sleep(1)
                    continue
                for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
                    command = f"taskset -pc {host_cpu} {qemu_cpu}"
                    message = f"QEMU: Set affinity failed " \
                        f"on {self._node[u'host']}!"
                    exec_cmd_no_error(
                        self._node, command, sudo=True, message=message
                    )
                break
            except (RuntimeError, ValueError):
                self.qemu_kill_all()
                raise
        else:
            self.qemu_kill_all()
            raise RuntimeError(u"Failed to set Qemu threads affinity!")

    def qemu_set_scheduler_policy(self):
        """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
        processes.

        :raises RuntimeError: Set scheduler policy failed.
        """
        try:
            qemu_cpus = self.get_qemu_pids()

            for qemu_cpu in qemu_cpus:
                command = f"chrt -r -p 1 {qemu_cpu}"
                message = f"QEMU: Set SCHED_RR failed on {self._node[u'host']}"
                exec_cmd_no_error(
                    self._node, command, sudo=True, message=message
                )
        except (RuntimeError, ValueError):
            self.qemu_kill_all()
            raise

    def _qemu_qmp_exec(self, cmd):
        """Execute QMP command.

        QMP is a JSON-based protocol that allows controlling a QEMU instance.

        :param cmd: QMP command to execute.
        :type cmd: str
        :returns: Command output in python representation of JSON format. The
            { "return": {} } response is QMP's success response. An error
            response will contain the "error" keyword instead of "return".
        """
        # To enter command mode, the qmp_capabilities command must be issued.
        command = f"echo \"{{{{ \\\"execute\\\": " \
            f"\\\"qmp_capabilities\\\" }}}}" \
            f"{{{{ \\\"execute\\\": \\\"{cmd}\\\" }}}}\" | " \
            f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qmp')}"
        message = f"QMP execute '{cmd}' failed on {self._node[u'host']}"

        stdout, _ = exec_cmd_no_error(
            self._node, command, sudo=False, message=message
        )

        # Skip capabilities negotiation messages.
        out_list = stdout.splitlines()
        if len(out_list) < 3:
            raise RuntimeError(f"Invalid QMP output on {self._node[u'host']}")
        return json.loads(out_list[2])

    def _qemu_qga_flush(self):
        """Flush the QGA parser state."""
        command = f"(printf \"\xFF\"; sleep 1) | sudo -S socat " \
            f"- UNIX-CONNECT:{self._temp.get(u'qga')}"
        message = f"QGA flush failed on {self._node[u'host']}"

        stdout, _ = exec_cmd_no_error(
            self._node, command, sudo=False, message=message
        )

        return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()

    def _qemu_qga_exec(self, cmd):
        """Execute QGA command.

        QGA provides access to a system-level agent via standard QMP commands.

        :param cmd: QGA command to execute.
        :type cmd: str
        """
        command = f"(echo \"{{{{ \\\"execute\\\": " \
            f"\\\"{cmd}\\\" }}}}\"; sleep 1) | " \
            f"sudo -S socat - UNIX-CONNECT:{self._temp.get(u'qga')}"
        message = f"QGA execute '{cmd}' failed on {self._node[u'host']}"

        stdout, _ = exec_cmd_no_error(
            self._node, command, sudo=False, message=message
        )

        return json.loads(stdout.split(u"\n", 1)[0]) if stdout else dict()

    def _wait_until_vm_boot(self):
        """Wait until QEMU VM is booted."""
        try:
            getattr(self, f'_wait_{self._opt["vnf"]}')()
        except AttributeError:
            self._wait_default()

    def _wait_default(self, retries=60):
        """Wait until QEMU with VPP is booted.

        :param retries: Number of retries.
        :type retries: int
        """
        for _ in range(retries):
            command = f"tail -1 {self._temp.get(u'log')}"
            stdout = None
            try:
                stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
                sleep(1)
            except RuntimeError:
                pass
            if stdout is None:
                # Serial log not readable yet; retry.
                continue
            if "vpp " in stdout and "built by" in stdout:
                break
            if u"Press enter to exit" in stdout:
                break
            if u"reboot: Power down" in stdout:
                raise RuntimeError(
                    f"QEMU: NF failed to run on {self._node[u'host']}!"
                )
        else:
            raise RuntimeError(
                f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
            )
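    # Sketch of the QMP exchange driven by _qemu_qmp_exec() (the greeting
    # banner is emitted by QEMU on connect; events may interleave in
    # practice):
    #   <- {"QMP": {"version": {...}, "capabilities": [...]}}
    #   -> { "execute": "qmp_capabilities" }
    #   <- {"return": {}}
    #   -> { "execute": "quit" }
    #   <- {"return": {}}
    # which is why the parser above takes out_list[2], skipping the greeting
    # and the capabilities-negotiation response.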
    def _wait_nestedvm(self, retries=12):
        """Wait until QEMU with NestedVM is booted.

        First try to flush qga until there is output.
        Then ping QEMU guest agent each 5s until VM booted or timeout.

        :param retries: Number of retries with 5s between trials.
        :type retries: int
        """
        for _ in range(retries):
            out = None
            try:
                out = self._qemu_qga_flush()
            except ValueError:
                logger.trace(f"QGA qga flush unexpected output {out}")
            # Empty output - VM not booted yet
            if not out:
                sleep(5)
            else:
                break
        else:
            raise RuntimeError(
                f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
            )
        for _ in range(retries):
            out = None
            try:
                out = self._qemu_qga_exec(u"guest-ping")
            except ValueError:
                logger.trace(f"QGA guest-ping unexpected output {out}")
            # Empty output - VM not booted yet.
            if not out:
                sleep(5)
            # Non-error return - VM booted.
            elif out.get(u"return") is not None:
                break
            # Skip error and wait.
            elif out.get(u"error") is not None:
                sleep(5)
            else:
                # If there is an unexpected output from QGA guest-info, try
                # again until timeout.
                logger.trace(f"QGA guest-ping unexpected output {out}")
        else:
            raise RuntimeError(
                f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
            )

    def _wait_iperf3(self, retries=60):
        """Wait until QEMU with iPerf3 is booted.

        :param retries: Number of retries.
        :type retries: int
        """
        grep = u"Server listening on 0.0.0.0 port 22."
        cmd = f"fgrep '{grep}' {self._temp.get(u'log')}"
        message = f"QEMU: Timeout, VM not booted on {self._node[u'host']}!"
        exec_cmd_no_error(
            self._node, cmd=cmd, sudo=True, message=message, retries=retries,
            include_reason=True
        )

    def _update_vm_interfaces(self):
        """Update interface names in VM node dict."""
        # Send guest-network-get-interfaces command via QGA, output example:
        # {"return": [{"name": "eth0", "hardware-address": "52:54:00:00:04:01"},
        # {"name": "eth1", "hardware-address": "52:54:00:00:04:02"}]}.
        out = self._qemu_qga_exec(u"guest-network-get-interfaces")
        interfaces = out.get(u"return")
        mac_name = {}
        if not interfaces:
            raise RuntimeError(
                f"Get VM interface list failed on {self._node[u'host']}"
            )
        # Create MAC-name dict.
        for interface in interfaces:
            if u"hardware-address" not in interface:
                continue
            mac_name[interface[u"hardware-address"]] = interface[u"name"]
        # Match interface by MAC and save interface name.
        for interface in self._vm_info[u"interfaces"].values():
            mac = interface.get(u"mac_address")
            if_name = mac_name.get(mac)
            if if_name is None:
                logger.trace(f"Interface name for MAC {mac} not found")
            else:
                interface[u"name"] = if_name

    def qemu_start(self):
        """Start QEMU and wait until VM boot.

        :returns: VM node info.
        :rtype: dict
        """
        cmd_opts = OptionString()
        cmd_opts.add(f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch}")
        cmd_opts.extend(self._params)
        message = f"QEMU: Start failed on {self._node[u'host']}!"
        try:
            DUTSetup.check_huge_page(
                self._node, self._opt.get(u"mem-path"),
                int(self._opt.get(u"mem"))
            )

            exec_cmd_no_error(
                self._node, cmd_opts, timeout=300, sudo=True, message=message
            )
            self._wait_until_vm_boot()
        except RuntimeError:
            self.qemu_kill_all()
            raise
        return self._vm_info

    def qemu_kill(self):
        """Kill qemu process."""
        exec_cmd(
            self._node, f"chmod +r {self._temp.get(u'pidfile')}", sudo=True
        )
        exec_cmd(
            self._node, f"kill -SIGKILL $(cat {self._temp.get(u'pidfile')})",
            sudo=True
        )

        for value in self._temp.values():
            exec_cmd(self._node, f"cat {value}", sudo=True)
            exec_cmd(self._node, f"rm -f {value}", sudo=True)

    def qemu_kill_all(self):
        """Kill all qemu processes on DUT node if specified."""
        exec_cmd(self._node, u"pkill -SIGKILL qemu", sudo=True)

        for value in self._temp.values():
            exec_cmd(self._node, f"cat {value}", sudo=True)
            exec_cmd(self._node, f"rm -f {value}", sudo=True)
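    # qemu_start() boot sequence sketch: verify hugepages are available,
    # execute the computed "qemu-system-<arch> -daemonize ..." command line,
    # then poll the serial log or QGA socket via _wait_until_vm_boot(); on
    # any failure qemu_kill_all() removes the pidfile, sockets and logs
    # recorded in self._temp.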
    def qemu_version(self):
        """Return Qemu version.

        :returns: Qemu version.
        :rtype: str
        """
        command = f"{Constants.QEMU_BIN_PATH}/qemu-system-{self._arch} " \
            f"--version"
        try:
            stdout, _ = exec_cmd_no_error(self._node, command, sudo=True)
            return match(r"QEMU emulator version ([\d.]*)", stdout).group(1)
        except RuntimeError:
            self.qemu_kill_all()
            raise
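
# A minimal standalone sketch of the QMP exchange used above, assuming a QEMU
# instance is already listening on a UNIX socket; QMP_SOCK is a hypothetical
# path and the sketch ignores asynchronous QMP events. Uses only the standard
# library.
import json
import socket

QMP_SOCK = "/run/qmp_1.sock"  # placeholder socket path

def qmp_command(cmd):
    """Negotiate QMP capabilities, run one command, return the parsed reply."""
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(QMP_SOCK)
    reader = sock.makefile("r")
    reader.readline()  # greeting banner: {"QMP": {...}}
    sock.sendall(b'{ "execute": "qmp_capabilities" }')
    reader.readline()  # negotiation response: {"return": {}}
    sock.sendall(json.dumps({"execute": cmd}).encode())
    reply = json.loads(reader.readline())
    sock.close()
    return reply

# Example: qmp_command("query-status") should return something like
# {"return": {"status": "running", "running": true}}.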