author    | pmikus <pmikus@cisco.com>      | 2020-08-27 07:47:31 +0000
committer | Peter Mikus <pmikus@cisco.com> | 2020-09-03 06:41:47 +0000
commit    | 5c7cf5a09ec1cd7ebc4077981de84c4f18aa8738
tree      | 864d4fec2f4698650e073a19c7b91429ceae9751 /resources/libraries/python
parent    | 3b2dcb0348e890950dfbc3fe4aec7008d4e1f63a
Framework: Code alignments
+ Unifying code structures
  - Makes it easier to plug in another DUT
+ New PCI passthrough templates
+ Improved perf stat on the cores allocated in the test (usage sketch below).
Signed-off-by: pmikus <pmikus@cisco.com>
Change-Id: I325f17b977314f93cb91818feddfddf3e607eb8a
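Taken together, the PerfUtil.py and QemuManager.py hunks below implement the last bullet: QemuManager.start_all_vms() now returns the pinned CPUs as a comma-separated string, and PerfUtil.perf_stat_on_all_duts() accepts that string as its cpu_list argument, so perf stat is limited to the cores actually allocated in the test. A minimal, hypothetical usage sketch; `nodes` and `qemu_manager` are assumed to come from the usual CSIT suite setup and are not part of this change:

    # Hypothetical test-level flow (sketch only, not from this patch):
    # `nodes` is the loaded topology dict, `qemu_manager` an already
    # initialized QemuManager with machines added.
    from resources.libraries.python.PerfUtil import PerfUtil

    cpus = qemu_manager.start_all_vms(pinning=True)   # e.g. u"2,3,4,5"
    PerfUtil.perf_stat_on_all_duts(nodes, cpu_list=cpus, duration=5)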
Diffstat (limited to 'resources/libraries/python')
-rw-r--r-- | resources/libraries/python/Constants.py   |   9
-rw-r--r-- | resources/libraries/python/PerfUtil.py    |  50
-rw-r--r-- | resources/libraries/python/QemuManager.py | 243
-rw-r--r-- | resources/libraries/python/QemuUtils.py   |   6
4 files changed, 284 insertions, 24 deletions
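The first hunk below replaces the hard-coded single perf event with an environment-driven list, so a run can change the measured events without patching Constants.py. A standalone sketch of the same lookup pattern; os.environ.get() here is a simplified stand-in for the get_str_from_env() helper used in the hunk, which may apply CSIT-specific handling on top:

    import os

    # Take the event list from the environment when set, otherwise fall back
    # to the built-in default (sketch of the pattern, not the CSIT helper).
    PERF_STAT_EVENTS = os.environ.get(
        u"PERF_STAT_EVENTS",
        u"cpu-clock,context-switches,cpu-migrations,page-faults,"
        u"cycles,instructions,branches,branch-misses,L1-icache-load-misses"
    )
    print(PERF_STAT_EVENTS.split(u","))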
diff --git a/resources/libraries/python/Constants.py b/resources/libraries/python/Constants.py
index 6e44d39e80..437c97f187 100644
--- a/resources/libraries/python/Constants.py
+++ b/resources/libraries/python/Constants.py
@@ -139,8 +139,8 @@ class Constants:
     # Kubernetes templates location
     RESOURCES_TPL_K8S = u"resources/templates/kubernetes"

-    # KernelVM templates location
-    RESOURCES_TPL_VM = u"resources/templates/vm"
+    # Templates location
+    RESOURCES_TPL = u"resources/templates"

     # Container templates location
     RESOURCES_TPL_CONTAINER = u"resources/templates/container"
@@ -215,7 +215,10 @@ class Constants:
     CORE_DUMP_DIR = u"/tmp"

     # Perf stat events (comma separated).
-    PERF_STAT_EVENTS = u"L1-icache-load-misses"
+    PERF_STAT_EVENTS = get_str_from_env(
+        u"PERF_STAT_EVENTS",
+        u"cpu-clock,context-switches,cpu-migrations,page-faults,"
+        u"cycles,instructions,branches,branch-misses,L1-icache-load-misses")

     # Equivalent to ~0 used in vpp code
     BITWISE_NON_ZERO = 0xffffffff
diff --git a/resources/libraries/python/PerfUtil.py b/resources/libraries/python/PerfUtil.py
index 4c286b455e..6444cc595f 100644
--- a/resources/libraries/python/PerfUtil.py
+++ b/resources/libraries/python/PerfUtil.py
@@ -14,6 +14,7 @@
 """Linux perf utility."""

 from resources.libraries.python.Constants import Constants
+from resources.libraries.python.OptionString import OptionString
 from resources.libraries.python.ssh import exec_cmd
 from resources.libraries.python.topology import NodeType

@@ -28,29 +29,42 @@ class PerfUtil:
         """Get perf stat read for duration.

         :param node: Node in the topology.
-        :param cpu_list: CPU List.
+        :param cpu_list: CPU List as a string separated by comma.
         :param duration: Measure time in seconds.
         :type node: dict
         :type cpu_list: str
         :type duration: int
         """
-        cpu = cpu_list if cpu_list else u"0-$(($(nproc) - 1))"
-        if Constants.PERF_STAT_EVENTS:
-            command = (
-                u"perf stat"
-                f" --cpu {cpu} --no-aggr"
-                f" --event '{{{Constants.PERF_STAT_EVENTS}}}'"
-                f" --interval-print 1000 "
-                f" -- sleep {int(duration)}"
-            )
-        else:
-            command = (
-                u"perf stat"
-                f" --cpu {cpu} --no-aggr"
-                f" --interval-print 1000 "
-                f" -- sleep {int(duration)}"
-            )
-        exec_cmd(node, command, sudo=True)
+        if cpu_list:
+            cpu_list = list(dict.fromkeys(cpu_list.split(u",")))
+            cpu_list = ",".join(str(cpu) for cpu in cpu_list)
+
+        cmd_opts = OptionString(prefix=u"--")
+        cmd_opts.add(u"no-aggr")
+        cmd_opts.add_with_value_if(
+            u"cpu", cpu_list, cpu_list
+        )
+        cmd_opts.add_if(
+            u"all-cpus", not(cpu_list)
+        )
+        cmd_opts.add_with_value_if(
+            u"event", f"'{{{Constants.PERF_STAT_EVENTS}}}'",
+            Constants.PERF_STAT_EVENTS
+        )
+        cmd_opts.add_with_value(
+            u"interval-print", 1000
+        )
+        cmd_opts.add_with_value(
+            u"field-separator", u"';'"
+        )
+
+        cmd_base = OptionString()
+        cmd_base.add(f"perf stat")
+        cmd_base.extend(cmd_opts)
+        cmd_base.add(u"--")
+        cmd_base.add_with_value(u"sleep", int(duration))
+
+        exec_cmd(node, cmd_base, sudo=True)

     @staticmethod
     def perf_stat_on_all_duts(nodes, cpu_list=None, duration=1):
diff --git a/resources/libraries/python/QemuManager.py b/resources/libraries/python/QemuManager.py
index 66a21aa115..547250e283 100644
--- a/resources/libraries/python/QemuManager.py
+++ b/resources/libraries/python/QemuManager.py
@@ -107,6 +107,7 @@ class QemuManager:
         :param pinning: If True, then do also QEMU process pinning.
         :type pinning: bool
         """
+        cpus = []
         for machine, machine_affinity in \
                 zip(self.machines.values(), self.machines_affinity.values()):
             index = list(self.machines.values()).index(machine)
@@ -114,6 +115,8 @@ class QemuManager:
             self.nodes[name] = machine.qemu_start()
             if pinning:
                 machine.qemu_set_affinity(*machine_affinity)
+                cpus.extend(machine_affinity)
+        return ",".join(str(cpu) for cpu in cpus)

     def kill_all_vms(self, force=False):
         """Kill all added VMs in manager.
@@ -176,3 +179,243 @@ class QemuManager:
                 csum=kwargs[u"enable_csum"],
                 gso=kwargs[u"enable_gso"]
             )
+
+    def _c_vpp_2vfpt_ip4base_plen24(self, **kwargs):
+        """Instantiate one VM with vpp_2vfpt_ip4base_plen24 configuration.
+
+        :param kwargs: Named parameters.
+        :type kwargs: dict
+        """
+        qemu_id = kwargs[u"qemu_id"]
+        name = kwargs[u"name"]
+
+        self.machines[name] = QemuUtils(
+            node=self.nodes[kwargs[u"node"]],
+            qemu_id=qemu_id,
+            smp=len(self.machines_affinity[name]),
+            mem=4096,
+            vnf=kwargs[u"vnf"],
+            img=Constants.QEMU_VM_KERNEL
+        )
+        self.machines[name].add_default_params()
+        self.machines[name].add_kernelvm_params()
+        if u"DUT1" in name:
+            self.machines[name].configure_kernelvm_vnf(
+                ip1=u"2.2.2.1/30",
+                ip2=u"1.1.1.2/30",
+                route1=u"20.0.0.0/24",
+                routeif1=u"avf-0/0/6/0",
+                nexthop1=u"2.2.2.2",
+                route2=u"10.0.0.0/24",
+                routeif2=u"avf-0/0/7/0",
+                nexthop2=u"1.1.1.1",
+                arpmac1=u"3c:fd:fe:d1:5c:d8",
+                arpip1=u"1.1.1.1",
+                arpif1=u"avf-0/0/7/0",
+                queues=kwargs[u"queues"],
+                jumbo_frames=kwargs[u"jumbo"]
+            )
+        else:
+            self.machines[name].configure_kernelvm_vnf(
+                ip1=u"3.3.3.2/30",
+                ip2=u"2.2.2.2/30",
+                route1=u"10.0.0.0/24",
+                routeif1=u"avf-0/0/7/0",
+                nexthop1=u"2.2.2.1",
+                route2=u"20.0.0.0/24",
+                routeif2=u"avf-0/0/6/0",
+                nexthop2=u"3.3.3.1",
+                arpmac1=u"3c:fd:fe:d1:5c:d9",
+                arpip1=u"3.3.3.1",
+                arpif1=u"avf-0/0/6/0",
+                queues=kwargs[u"queues"],
+                jumbo_frames=kwargs[u"jumbo"]
+            )
+        self.machines[name].add_vfio_pci_if(
+            pci=Topology.get_interface_pci_addr(
+                self.nodes[kwargs[u"node"]], kwargs[u"if2"])
+        )
+        self.machines[name].add_vfio_pci_if(
+            pci=Topology.get_interface_pci_addr(
+                self.nodes[kwargs[u"node"]], kwargs[u"if1"])
+        )
+
+    def _c_vpp_2vfpt_ip4scale2k_plen30(self, **kwargs):
+        """Instantiate one VM with vpp_2vfpt_ip4scale2k_plen30 configuration.
+
+        :param kwargs: Named parameters.
+        :type kwargs: dict
+        """
+        qemu_id = kwargs[u"qemu_id"]
+        name = kwargs[u"name"]
+
+        self.machines[name] = QemuUtils(
+            node=self.nodes[kwargs[u"node"]],
+            qemu_id=qemu_id,
+            smp=len(self.machines_affinity[name]),
+            mem=4096,
+            vnf=kwargs[u"vnf"],
+            img=Constants.QEMU_VM_KERNEL
+        )
+        self.machines[name].add_default_params()
+        self.machines[name].add_kernelvm_params()
+        if u"DUT1" in name:
+            self.machines[name].configure_kernelvm_vnf(
+                ip1=u"2.2.2.1/30",
+                ip2=u"1.1.1.2/30",
+                route1=u"20.0.0.0/30",
+                routeif1=u"avf-0/0/6/0",
+                nexthop1=u"2.2.2.2",
+                route2=u"10.0.0.0/30",
+                routeif2=u"avf-0/0/7/0",
+                nexthop2=u"1.1.1.1",
+                arpmac1=u"3c:fd:fe:d1:5c:d8",
+                arpip1=u"1.1.1.1",
+                arpif1=u"avf-0/0/7/0",
+                queues=kwargs[u"queues"],
+                jumbo_frames=kwargs[u"jumbo"]
+            )
+        else:
+            self.machines[name].configure_kernelvm_vnf(
+                ip1=u"3.3.3.2/30",
+                ip2=u"2.2.2.2/30",
+                route1=u"10.0.0.0/30",
+                routeif1=u"avf-0/0/7/0",
+                nexthop1=u"2.2.2.1",
+                route2=u"20.0.0.0/30",
+                routeif2=u"avf-0/0/6/0",
+                nexthop2=u"3.3.3.1",
+                arpmac1=u"3c:fd:fe:d1:5c:d9",
+                arpip1=u"3.3.3.1",
+                arpif1=u"avf-0/0/6/0",
+                queues=kwargs[u"queues"],
+                jumbo_frames=kwargs[u"jumbo"]
+            )
+        self.machines[name].add_vfio_pci_if(
+            pci=Topology.get_interface_pci_addr(
+                self.nodes[kwargs[u"node"]], kwargs[u"if2"])
+        )
+        self.machines[name].add_vfio_pci_if(
+            pci=Topology.get_interface_pci_addr(
+                self.nodes[kwargs[u"node"]], kwargs[u"if1"])
+        )
+
+    def _c_vpp_2vfpt_ip4scale20k_plen30(self, **kwargs):
+        """Instantiate one VM with vpp_2vfpt_ip4scale20k_plen30 configuration.
+
+        :param kwargs: Named parameters.
+        :type kwargs: dict
+        """
+        qemu_id = kwargs[u"qemu_id"]
+        name = kwargs[u"name"]
+
+        self.machines[name] = QemuUtils(
+            node=self.nodes[kwargs[u"node"]],
+            qemu_id=qemu_id,
+            smp=len(self.machines_affinity[name]),
+            mem=4096,
+            vnf=kwargs[u"vnf"],
+            img=Constants.QEMU_VM_KERNEL
+        )
+        self.machines[name].add_default_params()
+        self.machines[name].add_kernelvm_params()
+        if u"DUT1" in name:
+            self.machines[name].configure_kernelvm_vnf(
+                ip1=u"2.2.2.1/30",
+                ip2=u"1.1.1.2/30",
+                route1=u"20.0.0.0/30",
+                routeif1=u"avf-0/0/6/0",
+                nexthop1=u"2.2.2.2",
+                route2=u"10.0.0.0/30",
+                routeif2=u"avf-0/0/7/0",
+                nexthop2=u"1.1.1.1",
+                arpmac1=u"3c:fd:fe:d1:5c:d8",
+                arpip1=u"1.1.1.1",
+                arpif1=u"avf-0/0/7/0",
+                queues=kwargs[u"queues"],
+                jumbo_frames=kwargs[u"jumbo"]
+            )
+        else:
+            self.machines[name].configure_kernelvm_vnf(
+                ip1=u"3.3.3.2/30",
+                ip2=u"2.2.2.2/30",
+                route1=u"10.0.0.0/30",
+                routeif1=u"avf-0/0/7/0",
+                nexthop1=u"2.2.2.1",
+                route2=u"20.0.0.0/30",
+                routeif2=u"avf-0/0/6/0",
+                nexthop2=u"3.3.3.1",
+                arpmac1=u"3c:fd:fe:d1:5c:d9",
+                arpip1=u"3.3.3.1",
+                arpif1=u"avf-0/0/6/0",
+                queues=kwargs[u"queues"],
+                jumbo_frames=kwargs[u"jumbo"]
+            )
+        self.machines[name].add_vfio_pci_if(
+            pci=Topology.get_interface_pci_addr(
+                self.nodes[kwargs[u"node"]], kwargs[u"if2"])
+        )
+        self.machines[name].add_vfio_pci_if(
+            pci=Topology.get_interface_pci_addr(
+                self.nodes[kwargs[u"node"]], kwargs[u"if1"])
+        )
+
+    def _c_vpp_2vfpt_ip4scale200k_plen30(self, **kwargs):
+        """Instantiate one VM with vpp_2vfpt_ip4scale200k_plen30 configuration.
+
+        :param kwargs: Named parameters.
+        :type kwargs: dict
+        """
+        qemu_id = kwargs[u"qemu_id"]
+        name = kwargs[u"name"]
+
+        self.machines[name] = QemuUtils(
+            node=self.nodes[kwargs[u"node"]],
+            qemu_id=qemu_id,
+            smp=len(self.machines_affinity[name]),
+            mem=4096,
+            vnf=kwargs[u"vnf"],
+            img=Constants.QEMU_VM_KERNEL
+        )
+        self.machines[name].add_default_params()
+        self.machines[name].add_kernelvm_params()
+        if u"DUT1" in name:
+            self.machines[name].configure_kernelvm_vnf(
+                ip1=u"2.2.2.1/30",
+                ip2=u"1.1.1.2/30",
+                route1=u"20.0.0.0/30",
+                routeif1=u"avf-0/0/6/0",
+                nexthop1=u"2.2.2.2",
+                route2=u"10.0.0.0/30",
+                routeif2=u"avf-0/0/7/0",
+                nexthop2=u"1.1.1.1",
+                arpmac1=u"3c:fd:fe:d1:5c:d8",
+                arpip1=u"1.1.1.1",
+                arpif1=u"avf-0/0/7/0",
+                queues=kwargs[u"queues"],
+                jumbo_frames=kwargs[u"jumbo"]
+            )
+        else:
+            self.machines[name].configure_kernelvm_vnf(
+                ip1=u"3.3.3.2/30",
+                ip2=u"2.2.2.2/30",
+                route1=u"10.0.0.0/30",
+                routeif1=u"avf-0/0/7/0",
+                nexthop1=u"2.2.2.1",
+                route2=u"20.0.0.0/30",
+                routeif2=u"avf-0/0/6/0",
+                nexthop2=u"3.3.3.1",
+                arpmac1=u"3c:fd:fe:d1:5c:d9",
+                arpip1=u"3.3.3.1",
+                arpif1=u"avf-0/0/6/0",
+                queues=kwargs[u"queues"],
+                jumbo_frames=kwargs[u"jumbo"]
+            )
+        self.machines[name].add_vfio_pci_if(
+            pci=Topology.get_interface_pci_addr(
+                self.nodes[kwargs[u"node"]], kwargs[u"if2"])
+        )
+        self.machines[name].add_vfio_pci_if(
+            pci=Topology.get_interface_pci_addr(
+                self.nodes[kwargs[u"node"]], kwargs[u"if1"])
+        )
diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index a814763e27..b29e19c035 100644
--- a/resources/libraries/python/QemuUtils.py
+++ b/resources/libraries/python/QemuUtils.py
@@ -323,12 +323,12 @@ class QemuUtils:
             vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
         if "nat" in self._opt.get(u'vnf'):
             vpp_config.add_nat(value=u"endpoint-dependent")
-            vpp_config.add_nat_max_translations_per_thread(value=655360)
+            #vpp_config.add_nat_max_translations_per_thread(value=655360)
             vpp_config.add_plugin(u"enable", u"nat_plugin.so")
         vpp_config.write_config(startup)

         # Create VPP running configuration.
-        template = f"{Constants.RESOURCES_TPL_VM}/{self._opt.get(u'vnf')}.exec"
+        template = f"{Constants.RESOURCES_TPL}/vm/{self._opt.get(u'vnf')}.exec"
         exec_cmd_no_error(self._node, f"rm -f {running}", sudo=True)

         with open(template, u"rt") as src_file:
@@ -400,7 +400,7 @@ class QemuUtils:
         :param kwargs: Key-value pairs to replace content of init startup file.
         :type kwargs: dict
         """
-        template = f"{Constants.RESOURCES_TPL_VM}/init.sh"
+        template = f"{Constants.RESOURCES_TPL}/vm/init.sh"
         init = self._temp.get(u"ini")
         exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)
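For reference, a standalone sketch of the command line the reworked PerfUtil.perf_stat() composes. Option names and their order are taken from the hunk above; plain string joining stands in for the OptionString helper, whose exact quoting is assumed:

    # Mirror of the composition in PerfUtil.perf_stat(), sketch only.
    def perf_stat_command(cpu_list=None, events=u"cycles,instructions", duration=1):
        if cpu_list:
            # Deduplicate the CPU list while keeping order, as the patch does.
            cpu_list = u",".join(dict.fromkeys(cpu_list.split(u",")))
        opts = [u"--no-aggr"]
        opts.append(f"--cpu {cpu_list}" if cpu_list else u"--all-cpus")
        if events:
            opts.append(f"--event '{{{events}}}'")
        opts.append(u"--interval-print 1000")
        opts.append(u"--field-separator ';'")
        return f"perf stat {u' '.join(opts)} -- sleep {int(duration)}"

    print(perf_stat_command(cpu_list=u"2,3,3,4", duration=5))
    # Prints (single line):
    # perf stat --no-aggr --cpu 2,3,4 --event '{cycles,instructions}'
    #   --interval-print 1000 --field-separator ';' -- sleep 5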