| field | value | date |
|---|---|---|
| author | pmikus <pmikus@cisco.com> | 2016-07-29 13:14:53 +0100 |
| committer | Miroslav Miklus <mmiklus@cisco.com> | 2016-08-03 13:28:39 +0000 |
| commit | 411b4dfab6252c55d8235afaf3d1324b50b61f00 (patch) | |
| tree | 04411694238efcaf878dcceadd1cc7376186da17 | |
| parent | 635c30479fe7e7604f59c9f564c1b7f2698bb468 (diff) | |
CSIT-217 Hugepage configuration on physical LF testbeds
If QEMU requires more hugepages than are available in the system, it fails to start.
This patch adds an option to allocate the additional hugepages that are needed.
The default behavior is still to fail to start if the amount of hugepages is lower
than required.
Change-Id: I64bd8a5ec5bfa559676035c23001b12d00b026a5
Signed-off-by: pmikus <pmikus@cisco.com>
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | resources/libraries/python/QemuUtils.py | 51 |

1 file changed, 50 insertions(+), 1 deletion(-)
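The intended use of the new flag, roughly: a test that owns the VM host opts in before starting QEMU, so a hugepage shortfall triggers allocation instead of an immediate failure. A minimal sketch follows; the `node` topology dict and the `qemu_set_node()` call are assumptions added for illustration and are not part of this patch, while `qemu_set_huge_allocate()` and `qemu_start()` are the calls touched by the diff below.

```python
# Minimal usage sketch (not part of the patch). Assumes a CSIT topology
# node dict with SSH access and a pre-existing qemu_set_node() setter.
from resources.libraries.python.QemuUtils import QemuUtils

def start_vm_with_hugepage_fallback(node):
    qemu = QemuUtils()
    qemu.qemu_set_node(node)       # assumed setter: bind QemuUtils to the host
    qemu.qemu_set_huge_allocate()  # added by this patch: allow extra allocation
    qemu.qemu_start()              # on hugepage shortfall, allocate instead of raising
```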
```diff
diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index d2b184e4a4..3751cd6da9 100644
--- a/resources/libraries/python/QemuUtils.py
+++ b/resources/libraries/python/QemuUtils.py
@@ -49,6 +49,8 @@ class QemuUtils(object):
         self._qemu_opt['mem_size'] = 512
         # Default huge page mount point, required for Vhost-user interfaces.
         self._qemu_opt['huge_mnt'] = '/mnt/huge'
+        # Default do not allocate huge pages.
+        self._qemu_opt['huge_allocate'] = False
         # Default image for CSIT virl setup
         self._qemu_opt['disk_image'] = '/var/lib/vm/vhost-nested.img'
         # Affinity of qemu processes
@@ -114,6 +116,10 @@ class QemuUtils(object):
         """
         self._qemu_opt['huge_mnt'] = huge_mnt
 
+    def qemu_set_huge_allocate(self):
+        """Set flag to allocate more huge pages if needed."""
+        self._qemu_opt['huge_allocate'] = True
+
     def qemu_set_disk_image(self, disk_image):
         """Set disk image.
 
@@ -333,6 +339,39 @@ class QemuUtils(object):
             raise RuntimeError('Mount huge pages failed on {0}'.format(
                 self._node['host']))
 
+    def _huge_page_allocate(self):
+        """Huge page allocate."""
+        huge_mnt = self._qemu_opt.get('huge_mnt')
+        mem_size = self._qemu_opt.get('mem_size')
+        # Check size of free huge pages
+        (_, output, _) = self._ssh.exec_command('grep Huge /proc/meminfo')
+        regex = re.compile(r'HugePages_Free:\s+(\d+)')
+        match = regex.search(output)
+        huge_free = int(match.group(1))
+        regex = re.compile(r'HugePages_Total:\s+(\d+)')
+        match = regex.search(output)
+        huge_total = int(match.group(1))
+        regex = re.compile(r'Hugepagesize:\s+(\d+)')
+        match = regex.search(output)
+        huge_size = int(match.group(1))
+
+        mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))
+
+        if mem_needed:
+            huge_to_allocate = (mem_needed / huge_size) + huge_total
+            # Increase limit of allowed max hugepage count
+            cmd = 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.format(
+                huge_to_allocate*3)
+            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+            # Increase hugepage count
+            cmd = 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.format(
+                huge_to_allocate)
+            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
+            if int(ret_code) != 0:
+                logger.debug('Mount huge pages failed {0}'.format(stderr))
+                raise RuntimeError('Mount huge pages failed on {0}'.format(
+                    self._node['host']))
+
     def qemu_start(self):
         """Start QEMU and wait until VM boot.
 
@@ -348,7 +387,17 @@ class QemuUtils(object):
         mem = '-object memory-backend-file,id=mem,size={0}M,mem-path={1},' \
             'share=on -m {0} -numa node,memdev=mem'.format(
             self._qemu_opt.get('mem_size'), self._qemu_opt.get('huge_mnt'))
-        self._huge_page_check()
+
+        # By default check only if hugepages are available.
+        # If 'huge_allocate' is set to true try to allocate as well.
+        try:
+            self._huge_page_check()
+        except RuntimeError as runtime_error:
+            if self._qemu_opt.get('huge_allocate'):
+                self._huge_page_allocate()
+            else:
+                raise runtime_error
+
         # Setup QMP via unix socket
         qmp = '-qmp unix:{0},server,nowait'.format(self.__QMP_SOCK)
         # Setup serial console
```
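To make the sizing arithmetic in `_huge_page_allocate()` easier to follow, here is a small standalone sketch of the same computation on a made-up `grep Huge /proc/meminfo` snippet. The numbers are illustrative only; integer division (`//`) is used so the sketch also runs under Python 3, whereas the patch itself relies on Python 2 `/` division.

```python
import re

# Sample 'grep Huge /proc/meminfo' output (made-up numbers for illustration).
MEMINFO = """\
HugePages_Total:     512
HugePages_Free:      128
Hugepagesize:       2048 kB
"""

mem_size = 512  # VM memory in MB, as in self._qemu_opt['mem_size']

huge_free = int(re.search(r'HugePages_Free:\s+(\d+)', MEMINFO).group(1))
huge_total = int(re.search(r'HugePages_Total:\s+(\d+)', MEMINFO).group(1))
huge_size = int(re.search(r'Hugepagesize:\s+(\d+)', MEMINFO).group(1))  # kB

# Shortfall in kB between free hugepage memory and the VM requirement
# (the patch uses abs(), mirrored here).
mem_needed = abs((huge_free * huge_size) - (mem_size * 1024))

if mem_needed:
    # New total hugepage count to write into /proc/sys/vm/nr_hugepages.
    huge_to_allocate = (mem_needed // huge_size) + huge_total
    print('nr_hugepages target:', huge_to_allocate)        # 640
    print('max_map_count target:', huge_to_allocate * 3)   # 1920
```

With 128 free 2 MB pages (256 MB) against a 512 MB VM, the shortfall is 256 MB, so the hugepage total rises by 128 pages to 640, and `max_map_count` is bumped to three times that before `nr_hugepages` is written.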