author    Peter Mikus <pmikus@cisco.com>    2020-02-10 08:42:33 +0000
committer Peter Mikus <pmikus@cisco.com>    2020-02-12 09:46:52 +0000
commit    34ec0f34987ceaec1f77cb56c4c672f01aa4dca4 (patch)
tree      7ecc79fb2680321cb1c4a5e773d85f29ee17da7d /resources/tools/testbed-setup/ansible
parent    c3abfc9d604770a10a658d1a7f780c3761fe15f4 (diff)
Ansible: Optimizations
Signed-off-by: Peter Mikus <pmikus@cisco.com>
Change-Id: I56fb45fb22b924eec6ee6b6b7ad7874a07f1555c
Diffstat (limited to 'resources/tools/testbed-setup/ansible')
-rw-r--r--  resources/tools/testbed-setup/ansible/inventories/lf_inventory/group_vars/all.yaml | 2
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/calibration/tasks/main.yaml | 33
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/calibration/tasks/x86_64.yaml | 24
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/performance_tuning/files/90-csit | 80
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/main.yaml | 127
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/turbo_boost.yaml | 8
-rwxr-xr-x  resources/tools/testbed-setup/ansible/roles/tg/files/csit-initialize-docker-tg.sh | 2
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/wrk/tasks/main.yaml | 4
8 files changed, 164 insertions, 116 deletions
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/group_vars/all.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/group_vars/all.yaml
index f24c6a7d42..0756621eef 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/group_vars/all.yaml
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/group_vars/all.yaml
@@ -2,4 +2,4 @@
# file: lf_inventory/group_vars/all.yaml
# Ansible interpreter (for PIP)
-ansible_python_interpreter: "python3"
+ansible_python_interpreter: "/usr/bin/python3"
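
The pin to an absolute path is deliberate: a bare "python3" is resolved through Ansible's interpreter discovery and the remote PATH, which can differ between hosts, while "/usr/bin/python3" is unambiguous. A minimal sketch of how the same variable could be narrowed per group if some machines kept their interpreter elsewhere (the "legacy" group and its path are hypothetical, not part of this patch):

    # lf_inventory/group_vars/all.yaml - applies everywhere, as in this patch
    ansible_python_interpreter: "/usr/bin/python3"

    # lf_inventory/group_vars/legacy.yaml - hypothetical narrower override
    ansible_python_interpreter: "/usr/local/bin/python3"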
diff --git a/resources/tools/testbed-setup/ansible/roles/calibration/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/calibration/tasks/main.yaml
index 462f72788c..cf0f2450bb 100644
--- a/resources/tools/testbed-setup/ansible/roles/calibration/tasks/main.yaml
+++ b/resources/tools/testbed-setup/ansible/roles/calibration/tasks/main.yaml
@@ -9,18 +9,35 @@
tags:
- install-dependencies
-- name: Calibration - Run Spectre Meltdown Checker
- raw: "wget -qO - https://meltdown.ovh | sudo bash -s - --no-color || true"
- register: spectre_meltdown_output
+- name: Calibration - Get Spectre Meltdown Checker
+ get_url:
+ url: "https://meltdown.ovh"
+ dest: "/opt/spectre-meltdown-checker.sh"
+ mode: "744"
tags:
- - run-spectre-meltdown-checker
+ - check-spectre-meltdown
-- debug: var=spectre_meltdown_output.stdout_lines
+- name: Calibration - Run Spectre Meltdown Checker
+ shell: "/opt/spectre-meltdown-checker.sh --no-color || true"
+ async: 60
+ poll: 0
+ register: spectre_meltdown_async
tags:
- - run-spectre-meltdown-checker
+ - check-spectre-meltdown
- name: Calibration - {{ ansible_machine }} Specific
include_tasks: "{{ ansible_machine }}.yaml"
- when: ansible_machine == "x86_64"
tags:
- - run-jitter-tool
+ - check-machine-specific
+ - check-jitter-tool
+
+- name: Calibration - Check Async Status
+ async_status:
+ jid: "{{ spectre_meltdown_async.ansible_job_id }}"
+ register: "spectre_meltdown_poll_results"
+ until: spectre_meltdown_poll_results.finished
+ retries: 30
+
+- debug: var=spectre_meltdown_poll_results.stdout_lines
+ tags:
+ - check-spectre-meltdown
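
This rewrite replaces a blocking "wget | bash" raw task with three cooperating pieces: get_url fetches the checker script once to a known path, shell starts it with async: 60 / poll: 0 so the play moves on immediately, and async_status later polls the registered job id until it finishes. A minimal self-contained sketch of the same pattern, with illustrative task names and a stand-in sleep command rather than anything from this patch:

    ---
    # async-demo.yaml -- run with: ansible-playbook -i localhost, -c local async-demo.yaml
    - hosts: all
      tasks:
        - name: Start a long-running command without blocking the play
          shell: "sleep 10 && echo done"
          async: 60      # allow the job up to 60 seconds to complete
          poll: 0        # fire and forget; collect the result later
          register: job

        - name: Other tasks run here while the job proceeds in the background
          debug:
            msg: "play continues immediately"

        - name: Collect the background job result
          async_status:
            jid: "{{ job.ansible_job_id }}"
          register: job_result
          until: job_result.finished
          retries: 30    # poll up to 30 times (default delay 5 s between polls)

        - debug: var=job_result.stdout_lines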
diff --git a/resources/tools/testbed-setup/ansible/roles/calibration/tasks/x86_64.yaml b/resources/tools/testbed-setup/ansible/roles/calibration/tasks/x86_64.yaml
index 1d9ed5ea08..90b1c954b5 100644
--- a/resources/tools/testbed-setup/ansible/roles/calibration/tasks/x86_64.yaml
+++ b/resources/tools/testbed-setup/ansible/roles/calibration/tasks/x86_64.yaml
@@ -6,20 +6,30 @@
repo: "https://gerrit.fd.io/r/pma_tools"
dest: "{{ pma_directory }}"
tags:
- - run-jitter-tool
+ - check-jitter-tool
- name: Calibration - Compile PMA Tool
raw: "cd {{ pma_directory }}/jitter && make"
tags:
- - run-jitter-tool
+ - check-jitter-tool
- name: Calibration - Run Jitter Tool
- raw: "{{ pma_directory }}/jitter/jitter -c {{ jitter_core }} -i {{ jitter_iterations }} -f"
+ shell: "{{ pma_directory }}/jitter/jitter -c {{ jitter_core }} -i {{ jitter_iterations }} -f"
become: yes
- register: jitter_output
+ async: 60
+ poll: 0
+ ignore_errors: yes
+ register: jitter_async
tags:
- - run-jitter-tool
+ - check-jitter-tool
-- debug: var=jitter_output.stdout_lines
+- name: Calibration - Check Async Status
+ async_status:
+ jid: "{{ jitter_async.ansible_job_id }}"
+ register: "jitter_poll_results"
+ until: jitter_poll_results.finished
+ retries: 30
+
+- debug: var=jitter_poll_results.stdout_lines
tags:
- - run-jitter-tool
+ - check-jitter-tool
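
The jitter tool run is converted to the same async pattern used for the Spectre/Meltdown checker above, with ignore_errors: yes so a non-zero exit from the tool does not abort the play. One related detail: a completed async job leaves a status file under ~/.ansible_async on the target, which async_status can remove explicitly. A hedged sketch of such a cleanup step, not present in this patch:

    - name: Clean up the async job status file
      async_status:
        jid: "{{ jitter_async.ansible_job_id }}"
        mode: "cleanup"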
diff --git a/resources/tools/testbed-setup/ansible/roles/performance_tuning/files/90-csit b/resources/tools/testbed-setup/ansible/roles/performance_tuning/files/90-csit
deleted file mode 100644
index a0404c20d7..0000000000
--- a/resources/tools/testbed-setup/ansible/roles/performance_tuning/files/90-csit
+++ /dev/null
@@ -1,80 +0,0 @@
-# change the minimum size of the hugepage pool.
-# 2G VPP, 16x2G for Fullbox VM, 2G reserve
-vm.nr_hugepages={{ sysctl.vm.nr_hugepages }}
-
-# this file contains the maximum number of memory map areas a process
-# may have. memory map areas are used as a side-effect of calling
-# malloc, directly by mmap and mprotect, and also when loading shared
-# libraries.
-#
-# while most applications need less than a thousand maps, certain
-# programs, particularly malloc debuggers, may consume lots of them,
-# e.g., up to one or two maps per allocation.
-# must be greater than or equal to (2 * vm.nr_hugepages).
-vm.max_map_count={{ sysctl.vm.max_map_count }}
-
-# hugetlb_shm_group contains group id that is allowed to create sysv
-# shared memory segment using hugetlb page.
-vm.hugetlb_shm_group=1000
-
-# this control is used to define how aggressive the kernel will swap
-# memory pages. higher values will increase agressiveness, lower values
-# decrease the amount of swap. a value of 0 instructs the kernel not to
-# initiate swap until the amount of free and file-backed pages is less
-# than the high water mark in a zone.
-vm.swappiness=0
-
-# this parameter can be used to control the nmi watchdog
-# (i.e. the hard lockup detector) on x86 systems.
-#
-# 0 - disable the hard lockup detector
-# 1 - enable the hard lockup detector
-#
-# the hard lockup detector monitors each cpu for its ability to respond to
-# timer interrupts. the mechanism utilizes cpu performance counter registers
-# that are programmed to generate non-maskable interrupts (nmis) periodically
-# while a cpu is busy. hence, the alternative name 'nmi watchdog'.
-#
-# the nmi watchdog is disabled by default if the kernel is running as a guest
-# in a kvm virtual machine. this default can be overridden by adding
-#kernel. nmi_watchdog=1
-
-# shared memory max must be greator or equal to the total size of hugepages.
-# for 2mb pages, totalhugepagesize = vm.nr_hugepages * 2 * 1024 * 1024
-# if the existing kernel.shmmax setting (cat /sys/proc/kernel/shmmax)
-# is greater than the calculated totalhugepagesize then set this parameter
-# to current shmmax value.
-kernel.shmmax=8589934592
-
-# this option can be used to select the type of process address
-# space randomization that is used in the system, for architectures
-# that support this feature.
-# 0 - turn the process address space randomization off. this is the
-# default for architectures that do not support this feature anyways,
-# and kernels that are booted with the "norandmaps" parameter.
-kernel.randomize_va_space=0
-
-# this parameter can be used to control the soft lockup detector.
-#
-# 0 - disable the soft lockup detector
-# 1 - enable the soft lockup detector
-#
-# the soft lockup detector monitors cpus for threads that are hogging the cpus
-# without rescheduling voluntarily, and thus prevent the 'watchdog/n' threads
-# from running. the mechanism depends on the cpus ability to respond to timer
-# interrupts which are needed for the 'watchdog/n' threads to be woken up by
-# the watchdog timer function, otherwise the nmi watchdog - if enabled - can
-# detect a hard lockup condition.
-#kernel.soft_watchdog=0
-
-# this value can be used to control on which cpus the watchdog may run.
-# the default cpumask is all possible cores, but if no_hz_full is
-# enabled in the kernel config, and cores are specified with the
-# nohz_full= boot argument, those cores are excluded by default.
-# offline cores can be included in this mask, and if the core is later
-# brought online, the watchdog will be started based on the mask value.
-#
-# typically this value would only be touched in the nohz_full case
-# to re-enable cores that by default were not running the watchdog,
-# if a kernel lockup was suspected on those cores.
-kernel.watchdog_cpumask={{ sysctl.kernel.watchdog_cpumask }}
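
Removing this template drops no live setting: every uncommented key it contained (vm.nr_hugepages, vm.max_map_count, vm.hugetlb_shm_group, vm.swappiness, kernel.shmmax, kernel.randomize_va_space, kernel.watchdog_cpumask) reappears below as an individual sysctl task targeting /etc/sysctl.d/90-csit.conf, and the two watchdog keys that were commented out here were never applied anyway. A hedged way to confirm on a host that the generated drop-in still loads cleanly (task name illustrative):

    - name: Verify the CSIT sysctl drop-in loads without errors
      command: "sysctl --load=/etc/sysctl.d/90-csit.conf"
      become: yes
      changed_when: false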
diff --git a/resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/main.yaml
index 77b2683124..9fedd27160 100644
--- a/resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/main.yaml
+++ b/resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/main.yaml
@@ -9,19 +9,19 @@
tags:
- install-dependencies
-- name: Performance Tuning - Configure {{ ansible_machine }} kernel parameters
+- name: Performance Tuning - Configure {{ ansible_machine }} Kernel Parameters
lineinfile:
path: "/etc/default/grub"
state: "present"
regexp: "^GRUB_CMDLINE_LINUX="
- line: {{ grub_cmdline_linux[ansible_machine] }}
+ line: "{{ grub_cmdline_linux[ansible_machine] }}"
notify:
- "Update GRUB"
- "Reboot server"
tags:
- set-grub
-- name: Performance Tuning - Turbo boost
+- name: Performance Tuning - Turbo Boost
import_tasks: turbo_boost.yaml
when: >
cpu_microarchitecture == "skylake" or
@@ -29,17 +29,116 @@
tags:
- turbo-boost
-- name: Performance Tuning - Copy CSIT sysctl file
- template:
- src: "files/90-csit"
- dest: "/etc/sysctl.d/90-csit.conf"
- owner: "root"
- group: "root"
- mode: "0644"
+- name: Performance Tuning - Adjust nr_hugepages
+ # change the minimum size of the hugepage pool.
+ # 2G VPP, 4GB per VNF/CNF, 2G reserve
+ sysctl:
+ name: "vm.nr_hugepages"
+ value: "{{ sysctl.vm.nr_hugepages }}"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust max_map_count
+ # this file contains the maximum number of memory map areas a process
+ # may have. memory map areas are used as a side-effect of calling
+ # malloc, directly by mmap and mprotect, and also when loading shared
+ # libraries.
+ #
+ # while most applications need less than a thousand maps, certain
+ # programs, particularly malloc debuggers, may consume lots of them,
+ # e.g., up to one or two maps per allocation.
+ # must be greater than or equal to (2 * vm.nr_hugepages).
+ sysctl:
+ name: "vm.max_map_count"
+ value: "{{ sysctl.vm.nr_hugepages * 4 }}"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust hugetlb_shm_group
+ # hugetlb_shm_group contains group id that is allowed to create sysv
+ # shared memory segment using hugetlb page.
+ sysctl:
+ name: "vm.hugetlb_shm_group"
+ value: "1000"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust swappiness
+ # this control is used to define how aggressive the kernel will swap
+ # memory pages. higher values will increase aggressiveness, lower values
+ # decrease the amount of swap. a value of 0 instructs the kernel not to
+ # initiate swap until the amount of free and file-backed pages is less
+ # than the high water mark in a zone.
+ sysctl:
+ name: "vm.swappiness"
+ value: "0"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust shmmax
+ # shared memory max must be greater than or equal to the total size of hugepages.
+ # for 2mb pages, totalhugepagesize = vm.nr_hugepages * 2 * 1024 * 1024
+ # if the existing kernel.shmmax setting (cat /sys/proc/kernel/shmmax)
+ # is greater than the calculated totalhugepagesize then set this parameter
+ # to current shmmax value.
+ sysctl:
+ name: "kernel.shmmax"
+ value: "{{ sysctl.vm.nr_hugepages * 2 * 1024 * 1024 }}"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust watchdog_cpumask
+ # this value can be used to control on which cpus the watchdog may run.
+ # the default cpumask is all possible cores, but if no_hz_full is
+ # enabled in the kernel config, and cores are specified with the
+ # nohz_full= boot argument, those cores are excluded by default.
+ # offline cores can be included in this mask, and if the core is later
+ # brought online, the watchdog will be started based on the mask value.
+ #
+ # typically this value would only be touched in the nohz_full case
+ # to re-enable cores that by default were not running the watchdog,
+ # if a kernel lockup was suspected on those cores.
+ sysctl:
+ name: "kernel.watchdog_cpumask"
+ value: "{{ sysctl.kernel.watchdog_cpumask }}"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
+ tags:
+ - set-sysctl
+
+- name: Performance Tuning - Adjust randomize_va_space
+ # this option can be used to select the type of process address
+ # space randomization that is used in the system, for architectures
+ # that support this feature.
+ # 0 - turn the process address space randomization off. this is the
+ # default for architectures that do not support this feature anyways,
+ # and kernels that are booted with the "norandmaps" parameter.
+ sysctl:
+ name: "kernel.randomize_va_space"
+ value: "0"
+ state: "present"
+ sysctl_file: "/etc/sysctl.d/90-csit.conf"
+ reload: "yes"
tags:
- - copy-90-csit
+ - set-sysctl
-- name: Performance Tuning - Copy cpufrequtils file
+- name: Performance Tuning - Copy Cpufrequtils File
copy:
src: "files/cpufrequtils"
dest: "/etc/default/cpufrequtils"
@@ -49,14 +148,14 @@
tags:
- copy-cpufrequtils
-- name: Performance Tuning - Set ondemand service to disable
+- name: Performance Tuning - Set Ondemand Service To Disable
service:
name: "ondemand"
enabled: "no"
tags:
- set-ondemand
-- name: Performance Tuning - Load kernel modules by default
+- name: Performance Tuning - Load Kernel Modules By Default
lineinfile:
path: "/etc/modules"
state: "present"
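
Two of the new tasks derive their values from sysctl.vm.nr_hugepages instead of hard-coding them, which is the real fix here: the old template pinned kernel.shmmax at 8589934592 regardless of pool size. With 2 MB hugepages, the shmmax expression sizes shared memory to exactly the hugepage pool, and max_map_count is set to four times the pool, comfortably above the documented minimum of twice the pool. A worked sketch of the arithmetic, assuming a hypothetical pool of 8192 hugepages:

    # vm.nr_hugepages:  8192                    -> 16 GB pool of 2 MB pages
    # vm.max_map_count: 8192 * 4                =  32768
    # kernel.shmmax:    8192 * 2 * 1024 * 1024  =  17179869184 bytes (16 GB)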
diff --git a/resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/turbo_boost.yaml b/resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/turbo_boost.yaml
index f6633f5e82..310803ca5b 100644
--- a/resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/turbo_boost.yaml
+++ b/resources/tools/testbed-setup/ansible/roles/performance_tuning/tasks/turbo_boost.yaml
@@ -1,7 +1,7 @@
---
# file: roles/performance_tuning/tasks/turbo_boost.yaml
-- name: Turbo boost - Install msr-tools
+- name: Turbo Boost - Install msr-tools
package:
name:
- "msr-tools"
@@ -10,7 +10,7 @@
tags:
- turbo-boost
-- name: Turbo boost - Load msr by default
+- name: Turbo Boost - Load msr By Default
lineinfile:
path: "/etc/modules"
state: "present"
@@ -18,7 +18,7 @@
tags:
- turbo-boost
-- name: Turbo boost - Custom startup service hook
+- name: Turbo Boost - Custom Startup Service Hook
copy:
src: "files/disable-turbo-boost.service"
dest: "/etc/systemd/system/disable-turbo-boost.service"
@@ -28,7 +28,7 @@
tags:
- turbo-boost
-- name: Turbo boost - Custom startup service hook enable
+- name: Turbo Boost - Custom Startup Service Hook Enable
service:
name: "disable-turbo-boost"
enabled: yes
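
The unit file copied here, files/disable-turbo-boost.service, is not shown in this diff, but the surrounding tasks explain its mechanics: msr-tools provides wrmsr, and the msr kernel module must be loaded so wrmsr can reach the model-specific registers. On Intel CPUs turbo is controlled by bit 38 of IA32_MISC_ENABLE (MSR 0x1a0). A purely illustrative stand-in expressed as an Ansible task, with an example MSR value that varies by platform and is not taken from this repository:

    - name: Illustrative stand-in for files/disable-turbo-boost.service
      copy:
        dest: "/etc/systemd/system/disable-turbo-boost.service"
        content: |
          [Unit]
          Description=Disable Turbo Boost on Intel CPUs

          [Service]
          Type=oneshot
          # hypothetical value: default IA32_MISC_ENABLE with bit 38 set
          ExecStart=/usr/sbin/wrmsr -a 0x1a0 0x4000850089
          RemainAfterExit=yes

          [Install]
          WantedBy=multi-user.target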
diff --git a/resources/tools/testbed-setup/ansible/roles/tg/files/csit-initialize-docker-tg.sh b/resources/tools/testbed-setup/ansible/roles/tg/files/csit-initialize-docker-tg.sh
index 7b90d20bda..e9f19fd899 100755
--- a/resources/tools/testbed-setup/ansible/roles/tg/files/csit-initialize-docker-tg.sh
+++ b/resources/tools/testbed-setup/ansible/roles/tg/files/csit-initialize-docker-tg.sh
@@ -44,6 +44,8 @@ case "${1:-start}" in
dcr_stc_params+="--volume /dev:/dev "
# Mount /opt/boot/ where VM kernel and initrd are located.
dcr_stc_params+="--volume /opt:/opt "
+ # Mount /usr/local/bin/wrk where WRK is located.
+ dcr_stc_params+="--volume /usr/local/bin/wrk:/usr/local/bin/wrk "
# Mount host hugepages for VMs.
dcr_stc_params+="--volume /dev/hugepages:/dev/hugepages "
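
Bind-mounting a single file works because Docker accepts any host path, file or directory, as a volume source; the wrk binary installed by the wrk role (see the last hunk below) thereby becomes visible at the same path inside the TG containers. A hedged check from a playbook that the mount took effect (the container name is a placeholder; the script generates its own names):

    - name: Verify the wrk binary is visible inside a TG container
      command: "docker exec csit-tg-example ls -l /usr/local/bin/wrk"
      changed_when: false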
diff --git a/resources/tools/testbed-setup/ansible/roles/wrk/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/wrk/tasks/main.yaml
index d8d06145db..c2e42aa3f1 100644
--- a/resources/tools/testbed-setup/ansible/roles/wrk/tasks/main.yaml
+++ b/resources/tools/testbed-setup/ansible/roles/wrk/tasks/main.yaml
@@ -46,8 +46,8 @@
tags:
- install-wrk
-- name: WRK Install - Move Binary
- command: "mv {{ wrk_target_dir }}/wrk-{{ wrk_version }}/wrk /usr/local/bin/"
+- name: WRK Install - Copy Binary
+ command: "cp {{ wrk_target_dir }}/wrk-{{ wrk_version }}/wrk /usr/local/bin/"
when: wrk_compiled
tags:
- install-wrk
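
Changing mv to cp makes the task safe to re-run: the compiled binary stays in {{ wrk_target_dir }}, so a second pass still finds its source instead of failing. A more idiomatic alternative would be the copy module with remote_src, which also reports changed-ness at the file-content level; a hedged sketch, not what the role currently does:

    - name: WRK Install - Copy Binary
      copy:
        src: "{{ wrk_target_dir }}/wrk-{{ wrk_version }}/wrk"
        dest: "/usr/local/bin/wrk"
        remote_src: yes
        mode: "0755"
      when: wrk_compiled
      tags:
        - install-wrk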