| | | |
|---|---|---|
| author | Peter Mikus <pmikus@cisco.com> | 2018-05-19 15:15:29 +0200 |
| committer | Peter Mikus <pmikus@cisco.com> | 2018-05-30 09:49:59 +0000 |
| commit | 3a14297dca576dd7e2c9f09d84bf0f5cf58c1938 (patch) | |
| tree | 56de2207a8260d2204f6ce3bc7dbadcfb6e96417 /resources/tools/testbed-setup/ansible/roles/tg_sut | |
| parent | 8bb12f5b902cf42c623172c562b4f2752489bc13 (diff) | |
CSIT-1070 Update Ansible structure
Change-Id: I9adab174f0c15f4c05a93f61b17714fa6542ea5d
Signed-off-by: Peter Mikus <pmikus@cisco.com>
Diffstat (limited to 'resources/tools/testbed-setup/ansible/roles/tg_sut')
-rw-r--r-- | defaults/main.yaml | 30
-rw-r--r-- | files/90-csit | 79
-rw-r--r-- | files/cpufrequtils | 1
-rw-r--r-- | files/interfaces_physical | 14
-rw-r--r-- | files/irqbalance | 6
-rw-r--r-- | files/requirements.txt | 13
-rw-r--r-- | handlers/main.yaml | 8
-rw-r--r-- | tasks/main.yaml | 68
-rw-r--r-- | tasks/ubuntu_x86_64.yaml | 182
-rw-r--r-- | templates/docker.service.j2 | 34

10 files changed, 435 insertions, 0 deletions
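For orientation before the diff itself: this role is normally pulled in from a site playbook rather than run on its own. The sketch below is illustrative only and not part of this change; the playbook file name, the `tg_sut` host group, and the `isolcpus` value are assumptions (the variable is consumed by the GRUB task in `tasks/main.yaml`).

```yaml
# site.yaml (hypothetical) -- applies the tg_sut role to TG/SUT hosts.
---
- hosts: tg_sut                  # assumed inventory group name
  become: yes                    # the role writes under /etc and installs packages
  vars:
    isolcpus: "1-17,19-35"       # illustrative CPU list for isolcpus/nohz_full/rcu_nocbs
  roles:
    - tg_sut
```

Something like `ansible-playbook -i inventory site.yaml` would then apply it; the host and group names above are placeholders.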
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml b/resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml
new file mode 100644
index 0000000000..cefc496107
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml
@@ -0,0 +1,30 @@
+---
+docker_edition: "ce"
+docker_channel: "edge"
+
+docker_version: "18.05.0"
+docker_install_docker_compose: True
+docker_compose_version: "1.21.0"
+
+docker_users: ['testuser']
+
+docker_daemon_options: []
+# Can be used to set environment variables for the Docker daemon, such as:
+# docker_daemon_environment:
+#   - "HTTP_PROXY=http://proxy.example.com:3128/"
+#   - "HTTPS_PROXY=http://proxy.example.com:3128/"
+#   - "NO_PROXY=localhost,127.0.0.1"
+docker_daemon_environment: []
+
+docker_apt_key: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88"
+docker_repository: "deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_channel }}"
+docker_apt_package_name: "{{ docker_version }}~{{ docker_edition }}~3-0~{{ ansible_distribution | lower }}"
+
+apt_cache_time: 86400
+
+kubernetes_channel: "main"
+kubernetes_version: "1.10.3"
+
+kubernetes_apt_key: "54A647F9048D5688D7DA2ABE6A030B21BA07F4FB"
+kubernetes_repository: "deb http://apt.kubernetes.io/ kubernetes-xenial {{ kubernetes_channel }}"
+kubernetes_apt_package_name: "{{ kubernetes_version }}-00"
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/90-csit b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/90-csit
new file mode 100644
index 0000000000..2304162ce8
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/90-csit
@@ -0,0 +1,79 @@
+# change the minimum size of the hugepage pool.
+vm.nr_hugepages=4096
+
+# this file contains the maximum number of memory map areas a process
+# may have. memory map areas are used as a side-effect of calling
+# malloc, directly by mmap and mprotect, and also when loading shared
+# libraries.
+#
+# while most applications need less than a thousand maps, certain
+# programs, particularly malloc debuggers, may consume lots of them,
+# e.g., up to one or two maps per allocation.
+# must be greater than or equal to (2 * vm.nr_hugepages).
+vm.max_map_count=200000
+
+# hugetlb_shm_group contains group id that is allowed to create sysv
+# shared memory segment using hugetlb page.
+vm.hugetlb_shm_group=0
+
+# this control is used to define how aggressive the kernel will swap
+# memory pages. higher values will increase aggressiveness, lower values
+# decrease the amount of swap. a value of 0 instructs the kernel not to
+# initiate swap until the amount of free and file-backed pages is less
+# than the high water mark in a zone.
+vm.swappiness=0
+
+# this parameter can be used to control the nmi watchdog
+# (i.e. the hard lockup detector) on x86 systems.
+#
+# 0 - disable the hard lockup detector
+# 1 - enable the hard lockup detector
+#
+# the hard lockup detector monitors each cpu for its ability to respond to
+# timer interrupts. the mechanism utilizes cpu performance counter registers
+# that are programmed to generate non-maskable interrupts (nmis) periodically
+# while a cpu is busy. hence, the alternative name 'nmi watchdog'.
+#
+# the nmi watchdog is disabled by default if the kernel is running as a guest
+# in a kvm virtual machine. this default can be overridden by adding
+#kernel.nmi_watchdog=1
+
+# shared memory max must be greater than or equal to the total size of hugepages.
+# for 2mb pages, totalhugepagesize = vm.nr_hugepages * 2 * 1024 * 1024
+# if the existing kernel.shmmax setting (cat /proc/sys/kernel/shmmax)
+# is greater than the calculated totalhugepagesize then set this parameter
+# to current shmmax value.
+kernel.shmmax=8589934592
+
+# this option can be used to select the type of process address
+# space randomization that is used in the system, for architectures
+# that support this feature.
+# 0 - turn the process address space randomization off. this is the
+# default for architectures that do not support this feature anyways,
+# and kernels that are booted with the "norandmaps" parameter.
+kernel.randomize_va_space=0
+
+# this parameter can be used to control the soft lockup detector.
+#
+# 0 - disable the soft lockup detector
+# 1 - enable the soft lockup detector
+#
+# the soft lockup detector monitors cpus for threads that are hogging the cpus
+# without rescheduling voluntarily, and thus prevent the 'watchdog/n' threads
+# from running. the mechanism depends on the cpus ability to respond to timer
+# interrupts which are needed for the 'watchdog/n' threads to be woken up by
+# the watchdog timer function, otherwise the nmi watchdog - if enabled - can
+# detect a hard lockup condition.
+#kernel.soft_watchdog=0
+
+# this value can be used to control on which cpus the watchdog may run.
+# the default cpumask is all possible cores, but if no_hz_full is
+# enabled in the kernel config, and cores are specified with the
+# nohz_full= boot argument, those cores are excluded by default.
+# offline cores can be included in this mask, and if the core is later
+# brought online, the watchdog will be started based on the mask value.
+#
+# typically this value would only be touched in the nohz_full case
+# to re-enable cores that by default were not running the watchdog,
+# if a kernel lockup was suspected on those cores.
+kernel.watchdog_cpumask=0,18
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/cpufrequtils b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/cpufrequtils
new file mode 100644
index 0000000000..03070fefe1
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/cpufrequtils
@@ -0,0 +1 @@
+GOVERNOR="performance"
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/interfaces_physical b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/interfaces_physical
new file mode 100644
index 0000000000..734d8cd18f
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/interfaces_physical
@@ -0,0 +1,14 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto {{ ansible_default_ipv4["interface"] }}
+iface {{ ansible_default_ipv4["interface"] }} inet static
+    address {{ ansible_default_ipv4["address"] }}
+    netmask {{ ansible_default_ipv4["netmask"] }}
+    gateway {{ ansible_default_ipv4["gateway"] }}
+    dns-nameservers 199.204.44.24 199.204.47.54
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/irqbalance b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/irqbalance
new file mode 100644
index 0000000000..84fb5f17e2
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/irqbalance
@@ -0,0 +1,6 @@
+#Configuration for the irqbalance daemon
+
+#Should irqbalance be enabled?
+ENABLED="0"
+#Balance the IRQs only once?
+ONESHOT="0"
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/requirements.txt b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/requirements.txt
new file mode 100644
index 0000000000..11caf5d563
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/requirements.txt
@@ -0,0 +1,13 @@
+robotframework==2.9.2
+paramiko==1.16.0
+scp==0.10.2
+ipaddress==1.0.16
+interruptingcow==0.6
+PyYAML==3.11
+pykwalify==1.5.0
+scapy==2.3.1
+enum34==1.1.2
+requests==2.9.1
+ecdsa==0.13
+pycrypto==2.6.1
+pypcap==1.1.5
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/handlers/main.yaml b/resources/tools/testbed-setup/ansible/roles/tg_sut/handlers/main.yaml
new file mode 100644
index 0000000000..15a6803671
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/handlers/main.yaml
@@ -0,0 +1,8 @@
+---
+# file: roles/tg_sut/handlers/main.yaml
+
+- name: Restart Docker
+  service:
+    name: 'docker'
+    state: 'restarted'
+  tags: restart-docker
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/main.yaml
new file mode 100644
index 0000000000..c8454f58c6
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/main.yaml
@@ -0,0 +1,68 @@
+---
+# file: roles/tg_sut/tasks/main.yaml
+
+- name: Ubuntu specific
+  import_tasks: ubuntu_x86_64.yaml
+  when: ansible_distribution|lower == 'ubuntu' and ansible_machine == 'x86_64'
+
+- name: Copy interfaces file
+  template:
+    src: 'files/interfaces_physical'
+    dest: '/etc/network/interfaces'
+    owner: 'root'
+    group: 'root'
+    mode: '0644'
+  tags: copy-interface-file
+
+- name: Copy sysctl file
+  template:
+    src: 'files/90-csit'
+    dest: '/etc/sysctl.d/90-csit.conf'
+    owner: 'root'
+    group: 'root'
+    mode: '0644'
+  tags: copy-90-csit
+
+- name: Copy IRQ load balancing file
+  copy:
+    src: 'files/irqbalance'
+    dest: '/etc/default/irqbalance'
+    owner: 'root'
+    group: 'root'
+    mode: '0644'
+  tags: copy-irq
+
+- name: Copy cpufrequtils file
+  copy:
+    src: 'files/cpufrequtils'
+    dest: '/etc/default/cpufrequtils'
+    owner: 'root'
+    group: 'root'
+    mode: '0644'
+  tags: copy-cpufrequtils
+
+- name: Copy Python requirements file
+  copy:
+    src: 'files/requirements.txt'
+    dest: '/tmp/requirements.txt'
+  tags: copy-pip
+
+- name: Set isolcpus and pstate parameter
+  lineinfile:
+    path: '/etc/default/grub'
+    state: 'present'
+    regexp: '^GRUB_CMDLINE_LINUX='
+    line: 'GRUB_CMDLINE_LINUX="isolcpus={{ isolcpus }} nohz_full={{ isolcpus }} rcu_nocbs={{ isolcpus }} intel_pstate=disable"'
+  notify: ['Update GRUB']
+  tags: set-grub
+
+- name: Set ondemand service to disable
+  service:
+    name: 'ondemand'
+    enabled: 'no'
+  tags: set-ondemand
+
+- name: Install PIP requirements
+  pip:
+    requirements: '/tmp/requirements.txt'
+  tags: install-pip
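The `Set isolcpus and pstate parameter` task above only edits `/etc/default/grub`; it relies on an `isolcpus` variable being defined for the host and notifies an `Update GRUB` handler that is not part of this role's `handlers/main.yaml`. A minimal host_vars sketch, assuming an illustrative CPU list:

```yaml
# host_vars/<sut-hostname>.yaml (hypothetical); the CPU list is an assumption.
---
isolcpus: "1-17,19-35"
# With this value the lineinfile task renders:
# GRUB_CMDLINE_LINUX="isolcpus=1-17,19-35 nohz_full=1-17,19-35 rcu_nocbs=1-17,19-35 intel_pstate=disable"
```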
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/ubuntu_x86_64.yaml b/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/ubuntu_x86_64.yaml
new file mode 100644
index 0000000000..3c6eb10d62
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/ubuntu_x86_64.yaml
@@ -0,0 +1,182 @@
+---
+# file: roles/tg_sut/tasks/ubuntu_x86_64.yaml
+
+- name: Install python-dev
+  apt:
+    name: 'python-dev'
+    state: 'present'
+    update_cache: True
+  tags: install-python-dev
+
+- name: Install python-virtualenv
+  apt:
+    name: 'python-virtualenv'
+    state: 'present'
+    update_cache: True
+  tags: install-python-virtualenv
+
+- name: Install python pip
+  apt:
+    name: 'python-pip'
+    state: 'present'
+    update_cache: True
+  tags: install-python-pip
+
+- name: Install libpcap-dev
+  apt:
+    name: 'libpcap-dev'
+    state: 'present'
+    update_cache: True
+  tags: install-libpcap-dev
+
+- name: Install cpufrequtils
+  apt:
+    name: 'cpufrequtils'
+    state: 'present'
+    update_cache: True
+  tags: install-cpufrequtils
+
+- name: Install cgroup-support
+  apt:
+    name: 'cgroup-bin'
+    state: 'present'
+    update_cache: True
+  tags: install-cgroup-support
+
+- name: Install zlib1g-dev
+  apt:
+    name: 'zlib1g-dev'
+    state: 'present'
+    update_cache: True
+  tags: install-zlib1g-dev
+
+- name: Install libnuma-dev
+  apt:
+    name: 'libnuma-dev'
+    state: 'present'
+    update_cache: True
+  tags: install-libnuma-dev
+
+- name: Install Docker and role dependencies
+  apt:
+    name: "{{ item }}"
+    state: "present"
+    install_recommends: False
+  with_items:
+    - "apt-transport-https"
+    - "ca-certificates"
+    - "software-properties-common"
+    - "cron"
+  tags: install-docker
+
+- name: Install upstream APT GPG key
+  apt_key:
+    id: "{{ docker_apt_key }}"
+    keyserver: "{{ ansible_local.core.keyserver
+                   if (ansible_local|d() and ansible_local.core|d() and
+                       ansible_local.core.keyserver)
+                   else 'hkp://pool.sks-keyservers.net' }}"
+    state: "present"
+  tags: install-docker
+
+- name: Install upstream APT repository
+  apt_repository:
+    repo: "{{ docker_repository }}"
+    state: "present"
+    update_cache: True
+  tags: install-docker
+
+- name: Install Docker
+  apt:
+    name: "docker-{{ docker_edition }}={{ docker_apt_package_name }}"
+    state: "present"
+    update_cache: True
+    install_recommends: False
+    cache_valid_time: "{{ apt_cache_time }}"
+  tags: install-docker
+
+- name: Remove Upstart config file
+  file:
+    path: "/etc/default/docker"
+    state: "absent"
+  tags: docker
+
+- name: Ensure systemd directory exists
+  file:
+    path: "/etc/systemd/system"
+    state: "directory"
+    owner: "root"
+    group: "root"
+    mode: "0755"
+  tags: ensure-docker
+
+- name: Copy systemd unit file
+  template:
+    src: "templates/docker.service.j2"
+    dest: "/etc/systemd/system/docker.service"
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  register: docker_register_systemd_service
+  tags: copy-docker
+
+- name: Reload systemd daemon
+  command: "systemctl daemon-reload"
+  notify: ["Restart Docker"]
+  when: (docker_register_systemd_service and
+         docker_register_systemd_service | changed)
+  tags: restart-docker
+
+- name: Set specific users to "docker" group
+  user:
+    name: "{{ item }}"
+    groups: "docker"
+    append: True
+  with_items: "{{ docker_users }}"
+  when: docker_users
+  tags: set-docker
+
+- name: Install upstream APT GPG key
+  apt_key:
+    id: "{{ kubernetes_apt_key }}"
+    keyserver: "{{ ansible_local.core.keyserver
+                   if (ansible_local|d() and ansible_local.core|d() and
+                       ansible_local.core.keyserver)
+                   else 'hkp://pool.sks-keyservers.net' }}"
+    state: "present"
+  tags: install-kubernetes
+
+- name: Install upstream APT repository
+  apt_repository:
+    repo: "{{ kubernetes_repository }}"
+    state: "present"
+    update_cache: True
+  tags: install-kubernetes
+
+- name: Install kubeadm
+  apt:
+    name: "kubeadm={{ kubernetes_apt_package_name }}"
+    state: "present"
+    update_cache: True
+    install_recommends: False
+    cache_valid_time: "{{ apt_cache_time }}"
+  tags: install-kubernetes
+
+- name: Install kubectl
+  apt:
+    name: "kubectl={{ kubernetes_apt_package_name }}"
+    state: "present"
+    update_cache: True
+    install_recommends: False
+    cache_valid_time: "{{ apt_cache_time }}"
+  tags: install-kubernetes
+
+- name: Install kubelet
+  apt:
+    name: "kubelet={{ kubernetes_apt_package_name }}"
+    state: "present"
+    update_cache: True
+    install_recommends: False
+    cache_valid_time: "{{ apt_cache_time }}"
+  tags: install-kubernetes
+
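The Docker and Kubernetes packages above are pinned to exact versions built from the role defaults (`docker_apt_package_name` and `kubernetes_apt_package_name` in `defaults/main.yaml`). Moving a testbed to other releases is therefore a matter of overriding those defaults rather than editing the tasks; a sketch, with version strings that are examples only and must exist in the configured upstream APT repositories:

```yaml
# group_vars/all.yaml (hypothetical override); versions shown are examples.
---
docker_version: "18.04.0"        # expands into docker-ce=18.04.0~ce~3-0~ubuntu
kubernetes_version: "1.10.2"     # expands into kubeadm/kubectl/kubelet=1.10.2-00
```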
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2 b/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2
new file mode 100644
index 0000000000..26a1bcf372
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2
@@ -0,0 +1,34 @@
+# {{ ansible_managed }}
+
+[Unit]
+Description=Docker Application Container Engine
+Documentation=https://docs.docker.com
+After=network-online.target docker.socket
+Requires=docker.socket
+
+[Service]
+Type=notify
+# the default is not to use systemd for cgroups because the delegate issues still
+# exists and systemd currently does not support the cgroup feature set required
+# for containers run by docker
+{% if docker_daemon_environment %}
+Environment="{{ docker_daemon_environment | join('" "') }}"
+{% endif %}
+ExecStart=/usr/bin/dockerd {{ docker_daemon_options | join(" ") }}
+ExecReload=/bin/kill -s HUP $MAINPID
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
+LimitNPROC=infinity
+LimitCORE=infinity
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
+TasksMax=infinity
+TimeoutStartSec=0
+# set delegate yes so that systemd does not reset the cgroups of docker containers
+Delegate=yes
+# kill only the docker process, not all processes in the cgroup
+KillMode=process
+
+[Install]
+WantedBy=multi-user.target
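The unit template above only emits an `Environment=` line when `docker_daemon_environment` is non-empty, and splices `docker_daemon_options` straight into `ExecStart`. A sketch of how a proxied testbed might populate these variables (the proxy URL follows the example in `defaults/main.yaml`; the registry address is a placeholder), together with the lines the template would then render:

```yaml
# group_vars/<testbed>.yaml (hypothetical); values below are placeholders.
---
docker_daemon_environment:
  - "HTTP_PROXY=http://proxy.example.com:3128/"
  - "NO_PROXY=localhost,127.0.0.1"
docker_daemon_options:
  - "--insecure-registry 10.0.0.1:5000"
# Rendered into /etc/systemd/system/docker.service as:
#   Environment="HTTP_PROXY=http://proxy.example.com:3128/" "NO_PROXY=localhost,127.0.0.1"
#   ExecStart=/usr/bin/dockerd --insecure-registry 10.0.0.1:5000
```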