Diffstat (limited to 'resources/tools/testbed-setup/ansible/roles')
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/common/files/apt-sources.list  |  61
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/common/files/grub  |  34
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/common/handlers/main.yaml  |   6
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/common/handlers/reboot.yaml  |  14
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/common/tasks/main.yaml  |  46
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/common/tasks/ubuntu_x86_64.yaml  |  50
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/sut/tasks/main.yaml  |  14
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu_x86_64.yaml  |  90
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg/tasks/main.yaml  |   6
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu_x86_64.yaml  |   9
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml  |  30
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/files/90-csit  |  79
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/files/cpufrequtils  |   1
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/files/interfaces_physical  |  14
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/files/irqbalance  |   6
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/files/requirements.txt  |  13
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/handlers/main.yaml  |   8
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/main.yaml  |  68
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/ubuntu_x86_64.yaml  | 182
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2  |  34
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/interfaces_virl  |  17
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/nova_os_ip.patch  |  12
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/requirements.txt  |  13
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/salt.b64  |  27
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/salt/etc/salt/minion.d/testlocal.conf  |   3
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server.sls  |   3
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/configure.sls  |  31
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/files/exports  |  12
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/install.sls  |   5
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_jenkins-in  |   1
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_virl  |   1
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/ttyS0  |  10
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl  |  27
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl.pub  |   1
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/virl/ifup  |  25
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/virl/ssh_environment  |   1
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/files/virl/virl-bootstrap-wrapper  |  58
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/tasks/02-virl-bootstrap.yaml  | 216
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/tasks/03-virl-post-install.yaml  |  87
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/tasks/04-disk-image.yaml  |  27
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/virl/tasks/main.yaml  |  78
41 files changed, 1420 insertions, 0 deletions
diff --git a/resources/tools/testbed-setup/ansible/roles/common/files/apt-sources.list b/resources/tools/testbed-setup/ansible/roles/common/files/apt-sources.list
new file mode 100644
index 0000000000..e8ccadfc64
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/common/files/apt-sources.list
@@ -0,0 +1,61 @@
+# deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted
+
+# deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates main restricted
+# deb http://security.ubuntu.com/ubuntu bionic-security main restricted
+
+# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+# newer versions of the distribution.
+deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted
+deb-src http://us.archive.ubuntu.com/ubuntu/ bionic main restricted
+
+## Major bug fix updates produced after the final release of the
+## distribution.
+deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates main restricted
+deb-src http://us.archive.ubuntu.com/ubuntu/ bionic-updates main restricted
+
+## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
+## team. Also, please note that software in universe WILL NOT receive any
+## review or updates from the Ubuntu security team.
+deb http://us.archive.ubuntu.com/ubuntu/ bionic universe
+deb-src http://us.archive.ubuntu.com/ubuntu/ bionic universe
+deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates universe
+deb-src http://us.archive.ubuntu.com/ubuntu/ bionic-updates universe
+
+## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
+## team, and may not be under a free licence. Please satisfy yourself as to
+## your rights to use the software. Also, please note that software in
+## multiverse WILL NOT receive any review or updates from the Ubuntu
+## security team.
+deb http://us.archive.ubuntu.com/ubuntu/ bionic multiverse
+deb-src http://us.archive.ubuntu.com/ubuntu/ bionic multiverse
+deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates multiverse
+deb-src http://us.archive.ubuntu.com/ubuntu/ bionic-updates multiverse
+
+## N.B. software from this repository may not have been tested as
+## extensively as that contained in the main release, although it includes
+## newer versions of some applications which may provide useful features.
+## Also, please note that software in backports WILL NOT receive any review
+## or updates from the Ubuntu security team.
+deb http://us.archive.ubuntu.com/ubuntu/ bionic-backports main restricted universe multiverse
+deb-src http://us.archive.ubuntu.com/ubuntu/ bionic-backports main restricted universe multiverse
+
+deb http://security.ubuntu.com/ubuntu bionic-security main restricted
+deb-src http://security.ubuntu.com/ubuntu bionic-security main restricted
+deb http://security.ubuntu.com/ubuntu bionic-security universe
+deb-src http://security.ubuntu.com/ubuntu bionic-security universe
+deb http://security.ubuntu.com/ubuntu bionic-security multiverse
+deb-src http://security.ubuntu.com/ubuntu bionic-security multiverse
+
+## Uncomment the following two lines to add software from Canonical's
+## 'partner' repository.
+## This software is not part of Ubuntu, but is offered by Canonical and the
+## respective vendors as a service to Ubuntu users.
+# deb http://archive.canonical.com/ubuntu bionic partner
+# deb-src http://archive.canonical.com/ubuntu bionic partner
+
+## Uncomment the following two lines to add software from Ubuntu's
+## 'extras' repository.
+## This software is not part of Ubuntu, but is offered by third-party
+## developers who want to ship their latest software.
+# deb http://extras.ubuntu.com/ubuntu bionic main
+# deb-src http://extras.ubuntu.com/ubuntu bionic main
diff --git a/resources/tools/testbed-setup/ansible/roles/common/files/grub b/resources/tools/testbed-setup/ansible/roles/common/files/grub
new file mode 100644
index 0000000000..d4e27b3f0f
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/common/files/grub
@@ -0,0 +1,34 @@
+# If you change this file, run 'update-grub' afterwards to update
+# /boot/grub/grub.cfg.
+# For full documentation of the options in this file, see:
+# info -f grub -n 'Simple configuration'
+
+GRUB_DEFAULT=0
+GRUB_TIMEOUT=10
+GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
+GRUB_CMDLINE_LINUX_DEFAULT="console=tty0 console=ttyS0,115200n8"
+GRUB_CMDLINE_LINUX=""
+
+# Uncomment to enable BadRAM filtering, modify to suit your needs
+# This works with Linux (no patch required) and with any kernel that obtains
+# the memory map information from GRUB (GNU Mach, kernel of FreeBSD ...)
+#GRUB_BADRAM="0x01234567,0xfefefefe,0x89abcdef,0xefefefef"
+
+# Uncomment to disable graphical terminal (grub-pc only)
+#GRUB_TERMINAL=console
+GRUB_TERMINAL=serial
+GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
+
+# The resolution used on graphical terminal
+# note that you can use only modes which your graphic card supports via VBE
+# you can see them in real GRUB with the command `vbeinfo'
+#GRUB_GFXMODE=640x480
+
+# Uncomment if you don't want GRUB to pass "root=UUID=xxx" parameter to Linux
+#GRUB_DISABLE_LINUX_UUID=true
+
+# Uncomment to disable generation of recovery mode menu entries
+#GRUB_DISABLE_RECOVERY="true"
+
+# Uncomment to get a beep at grub start
+#GRUB_INIT_TUNE="480 440 1"
diff --git a/resources/tools/testbed-setup/ansible/roles/common/handlers/main.yaml b/resources/tools/testbed-setup/ansible/roles/common/handlers/main.yaml
new file mode 100644
index 0000000000..bdcf6832f4
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/common/handlers/main.yaml
@@ -0,0 +1,6 @@
+---
+# file: roles/common/handlers/main.yaml
+
+- name: Update GRUB
+ command: update-grub
+ tags: update-grub
diff --git a/resources/tools/testbed-setup/ansible/roles/common/handlers/reboot.yaml b/resources/tools/testbed-setup/ansible/roles/common/handlers/reboot.yaml
new file mode 100644
index 0000000000..01e1eb1b6b
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/common/handlers/reboot.yaml
@@ -0,0 +1,14 @@
+---
+# file roles/common/handlers/reboot.yaml
+
+- name: Reboot host
+ command: shutdown -r now "Ansible updates triggered"
+ async: 0
+ poll: 0
+ ignore_errors: true
+ tags: reboot-host
+
+- name: Waiting for server to come back
+ local_action: wait_for host={{ inventory_hostname }}
+ state=started
+ tags: reboot-host
diff --git a/resources/tools/testbed-setup/ansible/roles/common/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/common/tasks/main.yaml
new file mode 100644
index 0000000000..e16a6a7356
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/common/tasks/main.yaml
@@ -0,0 +1,46 @@
+---
+# file: roles/common/tasks/main.yaml
+
+- name: Ubuntu specific
+ import_tasks: ubuntu_x86_64.yaml
+ when: ansible_distribution|lower == 'ubuntu' and ansible_machine == 'x86_64'
+
+- name: Set hostname
+ hostname:
+ name: '{{ hostname }}'
+ tags: set-hostname
+
+- name: Ensure hostname is in /etc/hosts
+ lineinfile:
+ path: '/etc/hosts'
+ regexp: '^{{ ansible_default_ipv4.address }}.+$'
+ line: '{{ ansible_default_ipv4.address }} {{ hostname }}.linuxfoundation.org'
+ tags: set-hostname
+
+- name: Set sudoers
+ lineinfile:
+ path: '/etc/sudoers'
+ state: 'present'
+ regexp: '^%admin ALL='
+ line: '%admin ALL=(ALL) ALL'
+ validate: '/usr/sbin/visudo -cf %s'
+ tags: set-sudoers
+
+- name: Set sudoers
+ lineinfile:
+ path: '/etc/sudoers'
+ state: 'present'
+ regexp: '^%sudo'
+ line: '%sudo ALL=(ALL:ALL) NOPASSWD: ALL'
+ validate: '/usr/sbin/visudo -cf %s'
+ tags: set-sudoers
+
+- name: Copy grub file
+ template:
+ src: 'files/grub'
+ dest: '/etc/default/grub'
+ owner: 'root'
+ group: 'root'
+ mode: '644'
+ notify: ['Update GRUB']
+ tags: copy-grub
diff --git a/resources/tools/testbed-setup/ansible/roles/common/tasks/ubuntu_x86_64.yaml b/resources/tools/testbed-setup/ansible/roles/common/tasks/ubuntu_x86_64.yaml
new file mode 100644
index 0000000000..d0f32a31cc
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/common/tasks/ubuntu_x86_64.yaml
@@ -0,0 +1,50 @@
+---
+# file: roles/common/tasks/ubuntu_x86_64.yaml
+
+- name: Copy apt sources file
+ template:
+ src: 'files/apt-sources.list'
+ dest: '/etc/apt/sources.list'
+ tags: copy-apt-sources
+
+- name: Install python-apt
+ apt:
+ name: 'python-apt'
+ state: 'present'
+ update_cache: True
+ tags: install-python-apt
+
+- name: Install git
+ apt:
+ name: 'git'
+ state: 'present'
+ update_cache: True
+ tags: install-git
+
+- name: Install crudini
+ apt:
+ name: 'crudini'
+ state: 'present'
+ update_cache: True
+ tags: install-crudini
+
+- name: Install expect
+ apt:
+ name: 'expect'
+ state: 'present'
+ update_cache: True
+ tags: install-expect
+
+- name: Install socat
+ apt:
+ name: 'socat'
+ state: 'present'
+ update_cache: True
+ tags: install-socat
+
+- name: Install qemu
+ apt:
+ name: 'qemu-system-x86'
+ state: 'present'
+ update_cache: True
+ tags: install-qemu
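
The six single-package apt tasks above could also be written as one looped task; a minimal sketch, assuming the same package set and the Ansible 2.x with_items style used elsewhere in this patch (task name and tag are illustrative):

    - name: Install common packages
      apt:
        name: "{{ item }}"
        state: 'present'
        update_cache: True
      with_items:
        - 'python-apt'
        - 'git'
        - 'crudini'
        - 'expect'
        - 'socat'
        - 'qemu-system-x86'
      tags: install-common-pkgs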
diff --git a/resources/tools/testbed-setup/ansible/roles/sut/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/sut/tasks/main.yaml
new file mode 100644
index 0000000000..ffdbbcc860
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/sut/tasks/main.yaml
@@ -0,0 +1,14 @@
+---
+# file: roles/sut/tasks/main.yaml
+
+- name: Ubuntu specific
+ import_tasks: ubuntu_x86_64.yaml
+ when: ansible_distribution|lower == 'ubuntu' and ansible_machine == 'x86_64'
+
+- name: Copy 80-vpp.conf
+ file:
+ src: '/dev/null'
+ dest: '/etc/sysctl.d/80-vpp.conf'
+ state: 'link'
+ become: yes
+ tags: copy-80-vpp
diff --git a/resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu_x86_64.yaml b/resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu_x86_64.yaml
new file mode 100644
index 0000000000..f9ab901ded
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu_x86_64.yaml
@@ -0,0 +1,90 @@
+---
+# file: roles/sut/tasks/ubuntu_x86_64.yaml
+
+- name: Install DKMS
+ apt:
+ name: 'dkms'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-dkms
+
+- name: Install pkg-config
+ apt:
+ name: 'pkg-config'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-pkg-config
+
+- name: Install libglib2.0-dev
+ apt:
+ name: 'libglib2.0-dev'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-libglib2.0-dev
+
+- name: Install autoconf
+ apt:
+ name: 'autoconf'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-autoconf
+
+- name: Install libtool
+ apt:
+ name: 'libtool'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-libtool
+
+- name: Install screen
+ apt:
+ name: 'screen'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-screen
+
+- name: Install libmbedcrypto1
+ apt:
+ name: 'libmbedcrypto1'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-libmbedcrypto1
+
+- name: Install libmbedtls10
+ apt:
+ name: 'libmbedtls10'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-libmbedtls10
+
+- name: Install libmbedx509-0
+ apt:
+ name: 'libmbedx509-0'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-libmbedx509-0
+
+- name: Install lxc
+ apt:
+ name: 'lxc'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-lxc
+
+- name: Install java
+ apt:
+ name: 'openjdk-8-jdk'
+ state: 'present'
+ update_cache: True
+ become: yes
+ tags: install-java
diff --git a/resources/tools/testbed-setup/ansible/roles/tg/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/tg/tasks/main.yaml
new file mode 100644
index 0000000000..8e71800bca
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+# file: roles/tg/tasks/main.yaml
+
+- name: Ubuntu specific
+ import_tasks: ubuntu_x86_64.yaml
+ when: ansible_distribution|lower == 'ubuntu' and ansible_machine == 'x86_64'
diff --git a/resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu_x86_64.yaml b/resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu_x86_64.yaml
new file mode 100644
index 0000000000..9711b2678d
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu_x86_64.yaml
@@ -0,0 +1,9 @@
+---
+# file: roles/tg/tasks/ubuntu_x86_64.yaml
+
+- name: Install Unzip
+ apt:
+ name: 'unzip'
+ state: 'present'
+ update_cache: True
+ tags: install-unzip
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml b/resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml
new file mode 100644
index 0000000000..cefc496107
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml
@@ -0,0 +1,30 @@
+---
+docker_edition: "ce"
+docker_channel: "edge"
+
+docker_version: "18.05.0"
+docker_install_docker_compose: True
+docker_compose_version: "1.21.0"
+
+docker_users: ['testuser']
+
+docker_daemon_options: []
+# Can be used to set environment variables for the Docker daemon, such as:
+# docker_daemon_environment:
+# - "HTTP_PROXY=http://proxy.example.com:3128/"
+# - "HTTPS_PROXY=http://proxy.example.com:3128/"
+# - "NO_PROXY=localhost,127.0.0.1"
+docker_daemon_environment: []
+
+docker_apt_key: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88"
+docker_repository: "deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_channel }}"
+docker_apt_package_name: "{{ docker_version }}~{{ docker_edition }}~3-0~{{ ansible_distribution | lower }}"
+
+apt_cache_time: 86400
+
+kubernetes_channel: "main"
+kubernetes_version: "1.10.3"
+
+kubernetes_apt_key: "54A647F9048D5688D7DA2ABE6A030B21BA07F4FB"
+kubernetes_repository: "deb http://apt.kubernetes.io/ kubernetes-xenial {{ kubernetes_channel }}"
+kubernetes_apt_package_name: "{{ kubernetes_version }}-00"
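
The defaults above can be overridden per testbed through the inventory; a hypothetical group_vars snippet (values are illustrative only, not part of this change):

    docker_channel: "stable"
    docker_users:
      - 'testuser'
    docker_daemon_environment:
      - "HTTP_PROXY=http://proxy.example.com:3128/"
      - "NO_PROXY=localhost,127.0.0.1"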
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/90-csit b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/90-csit
new file mode 100644
index 0000000000..2304162ce8
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/90-csit
@@ -0,0 +1,79 @@
+# change the minimum size of the hugepage pool.
+vm.nr_hugepages=4096
+
+# this file contains the maximum number of memory map areas a process
+# may have. memory map areas are used as a side-effect of calling
+# malloc, directly by mmap and mprotect, and also when loading shared
+# libraries.
+#
+# while most applications need less than a thousand maps, certain
+# programs, particularly malloc debuggers, may consume lots of them,
+# e.g., up to one or two maps per allocation.
+# must be greater than or equal to (2 * vm.nr_hugepages).
+vm.max_map_count=200000
+
+# hugetlb_shm_group contains group id that is allowed to create sysv
+# shared memory segment using hugetlb page.
+vm.hugetlb_shm_group=0
+
+# this control is used to define how aggressive the kernel will swap
+# memory pages. higher values will increase aggressiveness, lower values
+# decrease the amount of swap. a value of 0 instructs the kernel not to
+# initiate swap until the amount of free and file-backed pages is less
+# than the high water mark in a zone.
+vm.swappiness=0
+
+# this parameter can be used to control the nmi watchdog
+# (i.e. the hard lockup detector) on x86 systems.
+#
+# 0 - disable the hard lockup detector
+# 1 - enable the hard lockup detector
+#
+# the hard lockup detector monitors each cpu for its ability to respond to
+# timer interrupts. the mechanism utilizes cpu performance counter registers
+# that are programmed to generate non-maskable interrupts (nmis) periodically
+# while a cpu is busy. hence, the alternative name 'nmi watchdog'.
+#
+# the nmi watchdog is disabled by default if the kernel is running as a guest
+# in a kvm virtual machine. this default can be overridden by adding
+#kernel.nmi_watchdog=1
+
+# shared memory max must be greater than or equal to the total size of hugepages.
+# for 2mb pages, totalhugepagesize = vm.nr_hugepages * 2 * 1024 * 1024
+# if the existing kernel.shmmax setting (cat /sys/proc/kernel/shmmax)
+# is greater than the calculated totalhugepagesize then set this parameter
+# to current shmmax value.
+kernel.shmmax=8589934592
+
+# this option can be used to select the type of process address
+# space randomization that is used in the system, for architectures
+# that support this feature.
+# 0 - turn the process address space randomization off. this is the
+# default for architectures that do not support this feature anyways,
+# and kernels that are booted with the "norandmaps" parameter.
+kernel.randomize_va_space=0
+
+# this parameter can be used to control the soft lockup detector.
+#
+# 0 - disable the soft lockup detector
+# 1 - enable the soft lockup detector
+#
+# the soft lockup detector monitors cpus for threads that are hogging the cpus
+# without rescheduling voluntarily, and thus prevent the 'watchdog/n' threads
+# from running. the mechanism depends on the cpus ability to respond to timer
+# interrupts which are needed for the 'watchdog/n' threads to be woken up by
+# the watchdog timer function, otherwise the nmi watchdog - if enabled - can
+# detect a hard lockup condition.
+#kernel.soft_watchdog=0
+
+# this value can be used to control on which cpus the watchdog may run.
+# the default cpumask is all possible cores, but if no_hz_full is
+# enabled in the kernel config, and cores are specified with the
+# nohz_full= boot argument, those cores are excluded by default.
+# offline cores can be included in this mask, and if the core is later
+# brought online, the watchdog will be started based on the mask value.
+#
+# typically this value would only be touched in the nohz_full case
+# to re-enable cores that by default were not running the watchdog,
+# if a kernel lockup was suspected on those cores.
+kernel.watchdog_cpumask=0,18
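
The tg_sut role installs this file as /etc/sysctl.d/90-csit.conf (see the copy task in roles/tg_sut/tasks/main.yaml below); the values take effect on the next boot, or could be applied immediately by a follow-up task along these lines (a sketch, not part of the patch):

    - name: Apply sysctl settings from /etc/sysctl.d
      command: sysctl --system
      become: yes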
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/cpufrequtils b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/cpufrequtils
new file mode 100644
index 0000000000..03070fefe1
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/cpufrequtils
@@ -0,0 +1 @@
+GOVERNOR="performance"
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/interfaces_physical b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/interfaces_physical
new file mode 100644
index 0000000000..734d8cd18f
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/interfaces_physical
@@ -0,0 +1,14 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto {{ ansible_default_ipv4["interface"] }}
+iface {{ ansible_default_ipv4["interface"] }} inet static
+ address {{ ansible_default_ipv4["address"] }}
+ netmask {{ ansible_default_ipv4["netmask"] }}
+ gateway {{ ansible_default_ipv4["gateway"] }}
+ dns-nameservers 199.204.44.24 199.204.47.54
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/irqbalance b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/irqbalance
new file mode 100644
index 0000000000..84fb5f17e2
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/irqbalance
@@ -0,0 +1,6 @@
+#Configuration for the irqbalance daemon
+
+#Should irqbalance be enabled?
+ENABLED="0"
+#Balance the IRQs only once?
+ONESHOT="0"
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/files/requirements.txt b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/requirements.txt
new file mode 100644
index 0000000000..11caf5d563
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/files/requirements.txt
@@ -0,0 +1,13 @@
+robotframework==2.9.2
+paramiko==1.16.0
+scp==0.10.2
+ipaddress==1.0.16
+interruptingcow==0.6
+PyYAML==3.11
+pykwalify==1.5.0
+scapy==2.3.1
+enum34==1.1.2
+requests==2.9.1
+ecdsa==0.13
+pycrypto==2.6.1
+pypcap==1.1.5
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/handlers/main.yaml b/resources/tools/testbed-setup/ansible/roles/tg_sut/handlers/main.yaml
new file mode 100644
index 0000000000..15a6803671
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/handlers/main.yaml
@@ -0,0 +1,8 @@
+---
+# file roles/tg_sut/handlers/main.yaml
+
+- name: Restart Docker
+ service:
+ name: 'docker'
+ state: 'restarted'
+ tags: restart-docker
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/main.yaml
new file mode 100644
index 0000000000..c8454f58c6
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/main.yaml
@@ -0,0 +1,68 @@
+---
+# file: roles/tg_sut/tasks/main.yaml
+
+- name: Ubuntu specific
+ import_tasks: ubuntu_x86_64.yaml
+ when: ansible_distribution|lower == 'ubuntu' and ansible_machine == 'x86_64'
+
+- name: Copy interfaces file
+ template:
+ src: 'files/interfaces_physical'
+ dest: '/etc/network/interfaces'
+ owner: 'root'
+ group: 'root'
+ mode: '0644'
+ tags: copy-interface-file
+
+- name: Copy sysctl file
+ template:
+ src: 'files/90-csit'
+ dest: '/etc/sysctl.d/90-csit.conf'
+ owner: 'root'
+ group: 'root'
+ mode: '0644'
+ tags: copy-90-csit
+
+- name: Copy IRQ load balancing file
+ copy:
+ src: 'files/irqbalance'
+ dest: '/etc/default/irqbalance'
+ owner: 'root'
+ group: 'root'
+ mode: '0644'
+ tags: copy-irq
+
+- name: Copy cpufrequtils file
+ copy:
+ src: 'files/cpufrequtils'
+ dest: '/etc/default/cpufrequtils'
+ owner: 'root'
+ group: 'root'
+ mode: '0644'
+ tags: copy-cpufrequtils
+
+- name: Copy Python requirements file
+ copy:
+ src: 'files/requirements.txt'
+ dest: '/tmp/requirements.txt'
+ tags: copy-pip
+
+- name: Set isolcpus and pstate parameter
+ lineinfile:
+ path: '/etc/default/grub'
+ state: 'present'
+ regexp: '^GRUB_CMDLINE_LINUX='
+ line: 'GRUB_CMDLINE_LINUX="isolcpus={{ isolcpus }} nohz_full={{ isolcpus }} rcu_nocbs={{ isolcpus }} intel_pstate=disable"'
+ notify: ['Update GRUB']
+ tags: set-grub
+
+- name: Set ondemand service to disable
+ service:
+ name: 'ondemand'
+ enabled: 'no'
+ tags: set-ondemand
+
+- name: Install PIP requirements
+ pip:
+ requirements: '/tmp/requirements.txt'
+ tags: install-pip
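
The "Set isolcpus and pstate parameter" task above expects an isolcpus variable from the inventory; a hypothetical host_vars value (illustrative only, real testbeds define their own CPU list):

    isolcpus: "1-17,19-35"

With that value, GRUB_CMDLINE_LINUX becomes "isolcpus=1-17,19-35 nohz_full=1-17,19-35 rcu_nocbs=1-17,19-35 intel_pstate=disable" once the "Update GRUB" handler runs.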
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/ubuntu_x86_64.yaml b/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/ubuntu_x86_64.yaml
new file mode 100644
index 0000000000..3c6eb10d62
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/ubuntu_x86_64.yaml
@@ -0,0 +1,182 @@
+---
+# file: roles/tg_sut/tasks/ubuntu_x86_64.yaml
+
+- name: Install python-dev
+ apt:
+ name: 'python-dev'
+ state: 'present'
+ update_cache: True
+ tags: install-python-dev
+
+- name: Install python-virtualenv
+ apt:
+ name: 'python-virtualenv'
+ state: 'present'
+ update_cache: True
+ tags: install-python-virtualenv
+
+- name: Install python pip
+ apt:
+ name: 'python-pip'
+ state: 'present'
+ update_cache: True
+ tags: install-python-pip
+
+- name: Install libpcap-dev
+ apt:
+ name: 'libpcap-dev'
+ state: 'present'
+ update_cache: True
+ tags: install-libpcap-dev
+
+- name: Install cpufrequtils
+ apt:
+ name: 'cpufrequtils'
+ state: 'present'
+ update_cache: True
+ tags: install-cpufrequtils
+
+- name: Install cgroup-support
+ apt:
+ name: 'cgroup-bin'
+ state: 'present'
+ update_cache: True
+ tags: install-cgroup-support
+
+- name: Install zlib1g-dev
+ apt:
+ name: 'zlib1g-dev'
+ state: 'present'
+ update_cache: True
+ tags: install-zlib1g-dev
+
+- name: Install libnuma-dev
+ apt:
+ name: 'libnuma-dev'
+ state: 'present'
+ update_cache: True
+ tags: install-libnuma-dev
+
+- name: Install Docker and role dependencies
+ apt:
+ name: "{{ item }}"
+ state: "present"
+ install_recommends: False
+ with_items:
+ - "apt-transport-https"
+ - "ca-certificates"
+ - "software-properties-common"
+ - "cron"
+ tags: install-docker
+
+- name: Install upstream APT GPG key
+ apt_key:
+ id: "{{ docker_apt_key }}"
+ keyserver: "{{ ansible_local.core.keyserver
+ if (ansible_local|d() and ansible_local.core|d() and
+ ansible_local.core.keyserver)
+ else 'hkp://pool.sks-keyservers.net' }}"
+ state: "present"
+ tags: install-docker
+
+- name: Install upstream APT repository
+ apt_repository:
+ repo: "{{ docker_repository }}"
+ state: "present"
+ update_cache: True
+ tags: install-docker
+
+- name: Install Docker
+ apt:
+ name: "docker-{{ docker_edition }}={{ docker_apt_package_name }}"
+ state: "present"
+ update_cache: True
+ install_recommends: False
+ cache_valid_time: "{{ apt_cache_time }}"
+ tags: install-docker
+
+- name: Remove Upstart config file
+ file:
+ path: "/etc/default/docker"
+ state: "absent"
+ tags: docker
+
+- name: Ensure systemd directory exists
+ file:
+ path: "/etc/systemd/system"
+ state: "directory"
+ owner: "root"
+ group: "root"
+ mode: "0755"
+ tags: ensure-docker
+
+- name: Copy systemd unit file
+ template:
+ src: "templates/docker.service.j2"
+ dest: "/etc/systemd/system/docker.service"
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ register: docker_register_systemd_service
+ tags: copy-docker
+
+- name: Reload systemd daemon
+ command: "systemctl daemon-reload"
+ notify: ["Restart Docker"]
+ when: (docker_register_systemd_service and
+ docker_register_systemd_service | changed)
+ tags: restart-docker
+
+- name: Set specific users to "docker" group
+ user:
+ name: "{{ item }}"
+ groups: "docker"
+ append: True
+ with_items: "{{ docker_users }}"
+ when: docker_users
+ tags: set-docker
+
+- name: Install upstream APT GPG key
+ apt_key:
+ id: "{{ kubernetes_apt_key }}"
+ keyserver: "{{ ansible_local.core.keyserver
+ if (ansible_local|d() and ansible_local.core|d() and
+ ansible_local.core.keyserver)
+ else 'hkp://pool.sks-keyservers.net' }}"
+ state: "present"
+ tags: install-kubernetes
+
+- name: Install upstream APT repository
+ apt_repository:
+ repo: "{{ kubernetes_repository }}"
+ state: "present"
+ update_cache: True
+ tags: install-kubernetes
+
+- name: Install kubeadm
+ apt:
+ name: "kubeadm={{ kubernetes_apt_package_name }}"
+ state: "present"
+ update_cache: True
+ install_recommends: False
+ cache_valid_time: "{{ apt_cache_time }}"
+ tags: install-kubernetes
+
+- name: Install kubectl
+ apt:
+ name: "kubectl={{ kubernetes_apt_package_name }}"
+ state: "present"
+ update_cache: True
+ install_recommends: False
+ cache_valid_time: "{{ apt_cache_time }}"
+ tags: install-kubernetes
+
+- name: Install kubelet
+ apt:
+ name: "kubelet={{ kubernetes_apt_package_name }}"
+ state: "present"
+ update_cache: True
+ install_recommends: False
+ cache_valid_time: "{{ apt_cache_time }}"
+ tags: install-kubernetes
+
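
kubeadm, kubectl and kubelet above are pinned to {{ kubernetes_apt_package_name }}; if the pin should survive later apt upgrades, a hold task could be added, for example (hypothetical, not part of this change):

    - name: Hold Kubernetes packages at the pinned version
      dpkg_selections:
        name: "{{ item }}"
        selection: 'hold'
      with_items:
        - "kubeadm"
        - "kubectl"
        - "kubelet"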
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2 b/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2
new file mode 100644
index 0000000000..26a1bcf372
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2
@@ -0,0 +1,34 @@
+# {{ ansible_managed }}
+
+[Unit]
+Description=Docker Application Container Engine
+Documentation=https://docs.docker.com
+After=network-online.target docker.socket
+Requires=docker.socket
+
+[Service]
+Type=notify
+# the default is not to use systemd for cgroups because the delegate issue still
+# exists and systemd currently does not support the cgroup feature set required
+# for containers run by docker
+{% if docker_daemon_environment %}
+Environment="{{ docker_daemon_environment | join('" "') }}"
+{% endif %}
+ExecStart=/usr/bin/dockerd {{ docker_daemon_options | join(" ") }}
+ExecReload=/bin/kill -s HUP $MAINPID
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
+LimitNPROC=infinity
+LimitCORE=infinity
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
+TasksMax=infinity
+TimeoutStartSec=0
+# set delegate yes so that systemd does not reset the cgroups of docker containers
+Delegate=yes
+# kill only the docker process, not all processes in the cgroup
+KillMode=process
+
+[Install]
+WantedBy=multi-user.target
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/interfaces_virl b/resources/tools/testbed-setup/ansible/roles/virl/files/interfaces_virl
new file mode 100644
index 0000000000..25ea35a0c5
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/interfaces_virl
@@ -0,0 +1,17 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto br1
+iface br1 inet static
+ address {{ ansible_default_ipv4["address"] }}
+ netmask {{ ansible_default_ipv4["netmask"] }}
+ gateway {{ ansible_default_ipv4["gateway"] }}
+ dns-nameservers 199.204.44.24 199.204.47.54
+ bridge_maxwait 0
+ bridge_ports eth0 eth4
+ bridge_stp off
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/nova_os_ip.patch b/resources/tools/testbed-setup/ansible/roles/virl/files/nova_os_ip.patch
new file mode 100644
index 0000000000..a943dc9b25
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/nova_os_ip.patch
@@ -0,0 +1,12 @@
+--- api/metadata/base.py.old 2017-04-26 12:38:52.522991596 +0000
++++ api/metadata/base.py 2017-04-26 10:06:46.396450566 +0000
+@@ -493,7 +493,7 @@
+ path = 'openstack/%s/%s' % (version, VD_JSON_NAME)
+ yield (path, self.lookup(path))
+
+- if self._check_version(LIBERTY, version, ALL_OPENSTACK_VERSIONS):
++ if False and self._check_version(LIBERTY, version, ALL_OPENSTACK_VERSIONS):
+ path = 'openstack/%s/%s' % (version, NW_JSON_NAME)
+ yield (path, self.lookup(path))
+
+
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/requirements.txt b/resources/tools/testbed-setup/ansible/roles/virl/files/requirements.txt
new file mode 100644
index 0000000000..11caf5d563
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/requirements.txt
@@ -0,0 +1,13 @@
+robotframework==2.9.2
+paramiko==1.16.0
+scp==0.10.2
+ipaddress==1.0.16
+interruptingcow==0.6
+PyYAML==3.11
+pykwalify==1.5.0
+scapy==2.3.1
+enum34==1.1.2
+requests==2.9.1
+ecdsa==0.13
+pycrypto==2.6.1
+pypcap==1.1.5
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt.b64 b/resources/tools/testbed-setup/ansible/roles/virl/files/salt.b64
new file mode 100644
index 0000000000..5e76a8583b
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/salt.b64
@@ -0,0 +1,27 @@
+H4sIAF7JK1cAA+1a32/bNhD2a/RXcMmDkyGy9VuDsQwDhhUoNuxhHfbSFQYj0TZrSVRJyklQ9H/f
+UZZsGbUj27HVduOXBzvkmUfyuzveURI4kcPeeWEBwtBXn3boW83PGj3bCS3P9v3QcnuW7YJAD/ln
+nleJQkjMEeolLMIJjlOabZdr6/9GIRT/gi/OaQOH8O+HgeI/8GzNfxdY8S/OFwgO598LLE/z3wU2
++Y/mjAhJ+GkNoeTf34d/LwwskLPB/V3NfxfYwX82EaYgfEH4QCTihToUwYFl7eTfBrJr/l0X+Hds
+R/FvnWSFLfif80+zKCliMjIQMlFN/6BBP81gg5JkZ3/EsgmdFpwYX3opGkdgh//HmUix+HAC5++1
++7/lr+O/4zng/5YfeNr/u8B2/6/p1879X0fr+X+CVHD//M9XH8r/IRHU+V8XaOd/FQOOPgza4n/o
+N/j3FP+uHdo6/ncBxbQK/hOakEFMOYkk40+qRR0IBdjACHHGZNUw5azIN1pSBqcHAvc2DDXWUEQc
+y2h2gjHtMAyrQTlrG29BebI5XqOlOUcioyF5zBmX63WnOMNTEo82xAPPq/6XJM0TLKHtPc3e46pV
+sIJH0HapPGc03Oo7anRRq7s0DOgaL7vGvMgymk2VTtVCIzJoNCkFGU5heDXanPCMJNWghlHkMcxm
+3FhFlMbq15u/XPZPBDI53nl+t/t/VQC8IBUs/d/zdtd/QbjK/3xL5X9OoOK/9v/z4zPzQhXfypby
++bSu/9beIeY0H4MgnTyN0F+8IFU7JxNOxGyEXuFEkBYb/tLL1qjQ7v/LGPYSHeUZ/1z9t87/LNsK
+1P2PH+j7v06wL//VYXOUjpb4D1lf2OBfxX/XcXT93wmu0EZGhOSMIBxFRAgEib/kLEEJFRJNGC9z
+JfEE9pEK9DCj0Qyl+AndkyrTILFxdXEhGfrj1RsUJZRkUgwQekNqAXHt3wyMK+MK/fqIIaUi5agg
+vXAQzuLymzuC7tIgZywlAi0xY0Kqo8S+5g+34imLbjM2FsW95ISMoxmJ5jcrGeeasx0yW3V7K41g
+9F6lEE2FGM75vU9XGieCxnfWbcSZEGkmt42+Hqae/efDbJtUM3G++PgRKV98289pkmA+mBLZf3fd
+V+nsKHHGGZEPjM/7t5XUlGM4omupDYG+HToDOxjYA2voeH10c4M+fVITgTmobHssPhRYzLbs5X5z
+cFon4TRm4Zx2Fm7bTrhbdsI9dA51+XFx8b0yq1bxL+3OB+P5+9/TPAc85Pmf44Xq/sfS5383aOH/
+5Zc/vfb7Hyewa/4D2/JU/ed7vj7/u0B5+tOMymHzzv9MNyO1VS1TyqbCS6PtHqSS/vYi7NeNFv9/
+efHXO6T+g/iv3v9zbNvT8b8T7MV/01OP0NFW/4V2UPPvh56t6j/f0vVfJ7hCC5qOBIEKT94VudoM
+iaS4cxCRIyMmUJXQXFKWocu/X//5ex2GLw1cyBlUUJe/YA4Wk6HflpaDfqxt6OeIioiBzaQ/QXRf
+DgzDQHBPyIIk6K3jev476GD5Zvt3yw6DE5Hjh8wwyCOJ0LAQfCjuaVZbIzJj9I9R12sm3rNoGU/h
+vHrAT621S0NwXUhBYlJVDy/R3V44NSXXBdRR2t19V75NcF04bddtZsxUhbdYfgXKWLKA70JyGkmT
+8RhMwjSBttikGVjFBEdECZPHiORy3XaXMGjNaWyqkHM3lGk+VPM369gDXSCgItBSYmMWy6uqu+f2
+YULBTscql1gK79iFLWL9HwblX//z1e+hVxCYc9yueJvcM5pjlsKv71hO1P18NC/D0xHJURn2VQ54
++tCyQnn+H/j+t+Xo878LrPj/ut7/Lus/zf/5sck/rA6O+kF8hve/D+I/cAJL898FdvAvIYUrV3xs
+zt9E2/N/y6uf/7hh4Dtl/aff/+4GKpcq77TL90jQPRZkee+CTLSuC/Wti4aGhoaGhoaGhoaGhoaG
+hoaGhoaGhsa3hH8BeOCWxQBQAAA=
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/etc/salt/minion.d/testlocal.conf b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/etc/salt/minion.d/testlocal.conf
new file mode 100644
index 0000000000..fce910cc0b
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/etc/salt/minion.d/testlocal.conf
@@ -0,0 +1,3 @@
+file_roots:
+ base:
+ - /srv/salt/
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server.sls b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server.sls
new file mode 100644
index 0000000000..bf3e47f9b3
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server.sls
@@ -0,0 +1,3 @@
+include:
+ - ckoester.nfs-server.install
+ - ckoester.nfs-server.configure
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/configure.sls b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/configure.sls
new file mode 100644
index 0000000000..ce362a38b2
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/configure.sls
@@ -0,0 +1,31 @@
+/nfs:
+ file.directory:
+ - user: root
+ - group: root
+ - mode: 755
+
+/nfs/scratch:
+ file.directory:
+ - user: root
+ - group: root
+ - mode: 1777
+
+/nfs/ro:
+ file.directory:
+ - user: virl
+ - group: virl
+ - mode: 755
+
+/etc/exports:
+ file.managed:
+ - mode: 644
+ - template: jinja
+ - source: "salt://ckoester/nfs-server/files/exports"
+
+nfs_server_running:
+ service.running:
+ - name: nfs-kernel-server
+
+update_exports:
+ cmd.run:
+ - name: exportfs -ra
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/files/exports b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/files/exports
new file mode 100644
index 0000000000..23802be486
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/files/exports
@@ -0,0 +1,12 @@
+# /etc/exports: the access control list for filesystems which may be exported
+# to NFS clients. See exports(5).
+#
+# Example for NFSv2 and NFSv3:
+# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
+#
+# Example for NFSv4:
+# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
+# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
+#
+/nfs/scratch {{ salt['pillar.get']('virl:l2_network', salt['grains.get']('l2_network', '172.16.1.0/24' )) }}(rw,no_root_squash,no_subtree_check) {{ salt['pillar.get']('virl:l2_network2', salt['grains.get']('l2_network2', '172.16.2.0/24' )) }}(rw,no_root_squash,no_subtree_check) {{ salt['pillar.get']('virl:l3_network', salt['grains.get']('l3_network', '172.16.3.0/24' )) }}(rw,no_root_squash,no_subtree_check)
+/nfs/ro *(ro,no_root_squash,no_subtree_check)
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/install.sls b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/install.sls
new file mode 100644
index 0000000000..0f136346f8
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/install.sls
@@ -0,0 +1,5 @@
+nfs-kernel-server install:
+ pkg.installed:
+ - skip_verify: True
+ - refresh: False
+ - name: nfs-kernel-server
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_jenkins-in b/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_jenkins-in
new file mode 100644
index 0000000000..1797c2c636
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_jenkins-in
@@ -0,0 +1 @@
+jenkins-in ALL=(root) NOPASSWD: /bin/rm -fr /scratch/*, /bin/rm -fr /nfs/scratch/*
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_virl b/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_virl
new file mode 100644
index 0000000000..e0cf48ac10
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_virl
@@ -0,0 +1 @@
+virl ALL=(root) NOPASSWD:ALL
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/ttyS0 b/resources/tools/testbed-setup/ansible/roles/virl/files/ttyS0
new file mode 100644
index 0000000000..0ed8550190
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/ttyS0
@@ -0,0 +1,10 @@
+# ttyS0 - getty
+#
+# This service maintains a getty on ttyS0 from the point the system is
+# started until it is shut down again.
+
+start on stopped rc RUNLEVEL=[12345]
+stop on runlevel [!12345]
+
+respawn
+exec /sbin/getty -L 115200 ttyS0 vt102
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl
new file mode 100644
index 0000000000..b4c3de745b
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb
+v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd
+vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2
+4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc
+1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs
+8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7
+0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN
+EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN
+0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU
+MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD
+p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW
+79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG
+OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD
+GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef
+bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg
+8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu
+f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u
+UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/
++m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT
+D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/
+sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn
+g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY
+qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8
+EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD
+BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN
+-----END RSA PRIVATE KEY-----
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl.pub b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl.pub
new file mode 100644
index 0000000000..0ef508c8a1
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBQOVOnNIenAtCi1k4VLgCBw80RYKc/UAHaFYWa8j7wpqH+Bu/yu5lT1GqE+znq3IrPFuG82RjhVsfkXUaKUdm7eYurMzgp+CEyZp4wSM7VPsFhh2+4F9O8iW6WN9Da8CWdisPCf4KXEmTzWbSkOC+ssRfipVWHawaHtQSRjLmvzYM1rbhWPM8HUn9zxr3M/wWhlSFgjMXgUu9EyNQAOlsxeAkywalpkubP3lqnQSi3u9vUJzUu8X3uLIEKu0g5IpEoDnRmEsaMwqY7CdhT5w/nvXy1umugNWLpUk6jvN65fJ60yzxgGX2RZuVYb0Go/2Ny0W+yaLkbuFeCrVjy74P virl@tb4-virl
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ifup b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ifup
new file mode 100644
index 0000000000..a4a743ac63
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ifup
@@ -0,0 +1,25 @@
+#! /bin/sh
+# Reload the OpenSSH server when an interface comes up, to allow it to start
+# listening on new addresses.
+
+set -e
+
+# Don't bother to restart sshd when lo is configured.
+if [ "$IFACE" = lo ]; then
+ exit 0
+fi
+
+# Only run from ifup.
+if [ "$MODE" != start ]; then
+ exit 0
+fi
+
+if [ "$IFACE" = br1 ]; then
+ /sbin/ip route delete default
+ /sbin/ip route add default via 10.30.51.1
+ /sbin/ifconfig br1:0 {{ virl_l2_ip }} netmask 255.255.255.0
+ exit 0
+fi
+
+
+exit 0
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ssh_environment b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ssh_environment
new file mode 100644
index 0000000000..5ec594d181
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ssh_environment
@@ -0,0 +1 @@
+PATH=/home/jenkins-in/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/virl-bootstrap-wrapper b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/virl-bootstrap-wrapper
new file mode 100644
index 0000000000..dc7ead804d
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/virl-bootstrap-wrapper
@@ -0,0 +1,58 @@
+#!/usr/bin/expect
+
+if ![ string equal $::env(USER) "root"] {
+ puts "Please re-run this script as root."
+ exit 1
+}
+
+log_file /tmp/virl-bootstrap.log
+set timeout 3600
+
+spawn ./virl-bootstrap.py
+expect "Which step are you on"
+send "1\r"
+
+expect "Salt master"
+send "vsm-md.virl.info\r"
+
+expect "Which step are you on"
+send "2\r"
+
+expect "Salt id"
+send "{{ hostname }}\r"
+expect "Salt domain name"
+send "linuxfoundation.org\r"
+
+expect "Which step are you on"
+send "3\r"
+
+expect "System hostname"
+send "{{ hostname }}\r"
+expect "System Domain name"
+send "linuxfoundation.org\r"
+
+expect "Which step are you on"
+send "4\r"
+
+puts "*******************STEP 6*************************************************************************************************************************************************"
+expect "Which step are you on"
+send "6\r"
+expect "Salt installed"
+
+puts "*******************STEP 8*************************************************************************************************************************************************"
+expect "Which step are you on"
+send "8\r"
+
+expect "True"
+
+puts "*******************STEP 9*************************************************************************************************************************************************"
+expect "Which step are you on"
+send "9\r"
+
+expect "Failed: 0"
+
+puts "*******************STEP 11*************************************************************************************************************************************************"
+expect "Which step are you on"
+send "11\r"
+
+expect eof
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/tasks/02-virl-bootstrap.yaml b/resources/tools/testbed-setup/ansible/roles/virl/tasks/02-virl-bootstrap.yaml
new file mode 100644
index 0000000000..9ffb40caaf
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/tasks/02-virl-bootstrap.yaml
@@ -0,0 +1,216 @@
+---
+ - name: install virl-bootstrap
+ git:
+ repo: 'https://github.com/VIRL-Open/virl-bootstrap.git'
+ dest: /home/virl/virl-bootstrap
+ version: xenial
+ become_user: virl
+ - name: copy vsettings file to /etc
+ shell: /usr/bin/install -m 666 /home/virl/virl-bootstrap/vsettings.ini /etc/virl.ini
+ become: yes
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT hostname {{ hostname }}
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT domain_name linuxfoundation.org
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT using_dhcp_on_the_public_port False
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT public_port dummy3
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT Static_IP 10.30.49.28
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT public_network 10.30.49.0
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT public_netmask {{ ansible_default_ipv4["netmask"] }}
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT public_gateway 10.30.49.1
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT proxy False
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT ntp_server pool.ntp.org
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT first_nameserver 199.204.44.24
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT second_nameserver 199.204.47.54
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT salt_master vsm-md.virl.info
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT salt_id {{ hostname }}
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT salt_domain linuxfoundation.org
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT salt_masterless false
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_port {{ virl_public_port }}
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_network {{ virl_l2_network }}
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_mask 255.255.255.0
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_network_gateway {{ virl_l2_gateway }}
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_start_address {{ virl_l2_start }}
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_end_address {{ virl_l2_end }}
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_address {{ ansible_default_ipv4["address"] }}/24
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT first_flat_nameserver 199.204.44.24
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT second_flat_nameserver 199.204.47.54
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_port2_enabled True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_port2 dummy0
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_network2 172.16.2.0/24
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_mask2 255.255.255.0
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_network_gateway2 172.16.2.1
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_start_address2 172.16.2.50
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_end_address2 172.16.2.253
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_address2 172.16.2.254/24
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT first_flat2_nameserver 199.204.44.24
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT second_flat2_nameserver 199.204.47.54
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_port dummy1
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_network 172.16.3.0/24
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_mask 255.255.255.0
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_network_gateway 172.16.3.1
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_floating_start_address 172.16.3.50
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_floating_end_address 172.16.3.253
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_address 172.16.3.254/24
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT first_snat_nameserver 199.204.44.24
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT second_snat_nameserver 199.204.47.54
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT ramdisk True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT ank 19401
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT ank_live 19402
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_webservices 19399
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_user_management 19400
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_apache_port 80
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_webmux 19403
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT Start_of_serial_port_range 17000
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT End_of_serial_port_range 18000
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT serial_port 19406
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT vnc_port 19407
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT location_region US
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT vnc False
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT guest_account True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT user_list tb4-virl:Cisco1234
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT uwmadmin_password Cisco1234
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT password password
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT mysql_password password
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT keystone_service_token fkgjhsdflkjh
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT enable_cinder True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT cinder_file True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT cinder_size 20000
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT cinder_location /var/lib/cinder/cinder-volumes.lvm
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT dummy_int True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT this_node_is_the_controller True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_controller_hostname controller
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_controller_IP 172.16.10.250
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_port dummy2
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_IP 172.16.10.250
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_network 172.16.10.0
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_netmask 255.255.255.0
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_gateway 172.16.10.1
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT iosv True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT csr1000v True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT iosxrv432 False
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT iosxrv52 False
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT iosxrv True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT nxosv True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT vpagent True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT iosvl2 True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT asav True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT lxc_server True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT lxc_iperf True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT lxc_routem True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT lxc_ostinato True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT server True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT vmm_mac True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT vmm_win32 True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT vmm_win64 True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT vmm_linux True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_clients True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT ram_overcommit 2
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT web_editor True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT mitaka True
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT kilo False
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT libvirt_cpu_mode host-passthrough
+ become_user: virl
+ - command: crudini --inplace --set /etc/virl.ini DEFAULT neutron_bridge_flooding True
+ become_user: virl
+ - name: copy vinstall bootstrap wrapper script
+ template: src=files/virl/virl-bootstrap-wrapper dest=/home/virl/virl-bootstrap/virl-bootstrap-wrapper owner=virl group=virl mode=775
+ become_user: virl
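Each crudini call above edits the DEFAULT section of /etc/virl.ini in place; as a rough, abridged sketch of the resulting INI fragment (values taken directly from the commands above):

    [DEFAULT]
    virl_apache_port = 80
    virl_webmux = 19403
    Start_of_serial_port_range = 17000
    End_of_serial_port_range = 18000
    uwmadmin_password = Cisco1234
    libvirt_cpu_mode = host-passthrough
    neutron_bridge_flooding = True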
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/tasks/03-virl-post-install.yaml b/resources/tools/testbed-setup/ansible/roles/virl/tasks/03-virl-post-install.yaml
new file mode 100644
index 0000000000..44b9df2098
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/tasks/03-virl-post-install.yaml
@@ -0,0 +1,87 @@
+---
+ - name: Add jenkins-in user
+ user: name=jenkins-in shell=/bin/bash comment="Jenkins user"
+ - name: Add jenkins-in user to sudoers
+ copy: src=files/sudoers_jenkins-in dest=/etc/sudoers.d/jenkins-in owner=root group=root mode=660
+ - name: Set Jenkins user authorized key
+ authorized_key: user=jenkins-in key="{{ lookup('file', '/home/testuser/.ssh/id_rsa.pub') }}"
+ - name: copy salt states for dnsmasq and nfs
+ synchronize: src=files/salt/ dest=/
+ - name: install NFS salt
+ shell: salt-call --local state.sls ckoester.nfs-server
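+# The 'creates' argument below keeps the shell task idempotent: the ln command
+# is skipped on re-runs once /scratch already exists.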
+ - name: NFS symlink
+ shell: ln -s /nfs/scratch /scratch
+ args:
+ creates: /scratch
+ - name: Update Nova CPU mode
+ ini_file: dest=/etc/nova/nova.conf section=libvirt option=cpu_mode value=host-passthrough
+ - name: Restart nova-compute service
+ service: name=nova-compute state=restarted
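+# The lineinfile task below replaces the existing 'interface listen 172.16.x'
+# entry in /etc/ntp.conf with this host's primary IPv4 address.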
+ - name: Change listen interface in NTP settings
+ lineinfile: dest=/etc/ntp.conf state=present regexp='^interface listen 172.16.*' line='interface listen {{ ansible_default_ipv4["address"] }}'
+ - name: Restart NTP service
+ service: name=ntp state=restarted
+ - name: Permit SSH user environment
+ lineinfile: dest=/etc/ssh/sshd_config state=present regexp='PermitUserEnvironment.*' line='PermitUserEnvironment yes'
+ - name: Restart SSH daemon
+ service: name=ssh state=restarted
+ - name: clone csit git repository
+ git: repo=https://gerrit.fd.io/r/csit
+ dest=/home/jenkins-in/git/csit
+ become_user: jenkins-in
+ - name: Link testcase-infra directory
+ command: ln -sf /home/jenkins-in/git/csit/resources/tools/virl /home/jenkins-in/testcase-infra
+ args:
+ creates: /home/jenkins-in/testcase-infra
+ become_user: jenkins-in
+ - name: Create bin directory
+ file: path=/home/jenkins-in/bin state=directory mode=0755
+ become_user: jenkins-in
+ - name: Link start-testcase executable
+ command: ln -sf /home/jenkins-in/testcase-infra/bin/start-testcase /home/jenkins-in/bin/start-testcase
+ args:
+ creates: /home/jenkins-in/bin/start-testcase
+ become_user: jenkins-in
+ - name: Link stop-testcase executable
+ command: ln -sf /home/jenkins-in/testcase-infra/bin/stop-testcase /home/jenkins-in/bin/stop-testcase
+ args:
+ creates: /home/jenkins-in/bin/stop-testcase
+ become_user: jenkins-in
+ - name: Link kill-idle-testcases executable
+ command: ln -sf /home/jenkins-in/testcase-infra/bin/kill-idle-testcases /home/jenkins-in/bin/kill-idle-testcases
+ args:
+ creates: /home/jenkins-in/bin/kill-idle-testcases
+ become_user: jenkins-in
+ - name: Copy SSH private key
+ copy: src=files/virl/id_rsa_virl dest=/home/jenkins-in/.ssh/id_rsa_virl mode=600
+ become_user: jenkins-in
+ - name: Copy SSH public key
+ copy: src=files/virl/id_rsa_virl.pub dest=/home/jenkins-in/.ssh/id_rsa_virl.pub mode=644
+ become_user: jenkins-in
+ - name: Copy SSH environment
+ copy: src=files/virl/ssh_environment dest=/home/jenkins-in/.ssh/environment mode=644
+ become_user: jenkins-in
+ - name: Add ~/bin to path
+ lineinfile: dest=/home/jenkins-in/.bashrc state=present line='PATH=${HOME}/bin:$PATH'
+ become_user: jenkins-in
+ - name: Update own IP address in start script
+ shell: sed -i /home/jenkins-in/testcase-infra/bin/start-testcase -e 's/10.30.51.28/{{ ansible_default_ipv4["address"] }}/'
+ become_user: jenkins-in
+ - name: Add authorized key
+ lineinfile: dest=/home/jenkins-in/.ssh/authorized_keys line='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD4gderzsZyoxHULjuvPHoJuKnkaGrykqtuoqs/k1/jUdxitPoY5eX2cVYqww7MiUif7zLsiXbt5mHtyxAYCluDxAuIcy1xgSZY3MpmmSqDie4A/FdVfCUqCcpf3TZKsRP0an1MNrKIe0JFZV+uU889IDRQRdboGMs3+4cn5b9fOutpv71qwFVrTm9PZbqfQonrrN8Jp4Mz3XaZDpK22xwDAWhYOZ0eV6CJWquUgbYAHE6/HHMvd0zeJKaWZCXO/1tOGOj6cjgoViHqbnCtmYCjmv/ir0IglzbUdWdOqQY5YkhnPonveV48lVKrmBipqgbDezAUQD8wOQ7HttpYpKgt jenkins-in@tb4-virl'
+ become_user: jenkins-in
+# All of the below will fail if the VIRL user/project already exists
+ - name: Create VIRL project
+ shell: virl_uwm_client project-edit --name tb4-virl --enabled True -i 400 -r 1024000 -c 500
+ ignore_errors: true
+ become_user: virl
+# - name: Delete VIRL project user
+# shell: virl_uwm_client user-delete --name tb4-virl
+# ignore_errors: true
+# - name: Recreate VIRL project user
+# shell: virl_uwm_client user-create --name tb4-virl --role admin --project tb4-virl --set-password Cisco1234
+# ignore_errors: true
+ - name: Create VPP flavor
+ shell: virl_uwm_client flavor-create --name vPP --ram 4096 --vcpus 2 --disk 0
+ ignore_errors: true
+ become_user: virl
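The project and flavor tasks above deliberately use ignore_errors because virl_uwm_client fails when the object already exists. A stricter variant could inspect the command result instead; a minimal sketch, assuming the CLI reports an 'already exists' message (exact wording not verified):

    - name: Create VPP flavor
      shell: virl_uwm_client flavor-create --name vPP --ram 4096 --vcpus 2 --disk 0
      register: flavor_result
      # 'already exists' is an assumed error string; adjust to the real CLI output.
      failed_when: flavor_result.rc != 0 and 'already exists' not in flavor_result.stderr
      become_user: virl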
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/tasks/04-disk-image.yaml b/resources/tools/testbed-setup/ansible/roles/virl/tasks/04-disk-image.yaml
new file mode 100644
index 0000000000..254c05e709
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/tasks/04-disk-image.yaml
@@ -0,0 +1,27 @@
+---
+ - name: Create server image directory
+ file: path=/home/virl/virl-server-image state=directory mode=0755
+ become_user: virl
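+# Without remote_src the copy module reads 'src' from the Ansible control
+# machine, so the qemu images below must already exist locally under
+# /home/virl/virl-server-image/.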
+ - name: Copy UBUNTU server image
+ copy: src=/home/virl/virl-server-image/packer-csit-ubuntu-16.04.1_2017-10-21_2.0-qemu dest=/home/virl/virl-server-image/packer-csit-ubuntu-16.04.1_2017-10-21_2.0-qemu owner=virl group=virl mode=644
+ become_user: virl
+ - name: Copy CENTOS server image
+ copy: src=/home/virl/virl-server-image/packer-csit-centos-7.3-1611_2017-02-23_1.4-qemu dest=/home/virl/virl-server-image/packer-csit-centos-7.3-1611_2017-02-23_1.4-qemu owner=virl group=virl mode=644
+ become_user: virl
+ - name: Import UBUNTU server image into glance
+ shell: virl_uwm_client image-create --subtype server --version csit-ubuntu-16.04.1_2017-10-21_2.0 --image-on-server /home/virl/virl-server-image/packer-csit-ubuntu-16.04.1_2017-10-21_2.0-qemu
+ become_user: virl
+ - name: Import CENTOS server image into glance
+ shell: virl_uwm_client image-create --subtype server --version csit-centos-7.3-1611_2017-02-23_1.4 --image-on-server /home/virl/virl-server-image/packer-csit-centos-7.3-1611_2017-02-23_1.4-qemu
+ become_user: virl
+ - name: Create common directory
+ file: path=/nfs/common state=directory mode=0755
+ - name: Create Nested VM directory
+ file: path=/nfs/common/nested-vm state=directory mode=0755
+ - name: Copy Nested VM image
+ copy: src=/home/virl/virl-server-image/csit-nested-1.6.img dest=/nfs/common/nested-vm/csit-nested-1.6.img owner=virl group=virl mode=644
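+# With state=link the file module creates a symlink at 'dest' (alias of 'path')
+# pointing to 'src'.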
+ - name: NFS symlink
+ file:
+ src: /nfs/common/nested-vm/csit-nested-1.6.img
+ dest: /nfs/common/nested-vm-current.img.disabled
+ state: link
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/virl/tasks/main.yaml
new file mode 100644
index 0000000000..8bca04e163
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/virl/tasks/main.yaml
@@ -0,0 +1,78 @@
+---
+- name: Interfaces file
+ template:
+ src: "files/interfaces_virl"
+ dest: "/etc/network/interfaces owner=root group=root mode=644"
+ tags: interfaces
+
+- name: Add VIRL user
+ user:
+ name: virl
+ shell: "/bin/bash"
+ comment: "VIRL user"
+ password: "$6$mJPlK5FKLar6$xxXPP0LRhC7T1yhHtym18Z3nKZweTtfTxzi1WpvVHJ0U/Em1UWcHqfMhRApFp0rsY9jHRuL6FfeFbKPN..uDK."
+ tags: users
+
+- name: Add VIRL user to sudoers
+ copy:
+ src: "files/sudoers_virl"
+ dest: "/etc/sudoers.d/virl owner=root group=root mode=660"
+ tags: sudoers
+
+- name: Add post up for additional address
+ copy:
+ src: "files/ifup"
+ dest: "/etc/network/if-up.d/virl owner=root group=root mode=755"
+ tags: interfaces
+
+- name: Set VIRL user authorized key
+ authorized_key:
+ user: virl
+ key: "{{ lookup('file', '/home/testuser/.ssh/id_rsa.pub') }}"
+ tags: users
+
+- name: Install bridge-utils
+ apt:
+ name: "bridge-utils"
+ state: "present"
+ tags: apt-install-bridge-utils
+
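+# Symlinking the udev rules file to /dev/null below masks predictable interface
+# naming (reverting to legacy ethX names); the initramfs is regenerated so the
+# change takes effect at boot.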
+- name: Old interface naming
+ command: "ln -s /dev/null /etc/udev/rules.d/80-net-setup-link.rules"
+ tags: interfaces
+
+- name: Update init for old interface naming
+ command: "update-initramfs -u"
+ tags: interfaces
+
+- name: QEMU log garbage collector
+ cron:
+ minute: "0"
+ hour: "0"
+ name: "QEMU log garbage collector"
+ job: "find /var/log/libvirt/qemu -type f -mtime +14 -name 'instance*.log' -delete"
+ tags: cron
+
+- name: VPP deb package garbage collector
+ cron:
+ minute: "0"
+ hour: "0"
+ name: "VPP deb package garbage collector"
+ job: "find /tmp -type f -atime +14 -name '*.deb' -delete"
+ tags: cron
+
+- name: VPP rpm package garbage collector
+ cron:
+ minute: "0"
+ hour: "0"
+ name: "VPP rpm package garbage collector"
+ job: "find /tmp -type f -atime +14 -name '*.rpm' -delete"
+ tags: cron
+
+- name: NFS scratch dir garbage collector
+ cron:
+ minute: "0"
+ hour: "0"
+ name: "NFS scratch dir garbage collector"
+ job: "find /nfs/scratch/ -type d -mtime +1 -name 'session-*' -exec rm -r '{}' \\;"
+ tags: cron
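Each cron task above renders as a daily (00:00) root crontab entry tagged with the task name; the QEMU log job, for example, ends up roughly as:

    #Ansible: QEMU log garbage collector
    0 0 * * * find /var/log/libvirt/qemu -type f -mtime +14 -name 'instance*.log' -delete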