author     pmikus <pmikus@cisco.com>  2020-12-04 08:44:21 +0000
committer  pmikus <pmikus@cisco.com>  2020-12-04 08:44:21 +0000
commit     938d89a6d00a6bd4e2dd9ca870d0f1e62f144f93 (patch)
tree       0ae0a30c2e0cc0da4dbb80586a410eaa301d04bd
parent     523c6e6e24101206ff1318ca17c310dff8b3c9d2 (diff)
Terraform: csit-shim refactor
- remove snergster image dependency

Signed-off-by: pmikus <pmikus@cisco.com>
Change-Id: I76fef60371e35dddc6da56db5f9207e003d1c792
-rw-r--r--  resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad                                                                             |  17
-rw-r--r--  resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad (renamed from resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim_amd.nomad) |  91
-rw-r--r--  resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim_arm.nomad                                                                  | 100
-rw-r--r--  resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf                                                                              |   9
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/Dockerfile                                                               |  61
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/badkeypub                                                          |   1
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/sshconfig                                                          |   3
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/wrapdocker                                                         | 113
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/csit_shim_image/tasks/main.yaml                                                                |  32
-rw-r--r--  resources/tools/testbed-setup/ansible/vpp_device.yaml                                                                                      |   2
10 files changed, 307 insertions(+), 122 deletions(-)
diff --git a/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad b/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
index 6c153ffd03..72d321d3b1 100644
--- a/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
+++ b/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
@@ -22,19 +22,19 @@ job "prod-nginx" {
# The "max_parallel" parameter specifies the maximum number of updates to
# perform in parallel. In this case, this specifies to update a single task
# at a time.
- max_parallel = 0
+ max_parallel = 0
# The "min_healthy_time" parameter specifies the minimum time the allocation
# must be in the healthy state before it is marked as healthy and unblocks
# further allocations from being updated.
- min_healthy_time = "10s"
+ min_healthy_time = "10s"
# The "healthy_deadline" parameter specifies the deadline in which the
# allocation must be marked as healthy after which the allocation is
# automatically transitioned to unhealthy. Transitioning to unhealthy will
# fail the deployment and potentially roll back the job if "auto_revert" is
# set to true.
- healthy_deadline = "3m"
+ healthy_deadline = "3m"
# The "progress_deadline" parameter specifies the deadline in which an
# allocation must be marked as healthy. The deadline begins when the first
@@ -47,7 +47,7 @@ job "prod-nginx" {
# The "auto_revert" parameter specifies if the job should auto-revert to the
# last stable job on deployment failure. A job is marked as stable if all the
# allocations as part of its deployment were marked healthy.
- auto_revert = false
+ auto_revert = false
# The "canary" parameter specifies that changes to the job that would result
# in destructive updates should create the specified number of canaries
@@ -58,9 +58,16 @@ job "prod-nginx" {
# Further, setting "canary" equal to the count of the task group allows
# blue/green deployments. When the job is updated, a full set of the new
# version is deployed and upon promotion the old version is stopped.
- canary = 0
+ canary = 0
}
+ reschedule {
+ delay = "2m"
+ delay_function = "constant"
+ unlimited = true
+ }
+
+
# The "group" stanza defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
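The reschedule stanza added above makes Nomad retry a failed prod-nginx allocation every two minutes, indefinitely, rather than giving up after the default attempt limit. One way to observe the effect after deploying (a sketch; the allocation id is a placeholder):

  # Submit the job, then inspect reschedule events on its allocations.
  nomad job run prod-nginx.nomad
  nomad job status prod-nginx
  nomad alloc status <alloc-id>   # placeholder id; shows reschedule attempts and the 2m delay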
diff --git a/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim_amd.nomad b/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad
index 1ba24696a4..328f503a0b 100644
--- a/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim_amd.nomad
+++ b/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad
@@ -1,4 +1,4 @@
-job "prod-csit-shim-amd" {
+job "prod-csit-shim" {
# The "region" parameter specifies the region in which to execute the job.
# If omitted, this inherits the default region name of "global".
# region = "global"
@@ -18,11 +18,6 @@ job "prod-csit-shim-amd" {
#
type = "system"
- constraint {
- attribute = "${node.class}"
- value = "csit"
- }
-
# The "group" stanza defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
@@ -32,11 +27,87 @@ job "prod-csit-shim-amd" {
#
# https://www.nomadproject.io/docs/job-specification/group.html
#
- group "prod-group1-csit-amd" {
+ group "prod-group1-csit-shim-amd" {
+ # The "count" parameter specifies the number of the task groups that should
+ # be running under this group. This value must be non-negative and defaults
+ # to 1.
+ count = 1
+
+ constraint {
+ attribute = "${node.class}"
+ value = "csit"
+ }
+
+ restart {
+ interval = "1m"
+ attempts = 3
+ delay = "15s"
+ mode = "delay"
+ }
+
+ # The "task" stanza creates an individual unit of work, such as a Docker
+ # container, web application, or batch processing.
+ #
+ # For more information and examples on the "task" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/task.html
+ #
+ task "prod-task1-csit-shim-amd" {
+ # The "driver" parameter specifies the task driver that should be used to
+ # run the task.
+ driver = "docker"
+
+ # The "config" stanza specifies the driver configuration, which is passed
+ # directly to the driver to start the task. The details of configurations
+ # are specific to each driver, so please see specific driver
+ # documentation for more information.
+ config {
+ image = "csit_shim-ubuntu1804:local"
+ network_mode = "host"
+ pid_mode = "host"
+ volumes = [
+ "/var/run/docker.sock:/var/run/docker.sock"
+ ]
+ privileged = true
+ }
+
+ # The "resources" stanza describes the requirements a task needs to
+ # execute. Resource requirements include memory, network, cpu, and more.
+ # This ensures the task will execute on a machine that contains enough
+ # resource capacity.
+ #
+ # For more information and examples on the "resources" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/resources.html
+ #
+ resources {
+ cpu = 100
+ memory = 128
+ network {
+ mbits = 10
+ port "ssh" {
+ static = 6022
+ }
+ port "ssh2" {
+ static = 6023
+ }
+ }
+ }
+ }
+ }
+
+ group "prod-group1-csit-shim-arm" {
# The "count" parameter specifies the number of the task groups that should
# be running under this group. This value must be non-negative and defaults
# to 1.
- count = 2
+ count = 1
+
+ constraint {
+ attribute = "${node.class}"
+ value = "csitarm"
+ }
restart {
interval = "1m"
@@ -53,7 +124,7 @@ job "prod-csit-shim-amd" {
#
# https://www.nomadproject.io/docs/job-specification/task.html
#
- task "prod-task1-csit-amd" {
+ task "prod-task1-csit-shim-arm" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
driver = "docker"
@@ -63,7 +134,7 @@ job "prod-csit-shim-amd" {
# are specific to each driver, so please see specific driver
# documentation for more information.
config {
- image = "snergster/csit-shim"
+ image = "csit_shim-ubuntu1804:local"
network_mode = "host"
pid_mode = "host"
volumes = [
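With the amd and arm jobs folded into one, both task groups now run the locally built csit_shim-ubuntu1804:local image with host networking, the host PID namespace, and the host Docker socket mounted, publishing sshd on static ports 6022 and 6023. A quick smoke test from a Nomad client node might look like this (a sketch, assuming shell access to the node):

  # Confirm the shim container is running and its sshd answers on the static ports.
  docker ps --filter ancestor=csit_shim-ubuntu1804:local
  ssh -p 6022 root@localhost true
  ssh -p 6023 root@localhost true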
diff --git a/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim_arm.nomad b/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim_arm.nomad
deleted file mode 100644
index ba7b656d37..0000000000
--- a/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim_arm.nomad
+++ /dev/null
@@ -1,100 +0,0 @@
-job "prod-csit-shim-arm" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "global"
- #
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = [ "yul1" ]
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # For more information, please see the online documentation at:
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers.html
- #
- type = "system"
-
- constraint {
- attribute = "${node.class}"
- value = "csitarm"
- }
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # For more information and examples on the "group" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/group.html
- #
- group "prod-group1-csit-arm" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = 2
-
- restart {
- interval = "1m"
- attempts = 3
- delay = "15s"
- mode = "delay"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # For more information and examples on the "task" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "prod-task1-csit-arm" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "snergster/csit-arm-shim"
- network_mode = "host"
- pid_mode = "host"
- volumes = [
- "/var/run/docker.sock:/var/run/docker.sock"
- ]
- privileged = true
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
- #
- resources {
- cpu = 100
- memory = 128
- network {
- mbits = 10
- port "ssh" {
- static = 6022
- }
- port "ssh2" {
- static = 6023
- }
- }
- }
- }
- }
-}
\ No newline at end of file
diff --git a/resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf b/resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf
index 8dc101c12a..dace9094f2 100644
--- a/resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf
+++ b/resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf
@@ -1,9 +1,4 @@
-resource "nomad_job" "prod_csit_shim_arm" {
+resource "nomad_job" "prod_csit_shim" {
provider = nomad
- jobspec = file("${path.module}/prod_csit_shim_arm.nomad")
-}
-
-resource "nomad_job" "prod_csit_shim_amd" {
- provider = nomad
- jobspec = file("${path.module}/prod_csit_shim_amd.nomad")
+ jobspec = file("${path.module}/prod_csit_shim.nomad")
}
\ No newline at end of file
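Collapsing the two per-architecture resources into a single nomad_job means the next apply drops the old jobs from state and registers the unified one. A minimal check from the 1n_nmd module directory (a sketch, assuming the Nomad provider is already configured):

  # Plan should show prod_csit_shim_arm and prod_csit_shim_amd destroyed
  # and prod_csit_shim created; apply registers the unified job.
  terraform init
  terraform plan
  terraform apply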
diff --git a/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/Dockerfile b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/Dockerfile
new file mode 100644
index 0000000000..2b2e1eae55
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/Dockerfile
@@ -0,0 +1,61 @@
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:18.04
+LABEL Description="CSIT vpp-device ubuntu 18.04 shim image"
+LABEL Version="master"
+
+# Setup the environment
+ENV DEBIAN_FRONTEND=noninteractive
+ENV NOTVISIBLE "in users profile"
+RUN echo "export VISIBLE=now" >> /etc/profile
+
+ADD files/wrapdocker /usr/local/bin/wrapdocker
+RUN chmod +x /usr/local/bin/wrapdocker
+
+# Install packages and Docker
+RUN apt-get -q update \
+ && apt-get install -y -qq \
+ bash \
+ curl \
+ iproute2 \
+ locales \
+ ssh \
+ sudo \
+ tzdata \
+ uuid-runtime \
+ && curl -fsSL https://get.docker.com | sh \
+ && rm -rf /var/lib/apt/lists/*
+
+# Configure locales
+RUN locale-gen en_US
+
+RUN mkdir /var/run/sshd
+RUN echo 'root:Csit1234' | chpasswd
+RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+# SSH login fix. Otherwise user is kicked off after login
+RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+
+# Need volume for sidecar docker launches
+VOLUME /var/lib/docker
+
+# SSH to listen on port 6022 in shim
+RUN echo 'Port 6022' >>/etc/ssh/sshd_config
+RUN echo 'Port 6023' >>/etc/ssh/sshd_config
+ADD files/badkeypub /root/.ssh/authorized_keys
+ADD files/sshconfig /root/.ssh/config
+
+# Start sshd by default
+EXPOSE 22
+CMD ["/usr/sbin/sshd", "-D"]
\ No newline at end of file
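The image is meant to be built locally on each vpp_device host (the Ansible role below does exactly that), so no registry pull is needed. Built and run by hand it would look roughly like this, mirroring the Nomad task config above (a sketch):

  # Build next to the Dockerfile, then run privileged with the host
  # Docker socket, as the Nomad task does.
  docker build -t csit_shim-ubuntu1804:local .
  docker run -d --privileged --network host --pid host \
    -v /var/run/docker.sock:/var/run/docker.sock \
    csit_shim-ubuntu1804:local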
diff --git a/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/badkeypub b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/badkeypub
new file mode 100644
index 0000000000..4530b66b05
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/badkeypub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyUNd/iRk5Ajw4ZBB0gXyjzecEzQHh/MctgvHGJjasqJDkwYyZBrunUorOZ3n82W8VGdd5+eNINCWOM/ERjuaHjnutfade+ocPgZRdk+kEgTvetDVNWIgBd0PMVcnp57jJfx7CZVqTNgGeVQ8OJ2RbJGeOb/EKApQI74IPkAfc0PSieSw5gC0eqEOHb39Awgp0ycrzsUHF/OEicfCmo+6vvrMGenDe7frKUoTKYMWs7l3DOyFC8NaOxhGD3J1Ne5u3A/r4w6mN1HVI0rFwIcoms+t0B4lb2ODWKZiZikQdn8/eqwsmbSEZZsWN3FkshgjPS83+dNqVwB6pPY5Yqte7 ejk@bhima.local
\ No newline at end of file
diff --git a/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/sshconfig b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/sshconfig
new file mode 100644
index 0000000000..e7bd90757e
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/sshconfig
@@ -0,0 +1,3 @@
+Host 172.17.0.*
+ StrictHostKeyChecking no
+ UserKnownHostsFile=/dev/null
\ No newline at end of file
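This client config disables host-key checking for the Docker bridge subnet, where sidecar containers come and go with fresh host keys on every launch. The equivalent one-off invocation, with a hypothetical container address, would be:

  ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@172.17.0.2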
diff --git a/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/wrapdocker b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/wrapdocker
new file mode 100644
index 0000000000..d13f8b7c5e
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/files/files/wrapdocker
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+# Ensure that all nodes in /dev/mapper correspond to mapped devices currently loaded by the device-mapper kernel driver
+dmsetup mknodes
+
+# First, make sure that cgroups are mounted correctly.
+CGROUP=/sys/fs/cgroup
+: ${LOG:=stdio}
+
+[ -d $CGROUP ] ||
+ mkdir $CGROUP
+
+mountpoint -q $CGROUP ||
+ mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
+ echo "Could not make a tmpfs mount. Did you use --privileged?"
+ exit 1
+ }
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
+then
+ mount -t securityfs none /sys/kernel/security || {
+ echo "Could not mount /sys/kernel/security."
+ echo "AppArmor detection and --privileged mode might break."
+ }
+fi
+
+# Mount the cgroup hierarchies exactly as they are in the parent system.
+for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
+do
+ [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
+ mountpoint -q $CGROUP/$SUBSYS ||
+ mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
+
+ # The two following sections address a bug which manifests itself
+ # by a cryptic "lxc-start: no ns_cgroup option specified" when
+ # trying to start containers within a container.
+ # The bug seems to appear when the cgroup hierarchies are not
+ # mounted on the exact same directories in the host, and in the
+ # container.
+
+ # Named, control-less cgroups are mounted with "-o name=foo"
+ # (and appear as such under /proc/<pid>/cgroup) but are usually
+ # mounted on a directory named "foo" (without the "name=" prefix).
+ # Systemd and OpenRC (and possibly others) both create such a
+ # cgroup. To avoid the aforementioned bug, we symlink "foo" to
+ # "name=foo". This shouldn't have any adverse effect.
+ echo $SUBSYS | grep -q ^name= && {
+ NAME=$(echo $SUBSYS | sed s/^name=//)
+ ln -s $SUBSYS $CGROUP/$NAME
+ }
+
+ # Likewise, on at least one system, it has been reported that
+ # systemd would mount the CPU and CPU accounting controllers
+ # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+ # but on a directory called "cpu,cpuacct" (note the inversion
+ # in the order of the groups). This tries to work around it.
+ [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
+done
+
+# Note: as I write those lines, the LXC userland tools cannot setup
+# a "sub-container" properly if the "devices" cgroup is not in its
+# own hierarchy. Let's detect this and issue a warning.
+grep -q :devices: /proc/1/cgroup ||
+ echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
+grep -qw devices /proc/1/cgroup ||
+ echo "WARNING: it looks like the 'devices' cgroup is not mounted."
+
+# Now, close extraneous file descriptors.
+pushd /proc/self/fd >/dev/null
+for FD in *
+do
+ case "$FD" in
+ # Keep stdin/stdout/stderr
+ [012])
+ ;;
+ # Nuke everything else
+ *)
+ eval exec "$FD>&-"
+ ;;
+ esac
+done
+popd >/dev/null
+
+
+# If a pidfile is still around (for example after a container restart),
+# delete it so that docker can start.
+rm -rf /var/run/docker.pid
+
+# If we were given a PORT environment variable, start as a simple daemon;
+# otherwise, spawn a shell as well
+if [ "$PORT" ]
+then
+ exec dockerd -H 0.0.0.0:$PORT -H unix:///var/run/docker.sock \
+ $DOCKER_DAEMON_ARGS
+else
+ if [ "$LOG" == "file" ]
+ then
+ dockerd $DOCKER_DAEMON_ARGS &>/var/log/docker.log &
+ else
+ dockerd $DOCKER_DAEMON_ARGS &
+ fi
+ (( timeout = 60 + SECONDS ))
+ until docker info >/dev/null 2>&1
+ do
+ if (( SECONDS >= timeout )); then
+ echo 'Timed out trying to connect to internal docker host.' >&2
+ break
+ fi
+ sleep 1
+ done
+ [[ $1 ]] && exec "$@"
+ exec bash --login
+fi
\ No newline at end of file
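wrapdocker prepares the cgroup mounts, closes inherited file descriptors, starts a nested dockerd inside the privileged container, and waits up to 60 seconds for the daemon to answer before handing over control. Following the script's own logic, usage inside the shim container would be roughly (a sketch):

  # Daemon-only mode: dockerd listens on tcp://0.0.0.0:2375 and the local socket.
  PORT=2375 wrapdocker

  # Or: start the nested daemon, then exec the given command against it.
  wrapdocker docker run --rm hello-world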
diff --git a/resources/tools/testbed-setup/ansible/roles/csit_shim_image/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/tasks/main.yaml
new file mode 100644
index 0000000000..bdba4f6563
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/roles/csit_shim_image/tasks/main.yaml
@@ -0,0 +1,32 @@
+---
+# file: roles/csit_shim_image/tasks/main.yaml
+
+- name: Create a directory if it does not exist
+ file:
+ path: "{{ item }}"
+ state: "directory"
+ mode: 0755
+ with_items:
+ - "/opt/csit-shim/"
+ - "/opt/csit-shim/files"
+ tags: csit-shim-image
+
+- name: Copy Build Items
+ copy:
+ src: "{{ item }}"
+ dest: "/opt/csit-shim/{{ item }}"
+ owner: "root"
+ group: "root"
+ mode: 0655
+ with_items:
+ - "Dockerfile"
+ - "files/badkeypub"
+ - "files/sshconfig"
+ - "files/wrapdocker"
+ tags: csit-shim-image
+
+- name: Build CSIT shim Docker Image
+ shell: "docker build -t csit_shim-ubuntu1804:local ."
+ args:
+ chdir: "/opt/csit-shim"
+ tags: csit-shim-image
\ No newline at end of file
diff --git a/resources/tools/testbed-setup/ansible/vpp_device.yaml b/resources/tools/testbed-setup/ansible/vpp_device.yaml
index 363d1b190a..ac42b8cafe 100644
--- a/resources/tools/testbed-setup/ansible/vpp_device.yaml
+++ b/resources/tools/testbed-setup/ansible/vpp_device.yaml
@@ -24,5 +24,7 @@
tags: kernel_vm
- role: csit_sut_image
tags: csit_sut_image
+ - role: csit_shim_image
+ tags: csit_shim_image
- role: cleanup
tags: cleanup
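With the role wired into vpp_device.yaml, rebuilding just the shim image on the vpp_device hosts can be limited by tag (a sketch; inventory and host limits depend on the site setup):

  ansible-playbook vpp_device.yaml --tags csit_shim_image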