author     Dave Wallace <dwallacelf@gmail.com>  2020-07-21 15:32:12 -0700
committer  Dave Wallace <dwallacelf@gmail.com>  2020-10-28 18:31:44 +0000
commit     97963740c8f3ae0b5b17fc15a53b2b8c86c7df1f (patch)
tree       0374dce3c49637add2199e94aabb5536e3ee4ac5
parent     ae401885b0f508e321e02ba84a928bf7a9049776 (diff)
Automate generation of docker builder images.
- Add bash scripts to generate the builder image Dockerfile, which invokes the
  scripts to iterate over the project branches, installing OS packages and
  python modules.

Change-Id: I9b3b355b593e2f982f287023c662cc4a4f35a734
Signed-off-by: Dave Wallace <dwallacelf@gmail.com>
-rw-r--r--  docker/scripts/README.md                            694
-rwxr-xr-x  docker/scripts/build_executor_docker_image.sh       145
-rwxr-xr-x  docker/scripts/dbld_csit_find_ansible_packages.py    88
-rwxr-xr-x  docker/scripts/dbld_csit_install_packages.sh          51
-rwxr-xr-x  docker/scripts/dbld_dump_build_logs.sh                56
-rwxr-xr-x  docker/scripts/dbld_install_docker.sh                 46
-rwxr-xr-x  docker/scripts/dbld_lfit_requirements.sh              70
-rwxr-xr-x  docker/scripts/dbld_vpp_install_packages.sh           81
-rw-r--r--  docker/scripts/lib_apt.sh                            340
-rw-r--r--  docker/scripts/lib_common.sh                         290
-rw-r--r--  docker/scripts/lib_csit.sh                           173
-rw-r--r--  docker/scripts/lib_dnf.sh                            163
-rw-r--r--  docker/scripts/lib_vpp.sh                             90
-rw-r--r--  docker/scripts/lib_yum.sh                            156
-rwxr-xr-x  docker/scripts/update_dockerhub_prod_tags.sh         392
15 files changed, 2835 insertions(+), 0 deletions(-)
diff --git a/docker/scripts/README.md b/docker/scripts/README.md
new file mode 100644
index 00000000..7c9f2e0e
--- /dev/null
+++ b/docker/scripts/README.md
@@ -0,0 +1,694 @@
+# Automated Building Of FD.io CI Executor Docker Images
+
+This collection of bash scripts and libraries is used to automate the process
+of building FD.io docker 'builder' images (aka Nomad executors). The goal is to
+create a completely automated CI/CD pipeline. The bash code is designed to be
+run in a regular Linux bash shell in order to bootstrap the CI/CD pipeline
+as well as in a docker 'builder' image started by a ci-management jenkins job.
+The Dockerfile is generated prior to executing 'docker build' based on the OS
+parameter specified. The project git repos are also copied into the docker
+image and retained to optimize git object retrieval by the Jenkins jobs
+running the CI/CD tasks.
+
+## Image Builder Algorithm
+
+The general algorithm for automating the generation of the docker images, such
+that the downloadable requirements for each project are pre-installed or cached
+in the executor image, is as follows (a minimal sketch of the flow appears
+after the list):
+
+1. Run the docker image builder on a host of the target architecture. Bootstrap
+ images will be built 'by hand' on target hosts until such time as the
+ CI is capable of executing the docker image builder scripts inside docker
+ images running on Nomad instances via jenkins jobs.
+
+2. For each OS package manager, there is a bash function which generates the
+ Dockerfile for the specified OS which uses said package manager. For example,
+ lib_apt.sh contains 'generate_apt_dockerfile()', which is executed for Ubuntu
+ and Debian OSes. lib_yum.sh and lib_dnf.sh contain similar functions for yum
+ (centos-7) and dnf (centos-8).
+
+3. The Dockerfiles contain the following sections:
+ - a. Environment setup and copying of project workspace git repos
+ - b. Installation of OS package pre-requisites
+ - c. Docker install and project requirements installation (more on this below)
+ - d. Working environment setup
+ - e. Build cleanup
+
+4. The Project installation section (c.) above is where all of the packages
+ for each of the supported project branches are installed or cached to
+ save time and bandwidth when the CI jobs are run. Each project script
+ defines the branches supported for each OS and iterates over them from
+ oldest to newest using the dependency and requirements files or build
+ targets in each supported project branch.
+
+5. `docker build` is run on the generated Dockerfile.
+
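+A minimal sketch of the flow in build_executor_docker_image.sh (simplified from
+the full script; the generate_*_dockerfile functions and the DOCKER_BUILD_DIR
+variable come from the lib_*.sh libraries described below):
+
+```bash
+# For each requested OS, pick the matching Dockerfile generator and build.
+for executor_os_name in $os_names ; do
+    case "$executor_os_name" in
+        ubuntu*|debian*) generate_apt_dockerfile $executor_os_name \
+                             $docker_from_image $executor_docker_image ;;
+        centos-7)        generate_yum_dockerfile $executor_os_name \
+                             $docker_from_image $executor_docker_image ;;
+        centos-8)        generate_dnf_dockerfile $executor_os_name \
+                             $docker_from_image $executor_docker_image ;;
+    esac
+    # 'docker build' is then run on the generated Dockerfile.
+    docker build -t $executor_docker_image $DOCKER_BUILD_DIR
+done
+```
+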
+## Bash Libraries (lib_*.sh)
+
+The bash libraries are designed to be sourced both inside of the docker build
+environment (e.g. from a script invoked in a Dockerfile RUN statement) and
+in a normal Linux shell. These scripts create environment variables and
+bash functions for use by the operational scripts.
+
+- `lib_apt.sh`: Dockerfile generation functions for apt package manager.
+
+- `lib_common.sh`: Common utility functions and environment variables
+
+- `lib_csit.sh`: CSIT specific functions and environment variables
+
+- `lib_dnf.sh`: Dockerfile generation functions for dnf package manager.
+
+- `lib_vpp.sh`: VPP specific functions and environment variables
+
+- `lib_yum.sh`: Dockerfile generation functions for yum package manager.
+
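+For example, a library can be sourced from a normal shell as follows (a minimal
+sketch; the path assumes the current directory is the top of a ci-management
+checkout):
+
+```bash
+# Source the common library to get its environment variables and functions
+# (e.g. must_be_run_as_root, echo_log) into the current shell.
+export CIMAN_DOCKER_SCRIPTS="$(pwd)/docker/scripts"
+. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"
+```
+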
+## Bash Scripts
+
+There are two types of bash scripts: those intended to be run solely inside
+the docker build execution environment, and those that can be run either inside
+or outside of it.
+
+### Docker Build (dbld_*.sh) Scripts
+
+These scripts, which run inside the 'docker build' environment, are either
+per-project scripts that install OS and python packages or scripts that install
+other docker image runtime requirements.
+
+Python packages are not retained because they are typically installed in virtual
+environments. However, installing the python packages in the Docker Build scripts
+populates the pip/http caches. Therefore packages are installed from the cache
+files during CI job execution instead of being downloaded from the Internet.
+
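+A minimal sketch of that pattern (the venv path and requirements file here are
+hypothetical; the point is that the downloads land in pip's cache even though
+the virtual environment itself is discarded):
+
+```bash
+# Install into a throw-away virtual environment to warm the pip/http caches.
+python3 -m venv /tmp/warm-cache-venv
+/tmp/warm-cache-venv/bin/pip install -r requirements.txt  # downloads are cached in ~/.cache/pip
+rm -rf /tmp/warm-cache-venv                               # venv discarded, cache retained
+```
+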
+- `dbld_csit_find_ansible_packages.py`: Script to find OS packages installed by
+CSIT using ansible.
+
+- `dbld_csit_install_packages.sh`: Install OS and python packages for CSIT
+branches
+
+- `dbld_dump_build_logs.sh`: Find warnings/errors in the build logs and dump
+the build_executor_docker_image.sh execution log.
+
+- `dbld_install_docker.sh`: Install docker ce
+
+- `dbld_lfit_requirements.sh`: Install requirements for LFIT global-jjb
+macros / scripts
+
+- `dbld_vpp_install_packages.sh`: Install OS and python packages for VPP
+branches
+
+### Executor Docker Image Management Bash Scripts
+
+These scripts are used to build executor docker images, inspect the results, and
+manage the docker image tags in the Docker Hub fdiotools repositories.
+
+- `build_executor_docker_image.sh`: Build script to create one or more executor
+docker images.
+
+- `update_dockerhub_prod_tags.sh`: Inspect/promote/revert production docker tag
+in the Docker Hub fdiotools repositories.
+
+## Running The Scripts
+
+### Bootstrapping The Builder Images
+
+The following commands are useful to build the initial builder images:
+
+`cd <ci-management repository directory>`
+
+`sudo ./docker/scripts/build_executor_docker_image.sh ubuntu-18.04 2>&1 | tee u1804-$(uname -m).log | grep -ve '^+'`
+
+`sudo ./docker/scripts/build_executor_docker_image.sh centos-7 2>&1 | tee centos7-$(uname -m).log | grep -ve '^+'`
+
+`sudo ./docker/scripts/build_executor_docker_image.sh -apr sandbox 2>&1 | tee all-sandbox-$(uname -m).log | grep -ve '^+'`
+
+### Building in a Builder Image
+
+By running the docker image with the docker socket mounted in the container,
+the docker build environment runs on the host's docker daemon. This
+avoids the pitfalls encountered with Docker-In-Docker environments:
+`sudo docker run -it -v /var/run/docker.sock:/var/run/docker.sock <docker-image>`
+
+The environment in the docker shell contains all of the necessary
+environment variable definitions, so the docker scripts can be run
+directly on the command line. Here is an example command that would be used in
+a CI job which automates the generation and testing of a new ubuntu-18.04 docker
+image and pushes it to Docker Hub as fdiotools/builder-ubuntu1804:test-<arch>:
+
+`build_executor_docker_image.sh -pr test ubuntu-18.04`
+
+In the future, a fully automated CI/CD pipeline may be created for production
+docker images.
+
+# Docker Image Script Workflow
+
+This section describes the current workflow used for managing the CI/CD pipeline
+for the Docker Images used by the FD.io CI Jobs.
+
+Note: all operations that push images or image tags to Docker Hub require an
+account with management privileges on the fdiotools repositories.
+
+## Update Production Docker Images
+
+Note: Presently only the 'builder' class executor docker images are supported.
+The others will be supported in the near future.
+
+### Build Docker Images and Push to Docker Hub with Sandbox CI Tag
+
+For each hardware architecture, the build_executor_docker_image.sh script is
+used to build all variants of each executor class:
+
+1. `git clone https://gerrit.fd.io/r/ci-management && cd ci-management`
+
+2. `sudo ./docker/scripts/build_executor_docker_image.sh -p -r sandbox -a | tee builder-all-sandbox-$(uname -m).log | grep -ve '^+'`
+
+3. Inspect the build log for errors and other build anomalies.
+
+This step will take a very long time, so it is best to run it overnight. There
+is currently no option to automatically run builds in parallel, so if optimizing
+build times is important, run the jobs in separate shells, one per OS (see the
+sketch below). The aarch64 builds are particularly slow and thus may benefit
+from being run on separate hosts in parallel.
+
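+One way to run the per-OS builds in parallel from a single bash shell (a sketch
+only; the OS list and log file names are illustrative):
+
+```bash
+# Launch one sandbox build per OS in the background, each with its own log.
+for os in ubuntu-18.04 ubuntu-20.04 centos-7 centos-8 ; do
+    sudo ./docker/scripts/build_executor_docker_image.sh -p -r sandbox "$os" \
+        > "builder-$os-sandbox-$(uname -m).log" 2>&1 &
+done
+wait  # block until all background builds have completed
+```
+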
+Note: the 'prod' role is disallowed in the build script to prevent accidental
+deployment of untested docker images to production.
+
+### Test Docker Images in the Jenkins Sandbox
+
+In the future, this step will be automated using the role 'test' and associated
+tags, but for now testing is a manual operation.
+
+1. `git clone https://gerrit.fd.io/r/vpp ../vpp && source ../vpp/extras/bash/functions.sh`
+
+2. Edit jjb/vpp/vpp.yaml (or other project yaml file) and replace '-prod-' with '-sandbox-' for all of the docker images
+
+3. `jjb-sandbox-env` # This bash function currently lives in ../vpp/extras/bash/functions.sh
+ - TODO: move it to ci-management repo.
+
+4. For each job using one of the docker images:
+
+ a. `jjsb-update <job name(s)>` # bash function created by jjb-sandbox-env to
+ push the job(s) to the sandbox
+
+ b. manually run the job in https://jenkins.fd.io/sandbox
+
+ c. Inspect the console output of each job for unnecessary downloads & errors.
+
+### Promote Docker Images to Production
+
+Once all of the docker images have been tested, promote each one to production:
+
+`sudo ./docker/scripts/update_dockerhub_prod_tags.sh promote <image name>`
+
+Note: this script currently requires human acceptance via the terminal to ensure
+correctness.
+It pulls all tags from the Docker Hub repos, performs an Inspect action (displaying
+the current state of the 'prod' & 'prod-prev' tags) and a local Promotion action
+(i.e. tags the local images with 'prod-<arch>' and 'prod-prev-<arch>'), with a
+required confirmation to continue the promotion by pushing the tags to Docker Hub.
+If 'no' is specified, it restores the previous local tags so they match the state
+of Docker Hub and does a new Inspect action for verification. If 'yes' is
+specified, it prints out the command to use to restore the existing state of the
+production tags on Docker Hub in case the script is terminated prior to
+completion. If necessary, the restore command can be repeated multiple times
+until it completes successfully, since it promotes the 'prod-prev-<arch>' image
+and then the 'prod-<arch>' image in succession.
+
+## Other Docker Hub Operations
+
+### Inspect Production Docker Image Tags
+
+Inspect the current production docker image tags:
+
+`sudo ./docker/scripts/update_dockerhub_prod_tags.sh inspect fdiotools/<class>-<os name>:prod-$(uname -m)`
+
+### Revert Production Docker Image To Previous Docker Image
+
+Revert the current production docker image to the previous production image:
+
+`sudo ./docker/scripts/update_dockerhub_prod_tags.sh revert fdiotools/<class>-<os name>:prod-$(uname -m)`
+
+### Restoring Previous Production Image State
+
+Assuming that the images still exist in the Docker Hub repository, any previous
+state of the production image tags can be restored by executing the 'restore
+command' as output by the update_dockerhub_prod_tags.sh script. This script
+writes a copy of all of the terminal output to a log file in
+/tmp/update_dockerhub_prod_tags.sh.<date>.log, thus providing a history of the
+restore commands. When the building of executor docker images is performed by a
+CI job, the logging can be removed since the job execution will be captured in
+the Jenkins console output log.
+
+### Docker Image Garbage Collection
+
+Presently, cleaning up the Docker Hub repositories of old images/tags is a
+manual process using the Docker Hub WebUI. In the future, a garbage collection
+script will be written to automate the process.
+
+# Docker Hub Repository & Docker Image Tag Nomenclature
+
+## Docker Hub Repositories
+
+- fdiotools/builder-centos7
+- fdiotools/builder-centos8
+- fdiotools/builder-debian9
+- fdiotools/builder-debian10
+- fdiotools/builder-ubuntu1804
+- fdiotools/builder-ubuntu2004
+- fdiotools/csit-ubuntu1804
+- fdiotools/csit_dut-ubuntu1804
+- fdiotools/csit_shim-ubuntu1804
+
+## Docker Image Tags
+
+- prod-x86_64: Tag used to select the x86_64 production image by the associated
+Jenkins-Nomad Label.
+- prod-prev-x86_64: Tag of the previous x86_64 production image used to revert
+a production image to the previous image used in production.
+- prod-aarch64: Tag used to select the aarch64 production image by the
+associated Jenkins-Nomad Label.
+- prod-prev-aarch64: Tag of the previous aarch64 production image used to revert
+a production image to the previous image used in production.
+- sandbox-x86_64: Tag used to select the x86_64 sandbox image by the associated
+Jenkins-Nomad Label.
+- sandbox-aarch64: Tag used to select the aarch64 sandbox image by the
+associated Jenkins-Nomad Label.
+- test-x86_64: Tag used to select the x86_64 test image by the associated
+Jenkins-Nomad Label.
+- test-aarch64: Tag used to select the aarch64 test image by the
+associated Jenkins-Nomad Label.
+
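+Putting the repository and tag nomenclature together, a specific image is
+referenced as fdiotools/<class>-<os>:<role>-<arch>, for example:
+
+`docker pull fdiotools/builder-ubuntu1804:prod-x86_64`
+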
+# Jenkins-Nomad Label Definitions
+
+`<class>-<os>-<role>-<arch>` (e.g. builder-ubuntu1804-prod-x86_64)
+
+- class
+  - builder
+  - csit
+  - csit_dut
+  - csit_shim
+
+- os
+  - ubuntu1804
+  - centos7
+  - ubuntu2004
+  - centos8
+  - debian9
+  - debian10
+
+- role
+  - prod
+  - test
+  - sandbox
+
+- arch
+  - x86_64
+  - aarch64
+
+## Jenkins Nomad Plugin Node Labels
+
+### Common Attributes of All Jenkins Nomad Plugin Nodes
+- Disk: 3000
+- Priority: 50
+- Idle Termination Time: 10
+- Executors: 1
+- Usage: Only build jobs with label expressions matching this node
+- Workspace root: /w
+- Privileged: Y
+- Network: bridge
+- Force-pull: Y
+
+### Production (prod) Jenkins Nomad Plugin Nodes
+
+#### Node 'builder-ubuntu1804-prod-x86_64'
+- Labels: builder-ubuntu1804-prod-x86_64
+- Job Prefix: builder-ubuntu1804-prod-x86_64
+- Image: fdiotools/builder-ubuntu1804:prod-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-ubuntu1804-prod-aarch64'
+- Labels: builder-ubuntu1804-prod-aarch64
+- Job Prefix: builder-ubuntu1804-prod-aarch64
+- Image: fdiotools/builder-ubuntu1804:prod-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-centos7-prod-x86_64'
+- Labels: builder-centos7-prod-x86_64
+- Job Prefix: builder-centos7-prod-x86_64
+- Image: fdiotools/builder-centos7:prod-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-centos7-prod-aarch64'
+- Labels: builder-centos7-prod-aarch64
+- Job Prefix: builder-centos7-prod-aarch64
+- Image: fdiotools/builder-centos7:prod-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-ubuntu2004-prod-x86_64'
+- Labels: builder-ubuntu2004-prod-x86_64
+- Job Prefix: builder-ubuntu2004-prod-x86_64
+- Image: fdiotools/builder-ubuntu2004:prod-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-ubuntu2004-prod-aarch64'
+- Labels: builder-ubuntu2004-prod-aarch64
+- Job Prefix: builder-ubuntu2004-prod-aarch64
+- Image: fdiotools/builder-ubuntu2004:prod-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-centos8-prod-x86_64'
+- Labels: builder-centos8-prod-x86_64
+- Job Prefix: builder-centos8-prod-x86_64
+- Image: fdiotools/builder-centos8:prod-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-centos8-prod-aarch64'
+- Labels: builder-centos8-prod-aarch64
+- Job Prefix: builder-centos8-prod-aarch64
+- Image: fdiotools/builder-centos8:prod-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-debian9-prod-x86_64'
+- Labels: builder-debian9-prod-x86_64
+- Job Prefix: builder-debian9-prod-x86_64
+- Image: fdiotools/builder-debian9:prod-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-debian9-prod-aarch64'
+- Labels: builder-debian9-prod-aarch64
+- Job Prefix: builder-debian9-prod-aarch64
+- Image: fdiotools/builder-debian9:prod-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'csit_dut-ubuntu1804-prod-x86_64'
+- Labels: csit_dut-ubuntu1804-prod-x86_64
+- Job Prefix: csit_dut-ubuntu1804-prod-x86_64
+- Image: fdiotools/csit_dut-ubuntu1804:prod-x86_64
+- CPU: 10000
+- Memory: 18000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: csit
+
+#### Node 'csit_dut-ubuntu1804-prod-aarch64'
+- Labels: csit_dut-ubuntu1804-prod-aarch64
+- Job Prefix: csit_dut-ubuntu1804-prod-aarch64
+- Image: fdiotools/csit_dut-ubuntu1804:prod-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: csitarm
+
+#### Node 'csit_shim-ubuntu1804-prod-x86_64'
+- Labels: csit_shim-ubuntu1804-prod-x86_64
+- Job Prefix: csit_shim-ubuntu1804-prod-x86_64
+- Image: fdiotools/csit_shim-ubuntu1804:prod-x86_64
+- CPU: 10000
+- Memory: 18000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: csit
+
+#### Node 'csit_shim-ubuntu1804-prod-aarch64'
+- Labels: csit_shim-ubuntu1804-prod-aarch64
+- Job Prefix: csit_shim-ubuntu1804-prod-aarch64
+- Image: fdiotools/csit_shim-ubuntu1804:prod-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: csitarm
+
+### Sandbox (sandbox) Jenkins Nomad Plugin Nodes
+
+#### Node 'builder-ubuntu1804-sandbox-x86_64'
+- Labels: builder-ubuntu1804-sandbox-x86_64
+- Job Prefix: builder-ubuntu1804-sandbox-x86_64
+- Image: fdiotools/builder-ubuntu1804:sandbox-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-ubuntu1804-sandbox-aarch64'
+- Labels: builder-ubuntu1804-sandbox-aarch64
+- Job Prefix: builder-ubuntu1804-sandbox-aarch64
+- Image: fdiotools/builder-ubuntu1804:sandbox-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-centos7-sandbox-x86_64'
+- Labels: builder-centos7-sandbox-x86_64
+- Job Prefix: builder-centos7-sandbox-x86_64
+- Image: fdiotools/builder-centos7:sandbox-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-centos7-sandbox-aarch64'
+- Labels: builder-centos7-sandbox-aarch64
+- Job Prefix: builder-centos7-sandbox-aarch64
+- Image: fdiotools/builder-centos7:sandbox-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-ubuntu2004-sandbox-x86_64'
+- Labels: builder-ubuntu2004-sandbox-x86_64
+- Job Prefix: builder-ubuntu2004-sandbox-x86_64
+- Image: fdiotools/builder-ubuntu2004:sandbox-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-ubuntu2004-sandbox-aarch64'
+- Labels: builder-ubuntu2004-sandbox-aarch64
+- Job Prefix: builder-ubuntu2004-sandbox-aarch64
+- Image: fdiotools/builder-ubuntu2004:sandbox-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-centos8-sandbox-x86_64'
+- Labels: builder-centos8-sandbox-x86_64
+- Job Prefix: builder-centos8-sandbox-x86_64
+- Image: fdiotools/builder-centos8:sandbox-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-centos8-sandbox-aarch64'
+- Labels: builder-centos8-sandbox-aarch64
+- Job Prefix: builder-centos8-sandbox-aarch64
+- Image: fdiotools/builder-centos8:sandbox-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-debian9-sandbox-x86_64'
+- Labels: builder-debian9-sandbox-x86_64
+- Job Prefix: builder-debian9-sandbox-x86_64
+- Image: fdiotools/builder-debian9:sandbox-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-debian9-sandbox-aarch64'
+- Labels: builder-debian9-sandbox-aarch64
+- Job Prefix: builder-debian9-sandbox-aarch64
+- Image: fdiotools/builder-debian9:sandbox-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'csit_dut-ubuntu1804-sandbox-x86_64'
+- Labels: csit_dut-ubuntu1804-sandbox-x86_64
+- Job Prefix: csit_dut-ubuntu1804-sandbox-x86_64
+- Image: fdiotools/csit_dut-ubuntu1804:sandbox-x86_64
+- CPU: 10000
+- Memory: 18000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: csit
+
+#### Node 'csit_dut-ubuntu1804-sandbox-aarch64'
+- Labels: csit_dut-ubuntu1804-sandbox-aarch64
+- Job Prefix: csit_dut-ubuntu1804-sandbox-aarch64
+- Image: fdiotools/csit_dut-ubuntu1804:sandbox-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: csitarm
+
+#### Node 'csit_shim-ubuntu1804-sandbox-x86_64'
+- Labels: csit_shim-ubuntu1804-sandbox-x86_64
+- Job Prefix: csit_shim-ubuntu1804-sandbox-x86_64
+- Image: fdiotools/csit_shim-ubuntu1804:sandbox-x86_64
+- CPU: 10000
+- Memory: 18000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: csit
+
+#### Node 'csit_shim-ubuntu1804-sandbox-aarch64'
+- Labels: csit_shim-ubuntu1804-sandbox-aarch64
+- Job Prefix: csit_shim-ubuntu1804-sandbox-aarch64
+- Image: fdiotools/csit_shim-ubuntu1804:sandbox-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: csitarm
+
+### Automated Testing (test) Jenkins Nomad Plugin Nodes
+
+#### Node 'builder-ubuntu1804-test-x86_64'
+- Labels: builder-ubuntu1804-test-x86_64
+- Job Prefix: builder-ubuntu1804-test-x86_64
+- Image: fdiotools/builder-ubuntu1804:test-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-ubuntu1804-test-aarch64'
+- Labels: builder-ubuntu1804-test-aarch64
+- Job Prefix: builder-ubuntu1804-test-aarch64
+- Image: fdiotools/builder-ubuntu1804:test-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-centos7-test-x86_64'
+- Labels: builder-centos7-test-x86_64
+- Job Prefix: builder-centos7-test-x86_64
+- Image: fdiotools/builder-centos7:test-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-centos7-test-aarch64'
+- Labels: builder-centos7-test-aarch64
+- Job Prefix: builder-centos7-test-aarch64
+- Image: fdiotools/builder-centos7:test-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-ubuntu2004-test-x86_64'
+- Labels: builder-ubuntu2004-test-x86_64
+- Job Prefix: builder-ubuntu2004-test-x86_64
+- Image: fdiotools/builder-ubuntu2004:test-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-ubuntu2004-test-aarch64'
+- Labels: builder-ubuntu2004-test-aarch64
+- Job Prefix: builder-ubuntu2004-test-aarch64
+- Image: fdiotools/builder-ubuntu2004:test-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-centos8-test-x86_64'
+- Labels: builder-centos8-test-x86_64
+- Job Prefix: builder-centos8-test-x86_64
+- Image: fdiotools/builder-centos8:test-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-centos8-test-aarch64'
+- Labels: builder-centos8-test-aarch64
+- Job Prefix: builder-centos8-test-aarch64
+- Image: fdiotools/builder-centos8:test-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'builder-debian9-test-x86_64'
+- Labels: builder-debian9-test-x86_64
+- Job Prefix: builder-debian9-test-x86_64
+- Image: fdiotools/builder-debian9:test-x86_64
+- CPU: 14000
+- Memory: 14000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: builder
+
+#### Node 'builder-debian9-test-aarch64'
+- Labels: builder-debian9-test-aarch64
+- Job Prefix: builder-debian9-test-aarch64
+- Image: fdiotools/builder-debian9:test-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: builder
+
+#### Node 'csit_dut-ubuntu1804-test-x86_64'
+- Labels: csit_dut-ubuntu1804-test-x86_64
+- Job Prefix: csit_dut-ubuntu1804-test-x86_64
+- Image: fdiotools/csit_dut-ubuntu1804:test-x86_64
+- CPU: 10000
+- Memory: 18000
+- ${attr.cpu.arch}: amd64
+- ${node.class}: csit
+
+#### Node 'csit_dut-ubuntu1804-test-aarch64'
+- Labels: csit_dut-ubuntu1804-test-aarch64
+- Job Prefix: csit_dut-ubuntu1804-test-aarch64
+- Image: fdiotools/csit_dut-ubuntu1804:test-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: csitarm
+
+#### Node 'csit_shim-ubuntu1804-test-aarch64'
+- Labels: csit_shim-ubuntu1804-test-aarch64
+- Job Prefix: csit_shim-ubuntu1804-test-aarch64
+- Image: fdiotools/csit_shim-ubuntu1804:test-aarch64
+- CPU: 6000
+- Memory: 10000
+- ${attr.cpu.arch}: arm64
+- ${node.class}: csitarm
diff --git a/docker/scripts/build_executor_docker_image.sh b/docker/scripts/build_executor_docker_image.sh
new file mode 100755
index 00000000..d0af78be
--- /dev/null
+++ b/docker/scripts/build_executor_docker_image.sh
@@ -0,0 +1,145 @@
+#! /bin/bash
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euxo pipefail
+
+# Log all output to stdout & stderr to a log file
+logname="/tmp/$(basename $0).$(date +%Y_%m_%d_%H%M%S).log"
+echo -e "\n*** Logging output to $logname ***\n\n"
+exec > >(tee -a $logname) 2>&1
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_vpp.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_csit.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_apt.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_yum.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_dnf.sh
+
+all_os_names=""
+ci_tag=""
+ci_image=""
+os_names=""
+push_to_docker_hub=""
+
+usage() {
+ set +x
+ echo
+ echo "Usage: $0 [-c <class>] [-p] [-r <role>] -a | <os name> [... <os name>]"
+ echo " -a Run all OS's supported on class $EXECUTOR_CLASS & arch $OS_ARCH"
+ echo " -c <class> Default is '$EXECUTOR_DEFAULT_CLASS'"
+ executor_list_classes
+ echo " -p Push docker images to Docker Hub"
+ echo " -r <role> Add a role based tag (e.g. sandbox-x86_64):"
+ executor_list_roles
+ executor_list_os_names
+ exit 1
+}
+
+must_be_run_as_root
+while getopts ":ahpc:r:" opt; do
+ case "$opt" in
+ a) all_os_names=1 ;;
+ c) if executor_verify_class "$OPTARG" ; then
+ EXECUTOR_CLASS="$OPTARG"
+ EXECUTOR_CLASS_ARCH="$EXECUTOR_CLASS-$OS_ARCH"
+ else
+ echo "ERROR: Invalid executor class '$OPTARG'!"
+ usage
+ fi ;;
+ h) usage ;;
+ p) push_to_docker_hub=1 ;;
+ r) if executor_verify_role "$OPTARG" ; then
+ ci_tag="${OPTARG}-$OS_ARCH"
+ else
+ echo "ERROR: Invalid executor role: '$OPTARG'!"
+ usage
+ fi ;;
+ \?)
+ echo "ERROR: Invalid option: -$OPTARG" >&2
+ usage ;;
+ :)
+ echo "ERROR: Option -$OPTARG requires an argument." >&2
+ usage ;;
+ esac
+done
+shift $(( $OPTIND-1 ))
+
+if [ -n "$all_os_names" ] ; then
+ os_names="${EXECUTOR_CLASS_ARCH_OS_NAMES[$EXECUTOR_CLASS_ARCH]}"
+else
+ os_names="$@"
+fi
+
+# Validate arguments
+if [ -z "$os_names" ] ; then
+ echo "ERROR: Missing executor OS name(s) for class '$EXECUTOR_CLASS'!"
+ usage
+fi
+
+# Build the specified docker images
+docker_build_setup_ciman
+docker_build_setup_vpp
+docker_build_setup_csit
+for executor_os_name in $os_names ; do
+ docker_from_image="$(echo $executor_os_name | sed -e 's/-/:/')"
+ # Remove '-' and '.' from executor_os_name in Docker Hub repo name
+ os_name="${executor_os_name//-}"
+ repository="fdiotools/${EXECUTOR_CLASS}-${os_name//.}"
+ executor_docker_image="$repository:$DOCKER_TAG"
+
+ if ! executor_verify_os_name "$executor_os_name" ; then
+ set_opts=$-
+ grep -q x <<< $set_opts && set +x # disable command tracing
+ echo "WARNING: Invalid executor OS name for class '$EXECUTOR_CLASS': $executor_os_name!"
+ executor_list_os_names
+ echo
+ grep -q x <<< $set_opts && set -x # re-enable command tracing
+ continue
+ fi
+ case "$executor_os_name" in
+ ubuntu*)
+ generate_apt_dockerfile $executor_os_name $docker_from_image \
+ $executor_docker_image ;;
+ debian*)
+ generate_apt_dockerfile $executor_os_name $docker_from_image \
+ $executor_docker_image ;;
+ centos-7)
+ generate_yum_dockerfile $executor_os_name $docker_from_image \
+ $executor_docker_image ;;
+ centos-8)
+ generate_dnf_dockerfile $executor_os_name $docker_from_image \
+ $executor_docker_image ;;
+ *)
+ echo "ERROR: Don't know how to generate dockerfile for $executor_os_name!"
+ usage ;;
+ esac
+
+ docker build -t $executor_docker_image $DOCKER_BUILD_DIR
+ rm -f $DOCKERFILE
+ if [ -n "$ci_tag" ] ; then
+ ci_image="$repository:$ci_tag"
+ echo -e "\nAdding docker tag $ci_image to $executor_docker_image"
+ docker tag $executor_docker_image $ci_image
+ fi
+ if [ -n "$push_to_docker_hub" ] ; then
+ echo -e "\nPushing $executor_docker_image to Docker Hub..."
+ docker login
+ docker push $executor_docker_image
+ if [ -n "$ci_image" ] ; then
+ echo -e "\nPushing $ci_image to Docker Hub..."
+ docker push $ci_image
+ fi
+ fi
+done
diff --git a/docker/scripts/dbld_csit_find_ansible_packages.py b/docker/scripts/dbld_csit_find_ansible_packages.py
new file mode 100755
index 00000000..2e6c6cfc
--- /dev/null
+++ b/docker/scripts/dbld_csit_find_ansible_packages.py
@@ -0,0 +1,88 @@
+#! /usr/bin/env python3
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import pprint
+import sys
+from typing import List
+import yaml
+import logging
+
+logging.basicConfig(format='%(message)s')
+log = logging.getLogger(__name__)
+
+def print_yaml_struct(yaml_struct, depth=0):
+ indent = " " * depth
+ for k,v in sorted(yaml_struct.items(), key=lambda x: x[0]):
+ if isinstance(v, dict):
+ log.warning(f"{indent}{k}")
+ print_yaml_struct(v, depth+1)
+ else:
+ log.warning(f"{indent}{k} {v}")
+
+class CsitAnsibleYamlStruct:
+ def __init__(self, **entries):
+ self.__dict__.update(entries)
+
+def packages_in_csit_ansible_yaml_file(yamlfile: str, distro, arch) -> list:
+ with open(yamlfile) as yf:
+ csit_ansible_yaml = yaml.safe_load(yf)
+ cays = CsitAnsibleYamlStruct(**csit_ansible_yaml)
+ packages = [pkg for pkg in cays.packages_base if type(pkg) is str]
+ if arch in [*cays.packages_by_arch]:
+ packages += [pkg for pkg in cays.packages_by_arch[arch]
+ if type(pkg) is str]
+ if distro in [*cays.packages_by_distro]:
+ packages += [pkg for pkg in cays.packages_by_distro[distro]
+ if type(pkg) is str]
+ return packages
+
+def is_csit_ansible_yaml_file(filename: str):
+ (root,ext) = os.path.splitext(filename)
+ if ext == '.yaml' \
+ and filename.find('csit/') >= 0 \
+ and filename.find('/ansible/') > 0 \
+ and os.path.isfile(filename):
+ return True
+ else:
+ return False
+
+def main(args: List[str]) -> None:
+ if len(args) < 1:
+ log.warning('Must have at least 1 file name')
+ return
+ pkg_list = []
+ distro = 'ubuntu'
+ arch = 'x86_64'
+
+ for arg in args:
+ if arg.lower() == '--ubuntu':
+ distro = 'ubuntu'
+ elif arg.lower() == '--centos':
+ distro = 'centos'
+ elif arg.lower() == '--x86_64':
+ arch = 'x86_64'
+ elif arg.lower() == '--aarch64':
+ arch = 'aarch64'
+ elif is_csit_ansible_yaml_file(arg):
+ pkg_list += packages_in_csit_ansible_yaml_file(arg, distro, arch)
+ else:
+ log.warning(f'Invalid CSIT Ansible YAML file: {arg}')
+ pkg_list = list(set(pkg_list))
+ pkg_list.sort()
+ print(" ".join(pkg_list))
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/docker/scripts/dbld_csit_install_packages.sh b/docker/scripts/dbld_csit_install_packages.sh
new file mode 100755
index 00000000..d179b786
--- /dev/null
+++ b/docker/scripts/dbld_csit_install_packages.sh
@@ -0,0 +1,51 @@
+#! /bin/bash
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euxo pipefail
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_csit.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_vpp.sh
+
+must_be_run_as_root
+must_be_run_in_docker_build
+
+case "$OS_NAME" in
+ ubuntu-18.04)
+ supported_os="true" ;;
+ *)
+ supported_os="" ;;
+esac
+if [ -z "$supported_os" ] ; then
+ echo "CSIT is not supported on $OS_NAME. Skipping CSIT package install..."
+ exit 0
+fi
+
+echo_log
+echo_log "Starting $(basename $0)"
+
+do_git_config csit
+for vpp_branch in ${VPP_BRANCHES[$OS_NAME]} ; do
+ # Returns checked out branch in csit_branch
+ csit_checkout_branch_for_vpp $vpp_branch
+
+ # Install csit OS packages
+ csit_install_packages $csit_branch
+
+ # Install/cache python packages
+ csit_pip_cache $csit_branch
+done
+
+echo_log -e "Completed $(basename $0)!\n\n=========="
diff --git a/docker/scripts/dbld_dump_build_logs.sh b/docker/scripts/dbld_dump_build_logs.sh
new file mode 100755
index 00000000..212e095f
--- /dev/null
+++ b/docker/scripts/dbld_dump_build_logs.sh
@@ -0,0 +1,56 @@
+#! /bin/bash
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euxo pipefail
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+export CIMAN_ROOT=${CIMAN_ROOT:-"$(dirname $(dirname $CIMAN_DOCKER_SCRIPTS))"}
+. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+
+must_be_run_as_root
+must_be_run_in_docker_build
+
+dump_build_logs() {
+ local set_opts=$-
+ grep -q e <<< $set_opts && set +e # disable exit on errors
+
+ # Find errors
+ local found="$(grep -nisH error $DOCKER_BUILD_LOG_DIR/*-bld.log)"
+ if [ -n "$found" ] ; then
+ echo -e "\nErrors found in build log files:\n$found\n"
+ else
+ echo -e "\nNo errors found in build logs\n"
+ fi
+
+ # Find warnings
+ found="$(grep -nisH warning $DOCKER_BUILD_LOG_DIR/*-bld.log)"
+ if [ -n "$found" ] ; then
+ echo -e "\nWarnings found in build log files:\n$found\n"
+ else
+ echo -e "\nNo warnings found in build logs\n"
+ fi
+
+ grep -q e <<< $set_opts && set -e # re-enable exit on errors
+}
+
+dump_cache_files() {
+ local cache_files_log="$DOCKER_BUILD_LOG_DIR/cached_files.json"
+ tree -a --timefmt "+%Y-%m-%d %H:%M:%S" --prune /root
+ tree -afJ --timefmt "+%Y-%m-%d %H:%M:%S" --prune -o $cache_files_log /root
+}
+
+dump_cache_files
+dump_build_logs
+dump_echo_log
diff --git a/docker/scripts/dbld_install_docker.sh b/docker/scripts/dbld_install_docker.sh
new file mode 100755
index 00000000..de102835
--- /dev/null
+++ b/docker/scripts/dbld_install_docker.sh
@@ -0,0 +1,46 @@
+#! /bin/bash
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euxo pipefail
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_apt.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_yum.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_dnf.sh
+
+must_be_run_as_root
+must_be_run_in_docker_build
+
+echo_log
+echo_log "Starting $(basename $0)"
+
+case "$DOCKERFILE_FROM" in
+ *ubuntu*)
+ write_apt_ubuntu_docker_gpg_keyfile
+ apt_install_docker_os_package_dependancies
+ apt_install_docker $DOCKER_APT_UBUNTU_DOCKER_GPGFILE ;;
+ *debian*)
+ write_apt_debian_docker_gpg_keyfile
+ apt_install_docker_os_package_dependancies
+ apt_install_docker $DOCKER_APT_DEBIAN_DOCKER_GPGFILE ;;
+ *centos:7)
+ yum_install_docker_os_package_dependancies
+ yum_install_docker ;;
+ *centos:8)
+ dnf_install_docker_os_package_dependancies
+ dnf_install_docker ;;
+esac
+
+echo_log -e "Completed $(basename $0)!\n\n=========="
diff --git a/docker/scripts/dbld_lfit_requirements.sh b/docker/scripts/dbld_lfit_requirements.sh
new file mode 100755
index 00000000..7e58ac87
--- /dev/null
+++ b/docker/scripts/dbld_lfit_requirements.sh
@@ -0,0 +1,70 @@
+#! /bin/bash
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euxo pipefail
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+
+must_be_run_as_root
+must_be_run_in_docker_build
+
+# Add jenkins user and make it equivalent to root
+groupadd jenkins || true
+useradd -m -s /bin/bash -g jenkins jenkins || true
+rm -rf /home/jenkins
+ln -s /root /home/jenkins
+
+# Add packagecloud files
+cat <<EOF > /root/.packagecloud
+{"url":"https://packagecloud.io","token":"\$token"}
+EOF
+cat <<EOF >/root/packagecloud_api
+machine packagecloud.io
+login \$pclogin
+password
+EOF
+
+# Check if docker group exists
+if grep -q docker /etc/group
+then
+ # Add jenkins user to docker group
+ usermod -a -G docker jenkins
+fi
+
+# Check if mock group exists
+if grep -q mock /etc/group
+then
+ # Add jenkins user to mock group so it can build RPMs
+ # using mock if available
+ usermod -a -G mock jenkins
+fi
+
+# Give jenkins account root privileges
+jenkins_uid=$(id -u jenkins)
+perl -i -p -e "s/$jenkins_uid\:/0\:/g" /etc/passwd
+
+# Copy lf-env.sh for LF Releng scripts
+cp $DOCKER_CIMAN_ROOT/global-jjb/jenkins-init-scripts/lf-env.sh /root
+chmod 644 /root/lf-env.sh
+
+# Install lftools[openstack] -- from global-jjb/shell/python-tools-install.sh
+pinned_version=""
+if [ "$OS_NAME" = "debian-9" ] ; then
+ # debian-9 does not have osc-lib==2.2.0 available breaking docker image
+ # build so pin the version of lftools which does not pin osc-lib==2.2.0
+ pinned_version="==0.34.1"
+fi
+python3 -m pip install --no-deps lftools[openstack]$pinned_version
diff --git a/docker/scripts/dbld_vpp_install_packages.sh b/docker/scripts/dbld_vpp_install_packages.sh
new file mode 100755
index 00000000..f18ef752
--- /dev/null
+++ b/docker/scripts/dbld_vpp_install_packages.sh
@@ -0,0 +1,81 @@
+#! /bin/bash
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euxo pipefail
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_vpp.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_apt.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_yum.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_dnf.sh
+
+must_be_run_as_root
+must_be_run_in_docker_build
+
+echo_log
+echo_log "Starting $(basename $0)"
+
+do_git_config vpp
+for branch in ${VPP_BRANCHES[$OS_NAME]} ; do
+ do_git_branch $branch
+
+ # Install OS packages
+ make_vpp "install-dep" $branch
+ make_vpp "centos-pyyaml" $branch # VPP Makefile tests for centos versions
+ if [ "$OS_ID" = "ubuntu" ] ; then
+ # TODO: fix VPP stable/2005 bug in sphinx-make.sh
+ # which fails on 'yum install python3-venv'
+ # that does not exist.
+ # 'Make docs' jobs are only run on ubuntu executors
+ # so only run for ubuntu build executors until fixed.
+ make_vpp "docs-venv" $branch
+ elif [ "$OS_NAME" = "debian-9" ] ; then
+ apt_override_cmake_install_with_pip3_version
+ fi
+
+ # Download, build, and cache external deps packages
+ make_vpp "install-ext-deps" $branch
+ set +e
+ vpp_ext_dir="$DOCKER_VPP_DIR/build/external"
+ [ -d "$vpp_ext_dir/downloads" ] \
+ && rsync -ac $vpp_ext_dir/downloads/. $DOCKER_DOWNLOADS_DIR
+ [ -n "$(ls $vpp_ext_dir/*.deb)" ] \
+ && rsync -ac $vpp_ext_dir/*.deb $DOCKER_DOWNLOADS_DIR
+ [ -n "$(ls $vpp_ext_dir/*.rpm)" ] \
+ && rsync -ac $vpp_ext_dir/*.rpm $DOCKER_DOWNLOADS_DIR
+ set -e
+
+ # Install/cache python packages
+ if [ "$OS_ID" = "ubuntu" ] ; then
+ make_vpp_test "test-dep" $branch
+ make_vpp_test "doc" $branch
+ make_vpp test-wipe $branch
+ make_vpp "bootstrap-doxygen" $branch
+ fi
+
+ # Dump packages installed
+ case "$DOCKERFILE_FROM" in
+ *ubuntu*)
+ dump_apt_package_list $branch ;;
+ *debian*)
+ dump_apt_package_list $branch ;;
+ *centos:7)
+ dump_yum_package_list $branch ;;
+ *centos:8)
+ dump_dnf_package_list $branch ;;
+ esac
+done
+
+echo_log -e "Completed $(basename $0)!\n\n=========="
diff --git a/docker/scripts/lib_apt.sh b/docker/scripts/lib_apt.sh
new file mode 100644
index 00000000..d2aa70db
--- /dev/null
+++ b/docker/scripts/lib_apt.sh
@@ -0,0 +1,340 @@
+# lib_apt.sh - Docker build script apt library.
+# For import only.
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Don't import more than once.
+if [ -n "$(alias lib_apt_imported 2> /dev/null)" ] ; then
+ return 0
+fi
+alias lib_apt_imported=true
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+
+dump_apt_package_list() {
+ branchname="$(echo $branch | sed -e 's,/,_,')"
+ dpkg -l > \
+ "$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME-$branchname-apt-packages.log"
+}
+
+apt_install_packages() {
+ apt-get install -y --allow-downgrades --allow-remove-essential \
+ --allow-change-held-packages $@
+}
+
+apt_install_docker_os_package_dependancies() {
+ # Assumes 'apt-get update -q' has already been run.
+ apt_install_packages \
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ gnupg-agent \
+ software-properties-common
+}
+
+# Used for older OS distros which are incompatible
+# with the modern distro cmake version
+apt_override_cmake_install_with_pip3_version() {
+ local os_cmake="/usr/bin/cmake"
+ local os_cmake_ver="$($os_cmake --version | head -1)"
+ local pip3_cmake="/usr/local/bin/cmake"
+
+ python3 -m pip install --disable-pip-version-check cmake || true
+ local pip3_cmake_ver="$($pip3_cmake --version | head -1)"
+ echo_log "Overriding $OS_NAME '$os_cmake_ver' with '$pip3_cmake_ver'!"
+ sudo apt-get remove -y cmake --autoremove || true
+ update-alternatives --quiet --remove-all cmake || true
+ update-alternatives --quiet --install $os_cmake cmake $pip3_cmake 100
+ echo_log "Default cmake ($(which cmake)) version: '$(cmake --version | head -1)'!"
+}
+
+apt_install_docker() {
+ local apt_docker_gpg_key_file=$1
+ apt-key add $apt_docker_gpg_key_file
+ add-apt-repository "deb [arch=$DEB_ARCH] \
+ https://download.docker.com/linux/$OS_ID \
+ $(lsb_release -cs) stable"
+ apt-get update -q
+ apt_install_packages -y -qq docker-ce docker-ce-cli containerd.io
+ rm -rf $DOCKER_GPG_KEY_DIR
+}
+
+generate_apt_dockerfile() {
+ local executor_os_name=$1
+ local from_image=$2
+ local executor_image=$3
+ local vpp_install_skip_sysctl_envvar="";
+
+ if grep -q debian-9 <<<$executor_os_name ; then
+ # Workaround to VPP package installation failure on debian-9
+ vpp_install_skip_sysctl_envvar="ENV VPP_INSTALL_SKIP_SYSCTL=1"
+ fi
+ cat <<EOF >$DOCKERIGNOREFILE
+**/__pycache__
+*.pyc
+EOF
+ cat <<EOF >$DOCKERFILE
+FROM $from_image AS executor-image
+LABEL Description="FD.io CI executor docker image for $executor_os_name/$OS_ARCH"
+LABEL Vendor="fd.io"
+LABEL Version="$DOCKER_TAG"
+
+# Create download dir to cache external tarballs
+WORKDIR $DOCKER_DOWNLOADS_DIR
+
+# Copy-in temporary build tree containing
+# ci-management, vpp, & csit git repos
+WORKDIR $DOCKER_BUILD_DIR
+COPY . .
+
+# Build Environment Variables
+ENV DEBIAN_FRONTEND=noninteractive
+ENV FDIOTOOLS_IMAGE=$executor_image
+ENV LC_ALL=C.UTF-8
+ENV CIMAN_ROOT="$DOCKER_CIMAN_ROOT"
+ENV PATH=$PATH:$DOCKER_CIMAN_ROOT/docker/scripts
+
+# Install baseline packages (minimum build & utils).
+#
+# ci-management global-jjb requirements:
+# facter
+# python3-pip
+# python3-venv
+# from global-jjb/packer/provision/baseline.sh:
+# unzip
+# xz-utils
+# puppet
+# git
+# git-review
+# libxml2-dev
+# libxml-xpath-perl
+# libxslt-dev
+# make
+# wget
+# jq
+#
+# Python build from source requirements:
+# build-essential
+#
+# TODO: Fix broken project requirement install targets
+#
+# graphviz for 'make bootstrap-doxygen' (VPP)
+# doxygen for 'make doxygen' (VPP)
+# enchant for 'make docs' (VPP)
+# libffi-dev for python cffi install (Ubuntu20.04/VPP/aarch64)
+# liblapack-dev for python numpy/scipy (CSIT/aarch64)
+# libopenblas-dev for python numpy/scipy (CSIT/aarch64)
+# libpcap-dev for python pypcap install (CSIT)
+#
+RUN apt-get update -q \\
+ && apt-get install -y -qq \\
+ apt-utils \\
+ default-jdk \\
+ default-jre \\
+ doxygen \\
+ enchant \\
+ emacs \\
+ facter \\
+ gfortran \\
+ git \\
+ git-review \\
+ graphviz \\
+ iproute2 \\
+ iputils-clockdiff \\
+ iputils-ping \\
+ iputils-tracepath \\
+ jq \\
+ libffi-dev \\
+ liblapack-dev \\
+ libopenblas-dev \\
+ libpcap-dev \\
+ libxml2-dev \\
+ libxml-xpath-perl \\
+ libxslt-dev \\
+ make \\
+ python3-pip \\
+ python3-venv \\
+ rsync \\
+ ruby-dev \\
+ sudo \\
+ traceroute \\
+ tree \\
+ vim \\
+ wget \\
+ xz-utils \\
+ && rm -r /var/lib/apt/lists/*
+
+# Install packages for all project branches
+#
+RUN apt-get update -q \\
+ && dbld_install_docker.sh \\
+ && dbld_vpp_install_packages.sh \\
+ && dbld_csit_install_packages.sh \\
+ && dbld_lfit_requirements.sh \\
+ && rm -r /var/lib/apt/lists/*
+
+# CI Runtime Environment
+WORKDIR /
+$vpp_install_skip_sysctl_envvar
+ENV VPP_ZOMBIE_NOCHECK=1
+ENV DOCKER_TEST=1
+# TODO: Mount ccache volume into docker container, then remove this.
+ENV CCACHE_DISABLE=1
+RUN gem install rake package_cloud \\
+ && curl -s https://packagecloud.io/install/repositories/fdio/master/script.deb.sh | sudo bash
+
+# Clean up copy-in build tree
+RUN dbld_dump_build_logs.sh \\
+ && rm -rf /tmp/*
+EOF
+}
+
+write_apt_ubuntu_docker_gpg_keyfile() {
+ # To update docker gpg key
+ # curl -fsSL https://download.docker.com/linux/ubuntu/gpg
+ cat <<EOF >$DOCKER_APT_UBUNTU_DOCKER_GPGFILE
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
+lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
+38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
+L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
+UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
+cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
+ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
+vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
+G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
+XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
+q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
+tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
+BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
+v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
+tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
+jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
+6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
+XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
+FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
+g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
+ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
+9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
+G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
+FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
+EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
+M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
+Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
+w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
+z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
+eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
+VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
+1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
+zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
+pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
+ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
+BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
+1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
+YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
+mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
+KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
+JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
+cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
+6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
+U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
+VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
+irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
+SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
+QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
+9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
+24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
+dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
+Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
+H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
+/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
+M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
+xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
+jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
+YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
+=0YYh
+-----END PGP PUBLIC KEY BLOCK-----
+EOF
+}
+
+write_apt_debian_docker_gpg_keyfile() {
+ # To update docker gpg key
+ # curl -fsSL https://download.docker.com/linux/debian/gpg
+ cat <<EOF >$DOCKER_APT_DEBIAN_DOCKER_GPGFILE
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
+lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
+38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
+L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
+UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
+cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
+ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
+vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
+G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
+XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
+q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
+tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
+BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
+v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
+tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
+jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
+6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
+XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
+FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
+g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
+ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
+9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
+G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
+FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
+EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
+M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
+Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
+w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
+z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
+eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
+VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
+1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
+zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
+pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
+ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
+BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
+1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
+YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
+mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
+KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
+JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
+cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
+6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
+U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
+VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
+irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
+SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
+QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
+9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
+24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
+dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
+Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
+H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
+/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
+M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
+xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
+jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
+YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
+=0YYh
+-----END PGP PUBLIC KEY BLOCK-----
+EOF
+}
diff --git a/docker/scripts/lib_common.sh b/docker/scripts/lib_common.sh
new file mode 100644
index 00000000..59d34c12
--- /dev/null
+++ b/docker/scripts/lib_common.sh
@@ -0,0 +1,290 @@
+# lib_common.sh - Docker build script common library.
+# For import only.
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Don't import more than once.
+if [ -n "$(alias lib_common_imported 2> /dev/null)" ] ; then
+ return 0
+fi
+alias lib_common_imported=true
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+export CIMAN_ROOT="$(dirname $(dirname $CIMAN_DOCKER_SCRIPTS))"
+
+must_be_run_as_root() {
+ set_opts=$-
+ grep -q e <<< $set_opts && set +e # disable exit on errors
+
+ # test if the user is root
+ if [ "${EUID:-$(id -u)}" -eq "0" ] ; then
+ grep -q e <<< $set_opts && set -e # re-enable exit on errors
+ else
+ set +x
+ echo -e "\nERROR: Must be run as root!"
+ if [ -n "$(declare -f usage)" ] ; then
+ usage
+ fi
+ grep -q e <<< $set_opts && set -e # re-enable exit on errors
+ exit 1
+ fi
+}
+
+must_be_run_in_docker_build() {
+ if [ -z "$(alias running_in_docker_build 2> /dev/null)" ] ; then
+ set +x
+ echo -e "\nERROR: $(basename $0) must be run in 'docker build'\n"
+ exit 1
+ fi
+}
+
+echo_log() {
+ if [ "$#" -eq "0" ] ; then
+ if [ -z "$(alias running_in_docker_build 2> /dev/null)" ] ; then
+ echo
+ else
+ echo | tee -a $FDIOTOOLS_IMAGE_BUILD_LOG 1>&2
+ fi
+ return 0
+ fi
+
+ local echo_opts=""
+ case "$1" in
+ -[en])
+ echo_opts="$1 "
+ shift
+ ;;
+ esac
+ if [ -z "$(alias running_in_docker_build 2> /dev/null)" ] ; then
+ echo ${echo_opts}"####> $@"
+ else
+ echo ${echo_opts}"####> $(date): $@" | tee -a $FDIOTOOLS_IMAGE_BUILD_LOG 1>&2
+ fi
+}
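+# Example sketch (message text is illustrative): echo_log accepts the same
+# '-e'/'-n' options as echo and, when running inside 'docker build', also
+# appends the message to $FDIOTOOLS_IMAGE_BUILD_LOG:
+#   echo_log -e "  Starting package installation...\n"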
+
+dump_echo_log() {
+ [ -z "$(alias running_in_docker_build 2> /dev/null)" ] && return 0
+ echo -e "\n\n####> $(date) Build log ($FDIOTOOLS_IMAGE_BUILD_LOG):"
+ cat $FDIOTOOLS_IMAGE_BUILD_LOG
+}
+
+do_git_config() {
+ if [ "$#" -ne "1" ] ; then
+ echo_log "ERROR: do_git_config(): Invalid number of arguments ($#)!"
+ return 1
+ fi
+ cd $DOCKER_BUILD_DIR/$1
+
+ # Add user to git config so git commands don't fail
+ local git_config_list="$(git config -l)"
+ if [ -z "$(grep 'user\.email' <<<$git_config_list)" ] ; then
+ git config user.email "ci-management-dev@lists.fd.io"
+ fi
+ if [ -z "$(grep 'user\.name' <<<$git_config_list)" ] ; then
+ git config user.name "ci-management"
+ fi
+}
+
+do_git_branch() {
+ local branch="$1"
+
+ echo_log " Checking out '$branch' in $(pwd)"
+ if [ -n "$(git branch | grep $branch)" ] ; then
+ git checkout $branch
+ else
+ git checkout -b $branch --track origin/$branch
+ fi
+ git pull -q
+ echo_log -e " 'git log --oneline | head':\n----- %< -----\n$(git log --oneline | head)\n----- %< -----"
+}
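+# Example sketch (repo and branch are illustrative): prepare a project repo
+# copied into $DOCKER_BUILD_DIR, then check out a branch in it:
+#   do_git_config vpp
+#   do_git_branch "stable/2009"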
+
+clean_git_repo() {
+ local curr_dir=$(pwd)
+ cd $1
+ git clean -qfdx
+ git checkout -q master
+ git pull -q
+ cd $curr_dir
+}
+
+remove_pyc_files_and_pycache_dirs() {
+ find . -type f -name '*.pyc' -exec rm -f {} \; 2>/dev/null || true
+ find . -type d -name __pycache__ -exec echo -n "Removing " \; \
+ -print -exec rm -rf {} \; 2>/dev/null || true
+}
+
+# Get the refspec for the specified project branch at HEAD
+#
+# Arguments:
+# $1 - branch
+# $2 - project (Optional: defaults to 'vpp')
+get_gerrit_refspec() {
+ local branch=${1:-"master"}
+ local project=${2:-"vpp"}
+ local query="$(ssh -p 29418 gerrit.fd.io gerrit query status:merged project:$project branch:$branch limit:1 --format=JSON --current-patch-set | tr ',' '\n' | grep refs | cut -d'"' -f4)"
+
+ if [ -z "$query" ] ; then
+ echo "ERROR: Invalid project ($1) or branch ($2)"
+ else
+ echo "$query"
+ fi
+}
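+# Example sketch (branch/project are illustrative; requires gerrit ssh access):
+#   refspec="$(get_gerrit_refspec master vpp)"
+#   git fetch https://gerrit.fd.io/r/vpp "$refspec" && git checkout FETCH_HEAD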
+
+# Well-known filename variables
+export APT_DEBIAN_DOCKER_GPGFILE="docker.linux.debian.gpg"
+export APT_UBUNTU_DOCKER_GPGFILE="docker.linux.ubuntu.gpg"
+export YUM_CENTOS_DOCKER_GPGFILE="docker.linux.centos.gpg"
+
+# OS type variables
+# TODO: Investigate if sourcing /etc/os-release and using env vars from it
+# works across all OS variants. If so, clean up copy-pasta...
+#       Alternatively, use facter as the LF Releng scripts do.
+export OS_ID="$(grep '^ID=' /etc/os-release | cut -d= -f2 | sed -e 's/\"//g')"
+export OS_VERSION_ID="$(grep '^VERSION_ID=' /etc/os-release | cut -d= -f2 | sed -e 's/\"//g')"
+export OS_CODENAME="$(grep 'VERSION_CODENAME=' /etc/os-release | cut -d= -f2)"
+export OS_NAME="${OS_ID}-${OS_VERSION_ID}"
+export OS_ARCH="$(uname -m)"
+case "$OS_ARCH" in
+ x86_64)
+ export DEB_ARCH="amd64"
+ ;;
+ aarch64)
+ export DEB_ARCH="arm64"
+ ;;
+ *)
+ echo "ERROR: Unsupported OS architecture '$OS_ARCH'!"
+ return 1
+ ;;
+esac
+
+# Executor attribute variables
+# Note: the role 'prod' is only applied and uploaded using the script
+# update_dockerhub_prod_tags.sh to avoid accidentally pushing
+# an untested docker image into production.
+export EXECUTOR_ROLES="sandbox test"
+export EXECUTOR_DEFAULT_CLASS="builder"
+export EXECUTOR_CLASS="$EXECUTOR_DEFAULT_CLASS"
+export EXECUTOR_CLASS_ARCH="$EXECUTOR_DEFAULT_CLASS-$OS_ARCH"
+export EXECUTOR_CLASSES="$EXECUTOR_DEFAULT_CLASS csit csit_dut csit_shim"
+export EXECUTOR_ARCHS="aarch64 x86_64"
+declare -A EXECUTOR_CLASS_ARCH_OS_NAMES
+EXECUTOR_CLASS_ARCH_OS_NAMES["builder-aarch64"]="ubuntu-18.04 ubuntu-20.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["builder-x86_64"]="centos-7 centos-8 debian-9 debian-10 ubuntu-18.04 ubuntu-20.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit-aarch64"]="ubuntu-18.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit-x86_64"]="ubuntu-18.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit_dut-aarch64"]="ubuntu-18.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit_dut-x86_64"]="ubuntu-18.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit_shim-aarch64"]="ubuntu-18.04"
+EXECUTOR_CLASS_ARCH_OS_NAMES["csit_shim-x86_64"]="ubuntu-18.04"
+export EXECUTOR_CLASS_ARCH_OS_NAMES
+
+executor_list_roles() {
+ local set_opts=$-
+ grep -q u <<< $set_opts && set +u # disable undefined variable check
+ local indent=${1:-" "}
+ grep -q u <<< $set_opts && set -u # re-enable undefined variable check
+
+ for role in $EXECUTOR_ROLES ; do
+ echo -e "${indent}$role"
+ done
+}
+
+executor_verify_role() {
+ for role in $EXECUTOR_ROLES ; do
+ if [ "$role" = "$1" ] ; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+executor_list_classes() {
+ local set_opts=$-
+ grep -q u <<< $set_opts && set +u # disable undefined variable check
+ local indent=${1:-" "}
+ grep -q u <<< $set_opts && set -u # re-enable undefined variable check
+
+ for class in $EXECUTOR_CLASSES ; do
+ echo -e "${indent}$class"
+ done
+}
+
+executor_verify_class() {
+ for class in $EXECUTOR_CLASSES ; do
+ if [ "$class" = "$1" ] ; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+executor_list_os_names() {
+ local set_opts=$-
+ grep -q u <<< $set_opts && set +u # disable undefined variable check
+ local indent=${1:-" "}
+ grep -q u <<< $set_opts && set -u # re-enable undefined variable check
+
+ echo
+ echo "Valid executor OS names for class '$EXECUTOR_CLASS':"
+ for os in ${EXECUTOR_CLASS_ARCH_OS_NAMES[$EXECUTOR_CLASS_ARCH]} ; do
+ echo "${indent}$os"
+ done | sort
+}
+
+executor_verify_os_name() {
+ for os in ${EXECUTOR_CLASS_ARCH_OS_NAMES[$EXECUTOR_CLASS_ARCH]} ; do
+ if [ "$os" = "$1" ] ; then
+ return 0
+ fi
+ done
+ return 1
+}
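+# Example sketch (variable names are illustrative): validate user-supplied
+# attributes and list the valid values on failure:
+#   executor_verify_class "$class" || { executor_list_classes; exit 1; }
+#   executor_verify_os_name "$os_name" || { executor_list_os_names; exit 1; }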
+
+# Docker variables
+export DOCKER_BUILD_DIR="/scratch/docker-build"
+export DOCKER_CIMAN_ROOT="$DOCKER_BUILD_DIR/ci-management"
+export DOCKERFILE="$DOCKER_BUILD_DIR/Dockerfile"
+export DOCKERIGNOREFILE="$DOCKER_BUILD_DIR/.dockerignore"
+export DOCKERFILE_FROM=${DOCKERFILE_FROM:="${OS_ID}:${OS_VERSION_ID}"}
+export DOCKER_TAG="$(date +%Y_%m_%d_%H%M%S)-$OS_ARCH"
+export DOCKER_VPP_DIR="$DOCKER_BUILD_DIR/vpp"
+export DOCKER_CSIT_DIR="$DOCKER_BUILD_DIR/csit"
+export DOCKER_GPG_KEY_DIR="$DOCKER_BUILD_DIR/gpg-key"
+export DOCKER_APT_UBUNTU_DOCKER_GPGFILE="$DOCKER_GPG_KEY_DIR/$APT_UBUNTU_DOCKER_GPGFILE"
+export DOCKER_APT_DEBIAN_DOCKER_GPGFILE="$DOCKER_GPG_KEY_DIR/$APT_DEBIAN_DOCKER_GPGFILE"
+export DOCKER_DOWNLOADS_DIR="/root/Downloads"
+
+docker_build_setup_ciman() {
+ mkdir -p $DOCKER_BUILD_DIR $DOCKER_GPG_KEY_DIR
+
+ if [ "$(dirname $CIMAN_ROOT)" != "$DOCKER_BUILD_DIR" ] ; then
+ echo_log "Syncing $CIMAN_ROOT into $DOCKER_CIMAN_ROOT..."
+ pushd $CIMAN_ROOT
+ git submodule update --init --recursive
+ popd
+ rsync -a $CIMAN_ROOT/. $DOCKER_CIMAN_ROOT
+ fi
+}
+
+# Variables used in docker build environment
+set_opts=$-
+grep -q u <<< $set_opts && set +u # disable undefined variable check
+if [ -n "$FDIOTOOLS_IMAGE" ] ; then
+ alias running_in_docker_build=true
+ export DOCKER_BUILD_LOG_DIR="$DOCKER_BUILD_DIR/logs"
+ export FDIOTOOLS_IMAGENAME="$(echo $FDIOTOOLS_IMAGE | sed -e 's/:/-/' -e 's,/,_,g')"
+ export FDIOTOOLS_IMAGE_BUILD_LOG="$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME.log"
+ mkdir -p $DOCKER_BUILD_LOG_DIR
+fi
+grep -q u <<< $set_opts && set -u # re-enable undefined variable check
diff --git a/docker/scripts/lib_csit.sh b/docker/scripts/lib_csit.sh
new file mode 100644
index 00000000..9958faba
--- /dev/null
+++ b/docker/scripts/lib_csit.sh
@@ -0,0 +1,173 @@
+# lib_csit.sh - Docker build script CSIT library.
+# For import only.
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Don't import more than once.
+if [ -n "$(alias lib_csit_imported 2> /dev/null)" ] ; then
+ return 0
+fi
+alias lib_csit_imported=true
+
+export CIMAN_DOCKER_SCRIPTS="${CIMAN_DOCKER_SCRIPTS:-$(dirname $BASH_SOURCE)}"
+. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_apt.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_yum.sh
+. $CIMAN_DOCKER_SCRIPTS/lib_dnf.sh
+
+csit_checkout_branch_for_vpp() {
+ local vpp_branch=$1
+ local csit_dir="$DOCKER_CSIT_DIR"
+ local csit_bash_function_dir="$csit_dir/resources/libraries/bash/function"
+
+ # import checkout_csit_for_vpp() if not defined
+ set +e && [ -z "$(declare -f checkout_csit_for_vpp)" ] \
+ && source $csit_bash_function_dir/branch.sh
+  CSIT_DIR=$csit_dir checkout_csit_for_vpp $vpp_branch
+
+ csit_branch="$(git branch | grep -e '^*' | mawk '{print $2}')"
+}
+
+csit_install_packages() {
+ local branch="$1"
+ local branchname="$(echo $branch | sed -e 's,/,_,')"
+ local csit_dir="$DOCKER_CSIT_DIR"
+ local csit_ansible_dir="$csit_dir/resources/tools/testbed-setup/ansible"
+ local bld_log="$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME"
+ bld_log="${bld_log}-$branchname-csit_install_packages-bld.log"
+
+ git clean -qfdx
+
+ # Install PyYAML required by dbld_csit_find_ansible_packages.py
+ #
+ # Note: Conditional install due to Bug 1696324 -
+ # Update to python3.6 breaks PyYAML dependencies
+ # Status: CLOSED CANTFIX
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1696324
+ if [ "$OS_NAME" = "centos-8" ] ; then
+ dnf_install_packages python3-pyyaml
+ else
+ python3 -m pip install pyyaml
+ fi
+
+  # NOTE: the CSIT baseline OS is implicitly pinned to Ubuntu 18.04,
+  #       so only gather packages from ansible for that OS.
+ if [ "$OS_NAME" = "ubuntu-18.04" ] ; then
+ # Not in double quotes to let bash remove newline characters
+ local exclude_roles="-e calibration -e kernel -e mellanox -e nomad"
+ [ "$OS_ARCH" = "aarch64" ] && exclude_roles="$exclude_roles -e iperf"
+ local yaml_files=$(grep -r packages_by $csit_ansible_dir | cut -d: -f1 | sort -u | grep -v $exclude_roles)
+ packages=$(dbld_csit_find_ansible_packages.py --$OS_ID --$OS_ARCH $yaml_files)
+
+ if [ -n "$packages" ] ; then
+ case "$OS_NAME" in
+ ubuntu*)
+ apt_install_packages $packages
+ ;;
+ debian*)
+ apt_install_packages $packages
+ ;;
+ centos-7)
+ yum_install_packages $packages
+ ;;
+ centos-8)
+ dnf_install_packages $packages
+ ;;
+ *)
+ echo "Unsupported OS ($OS_ID): CSIT packages NOT INSTALLED!"
+ ;;
+ esac
+ fi
+ fi
+}
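+# Example sketch (branch name is illustrative): install CSIT OS packages for
+# one branch of the csit repo cloned into $DOCKER_CSIT_DIR:
+#   cd $DOCKER_CSIT_DIR && do_git_branch rls2009 && csit_install_packages rls2009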
+
+csit_pip_cache() {
+ local branch="$1"
+ local VENV_OPTS=""
+ # ensure PS1 is defined (used by virtualenv activate script)
+ PS1=${PS1:-"#"}
+ local csit_dir="$DOCKER_CSIT_DIR"
+ local csit_bash_function_dir="$csit_dir/resources/libraries/bash/function"
+
+ if [ -f "$csit_dir/VPP_REPO_URL" ] \
+ && [ -f "$csit_dir/requirements.txt" ]; then
+
+ local branchname="$(echo $branch | sed -e 's,/,_,')"
+ local bld_log="$DOCKER_BUILD_LOG_DIR"
+ bld_log="${bld_log}/$FDIOTOOLS_IMAGENAME-$branchname-csit_pip_cache-bld.log"
+
+ export PYTHONPATH=$csit_dir
+ git clean -qfdx
+
+ description="Install CSIT python packages from $branch branch"
+ echo_log " Starting $description..."
+ [ -n "$(declare -f deactivate)" ] && deactivate
+ local PIP=pip
+ local setup_framework=$csit_dir/resources/libraries/python/SetupFramework.py
+ if [ -n "$(grep pip3 $setup_framework)" ]; then
+ PIP=pip3
+ VENV_OPTS="-p python3"
+ fi
+ rm -rf $PYTHONPATH/env
+ virtualenv $VENV_OPTS $PYTHONPATH/env
+ . $PYTHONPATH/env/bin/activate
+ if [ "$OS_ARCH" = "aarch64" ] ; then
+ local numpy_ver="$(grep numpy $PYTHONPATH/requirements.txt)"
+ [ -n "$numpy_ver" ] && $PIP install --upgrade $numpy_ver 2>&1 \
+ | tee -a $bld_log
+ fi
+ $PIP install --upgrade -r $PYTHONPATH/requirements.txt 2>&1 \
+ | tee -a $bld_log
+ $PIP install --upgrade -r $PYTHONPATH/tox-requirements.txt 2>&1 \
+ | tee -a $bld_log
+ if [ "$OS_ARCH" = "x86_64" ] ; then
+ local PRESENTATION_DIR="$PYTHONPATH/resources/tools/presentation"
+ # TODO: Remove condition when 19.08 is deprecated.
+ if [ -n "$(grep -r python3 $PRESENTATION_DIR)" ] && [ "$PIP" = "pip3" ] ; then
+ $PIP install --upgrade -r $PRESENTATION_DIR/requirements.txt 2>&1 \
+ | tee -a $bld_log
+ else
+ echo_log "Skipping 'pip install $PRESENTATION_DIR/requirements.txt' in branch $branch!"
+ fi
+ fi
+
+ deactivate
+ rm -rf $PYTHONPATH/env
+
+ # Virtualenv version is pinned in common.sh in newer csit branches.
+ # (note: xargs removes leading/trailing spaces)
+ #
+ # TODO: pip3 install virtualenv==20.0.20 installs a version of virtualenv
+ # which hardcodes python3 in the shebang line. This breaks branches
+    #       containing python2 code (e.g. oper1908-*).
+ # Restore when 19.08 is no longer supported or is updated to override
+ # the shebang in virtualenv (e.g. 'python2.7 virtualenv env')
+ # install_virtualenv="$(grep 'virtualenv' $csit_bash_function_dir/common.sh | grep pip | grep install | cut -d'|' -f1 | xargs)"
+ # $install_virtualenv
+
+ git checkout -q -- .
+ echo_log " Completed $description!"
+ else
+ echo_log "ERROR: Missing or invalid CSIT_DIR: '$csit_dir'!"
+ return 1
+ fi
+}
+
+docker_build_setup_csit() {
+ if [ ! -d "$DOCKER_CSIT_DIR" ] ; then
+ echo_log "Cloning CSIT into $DOCKER_CSIT_DIR..."
+ git clone -q https://gerrit.fd.io/r/csit $DOCKER_CSIT_DIR
+ fi
+ clean_git_repo $DOCKER_CSIT_DIR
+}
diff --git a/docker/scripts/lib_dnf.sh b/docker/scripts/lib_dnf.sh
new file mode 100644
index 00000000..4f1f22f2
--- /dev/null
+++ b/docker/scripts/lib_dnf.sh
@@ -0,0 +1,163 @@
+# lib_dnf.sh - Docker build script dnf library.
+# For import only.
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Don't import more than once.
+if [ -n "$(alias lib_dnf_imported 2> /dev/null)" ] ; then
+ return 0
+fi
+alias lib_dnf_imported=true
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+
+dump_dnf_package_list() {
+ branchname="$(echo $branch | sed -e 's,/,_,')"
+ dnf list installed > \
+ "$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME-$branchname-dnf-packages.log"
+}
+
+dnf_install_packages() {
+ dnf install -y $@
+}
+
+dnf_install_docker_os_package_dependancies() {
+ dnf_install_packages dnf-utils
+}
+
+dnf_install_docker() {
+  # Note: Support for docker has been removed starting with centos-8, so the
+  #       only recourse is to pin the version to what is available in the
+  #       download directory.
+  #       Browse the base URL to see what is available & update accordingly.
+
+ if [ "$OS_NAME" = "centos-8" ] ; then
+ dnf install -y https://download.docker.com/linux/$OS_ID/$OS_VERSION_ID/$OS_ARCH/stable/Packages/containerd.io-1.3.7-3.1.el8.x86_64.rpm
+ dnf install -y https://download.docker.com/linux/$OS_ID/$OS_VERSION_ID/$OS_ARCH/stable/Packages/docker-ce-cli-19.03.13-3.el8.x86_64.rpm
+ dnf install -y https://download.docker.com/linux/$OS_ID/$OS_VERSION_ID/$OS_ARCH/stable/Packages/docker-ce-19.03.13-3.el8.x86_64.rpm
+ else
+ echo_log "WARNING: Docker Image unknown for $OS_NAME!"
+ fi
+}
+
+generate_dnf_dockerfile() {
+ local executor_os_name=$1
+ local from_image=$2
+ local executor_image=$3
+ local from_image_os_id="$(echo $from_image | cut -d: -f2)"
+
+ cat <<EOF >$DOCKERFILE
+FROM $from_image AS executor-image
+LABEL Description="FD.io CI executor docker image for $executor_os_name/$OS_ARCH"
+LABEL Vendor="fd.io"
+LABEL Version="$DOCKER_TAG"
+
+# Build Environment Variables
+ENV FDIOTOOLS_IMAGE=$executor_image
+ENV LC_ALL=C.UTF-8
+ENV CIMAN_ROOT="$DOCKER_CIMAN_ROOT"
+ENV PATH=$PATH:$DOCKER_CIMAN_ROOT/docker/scripts
+
+# Copy-in build tree containing
+# ci-management, vpp, & csit git repos
+WORKDIR $DOCKER_BUILD_DIR
+COPY . .
+
+# Install baseline packages (minimum build & utils).
+#
+# ci-management global-jjb requirements:
+# for lf-env.sh:
+# facter
+# from global-jjb/packer/provision/baseline.sh:
+# deltarpm
+# unzip
+# xz
+# puppet
+# python3-pip
+# git
+# git-review
+# perl-XML-XPath
+# make
+# wget
+#
+# TODO: Fix broken project requirement install targets
+#
+# graphviz for 'make bootstrap-doxygen' (VPP)
+# doxygen for 'make doxygen' (VPP)
+# enchant for 'make docs' (VPP)
+# libffi-devel for python cffi install (Ubuntu20.04/VPP/aarch64)
+# libpcap-devel for python pypcap install (CSIT)
+# lapack-devel for python numpy/scipy (CSIT/aarch64)
+# openblas-devel for python numpy/scipy (CSIT/aarch64)
+#
+RUN dnf update -y \\
+ && dnf install -y \\
+ dnf-plugins-core \\
+ epel-release \\
+ && dnf config-manager --set-enabled PowerTools --set-enabled epel \\
+ && dnf clean all
+RUN dnf update -y \\
+ && dnf install -y \\
+ dnf-utils \\
+ doxygen \\
+ enchant \\
+ emacs \\
+ facter \\
+ git \\
+ git-review \\
+ graphviz \\
+ iproute \\
+ java-1.8.0-openjdk \\
+ java-1.8.0-openjdk-devel \\
+ jq \\
+ lapack-devel \\
+ libffi-devel \\
+ libpcap-devel \\
+ make \\
+ mawk \\
+ mock \\
+ openblas-devel \\
+ perl \\
+ perl-XML-XPath \\
+ python3-pip \\
+ puppet \\
+ rake \\
+ ruby-devel \\
+ sudo \\
+ tree \\
+ unzip \\
+ vim \\
+ wget \\
+ xz \\
+ && dnf clean all
+
+# Install OS packages for project branches
+#
+RUN dbld_vpp_install_packages.sh \\
+ && dbld_install_docker.sh \\
+ && dbld_csit_install_packages.sh \\
+ && dbld_lfit_requirements.sh \\
+ && dnf clean all
+
+# CI Runtime Environment
+WORKDIR /
+ENV VPP_ZOMBIE_NOCHECK=1
+RUN gem install package_cloud \\
+ && curl -s https://packagecloud.io/install/repositories/fdio/master/script.rpm.sh | sudo bash
+
+# Clean up
+RUN dbld_dump_build_logs.sh \\
+ && rm -rf /tmp/*
+EOF
+}
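+# Example sketch (image names and build invocation are illustrative): generate
+# the Dockerfile, then build the executor image from the docker build directory:
+#   generate_dnf_dockerfile "centos-8" "centos:8" "fdiotools/builder-centos8:$DOCKER_TAG"
+#   docker build -t "fdiotools/builder-centos8:$DOCKER_TAG" -f $DOCKERFILE $DOCKER_BUILD_DIR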
diff --git a/docker/scripts/lib_vpp.sh b/docker/scripts/lib_vpp.sh
new file mode 100644
index 00000000..38245c7b
--- /dev/null
+++ b/docker/scripts/lib_vpp.sh
@@ -0,0 +1,90 @@
+# lib_vpp.sh - Docker build script VPP library.
+# For import only.
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Don't import more than once.
+if [ -n "$(alias lib_vpp_imported 2> /dev/null)" ] ; then
+ return 0
+fi
+alias lib_vpp_imported=true
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+
+make_vpp() {
+ local target=$1
+ local branch=${2:-"master"}
+ local branchname="$(echo $branch | sed -e 's,/,_,')"
+ local bld_log="$DOCKER_BUILD_LOG_DIR"
+ bld_log="${bld_log}/$FDIOTOOLS_IMAGENAME-$branchname"
+ bld_log="${bld_log}-make_vpp_${target}-bld.log"
+
+ makefile_target="^${target}:"
+ if [ -z "$(grep $makefile_target Makefile)" ] ; then
+ echo "Make target '$target' does not exist in VPP branch '$branch'!"
+ return
+ fi
+ git clean -qfdx
+ description="'make UNATTENDED=y $target' in $(pwd) ($branch)"
+ echo_log -e " Starting $description..."
+ local force_opts="--allow-downgrades --allow-remove-essential"
+ force_opts="$force_opts --allow-change-held-packages"
+ make UNATTENDED=y CONFIRM="-y" FORCE="$force_opts" \
+ $target 2>&1 | tee -a "$bld_log"
+ git checkout -q -- .
+ echo_log " Completed $description!"
+}
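+# Example sketch (target and branch are illustrative):
+#   cd $DOCKER_VPP_DIR && do_git_branch master
+#   make_vpp install-dep master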
+
+make_vpp_test() {
+ local target=$1
+ local branch=${2:-"master"}
+ local branchname="$(echo $branch | sed -e 's,/,_,')"
+ local bld_log="$DOCKER_BUILD_LOG_DIR"
+ bld_log="${bld_log}/$FDIOTOOLS_IMAGENAME-$branchname"
+ bld_log="${bld_log}-make_vpp_test_${target}-bld.log"
+
+ makefile_target="^${target}:"
+ if [ -z "$(grep -e $makefile_target test/Makefile)" ] ; then
+ echo "Make test target '$target' does not exist in VPP branch '$branch'!"
+ return
+ fi
+ git clean -qfdx
+ description="'make -C test $target' in $(pwd) ($branch)"
+ echo_log " Starting $description..."
+ make WS_ROOT="$DOCKER_VPP_DIR" BR="$DOCKER_VPP_DIR/build-root" \
+ TEST_DIR="$DOCKER_VPP_DIR/test" -C test $target 2>&1 | tee -a $bld_log
+ remove_pyc_files_and_pycache_dirs
+ git checkout -q -- .
+ echo_log " Completed $description!"
+}
+
+docker_build_setup_vpp() {
+ if [ ! -d "$DOCKER_VPP_DIR" ] ; then
+ echo_log "Cloning VPP into $DOCKER_VPP_DIR..."
+ git clone -q https://gerrit.fd.io/r/vpp $DOCKER_VPP_DIR
+ fi
+ clean_git_repo $DOCKER_VPP_DIR
+}
+
+# Branches must be listed in chronological order -- oldest stable branch
+# first and master last.
+declare -A VPP_BRANCHES
+VPP_BRANCHES["centos-7"]="stable/1908 stable/2001 stable/2005 stable/2009 master"
+VPP_BRANCHES["centos-8"]="stable/2009 master"
+VPP_BRANCHES["debian-9"]="stable/2009 master"
+VPP_BRANCHES["debian-10"]="stable/2009 master"
+VPP_BRANCHES["ubuntu-18.04"]="stable/1908 stable/2001 stable/2005 stable/2009 master"
+VPP_BRANCHES["ubuntu-20.04"]="stable/2009 master"
+export VPP_BRANCHES
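+# Example sketch (illustrative): iterate over the branches defined for this OS:
+#   for branch in ${VPP_BRANCHES[$OS_NAME]} ; do
+#     do_git_branch $branch
+#   done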
diff --git a/docker/scripts/lib_yum.sh b/docker/scripts/lib_yum.sh
new file mode 100644
index 00000000..95a728e6
--- /dev/null
+++ b/docker/scripts/lib_yum.sh
@@ -0,0 +1,156 @@
+# lib_yum.sh - Docker build script yum library.
+# For import only.
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Don't import more than once.
+if [ -n "$(alias lib_yum_imported 2> /dev/null)" ] ; then
+ return 0
+fi
+alias lib_yum_imported=true
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+
+dump_yum_package_list() {
+ branchname="$(echo $branch | sed -e 's,/,_,')"
+ yum list installed > \
+ "$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME-$branchname-yum-packages.log"
+}
+
+yum_install_packages() {
+ yum install -y $@
+}
+
+yum_install_docker_os_package_dependancies() {
+ yum_install_packages yum-utils
+}
+
+yum_install_docker() {
+ yum-config-manager --add-repo \
+ https://download.docker.com/linux/${OS_ID}/docker-ce.repo
+ yum-config-manager --enablerepo=docker-ce-stable
+ yum_install_packages docker-ce docker-ce-cli containerd.io
+}
+
+generate_yum_dockerfile() {
+ local executor_os_name=$1
+ local from_image=$2
+ local executor_image=$3
+ local from_image_os_id="$(echo $from_image | cut -d: -f2)"
+
+ cat <<EOF >$DOCKERFILE
+FROM $from_image AS executor-image
+LABEL Description="FD.io CI executor docker image for $executor_os_name/$OS_ARCH"
+LABEL Vendor="fd.io"
+LABEL Version="$DOCKER_TAG"
+
+# Build Environment Variables
+ENV FDIOTOOLS_IMAGE=$executor_image
+ENV LC_ALL=en_US.UTF-8
+ENV CIMAN_ROOT="$DOCKER_CIMAN_ROOT"
+ENV PATH=$PATH:$DOCKER_CIMAN_ROOT/docker/scripts
+
+# Copy-in build tree containing
+# ci-management, vpp, & csit git repos
+WORKDIR $DOCKER_BUILD_DIR
+COPY . .
+
+# Install baseline packages (minimum build & utils).
+#
+# ci-management global-jjb requirements:
+# for lf-env.sh:
+# facter
+# from global-jjb/packer/provision/baseline.sh:
+# deltarpm
+# unzip
+# xz
+# puppet
+# python3-pip
+# git
+# git-review
+# perl-XML-XPath
+# make
+# wget
+#
+# TODO: Fix broken project requirement install targets
+#
+# graphviz for 'make bootstrap-doxygen' (VPP)
+# doxygen for 'make doxygen' (VPP)
+# enchant for 'make docs' (VPP)
+# libffi-devel for python cffi install (Ubuntu20.04/VPP/aarch64)
+# libpcap-devel for python pypcap install (CSIT)
+# liblapack-devel for python numpy/scipy (CSIT/aarch64)
+# libopenblas-devel for python numpy/scipy (CSIT/aarch64)
+#
+RUN yum update -y \\
+ && yum install -y \\
+ epel-release \\
+ && yum clean all
+RUN yum update -y \\
+ && yum install -y \\
+ yum-utils \\
+ deltarpm \\
+ doxygen \\
+ enchant \\
+ emacs \\
+ facter \\
+ git \\
+ git-review \\
+ graphviz \\
+ iproute \\
+ java-1.8.0-openjdk \\
+ java-1.8.0-openjdk-devel \\
+ jq \\
+ libffi-devel \\
+ liblapack-devel \\
+ libopenblas-devel \\
+ libpcap-devel \\
+ make \\
+ mawk \\
+ mock \\
+ perl \\
+ perl-XML-XPath \\
+ python3-pip \\
+ puppet \\
+ rake \\
+ ruby-devel \\
+ sudo \\
+ tree \\
+ unzip \\
+ vim \\
+ wget \\
+ xz \\
+ && yum clean all
+
+# Install packages for all project branches
+#
+RUN yum update -y \\
+ && dbld_install_docker.sh \\
+ && dbld_vpp_install_packages.sh \\
+ && dbld_csit_install_packages.sh \\
+ && dbld_lfit_requirements.sh \\
+ && yum clean all
+
+# CI Runtime Environment
+WORKDIR /
+ENV VPP_ZOMBIE_NOCHECK=1
+RUN gem install package_cloud \\
+ && curl -s https://packagecloud.io/install/repositories/fdio/master/script.rpm.sh | sudo bash
+
+# Clean up
+RUN dbld_dump_build_logs.sh \\
+ && rm -rf /tmp/*
+EOF
+}
diff --git a/docker/scripts/update_dockerhub_prod_tags.sh b/docker/scripts/update_dockerhub_prod_tags.sh
new file mode 100755
index 00000000..323dabfd
--- /dev/null
+++ b/docker/scripts/update_dockerhub_prod_tags.sh
@@ -0,0 +1,392 @@
+#! /bin/bash
+
+# Copyright (c) 2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+shopt -s extglob
+
+# Log all output to stdout & stderr to a log file
+logname="/tmp/$(basename $0).$(date +%Y_%m_%d_%H%M%S).log"
+echo -e "\n*** Logging output to $logname ***\n"
+exec > >(tee -a $logname) 2>&1
+
+export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"}
+. $CIMAN_DOCKER_SCRIPTS/lib_common.sh
+
+# Global variables
+long_bar="################################################################"
+short_bar="-----"
+image_not_found=""
+image_user=""
+image_repo=""
+image_version=""
+image_arch=""
+image_name_prod=""
+image_name_prev=""
+image_name_new=""
+image_realname=""
+image_realname_prod=""
+image_realname_prev=""
+image_tags=""
+image_tags_prod=""
+image_tags_prev=""
+image_tags_new=""
+docker_id_prod=""
+docker_id_prev=""
+docker_id_new=""
+digest_prod=""
+digest_prev=""
+digest_new=""
+restore_cmd=""
+
+usage() {
+ local script="$(basename $0)"
+ echo
+ echo "Usage: $script r[evert] <prod image>"
+ echo " $script p[romote] <new image> [<new image>]"
+ echo " $script i[nspect] <prod image>"
+ echo
+ echo " revert: swaps 'prod-<arch>' and 'prod-prev-<arch>' images"
+ echo " <prod image>: e.g. fdiotools/builder-ubuntu1804:prod-x86_64"
+ echo
+ echo " promote: moves 'prod-<arch>' image to 'prod-prev-<arch>' tag and"
+ echo " tags <new image> with 'prod-<arch>'"
+ echo " <new image>: e.g. fdiotools/builder-ubuntu1804:2020_09_23_151655-x86_64"
+ echo " inspect: prints out all tags for prod-<arch> and prod-prev-<arch>"
+ echo
+ exit 1
+}
+
+echo_restore_cmd() {
+ echo -e "\n$long_bar\n"
+ echo "To restore tags to original state, issue the following command:"
+ echo -e "\n$restore_cmd\n\n$long_bar\n"
+}
+
+push_to_dockerhub() {
+ echo_restore_cmd
+ for image in "$@" ; do
+ set +e
+ echo "Pushing '$image' to docker hub..."
+ if ! docker push $image ; then
+ echo "ERROR: 'docker push $image' failed!"
+ exit 1
+ fi
+ done
+}
+
+parse_image_name() {
+ image_user="$(echo $1 | cut -d'/' -f1)"
+ image_repo="$(echo $1 | cut -d'/' -f2 | cut -d':' -f1)"
+ local tag="$(echo $1 | cut -d':' -f2)"
+ image_version="$(echo $tag | cut -d'-' -f1)"
+ image_arch="$(echo $tag | sed -e s/$image_version-//)"
+ image_name_new="${image_user}/${image_repo}:${image_version}-${image_arch}"
+ if [ "$1" != "$image_name_new" ] ; then
+ echo "ERROR: Image name parsing failed: $1 != '$image_name_new'"
+ usage
+ fi
+ if [[ "$image_version" =~ "prod" ]] ; then
+ image_name_new=""
+ fi
+ image_name_prod="${image_user}/${image_repo}:prod-${image_arch}"
+ image_name_prev="${image_user}/${image_repo}:prod-prev-${image_arch}"
+}
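+# Example sketch (illustrative): parsing 'fdiotools/builder-ubuntu1804:2020_09_23_151655-x86_64'
+# yields image_user='fdiotools', image_repo='builder-ubuntu1804',
+# image_version='2020_09_23_151655', image_arch='x86_64', and sets
+# image_name_prod/image_name_prev to the ':prod-x86_64' / ':prod-prev-x86_64' tags.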
+
+format_image_tags() {
+ # Note: 'grep $image_arch' & grep -v 'prod-curr' is required due to a
+ # bug in docker hub which returns old tags which were deleted via
+ # the webUI, but are still retrieved by 'docker pull -a'
+ image_tags="$(docker images | grep $1 | grep $image_arch | grep -v prod-curr | sort -r | mawk '{print $1":"$2}' | tr '\n' ' ')"
+ image_realname="$(docker images | grep $1 | grep $image_arch | sort -r | grep -v prod | mawk '{print $1":"$2}')"
+}
+
+get_image_id_tags() {
+ for image in "$image_name_new" "$image_name_prod" "$image_name_prev" ; do
+ if [ -z "$image" ] ; then
+ continue
+ fi
+ # ensure image exists
+ set +e
+ local image_found="$(docker images | mawk '{print $1":"$2}' | grep $image)"
+ set -e
+ if [ -z "$image_found" ] ; then
+ if [ "$image" = "$image_name_prev" ] ; then
+ if [ "$action" = "revert" ] ; then
+ echo "ERROR: Image '$image' not found!"
+ echo "Unable to revert production image '$image_name_prod'!"
+ usage
+ else
+ continue
+ fi
+ else
+ echo "ERROR: Image '$image' not found!"
+ usage
+ fi
+ fi
+ set +e
+ local id="$(docker image inspect $image | mawk -F':' '/Id/{print $3}')"
+ local digest="$(docker image inspect $image | grep -A1 RepoDigests | grep -v RepoDigests | mawk -F':' '{print $2}')"
+ local retval="$?"
+ set -e
+ if [ "$retval" -ne "0" ] ; then
+ echo "ERROR: Docker ID not found for '$image'!"
+ usage
+ fi
+ if [ "$image" = "$image_name_prod" ] ; then
+ docker_id_prod="${id::12}"
+ digest_prod="${digest::12}"
+ format_image_tags "$docker_id_prod"
+ image_tags_prod="$image_tags"
+ if [ -z "$image_realname_prod" ] ; then
+ image_realname_prod="$image_realname"
+ fi
+ elif [ "$image" = "$image_name_prev" ] ; then
+ docker_id_prev="${id::12}"
+ digest_prev="${digest::12}"
+ format_image_tags "$docker_id_prev"
+ image_tags_prev="$image_tags"
+ if [ -z "$image_realname_prev" ] ; then
+ image_realname_prev="$image_realname"
+ fi
+ else
+ docker_id_new="${id::12}"
+ digest_new="${digest::12}"
+ format_image_tags "$docker_id_new" "NEW"
+ image_tags_new="$image_tags"
+ fi
+ done
+ if [ -z "$restore_cmd" ] ; then
+ restore_cmd="sudo $0 p $image_realname_prev $image_realname_prod"
+ fi
+}
+
+get_all_tags_from_dockerhub() {
+ local dh_repo="$image_user/$image_repo"
+ echo -e "Pulling all tags from docker hub repo '$dh_repo':\n$long_bar"
+ if ! docker pull -a "$dh_repo" ; then
+ echo "ERROR: Repository '$dh_repo' not found on docker hub!"
+ usage
+ fi
+ echo "$long_bar"
+}
+
+verify_image_name() {
+ image_not_found=""
+ # Invalid user
+ if [ "$image_user" != "fdiotools" ] ; then
+ image_not_found="true"
+ echo "ERROR: invalid user '$image_user' in '$image_name_new'!"
+ fi
+ # Invalid version
+ if [ -z "$image_not_found" ] \
+ && [ "$image_version" != "prod" ] \
+ && ! [[ "$image_version" =~ \
+ ^[0-9]{4}_[0-1][0-9]_[0-3][0-9]_[0-2][0-9][0-5][0-9][0-5][0-9]$ ]]
+ then
+ image_not_found="true"
+ echo "ERROR: invalid version '$image_version' in '$image_name_new'!"
+ fi
+ # Invalid arch
+ if [ -z "$image_not_found" ] \
+ && ! [[ "$EXECUTOR_ARCHS" =~ .*"$image_arch".* ]] ; then
+ image_not_found="true"
+ echo "ERROR: invalid arch '$image_arch' in '$image_name_new'!"
+ fi
+ if [ -n "$image_not_found" ] ; then
+ echo "ERROR: Invalid image '$image_name_new'!"
+ usage
+ fi
+}
+
+docker_tag_image() {
+ echo ">>> docker tag $1 $2"
+ set +e
+ docker tag $1 $2
+ local retval="$?"
+ set -e
+ if [ "$retval" -ne "0" ] ; then
+ echo "WARNING: 'docker tag $1 $2' failed!"
+ fi
+}
+
+docker_rmi_tag() {
+ set +e
+ echo ">>> docker rmi $1"
+ docker rmi $1
+ local retval="$?"
+ set -e
+ if [ "$retval" -ne "0" ] ; then
+ echo "WARNING: 'docker rmi $1' failed!"
+ fi
+}
+
+print_image_list() {
+ if [ -z "$2" ] ; then
+ echo "$1 Image Not Found"
+ return
+ fi
+ echo "$1 (Id $2, Digest $3):"
+ for image in $4 ; do
+ echo -e "\t$image"
+ done
+}
+
+inspect_images() {
+ echo -e "\n${1}Production Docker Images:"
+ echo "$short_bar"
+ if [ -n "$image_tags_new" ] ; then
+ print_image_list "NEW" "$docker_id_new" "$digest_new" "$image_tags_new"
+ echo
+ fi
+ print_image_list "prod-$image_arch" "$docker_id_prod" "$digest_prod" \
+ "$image_tags_prod"
+ echo
+ print_image_list "prod-prev-$image_arch" "$docker_id_prev" "$digest_prev" \
+ "$image_tags_prev"
+ echo -e "$short_bar\n"
+}
+
+revert_prod_image() {
+ inspect_images "EXISTING "
+ docker_tag_image $docker_id_prod $image_name_prev
+ docker_tag_image $docker_id_prev $image_name_prod
+ get_image_id_tags
+ inspect_images "REVERTED "
+
+ local yn=""
+ while true; do
+ read -p "Push Reverted tags to '$image_user/$image_repo' (yes/no)? " yn
+ case ${yn:0:1} in
+ y|Y )
+ break ;;
+ n|N )
+ echo -e "\nABORTING REVERT!\n"
+ docker_tag_image $docker_id_prev $image_name_prod
+ docker_tag_image $docker_id_prod $image_name_prev
+ get_image_id_tags
+ inspect_images "RESTORED LOCAL "
+ exit 1 ;;
+ * )
+ echo "Please answer yes or no." ;;
+ esac
+ done
+ echo
+ push_to_dockerhub $image_name_prev $image_name_prod
+ inspect_images ""
+ echo_restore_cmd
+}
+
+promote_new_image() {
+ inspect_images "EXISTING "
+ docker_tag_image $docker_id_prod $image_name_prev
+ docker_tag_image $docker_id_new $image_name_prod
+ get_image_id_tags
+ inspect_images "PROMOTED "
+
+ local yn=""
+ while true; do
+ read -p "Push promoted tags to '$image_user/$image_repo' (yes/no)? " yn
+ case ${yn:0:1} in
+ y|Y )
+ break ;;
+ n|N )
+ echo -e "\nABORTING PROMOTION!\n"
+ docker_tag_image $docker_id_prev $image_name_prod
+ local restore_both="$(echo $restore_cmd | mawk '{print $5}')"
+ if [[ -n "$restore_both" ]] ; then
+ docker_tag_image $image_realname_prev $image_name_prev
+ else
+ docker_rmi_tag $image_name_prev
+ image_name_prev=""
+ docker_id_prev=""
+ fi
+ get_image_id_tags
+ inspect_images "RESTORED "
+ exit 1 ;;
+ * )
+ echo "Please answer yes or no." ;;
+ esac
+ done
+ echo
+ push_to_dockerhub $image_name_new $image_name_prev $image_name_prod
+ inspect_images ""
+ echo_restore_cmd
+}
+
+must_be_run_as_root
+
+# Validate arguments
+num_args="$#"
+if [ "$num_args" -lt "1" ] ; then
+ usage
+fi
+action=""
+case "$1" in
+ r?(evert))
+ action="revert"
+ if [ "$num_args" -ne "2" ] ; then
+ echo "ERROR: Invalid number of arguments: $#"
+ usage
+ fi ;;
+ p?(romote))
+ if [ "$num_args" -eq "2" ] || [ "$num_args" -eq "3" ] ; then
+ action="promote"
+ else
+ echo "ERROR: Invalid number of arguments: $#"
+ usage
+ fi ;;
+ i?(nspect))
+ action="inspect"
+ if [ "$num_args" -ne "2" ] ; then
+ echo "ERROR: Invalid number of arguments: $#"
+ usage
+ fi ;;
+ *)
+ echo "ERROR: Invalid option '$1'!"
+ usage ;;
+esac
+shift
+docker login >& /dev/null
+
+# Update local tags
+tags_to_push=""
+for image in "$@" ; do
+ parse_image_name "$image"
+ verify_image_name "$image"
+ get_all_tags_from_dockerhub
+ get_image_id_tags
+ if [ "$action" = "promote" ] ; then
+ if [ -n "$image_name_new" ] ; then
+ promote_new_image
+ else
+ echo "ERROR: No new image specified to promote!"
+ usage
+ fi
+ elif [ "$action" = "revert" ] ; then
+ if [ "$image_version" = "prod" ] ; then
+ revert_prod_image
+ else
+ echo "ERROR: Non-production image '$image' specified!"
+ usage
+ fi
+ else
+ if [ "$image_version" = "prod" ] ; then
+ inspect_images ""
+ else
+ echo "ERROR: Non-production image '$image' specified!"
+ usage
+ fi
+ fi
+done