diff options
Diffstat (limited to 'docker/scripts')
-rw-r--r-- | docker/scripts/README.md | 271 | ||||
-rwxr-xr-x | docker/scripts/build_executor_docker_image.sh | 151 | ||||
-rwxr-xr-x | docker/scripts/dbld_csit_find_ansible_packages.py | 91 | ||||
-rwxr-xr-x | docker/scripts/dbld_csit_install_packages.sh | 60 | ||||
-rwxr-xr-x | docker/scripts/dbld_dump_build_logs.sh | 55 | ||||
-rwxr-xr-x | docker/scripts/dbld_lfit_requirements.sh | 58 | ||||
-rwxr-xr-x | docker/scripts/dbld_vpp_install_packages.sh | 79 | ||||
-rw-r--r-- | docker/scripts/lib_apt.sh | 352 | ||||
-rw-r--r-- | docker/scripts/lib_common.sh | 271 | ||||
-rw-r--r-- | docker/scripts/lib_csit.sh | 390 | ||||
-rw-r--r-- | docker/scripts/lib_vpp.sh | 102 | ||||
-rwxr-xr-x | docker/scripts/update_dockerhub_prod_tags.sh | 402 |
12 files changed, 2282 insertions, 0 deletions
diff --git a/docker/scripts/README.md b/docker/scripts/README.md new file mode 100644 index 000000000..5fb4cee40 --- /dev/null +++ b/docker/scripts/README.md @@ -0,0 +1,271 @@ +# Automated Building Of FD.io CI Executor Docker Images + +This collection of bash scripts and libraries is used to automate the process +of building FD.io docker 'builder' images (aka Nomad executors). The goal is to +create a completely automated CI/CD pipeline. The bash code is designed to be +run in a regular Linux bash shell in order to bootstrap the CI/CD pipeline +as well as in a docker 'builder' image started by a ci-management jenkins job. +The Dockerfile is generated prior to executing 'docker build' based on the os +parameter specified. The project git repos are also copied into the docker +container and retained for optimization of git object retrieval by the Jenkins +jobs running the CI/CD tasks. + +## Image Builder Algorithm + +The general algorithm to automate the generation of the docker images such that +the downloadable requirements for each project are pre-installed or cached in +the executor image is as follows: + +1. Run the docker image builder on a host of the target architecture. Bootstrap + images will be built 'by hand' on target hosts until such a time when the + CI is capable of executing the docker image builder scripts inside docker + images running on Nomad instances via jenkins jobs. + +2. For each OS package manager, there is a bash function which generates the + Dockerfile for the specified OS which uses said package manager. For example, + lib_apt.sh contains 'generate_apt_dockerfile()' which is executed for Ubuntu + and debian OS's. + +3. The Dockerfiles contain the following sections: + - a. Environment setup and copying of project workspace git repos + - b. Installation of OS package pre-requisites + - c. Docker install and project requirements installation (more on this below) + - d. Working environment setup + - e. Build cleanup + +4. 
The Project installation section (c.) above is where all of the packages + for each of the supported project branches are installed or cached to + save time and bandwidth when the CI jobs are run. Each project script + defines the branches supported for each OS and iterates over them from + oldest to newest using the dependency and requirements files or build + targets in each supported project branch. + +5. `docker build` is run on the generated Dockerfile. + +## Bash Libraries (lib_*.sh) + +The bash libraries are designed to be sourced both inside of the docker build +environment (e.g. from a script invoked in a Dockerfile RUN statement) as well +as in a normal Linux shell. These scripts create environment variables and +bash functions for use by the operational scripts. + +- `lib_apt.sh`: Dockerfile generation functions for apt package manager. + +- `lib_common.sh`: Common utility functions and environment variables + +- `lib_csit.sh`: CSIT specific functions and environment variables + +- `lib_vpp.sh`: VPP specific functions and environment variables + + +## Bash Scripts + +There are two types of bash scripts, those intended to be run solely inside +the docker build execution environment, the other run either inside or +outside of it. + +### Docker Build (dbld_*.sh) Scripts + +These scripts run inside the 'docker build' environment are either per-project +scripts that install OS and python packages or scripts that install other docker +image runtime requirements. + +Python packages are not retained because they are typically installed in virtual +environments. However installing the python packages in the Docker Build scripts +populates the pip/http caches. Therefore packages are installed from the cache +files during CI job execution instead of being downloaded from the Internet. + +- `dbld_csit_find_ansible_packages.sh`: Script to find OS packages installed by +CSIT using ansible. 
+ +- `dbld_csit_install_packages.sh`: Install OS and python packages for CSIT +branches + +- `dbld_dump_build_logs.sh`: Find warnings/errors in the build logs and dump +the build_executor_docker_image.sh execution log. + +- `dbld_install_docker.sh`: Install docker ce + +- `dbld_lfit_requirements.sh`: Install requirements for LFIT global-jjb +macros / scripts + +- `dbld_vpp_install_packages.sh`: Install OS and python packages for VPP +branches + +### Executor Docker Image Management Bash Scripts + +These scripts are used to build executor docker images, inspect the results, and +manage the docker image tags in the Docker Hub fdiotools repositories. + +- `build_executor_docker_image.sh`: Build script to create one or more executor +docker images. + +- `update_dockerhub_prod_tags.sh`: Inspect/promote/revert production docker tag +in the Docker Hub fdiotools repositories. + +## Running The Scripts + +### Bootstrapping The Builder Images + +The following commands are useful to build the initial builder images: + +`cd <ci-managment repository directory>` + +`sudo ./docker/scripts/build_executor_docker_image.sh ubuntu-20.04 2>&1 | tee u2004-$(uname -m).log | grep -ve '^+'` + +`sudo ./docker/scripts/build_executor_docker_image.sh -apr sandbox 2>&1 | tee all-sandbox-$(uname -m).log | grep -ve '^+'` + +Note: The initial population of a Docker Hub repository is performed manually by +tagging and pushing the verified sandbox image as 'prod-<arch>' and +'prod-prev-<arch>' as the update_dockerhub_prod_tags.sh script assumes that +both labels exist in the repo. After the intial images have been pushed to the +Docker Hub respository, the update script is used to prevent inadvertently +applying the wrong tags to images in the repository. + +### Building in a Builder Image + +By running the docker image with docker socket mounted in the container, +the docker build environment runs on the host's docker daemon. 
This +avoids the pitfalls encountered with Docker-In-Docker environments: + +`sudo docker run -it -v /var/run/docker.sock:/var/run/docker.sock <docker-image>` + +The environment in the docker shell contains all of the necessary +environment variable definitions so the docker scripts can be run +directly on the cli. Here is an example command that would be used in a CI job +which automates the generation and testing of a new ubuntu-20.04 docker image +and push it to Docker Hub fdiotools/builder-ubuntu2004:test-<arch>: + +`build_executor_docker_image.sh -pr test ubuntu-20.04` + +In the future, a fully automated CI/CD pipeline may be created for production +docker images. + +# Docker Image Script Workflow + +This section describes the current workflow used for managing the CI/CD pipeline +for the Docker Images used by the FD.io CI Jobs. + +Note: all operations that push images or image tags to Docker Hub require an +account with management privileges of the fdiotools repositories. + +## Update Production Docker Images + +Note: Presently only the 'builder' class executor docker images are supported. +The others will be supported in the near future. + +### Build Docker Images and Push to Docker Hub with Sandbox CI Tag + +For each hardware architecture, the build_executor_docker_image.sh script is +used to build all variants of the each executor class: + +1. `git clone https://gerrit.fd.io/r/ci-management && cd ci-management` + +2. `sudo ./docker/scripts/build_executor_docker_image.sh -p -r sandbox -a | tee builder-all-sandbox-$(uname -m).log | grep -ve '^+'`` + +3. `Inspect the build log for Errors and other build anomalies` + +This step will take a very long time so best to do it overnight. There is not +currently an option to automatically run builds in parallel, so if optimizing +build times is important, then run the jobs in separate shells for each OS. +The aarch64 builds are particularly slow, thus may benefit from being run on +separate hosts in parallel. 
+ +Note: the 'prod' role is disallowed in the build script to prevent accidental +deployment of untested docker images to production. + +### Test Docker Images in the Jenkins Sandbox + +In the future, this step will be automated using the role 'test' and associated +tags, but for now testing is a manual operation. + +1. `git clone https://gerrit.fd.io/r/vpp ../vpp && source ../vpp/extras/bash/functions.sh` + +2. Edit jjb/vpp/vpp.yam (or other project yaml file) and replace '-prod-' with '-sandbox-' for all of the docker image + +3. `jjb-sandbox-env` + +4. For each job using one of the docker images: + + a. `jjsb-update <job name(s)>` # bash function created by jjb-sandbox-env to + push job to the sandbox + + b. manually run the job in https://jenkins.fd.io/sandbox + + c. Inspect the console output of each job for unnecessary downloads & errors. + +### Promote Docker Images to Production + +Once all of the docker images have been tested, promote each one to production: + +`sudo ./docker/scripts/update_dockerhub_prod_tags.sh promote <image name>` + +Note: this script currently requires human acceptance via the terminal to ensure +correctness. +It pulls all tags from the Docker Hub repos, does an Inspect action (displaying +the current state of 'prod' & 'prod-prev' tags) and local Promotion action (i.e. +tags local images with 'prod-<arch>' and 'prod-prev-<arch>') with a required +confirmation to continue the promotion by pushing the tags to Docker Hub. If +'no' is specified, it restores the previous local tags so they match the state +of Docker Hub and does a new Inspect action for verification. If 'yes' is +specified, it prints out the command to use to restore the existing state of the +production tags on Docker Hub in case the script is terminated prior to +completion. If necessary, the restore command can be repeated multiple times +until it completes successfully since it promotes the 'prod-prev-<arch>' image, +then the 'prod-<arch>' image in succession. 
+ +## Other Docker Hub Operations + +### Inspect Production Docker Image Tags + +Inspect the current production docker image tags: + +`sudo ./docker/scripts/update_dockerhub_prod_tags.sh inspect fdiotools/<class>-<os name>:prod-$(uname -m)` + +### Revert Production Docker Image To Previous Docker Image + +Inspect the current production docker image tags: + +`sudo ./docker/scripts/update_dockerhub_prod_tags.sh revert fdiotools/<class>-<os name>:prod-$(uname -m)` + +### Restoring Previous Production Image State + +Assuming that the images still exist in the Docker Hub repository, any previous +state of the production image tags can be restored by executing the 'restore +command' as output by the build_executor_docker_image.sh script. This script +writes a copy of all of the terminal output to a log file in +/tmp/build_executor_docker_image.sh.<date>.log thus providing a history of the +restore commands. When the building of executor docker images is peformed by a +CI job, the logging can be removed since the job execution will be captured in +the Jenkins console output log. + +### Docker Image Garbage Collection + +Presently, cleaning up the Docker Hub repositories of old images/tags is a +manual process using the Docker Hub WebUI. In the future, a garbage collection +script will be written to automate the process. + +# DockerHub Repository & Docker Image Tag Nomenclature: + +## DockerHub Repositories + +- fdiotools/builder-debian11 +- fdiotools/builder-ubuntu2004 +- fdiotools/builder-ubuntu2204 +- fdiotools/csit_dut-ubuntu2004 +- fdiotools/csit_shim-ubuntu2004 + +## Docker Image Tags + +- prod-x86_64: Tag used to select the x86_64 production image by the associated +Jenkins-Nomad Label. +- prod-prev-x86_64: Tag of the previous x86_64 production image used to revert +a production image to the previous image used in production. +- prod-aarch64: Tag used to select the aarch64 production image by the +associated Jenkins-Nomad Label. 
+- prod-prev-aarch64 Tag of the previous aarch64 production image used to revert +a production image to the previous image used in production. +- sandbox-x86_64: Tag used to select the x86_64 sandbox image by the associated +Jenkins-Nomad Label. +- sandbox-aarch64: Tag used to select the aarch64 sandbox image by the +associated Jenkins-Nomad Label. diff --git a/docker/scripts/build_executor_docker_image.sh b/docker/scripts/build_executor_docker_image.sh new file mode 100755 index 000000000..7a731956e --- /dev/null +++ b/docker/scripts/build_executor_docker_image.sh @@ -0,0 +1,151 @@ +#! /bin/bash + +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euxo pipefail + +# Log all output to stdout & stderr to a log file +export DOCKER_DATE=${DOCKER_DATE:-"$(date -u +%Y_%m_%d_%H%M%S_UTC)"} +logname="/tmp/$(basename $0).${DOCKER_DATE}.log" +echo -e "\n*** Logging output to $logname ***\n\n" +exec > >(tee -a $logname) 2>&1 + +export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"} +. "$CIMAN_DOCKER_SCRIPTS/lib_vpp.sh" +. "$CIMAN_DOCKER_SCRIPTS/lib_csit.sh" +. "$CIMAN_DOCKER_SCRIPTS/lib_apt.sh" + +all_os_names="" +ci_tag="" +ci_image="" +os_names="" +push_to_docker_hub="" +dump_dockerfile="" + +usage() { + set +x + echo + echo "Usage: $0 [-c <class>] [-p] [-r <role>] -a | <os name> [... 
<os name>]" + echo " -a Run all OS's supported on class $EXECUTOR_CLASS & arch $OS_ARCH" + echo " -c <class> Default is '$EXECUTOR_DEFAULT_CLASS'" + executor_list_classes + echo " -d Generate Dockerfile, dump it to stdout, and exit" + echo " -p Push docker images to Docker Hub" + echo " -r <role> Add a role based tag (e.g. sandbox-x86_64):" + executor_list_roles + executor_list_os_names + exit 1 +} + +must_be_run_as_root_or_docker_group +while getopts ":ac:dhpr:" opt; do + case "$opt" in + a) all_os_names="1" ;; + c) if executor_verify_class "$OPTARG" ; then + EXECUTOR_CLASS="$OPTARG" + EXECUTOR_CLASS_ARCH="$EXECUTOR_CLASS-$OS_ARCH" + else + echo "ERROR: Invalid executor class '$OPTARG'!" + usage + fi ;; + d) dump_dockerfile="1"; set +x ;; + h) usage ;; + p) push_to_docker_hub="1" ;; + r) if executor_verify_role "$OPTARG" ; then + ci_tag="${OPTARG}-$OS_ARCH" + else + echo "ERROR: Invalid executor role: '$OPTARG'!" + usage + fi ;; + \?) + echo "ERROR: Invalid option: -$OPTARG" >&2 + usage ;; + :) + echo "ERROR: Option -$OPTARG requires an argument." >&2 + usage ;; + esac +done +shift $(( $OPTIND-1 )) + +if [ -n "$all_os_names" ] ; then + os_names="${EXECUTOR_CLASS_ARCH_OS_NAMES[$EXECUTOR_CLASS_ARCH]}" +else + os_names="$@" +fi + +# Validate arguments +if [ -z "$os_names" ] ; then + echo "ERROR: Missing executor OS name(s) for class '$EXECUTOR_CLASS'!" + usage +fi +for executor_os_name in $os_names ; do + if ! executor_verify_os_name "$executor_os_name" ; then + set_opts="$-" + set +x # disable trace output + echo "ERROR: Invalid executor OS name for class '$EXECUTOR_CLASS': $executor_os_name!" + executor_list_os_names + echo + exit 1 + fi +done + +# Build the specified docker images +docker_build_setup_ciman +docker_build_setup_vpp +docker_build_setup_csit +for executor_os_name in $os_names ; do + docker_from_image="$(echo $executor_os_name | sed -e 's/-/:/')" + # Remove '-' and '.' 
from executor_os_name in Docker Hub repo name + os_name="${executor_os_name//-}" + repository="fdiotools/${EXECUTOR_CLASS}-${os_name//.}" + executor_docker_image="$repository:$DOCKER_TAG" + + case "$executor_os_name" in + ubuntu*) + generate_apt_dockerfile "$EXECUTOR_CLASS" "$executor_os_name" \ + "$docker_from_image" "$executor_docker_image" ;; + debian*) + generate_apt_dockerfile "$EXECUTOR_CLASS" "$executor_os_name" \ + "$docker_from_image" "$executor_docker_image" ;; + *) + echo "ERROR: Don't know how to generate dockerfile for OS $executor_os_name!" + usage ;; + esac + + if [ -n "$dump_dockerfile" ] ; then + line="===========================================================================" + echo -e "\nDockerfile for '$EXECUTOR_CLASS' executor docker image on OS '$executor_os_name':\n$line" + cat "$DOCKERFILE" + echo -e "$line\n" + else + docker build -t "$executor_docker_image" "$DOCKER_BUILD_DIR" + rm -f "$DOCKERFILE" + if [ -n "$ci_tag" ] ; then + ci_image="$repository:$ci_tag" + echo -e "\nAdding docker tag $ci_image to $executor_docker_image" + docker tag "$executor_docker_image" "$ci_image" + fi + if [ -n "$push_to_docker_hub" ] ; then + echo -e "\nPushing $executor_docker_image to Docker Hub..." + docker login + docker push "$executor_docker_image" + if [ -n "$ci_image" ] ; then + echo -e "\nPushing $ci_image to Docker Hub..." + docker push "$ci_image" + fi + fi + fi +done + +echo -e "\n$(basename $BASH_SOURCE) COMPLETE\nHave a great day! :D" diff --git a/docker/scripts/dbld_csit_find_ansible_packages.py b/docker/scripts/dbld_csit_find_ansible_packages.py new file mode 100755 index 000000000..52ed4c3b0 --- /dev/null +++ b/docker/scripts/dbld_csit_find_ansible_packages.py @@ -0,0 +1,91 @@ +#! /usr/bin/env python3 + +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import pprint +import sys +from typing import List +import yaml +import logging + +logging.basicConfig(format='%(message)s') +log = logging.getLogger(__name__) + +def print_yaml_struct(yaml_struct, depth=0): + indent = " " * depth + for k,v in sorted(yaml_struct.items(), key=lambda x: x[0]): + if isinstance(v, dict): + log.warning(f"{indent}{k}") + print_yaml_struct(v, depth+1) + else: + log.warning(f"{indent}{k} {v}") + +class CsitAnsibleYamlStruct: + def __init__(self, **entries): + self.__dict__.update(entries) + +def packages_in_csit_ansible_yaml_file(yamlfile: str, distro, arch) -> list: + with open(yamlfile) as yf: + csit_ansible_yaml = yaml.safe_load(yf) + if csit_ansible_yaml is None: + return "" + cays = CsitAnsibleYamlStruct(**csit_ansible_yaml) + try: + packages = [pkg for pkg in cays.packages_base if type(pkg) is str] + except AttributeError: + return "" + if arch in [*cays.packages_by_arch]: + packages += [pkg for pkg in cays.packages_by_arch[arch] + if type(pkg) is str] + if distro in [*cays.packages_by_distro]: + packages += [pkg for pkg in cays.packages_by_distro[distro] + if type(pkg) is str] + return packages + +def is_csit_ansible_yaml_file(filename: str): + (root,ext) = os.path.splitext(filename) + if ext == '.yaml' \ + and filename.find('csit/') >= 0 \ + and filename.find('ansible/') > 0 \ + and os.path.isfile(filename): + return True + else: + return False + +def main(args: List[str]) -> None: + if len(args) < 1: + log.warning('Must have at least 1 file name') + return + pkg_list = [] + distro = 'ubuntu' + arch = 
'x86_64' + + for arg in args: + if arg.lower() == '--ubuntu': + distro = 'ubuntu' + elif arg.lower() == '--x86_64': + arch = 'x86_64' + elif arg.lower() == '--aarch64': + arch = 'aarch64' + elif is_csit_ansible_yaml_file(arg): + pkg_list += packages_in_csit_ansible_yaml_file(arg, distro, arch) + else: + log.warning(f'Invalid CSIT Ansible YAML file: {arg}') + pkg_list = list(set(pkg_list)) + pkg_list.sort() + print(" ".join(pkg_list)) + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/docker/scripts/dbld_csit_install_packages.sh b/docker/scripts/dbld_csit_install_packages.sh new file mode 100755 index 000000000..e303ce28e --- /dev/null +++ b/docker/scripts/dbld_csit_install_packages.sh @@ -0,0 +1,60 @@ +#! /bin/bash + +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euxo pipefail + +export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"} +. "$CIMAN_DOCKER_SCRIPTS/lib_csit.sh" +. "$CIMAN_DOCKER_SCRIPTS/lib_vpp.sh" + +must_be_run_in_docker_build + +echo_log + +if ! csit_supported_executor_class "$FDIOTOOLS_EXECUTOR_CLASS" ; then + echo_log "CSIT is not supported on executor class '$FDIOTOOLS_EXECUTOR_CLASS'. Skipping $(basename $0)..." + exit 0 +elif ! csit_supported_os "$OS_NAME" ; then + echo_log "CSIT is not supported on OS '$OS_NAME'. Skipping $(basename $0)..." 
+ exit 0 +else + echo_log "Starting $(basename $0)" +fi + +do_git_config csit +for vpp_branch in ${VPP_BRANCHES[$OS_NAME]} ; do + # Returns checked out branch in csit_branch + csit_checkout_branch_for_vpp "$vpp_branch" + + # Install csit OS packages + csit_install_packages "$csit_branch" + + # Install/cache python packages + csit_install_hugo "$csit_branch" + + # Install/cache python packages + csit_pip_cache "$csit_branch" +done + +# Install csit OS packages +csit_install_packages "master" + +# Install/cache python packages +csit_install_hugo "master" + +# Install/cache python packages +csit_pip_cache "master" + +echo_log -e "Completed $(basename $0)!\n\n==========" diff --git a/docker/scripts/dbld_dump_build_logs.sh b/docker/scripts/dbld_dump_build_logs.sh new file mode 100755 index 000000000..443d538c8 --- /dev/null +++ b/docker/scripts/dbld_dump_build_logs.sh @@ -0,0 +1,55 @@ +#! /bin/bash + +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euxo pipefail + +export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"} +export CIMAN_ROOT=${CIMAN_ROOT:-"$(dirname $(dirname $CIMAN_DOCKER_SCRIPTS))"} +. 
"$CIMAN_DOCKER_SCRIPTS/lib_common.sh" + +must_be_run_in_docker_build + +dump_build_logs() { + local set_opts="$-" + set +e # disable exit on errors + + # Find errors + local found="$(grep -nisH error $DOCKER_BUILD_LOG_DIR/*-bld.log)" + if [ -n "$found" ] ; then + echo -e "\nErrors found in build log files:\n$found\n" + else + echo -e "\nNo errors found in build logs\n" + fi + + # Find warnings + found="$(grep -nisH warning $DOCKER_BUILD_LOG_DIR/*-bld.log)" + if [ -n "$found" ] ; then + echo -e "\nWarnings found in build log files:\n$found\n" + else + echo -e "\nNo warnings found in build logs\n" + fi + + grep -q e <<< "$set_opts" && set -e # re-enable exit on errors +} + +dump_cache_files() { + local cache_files_log="$DOCKER_BUILD_LOG_DIR/cached_files.json" + tree -a --timefmt "+%Y-%m-%d %H:%M:%S" --prune /root + tree -afJ --timefmt "+%Y-%m-%d %H:%M:%S" --prune -o "$cache_files_log" /root +} + +dump_cache_files +dump_build_logs +dump_echo_log diff --git a/docker/scripts/dbld_lfit_requirements.sh b/docker/scripts/dbld_lfit_requirements.sh new file mode 100755 index 000000000..ca54edd89 --- /dev/null +++ b/docker/scripts/dbld_lfit_requirements.sh @@ -0,0 +1,58 @@ +#! /bin/bash + +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euxo pipefail + +export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"} +. 
"$CIMAN_DOCKER_SCRIPTS/lib_common.sh" + +must_be_run_in_docker_build + +# Add packagecloud files +cat <<EOF >/root/.packagecloud +{"url":"https://packagecloud.io","token":"\$token"} +EOF +cat <<EOF >/root/packagecloud_api +machine packagecloud.io +login \$pclogin +password +EOF + +# Copy lf-env.sh for LF Releng scripts +lf_env_sh="/root/lf-env.sh" +cp "$DOCKER_CIMAN_ROOT/global-jjb/jenkins-init-scripts/lf-env.sh" "$lf_env_sh" +chmod 644 "$lf_env_sh" +cat <<EOF >>"$lf_env_sh" + +# When running in CI docker image, use the pre-installed venv +# instead of installing python packages every job run. +# +unset -f lf-activate-venv +lf-activate-venv() { + echo "\${FUNCNAME[0]}(): INFO: Adding $LF_VENV/bin to PATH" + PATH="\$LF_VENV/bin:\$PATH" + return 0 +} +EOF + +# Install lftools & boto3 for log / artifact upload. +python3 -m pip install boto3 +mkdir -p "$LF_VENV" +OLD_PATH="$PATH" +python3 -m venv "$LF_VENV" +PATH="$LF_VENV/bin:$PATH" +python3 -m pip install pip --upgrade +python3 -m pip install --upgrade --upgrade-strategy eager lftools +PATH="$OLD_PATH" diff --git a/docker/scripts/dbld_vpp_install_packages.sh b/docker/scripts/dbld_vpp_install_packages.sh new file mode 100755 index 000000000..5e1e0f8f0 --- /dev/null +++ b/docker/scripts/dbld_vpp_install_packages.sh @@ -0,0 +1,79 @@ +#! /bin/bash + +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -euxo pipefail + +export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"} +. "$CIMAN_DOCKER_SCRIPTS/lib_vpp.sh" +. "$CIMAN_DOCKER_SCRIPTS/lib_apt.sh" + +must_be_run_in_docker_build + +echo_log +if ! vpp_supported_executor_class "$FDIOTOOLS_EXECUTOR_CLASS" ; then + echo_log "VPP is not supported on executor class '$FDIOTOOLS_EXECUTOR_CLASS'. Skipping $(basename $0)..." + exit 0 +else + echo_log "Starting $(basename $0)" +fi + +do_git_config vpp +for branch in ${VPP_BRANCHES[$OS_NAME]} ; do + do_git_branch "$branch" + + # Install OS packages + make_vpp "install-dep" "$branch" + + # Download, build, and cache external deps packages + make_vpp "install-ext-deps" "$branch" + vpp_ext_dir="$DOCKER_VPP_DIR/build/external" + rsync -ac $vpp_ext_dir/downloads/. $DOCKER_DOWNLOADS_DIR || true + if which apt >/dev/null ; then + vpp_ext_deps_pkg=$vpp_ext_dir/$(dpkg -l vpp-ext-deps 2>/dev/null | mawk '/vpp-ext-deps/{print $2"_"$3"_"$4".deb"}') + else + echo "ERROR: Package Manager not installed!" + exit 1 + fi + if [ -f "$vpp_ext_deps_pkg" ] ; then + cp -f $vpp_ext_deps_pkg $DOCKER_DOWNLOADS_DIR + else + echo "ERROR: Missing VPP external deps package: '$vpp_ext_deps_pkg'" + exit 1 + fi + # TODO: remove this after all supported VPP branches have removed + # python3-virtualenv & virtualenv from install-deps which are no longer + # used in vpp repo. These packages can mess up csit virtualenv + # installation which uses pip3 + sudo apt remove -y --purge --autoremove python3-virtualenv virtualenv || true + + # Install/cache python packages + make_vpp_test "test-dep" "$branch" + if [ "$OS_ID" = "ubuntu" ] ; then + make_vpp test-wipe "$branch" + fi + # Clean up virtual environment + git checkout -q -- . 
+ git clean -qfdx + + # Dump packages installed + case "$DOCKERFILE_FROM" in + *ubuntu*) + dump_apt_package_list "$branch" ;; + *debian*) + dump_apt_package_list "$branch" ;; + esac +done + +echo_log -e "Completed $(basename $0)!\n\n==========" diff --git a/docker/scripts/lib_apt.sh b/docker/scripts/lib_apt.sh new file mode 100644 index 000000000..6cf37ae76 --- /dev/null +++ b/docker/scripts/lib_apt.sh @@ -0,0 +1,352 @@ +# lib_apt.sh - Docker build script apt library. +# For import only. + +# Copyright (c) 2023 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Don't import more than once. +if [ -n "$(alias lib_apt_imported 2> /dev/null)" ] ; then + return 0 +fi +alias lib_apt_imported=true + +export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"} +. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh" +. 
"$CIMAN_DOCKER_SCRIPTS/lib_csit.sh" + +dump_apt_package_list() { + branchname="$(echo $branch | sed -e 's,/,_,')" + dpkg -l > \ + "$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME-$branchname-apt-packages.log" +} + +apt_install_packages() { + apt-get install -y --allow-downgrades --allow-remove-essential \ + --allow-change-held-packages $@ +} + +generate_apt_dockerfile_common() { + local executor_class="$1" + local executor_image="$2" + local dpkg_arch="$(dpkg --print-architecture)" + + cat <<EOF >>"$DOCKERFILE" + +# Create download dir to cache external tarballs +WORKDIR $DOCKER_DOWNLOADS_DIR + +# Copy-in temporary build tree containing +# ci-management, vpp, & csit git repos +WORKDIR $DOCKER_BUILD_DIR +COPY . . + +# Build Environment Variables +ENV DEBIAN_FRONTEND="noninteractive" +ENV FDIOTOOLS_IMAGE="$executor_image" +ENV FDIOTOOLS_EXECUTOR_CLASS="$executor_class" +ENV CIMAN_ROOT="$DOCKER_CIMAN_ROOT" +ENV PATH="\$PATH:$DOCKER_CIMAN_ROOT/docker/scripts" + +# Configure locales +RUN apt-get update -qq \\ + && apt-get install -y \\ + apt-utils \\ + locales \\ + && sed -i 's/# \(en_US\.UTF-8 .*\)/\1/' /etc/locale.gen \\ + && locale-gen en_US.UTF-8 \\ + && dpkg-reconfigure --frontend=noninteractive locales \\ + && update-locale LANG=en_US.UTF-8 \\ + && TZ=Etc/UTC && ln -snf /usr/share/zoneinfo/\$TZ /etc/localtime && echo \$TZ > /etc/timezone \\ + && rm -r /var/lib/apt/lists/* +ENV LANG="en_US.UTF-8" LANGUAGE="en_US" LC_ALL="en_US.UTF-8" + +# Install baseline packages (minimum build & utils). 
+# +# ci-management global-jjb requirements: +# facter +# python3-pip +# python3-venv +# for lftools: +# xmlstarlet +# libxml2-dev +# libxslt-dev +# from packer/provision/baseline.sh: +# unzip +# xz-utils +# git +# git-review +# libxml2-dev +# libxml-xpath-perl +# libxslt-dev +# make +# wget +# jq +# +# Python build from source requirements: +# build-essential +# +# TODO: Fix broken project requirement install targets +# graphviz for doxygen (HICN) +# doxygen for doxygen (HICN) +# libffi-dev for python cffi install (Ubuntu20.04/VPP/aarch64) +# liblapack-dev for python numpy/scipy (CSIT/aarch64) +# libopenblas-dev for python numpy/scipy (CSIT/aarch64) +# libpcap-dev for python pypcap install (CSIT) +# sshpass for CSIT jobs +# +RUN apt-get update -qq \\ + && apt-get install -y \\ + apt-transport-https \\ + curl \\ + ca-certificates \\ + default-jdk \\ + default-jre \\ + dnsutils \\ + doxygen \\ + emacs \\ + facter \\ + gawk \\ + gdb \\ + gfortran \\ + git \\ + git-review \\ + gnupg-agent \\ + graphviz \\ + iproute2 \\ + iputils-clockdiff \\ + iputils-ping \\ + iputils-tracepath \\ + jq \\ + libffi-dev \\ + liblapack-dev \\ + libopenblas-dev \\ + libpcap-dev \\ + libxml-xpath-perl \\ + make \\ + python3-pip \\ + python3-venv \\ + rsync \\ + ruby-dev \\ + software-properties-common \\ + sshpass \\ + sysstat \\ + sudo \\ + traceroute \\ + tree \\ + vim \\ + wget \\ + xmlstarlet \\ + xz-utils \\ + && curl -L https://packagecloud.io/fdio/master/gpgkey | apt-key add - \\ + && curl -s https://packagecloud.io/install/repositories/fdio/master/script.deb.sh | bash \\ + && rm -r /var/lib/apt/lists/* + +# Install terraform for CSIT +# +RUN wget https://releases.hashicorp.com/terraform/1.7.3/terraform_1.7.3_linux_$dpkg_arch.zip \\ + && unzip terraform_1.7.3_linux_$dpkg_arch.zip \\ + && mv terraform /usr/bin \\ + && rm -f terraform_1.7.3_linux_$dpkg_arch.zip + +# Install packages for all project branches +# +RUN apt-get update -qq \\ + && dbld_vpp_install_packages.sh \\ + && 
dbld_csit_install_packages.sh \\ + && rm -r /var/lib/apt/lists/* +EOF +} + +generate_apt_dockerfile_clean() { + cat <<EOF >>"$DOCKERFILE" + +# Clean up copy-in build tree +RUN dbld_dump_build_logs.sh \\ + && rm -rf "/tmp/*" "$DOCKER_BUILD_FILES_DIR" "/root/.ccache" +EOF +} + +# Generate 'builder' class apt dockerfile +builder_generate_apt_dockerfile() { + local executor_class="$1" + local executor_os_name="$2" + local executor_image="$3" + local vpp_install_skip_sysctl_envvar=""; + + generate_apt_dockerfile_common $executor_class $executor_image + csit_builder_generate_docker_build_files + cat <<EOF >>"$DOCKERFILE" + +# Install LF-IT requirements +ENV LF_VENV="/root/lf-venv" +RUN apt-get update -qq \\ + && dbld_lfit_requirements.sh \\ + && rm -r /var/lib/apt/lists/* + +# Install packagecloud requirements +RUN gem install rake package_cloud \\ + && curl -s https://packagecloud.io/install/repositories/fdio/master/script.deb.sh | bash + +# Install CSIT ssh requirements +# TODO: Verify why badkey is required & figure out how to avoid it. 
+COPY files/badkey /root/.ssh/id_rsa +COPY files/sshconfig /root/.ssh/config + +# CI Runtime Environment +WORKDIR / +$vpp_install_skip_sysctl_envvar +ENV VPP_ZOMBIE_NOCHECK="1" +ENV CCACHE_DIR="/scratch/ccache" +ENV CCACHE_MAXSIZE="10G" +EOF + generate_apt_dockerfile_clean +} + +# Generate 'csit_dut' class apt dockerfile +csit_dut_generate_apt_dockerfile() { + local executor_class="$1" + local executor_os_name="$2" + local executor_image="$3" + + csit_dut_generate_docker_build_files + generate_apt_dockerfile_common "$executor_class" "$executor_image" + cat <<EOF >>"$DOCKERFILE" + +# Install csit_dut specific packages +RUN apt-get update -qq \\ + && apt-get install -y \\ + net-tools \\ + openssh-server \\ + pciutils \\ + rsyslog \\ + supervisor \\ + && rm -r /var/lib/apt/lists/* + +# Fix permissions +RUN chown root:syslog /var/log \\ + && chmod 755 /etc/default + +# Create directory structure +RUN mkdir -p /var/run/sshd + +# SSH settings +RUN echo 'root:Csit1234' | chpasswd \\ + && sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config \\ + && sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd + +EXPOSE 2222 + +COPY files/supervisord.conf /etc/supervisor/supervisord.conf + +CMD ["sh", "-c", "rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api; /usr/bin/supervisord -c /etc/supervisor/supervisord.conf; /usr/sbin/sshd -D -p 2222"] +EOF + generate_apt_dockerfile_clean +} + +# Generate 'csit_shim' class apt dockerfile +csit_shim_generate_apt_dockerfile() { + local executor_class="$1" + local executor_os_name="$2" + local executor_image="$3" + + csit_shim_generate_docker_build_files + cat <<EOF >>"$DOCKERFILE" + +# Copy-in temporary build tree containing +# ci-management, vpp, & csit git repos +WORKDIR $DOCKER_BUILD_DIR +COPY . . 
+ +# Build Environment Variables +ENV DEBIAN_FRONTEND="noninteractive" +ENV FDIOTOOLS_IMAGE="$executor_image" +ENV FDIOTOOLS_EXECUTOR_CLASS="$executor_class" +ENV CIMAN_ROOT="$DOCKER_CIMAN_ROOT" +ENV PATH="\$PATH:$DOCKER_CIMAN_ROOT/docker/scripts" + +# Configure locales & timezone +RUN apt-get update -qq \\ + && apt-get install -y \\ + apt-utils \\ + locales \\ + && sed -i 's/# \(en_US\.UTF-8 .*\)/\1/' /etc/locale.gen \\ + && locale-gen en_US.UTF-8 \\ + && dpkg-reconfigure --frontend=noninteractive locales \\ + && update-locale LANG=en_US.UTF-8 \\ + && TZ=Etc/UTC && ln -snf /usr/share/zoneinfo/\$TZ /etc/localtime && echo \$TZ > /etc/timezone \\ + && rm -r /var/lib/apt/lists/* +ENV LANG=en_US.UTF-8 LANGUAGE=en_US LC_ALL=en_US.UTF-8 + +COPY files/wrapdocker /usr/local/bin/wrapdocker +RUN chmod +x /usr/local/bin/wrapdocker + +# Install packages and Docker +RUN apt-get update -qq \\ + && apt-get install -y \\ + bash \\ + curl \\ + iproute2 \\ + locales \\ + ssh \\ + sudo \\ + tzdata \\ + uuid-runtime \\ + && curl -fsSL https://get.docker.com | sh \\ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir /var/run/sshd +RUN echo 'root:Csit1234' | chpasswd +RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config + +# SSH login fix. Otherwise user is kicked off after login +RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd + +# Need volume for sidecar docker launches +VOLUME /var/lib/docker + +# SSH to listen on port 6022 in shim +RUN echo 'Port 6022' >>/etc/ssh/sshd_config +RUN echo 'Port 6023' >>/etc/ssh/sshd_config + +# TODO: Verify why badkeypub is required & figure out how to avoid it. 
+COPY files/badkeypub /root/.ssh/authorized_keys +COPY files/sshconfig /root/.ssh/config + +# Clean up copy-in build tree +RUN rm -rf /tmp/* $DOCKER_BUILD_FILES_DIR + +# Start sshd by default +EXPOSE 22 +CMD ["/usr/sbin/sshd", "-D"] +EOF +} + +generate_apt_dockerfile() { + local executor_class="$1" + local executor_os_name="$2" + local from_image="$3" + local executor_image="$4" + + cat <<EOF >"$DOCKERIGNOREFILE" +**/__pycache__ +*.pyc +EOF + cat <<EOF >"$DOCKERFILE" +FROM $from_image AS ${executor_class}-executor-image +LABEL Description="FD.io CI '$executor_class' executor docker image for $executor_os_name/$OS_ARCH" +LABEL Vendor="fd.io" +LABEL Version="$DOCKER_TAG" +EOF + ${executor_class}_generate_apt_dockerfile "$executor_class" \ + "$executor_os_name" "$executor_image" +} diff --git a/docker/scripts/lib_common.sh b/docker/scripts/lib_common.sh new file mode 100644 index 000000000..150441bb7 --- /dev/null +++ b/docker/scripts/lib_common.sh @@ -0,0 +1,271 @@ +# lib_common.sh - Docker build script common library. +# For import only. + +# Copyright (c) 2022 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Don't import more than once. 
+if [ -n "$(alias lib_common_imported 2> /dev/null)" ] ; then + return 0 +fi +alias lib_common_imported="true" + +export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"} +export CIMAN_ROOT="$(dirname $(dirname $CIMAN_DOCKER_SCRIPTS))" + +must_be_run_as_root_or_docker_group() { + set_opts="$-" + set +e # disable exit on errors + + # test if the user is root or id is in the 'docker' group + if [ "${EUID:-$(id -u)}" -eq "0" ] || grep -q "docker" <<< "$(id)" ; then + grep -q e <<< "$set_opts" && set -e # re-enable exit on errors + else + set +x + echo -e "\nERROR: Must be run as root or '$USER' must be in the group 'docker'!" + if [ -n "$(declare -f usage)" ] ; then + usage + fi + grep -q e <<< "$set_opts" && set -e # re-enable exit on errors + exit 1 + fi +} + +must_be_run_in_docker_build() { + if [ -z "$(alias running_in_docker_build 2> /dev/null)" ] ; then + set +x + echo -e "\nERROR: $(basename $0) must be run in 'docker build'\n" + exit 1 + fi +} + +echo_log() { + if [ "$#" -eq "0" ] ; then + if [ -z "$(alias running_in_docker_build 2> /dev/null)" ] ; then + echo + else + echo | tee -a "$FDIOTOOLS_IMAGE_BUILD_LOG" 1>&2 + fi + return 0 + fi + + local echo_opts="" + case "$1" in + -[en]) + echo_opts="$1 " + shift + ;; + esac + if [ -z "$(alias running_in_docker_build 2> /dev/null)" ] ; then + echo ${echo_opts}"####> $@" + else + echo ${echo_opts}"####> $(date -u): $@" | tee -a $FDIOTOOLS_IMAGE_BUILD_LOG 1>&2 + fi +} + +dump_echo_log() { + [ -z "$(alias running_in_docker_build 2> /dev/null)" ] && return 0 + echo -e "\n\n####> $(date -u) Build log ($FDIOTOOLS_IMAGE_BUILD_LOG):" + cat "$FDIOTOOLS_IMAGE_BUILD_LOG" +} + +do_git_config() { + if [ "$#" -ne "1" ] ; then + echo_log "ERROR: do_git_config(): Invalid number of arguments ($#)!" 
+ return 1 + fi + cd "$DOCKER_BUILD_DIR/$1" + + # Add user to git config so git commands don't fail + local git_config_list="$(git config -l)" + if [ -z "$(grep 'user\.email' <<<$git_config_list)" ] ; then + git config user.email "ci-management-dev@lists.fd.io" + fi + if [ -z "$(grep 'user\.name' <<<$git_config_list)" ] ; then + git config user.name "ci-management" + fi +} + +do_git_branch() { + local branch="$1" + + echo_log " Checking out '$branch' in $(pwd)" + if [ -n "$(git branch | grep $branch)" ] ; then + git checkout "$branch" + else + git checkout -b "$branch" --track "origin/$branch" + fi + git pull -q + echo_log -e " 'git log --oneline | head':\n----- %< -----\n$(git log --oneline | head)\n----- %< -----" +} + +clean_git_repo() { + pushd "$1" >& /dev/null + git clean -qfdx + git checkout -q master + git pull -q + popd >& /dev/null +} + +remove_pyc_files_and_pycache_dirs() { + find . -type f -name '*.pyc' -exec rm -f {} \; 2>/dev/null || true + find . -type d -name __pycache__ -exec echo -n "Removing " \; \ + -print -exec rm -rf {} \; 2>/dev/null || true +} + +# OS type variables +export OS_ID="$(grep '^ID=' /etc/os-release | cut -d= -f2 | sed -e 's/\"//g')" +export OS_VERSION_ID="$(grep '^VERSION_ID=' /etc/os-release | cut -d= -f2 | sed -e 's/\"//g')" +export OS_CODENAME="$(grep 'VERSION_CODENAME=' /etc/os-release | cut -d= -f2)" +export OS_NAME="${OS_ID}-${OS_VERSION_ID}" +export OS_ARCH="$(uname -m)" +case "$OS_ARCH" in + x86_64) + export DEB_ARCH="amd64" + ;; + aarch64) + export DEB_ARCH="arm64" + ;; + *) + echo "ERROR: Unsupported OS architecture '$OS_ARCH'!" + return 1 + ;; +esac + +# Executor attribute variables +# Note: the role 'prod' is only applied and uploaded using the script +# update_dockerhub_prod_tags.sh to avoid accidentally pushing +# an untested docker image into production. 
+export EXECUTOR_ROLES="sandbox test" +export EXECUTOR_DEFAULT_CLASS="builder" +export EXECUTOR_CLASS=${EXECUTOR_CLASS:-"$EXECUTOR_DEFAULT_CLASS"} +export EXECUTOR_CLASS_ARCH="$EXECUTOR_DEFAULT_CLASS-$OS_ARCH" +export EXECUTOR_CLASSES="$EXECUTOR_DEFAULT_CLASS csit_dut csit_shim" +export EXECUTOR_ARCHS="aarch64 x86_64" +declare -A EXECUTOR_CLASS_ARCH_OS_NAMES +EXECUTOR_CLASS_ARCH_OS_NAMES["builder-aarch64"]="ubuntu-20.04 ubuntu-22.04" +EXECUTOR_CLASS_ARCH_OS_NAMES["builder-x86_64"]="debian-11 ubuntu-20.04 ubuntu-22.04" +EXECUTOR_CLASS_ARCH_OS_NAMES["csit_dut-aarch64"]="ubuntu-22.04" +EXECUTOR_CLASS_ARCH_OS_NAMES["csit_dut-x86_64"]="ubuntu-22.04" +EXECUTOR_CLASS_ARCH_OS_NAMES["csit_shim-aarch64"]="ubuntu-22.04" +EXECUTOR_CLASS_ARCH_OS_NAMES["csit_shim-x86_64"]="ubuntu-22.04" +export EXECUTOR_CLASS_ARCH_OS_NAMES + +executor_list_roles() { + local set_opts="$-" + set +u # disable undefined variable check + local indent=${1:-" "} + grep -q u <<< "$set_opts" && set -u # re-enable undefined variable check + + for role in $EXECUTOR_ROLES ; do + echo -e "${indent}$role" + done +} + +executor_verify_role() { + for role in $EXECUTOR_ROLES ; do + if [ "$role" = "$1" ] ; then + return 0 + fi + done + return 1 +} + +executor_list_classes() { + local set_opts="$-" + set +u # disable undefined variable check + local indent=${1:-" "} + grep -q u <<< "$set_opts" && set -u # re-enable undefined variable check + + for class in $EXECUTOR_CLASSES ; do + echo -e "${indent}$class" + done +} + +executor_verify_class() { + for class in $EXECUTOR_CLASSES ; do + if [ "$class" = "$1" ] ; then + return 0 + fi + done + return 1 +} + +executor_list_os_names() { + local set_opts="$-" + set +u # disable undefined variable check + local indent=${1:-" "} + grep -q u <<< "$set_opts" && set -u # re-enable undefined variable check + + echo + echo "Valid executor OS names for class '$EXECUTOR_CLASS':" + for os in ${EXECUTOR_CLASS_ARCH_OS_NAMES[$EXECUTOR_CLASS_ARCH]} ; do + echo "${indent}$os" + done | 
sort +} + +executor_verify_os_name() { + for os in ${EXECUTOR_CLASS_ARCH_OS_NAMES[$EXECUTOR_CLASS_ARCH]} ; do + if [ "$os" = "$1" ] ; then + return 0 + fi + done + return 1 +} + +# Docker variables +export DOCKER_DATE=${DOCKER_DATE:-"$(date -u +%Y_%m_%d_%H%M%S_UTC)"} +export DOCKER_BUILD_DIR="/scratch/docker-build" +export DOCKER_CIMAN_ROOT="$DOCKER_BUILD_DIR/ci-management" +export DOCKERFILE="$DOCKER_BUILD_DIR/Dockerfile" +export DOCKERIGNOREFILE="$DOCKER_BUILD_DIR/.dockerignore" +export DOCKERFILE_FROM=${DOCKERFILE_FROM:="${OS_ID}:${OS_VERSION_ID}"} +export DOCKER_TAG="$DOCKER_DATE-$OS_ARCH" +export DOCKER_VPP_DIR="$DOCKER_BUILD_DIR/vpp" +export DOCKER_CSIT_DIR="$DOCKER_BUILD_DIR/csit" +export DOCKER_DOWNLOADS_DIR="/root/Downloads" +export DOCKER_BUILD_FILES_DIR="$DOCKER_BUILD_DIR/files" + +docker_build_setup_ciman() { + if [ "$(dirname $CIMAN_ROOT)" != "$DOCKER_BUILD_DIR" ] ; then + echo_log "Updating $CIMAN_ROOT git submodules..." + pushd "$CIMAN_ROOT" + git submodule update --init --recursive + popd + if [ -d "$DOCKER_BUILD_DIR" ] ; then + echo_log "Removing existing DOCKER_BUILD_DIR: $DOCKER_BUILD_DIR..." + local sudo_cmd="" + if [ "$(whoami)" != "$(stat -c %U $DOCKER_BUILD_DIR)" ] ; then + sudo_cmd="sudo" + fi + ${sudo_cmd} rm -rf "$DOCKER_BUILD_DIR" + fi + echo_log "Syncing $CIMAN_ROOT into $DOCKER_CIMAN_ROOT..." + mkdir -p "$DOCKER_BUILD_DIR" + rsync -a "$CIMAN_ROOT/." 
"$DOCKER_CIMAN_ROOT" + else + mkdir -p "$DOCKER_BUILD_DIR" + fi +} + +# Variables used in docker build environment +set_opts="$-" +set +u # disable undefined variable check +if [ -n "$FDIOTOOLS_IMAGE" ] ; then + alias running_in_docker_build=true + export DOCKER_BUILD_LOG_DIR="$DOCKER_BUILD_DIR/logs" + export FDIOTOOLS_IMAGENAME="$(echo $FDIOTOOLS_IMAGE | sed -e 's/:/-/' -e 's,/,_,g')" + export FDIOTOOLS_IMAGE_BUILD_LOG="$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME.log" + mkdir -p $DOCKER_BUILD_LOG_DIR +fi +grep -q u <<< "$set_opts" && set -u # re-enable undefined variable check diff --git a/docker/scripts/lib_csit.sh b/docker/scripts/lib_csit.sh new file mode 100644 index 000000000..2e877f803 --- /dev/null +++ b/docker/scripts/lib_csit.sh @@ -0,0 +1,390 @@ +# lib_csit.sh - Docker build script CSIT library. +# For import only. + +# Copyright (c) 2023 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Don't import more than once. +if [ -n "$(alias lib_csit_imported 2> /dev/null)" ] ; then + return 0 +fi +alias lib_csit_imported=true + +export CIMAN_DOCKER_SCRIPTS="${CIMAN_DOCKER_SCRIPTS:-$(dirname ${BASH_SOURCE[0]})}" +. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh" +. "$CIMAN_DOCKER_SCRIPTS/lib_apt.sh" + +CSIT_SUPPORTED_EXECUTOR_CLASSES="builder csit_dut" +csit_supported_executor_class() { + if ! 
grep -q "${1:-}" <<< "$CSIT_SUPPORTED_EXECUTOR_CLASSES" ; then + return 1 + fi + return 0 +} + +csit_supported_os() { + case "$1" in + ubuntu-22.04) return 0 ;; + *) ;; + esac + return 1 +} + +csit_checkout_branch_for_vpp() { + local vpp_branch="$1" + local csit_dir="$DOCKER_CSIT_DIR" + local csit_bash_function_dir="$csit_dir/resources/libraries/bash/function" + + # import checkout_csit_for_vpp() if not defined + set +e && [ -z "$(declare -f checkout_csit_for_vpp)" ] \ + && source "$csit_bash_function_dir/branch.sh" + CSIT_DIR="$csit_dir" checkout_csit_for_vpp "$vpp_branch" + + # shellcheck disable=SC2034,SC2063 + csit_branch="$(git branch | grep -e '^*' | mawk '{print $2}')" +} + +csit_install_packages() { + local branch="$1" + local branchname + branchname="$(echo $branch | sed -e 's,/,_,')" + local csit_dir="$DOCKER_CSIT_DIR" + local csit_ansible_dir="$csit_dir/fdio.infra.ansible" + if [ ! -d "$csit_ansible_dir" ] ; then + csit_ansible_dir="$csit_dir/resources/tools/testbed-setup/ansible" + fi + local bld_log="$DOCKER_BUILD_LOG_DIR/$FDIOTOOLS_IMAGENAME" + bld_log="${bld_log}-$branchname-csit_install_packages-bld.log" + + git clean -qfdx + python3 -m pip install pyyaml + + local exclude_roles="-e calibration -e kernel -e mellanox -e nomad -e consul -e aws -e vpp" + [ "$OS_ARCH" = "aarch64" ] && exclude_roles="$exclude_roles -e iperf" + + # Not in double quotes to let bash remove newline characters + local yaml_files + yaml_files="$(grep -r packages_by $csit_ansible_dir | cut -d: -f1 | sort -u | grep -v $exclude_roles)" + packages="$(dbld_csit_find_ansible_packages.py --$OS_ID --$OS_ARCH $yaml_files)" + packages="${packages/jammy /}" + packages="${packages/focal /}" + packages="${packages/libmbedcrypto1/libmbedcrypto3}" + packages="${packages/libmbedtls10/libmbedtls12}" + packages="$(echo ${packages//python\-/python3\-} | tr ' ' '\n' | sort -u | xargs)" + + if [ -n "$packages" ] ; then + case "$OS_NAME" in + ubuntu*) + apt_install_packages "$packages" 2>&1 | tee 
-a "$bld_log" + ;; + debian*) + apt_install_packages "$packages" 2>&1 | tee -a "$bld_log" + ;; + *) + echo "Unsupported OS ($OS_ID): CSIT packages NOT INSTALLED!" + ;; + esac + fi +} + +csit_install_hugo() { + local branch="$1" + CSIT_DIR="$DOCKER_CSIT_DIR" + + if [ -f "$CSIT_DIR/VPP_REPO_URL" ] \ + && [ -f "$CSIT_DIR/requirements.txt" ]; then + + local branchname + # use bash variable substitution to replace '/' with '_' to convert from + # vpp to csit branch name nomenclature + branchname="${branch////_}" + local csit_bash_function_dir="$CSIT_DIR/resources/libraries/bash/function" + local bld_log="$DOCKER_BUILD_LOG_DIR" + bld_log="${bld_log}/$FDIOTOOLS_IMAGENAME-$branchname-csit_install_hugo-bld.log" + + description="Install CSIT hugo packages from $branch branch" + echo_log " Starting $description..." + git clean -qfdx + + source "$csit_bash_function_dir"/hugo.sh + go_install 2>&1 | tee -a "$bld_log" + hugo_install 2>&1 | tee -a "$bld_log" + + else + echo_log "ERROR: Missing or invalid CSIT_DIR: '$CSIT_DIR'!" + return 1 + fi +} + +csit_pip_cache() { + local branch="$1" + # ensure PS1 is defined (used by virtualenv activate script) + PS1=${PS1:-"#"} + CSIT_DIR="$DOCKER_CSIT_DIR" + + if [ -f "$CSIT_DIR/VPP_REPO_URL" ] \ + && [ -f "$CSIT_DIR/requirements.txt" ]; then + + local branchname + # use bash variable substitution to replace '/' with '_' to convert from + # vpp to csit branch name nomenclature + branchname="${branch////_}" + local csit_bash_function_dir="$CSIT_DIR/resources/libraries/bash/function" + local bld_log="$DOCKER_BUILD_LOG_DIR" + bld_log="${bld_log}/$FDIOTOOLS_IMAGENAME-$branchname-csit_pip_cache-bld.log" + export PYTHONPATH=$CSIT_DIR + + description="Install CSIT python packages from $branch branch" + echo_log " Starting $description..." 
2>&1 | tee -a "$bld_log" + git clean -qfdx 2>&1 | tee -a "$bld_log" + rm -rf "$PYTHONPATH/env" + + # Activate / install CSIT python virtualenv ($CSIT_DIR/requirements.txt) + local common_sh="$csit_bash_function_dir/common.sh" + # shellcheck disable=1090 + source "$common_sh" + activate_virtualenv "${CSIT_DIR}" "${CSIT_DIR}/requirements.txt" 2>&1 | tee -a "$bld_log" + + # Install tox python requirements + activate_virtualenv "${CSIT_DIR}" "${CSIT_DIR}/tox-requirements.txt" 2>&1 |\ + tee -a "$bld_log" + + # Clean up virtualenv directories + git checkout -q -- . + git clean -qfdx + echo_log " Completed $description!" 2>&1 | tee -a "$bld_log" + else + echo_log "ERROR: Missing or invalid CSIT_DIR: '$CSIT_DIR'!" + return 1 + fi +} + +docker_build_setup_csit() { + if csit_supported_executor_class "$EXECUTOR_CLASS" ; then + if [ ! -d "$DOCKER_CSIT_DIR" ] ; then + echo_log "Cloning CSIT into $DOCKER_CSIT_DIR..." + git clone -q https://gerrit.fd.io/r/csit "$DOCKER_CSIT_DIR" + fi + clean_git_repo "$DOCKER_CSIT_DIR" + fi +} + +csit_dut_generate_docker_build_files() { + local build_files_dir="$DOCKER_BUILD_FILES_DIR" + + mkdir -p "$build_files_dir" + cat <<EOF >"$build_files_dir/supervisord.conf" +[unix_http_server] +file = /tmp/supervisor.sock +chmod = 0777 + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[supervisorctl] +serverurl = unix:///tmp/supervisor.sock + +[supervisord] +pidfile = /tmp/supervisord.pid +identifier = supervisor +directory = /tmp +logfile = /tmp/supervisord.log +loglevel = debug +nodaemon = false + +[program:vpp] +command = /usr/bin/vpp -c /etc/vpp/startup.conf +autostart = false +autorestart = true +redirect_stderr = true +priority = 1 +EOF +} + +csit_builder_generate_docker_build_files() { + local build_files_dir="$DOCKER_BUILD_FILES_DIR" + local dashes="-----" + local dbeg="${dashes}BEGIN" + local dend="${dashes}END" + local pvt="PRIVATE" + local kd="KEY$dashes" + + # TODO: Verify why 
badkey is required & figure out how to avoid it. + mkdir -p "$build_files_dir" + cat <<EOF >"$build_files_dir/badkey" +$dbeg RSA $pvt $kd +MIIEowIBAAKCAQEAslDXf4kZOQI8OGQQdIF8o83nBM0B4fzHLYLxxiY2rKiQ5MGM +mQa7p1KKzmd5/NlvFRnXefnjSDQljjPxEY7mh457rX2nXvqHD4GUXZPpBIE73rQ1 +TViIAXdDzFXJ6ee4yX8ewmVakzYBnlUPDidkWyRnjm/xCgKUCO+CD5AH3ND0onks +OYAtHqhDh29/QMIKdMnK87FBxfzhInHwpqPur76zBnpw3u36ylKEymDFrO5dwzsh +QvDWjsYRg9ydTXubtwP6+MOpjdR1SNKxcCHKJrPrdAeJW9jg1imYmYpEHZ/P3qsL +Jm0hGWbFjdxZLIYIz0vN/nTalcAeqT2OWKrXuwIDAQABAoIBAQCcj1g2FOR9ZlYD +WPANqucJVy4/y9OcXHlwnyiyRjj47WOSRdGxRfUa2uEeikHT3ACo8TB8WwfQDGDw +8u/075e+az5xvAJo5OQSnD3sz4Hmv6UWSvkFuPZo+xMe5C/M2/QljiQuoBifaeqP +3rTCQ5ncYCFAMU7b8BmTot551Ybhu2jCbDMHU7nFHEFOvYinkwfVcaqkrVDUuH+D +c3NkAEH9Jz2MEYA2Va4uqFpGt5lfGiED2kMenwPa8eS5LS5HJsxkfMHGlaHXHFUb +D+dG/qJtSslVxdzVPgEGvzswo6TgtY1nZTQcB8U63rktFg38B7QGtOkvswAYzxyk +HdMIiU3RAoGBAOdIEQRcAThj9eiIFywtBgLBOSg4SoOnvELLr6lgUg2+ICmx06LQ +yaai1QRdOWw1VwZ6apNCD00kaUhBu+ou93yLSDnR2uYftkylhcnVuhDyIeNyb81V +hV2z0WuNv3aKBFlBxaq391S7WW1XxhpAAagm8fZZur73wV390EVd/hZJAoGBAMVf +negT2bg5PVKWvsiEU6eZ00W97tlEDLclkiZawXNnM2/c+2x1Tks6Yf1E/j2FFTB4 +r0fesbwN346hCejtq5Bup5YEdFA3KtwT5UyeQQLFGYlCtRmBtOd10wkRS93D0tpX +iIqkf43Gpx6iFdvBWY5A7N+ZmojCy9zpL5TJ4G3jAoGADOGEoRuGrd9TWMoLkFhJ +l2mvhz/rVn3HDGlPtT06FK3cGLZgtRavxGoZNw8CHbayzBeRS/ZH5+H5Qx72GkrX +WcZgFWhMqrhlbMtjMiSHIl556LL86xCyRs+3ACh6211AdMAnBCUOz1dH2cEjtV6P +ORBCNZg1wGEIEfYK3XIorpECgYBubXfQj8KhUs0fdx3Y3Ehdni/ZdlG7F1qx4YBq +mx5e7d+Wd6Hn5Z3fcxO9+yrvypS3YN5YrJzuZSiuCSWdP9RcY7y5r1ZQRv1g0nTZ +MDWZUiNea4cddTd8xKxFB3tV4SkIZi8LustuzDVWa0Mlh4EOmP6uf6c5WxtqRsEL +UwORFwKBgEjZsfmZGBurjOtSrcsteulOB0D2nOqPVRWXmbSNJT/l73DkEllvVyA/ +wdW39nyFrA2Qw1K2F+l8DkzMd/WEjmioSWCsvTkXlvrqPfByKg01zCbYy/mhRW7d +7sQrPOIl8ygsc3JrxmvzibdWmng1MehvpAM1ogWeTUa1lsDTNJ/6 +$dend RSA $pvt $kd +EOF + chmod 600 "$build_files_dir/badkey" + cat <<EOF >"$build_files_dir/sshconfig" +Host 172.17.0.* + StrictHostKeyChecking no + UserKnownHostsFile=/dev/null +EOF +} + 
# Generate the docker build files needed by the 'csit_shim' image:
# badkeypub (authorized key), sshconfig, and the wrapdocker helper script
# that sets up cgroups for Docker-in-Docker inside the shim container.
csit_shim_generate_docker_build_files() {
    local build_files_dir="$DOCKER_BUILD_FILES_DIR"
    # TODO: Verify why badkey is required & figure out how to avoid it.
    local badkey='AAAAB3NzaC1yc2EAAAADAQABAAABAQCyUNd/iRk5Ajw4ZBB0gXyjzecEzQHh/MctgvHGJjasqJDkwYyZBrunUorOZ3n82W8VGdd5+eNINCWOM/ERjuaHjnutfade+ocPgZRdk+kEgTvetDVNWIgBd0PMVcnp57jJfx7CZVqTNgGeVQ8OJ2RbJGeOb/EKApQI74IPkAfc0PSieSw5gC0eqEOHb39Awgp0ycrzsUHF/OEicfCmo+6vvrMGenDe7frKUoTKYMWs7l3DOyFC8NaOxhGD3J1Ne5u3A/r4w6mN1HVI0rFwIcoms+t0B4lb2ODWKZiZikQdn8/eqwsmbSEZZsWN3FkshgjPS83+dNqVwB6pPY5Yqte7'

    mkdir -p "$build_files_dir"
    # TODO: Verify why badkeypub is required & figure out how to avoid it.
    echo "ssh-rsa $badkey ejk@bhima.local" >"$build_files_dir/badkeypub"

    cat <<EOF >"$build_files_dir/sshconfig"
Host 172.17.0.*
    StrictHostKeyChecking no
    UserKnownHostsFile=/dev/null
EOF
    # Unquoted 'EOF' delimiter: every '$' that must survive into the generated
    # script is escaped as '\$'.
    # BUGFIX: the LOG default assignment below was ': {LOG:=stdio}' (missing
    # the escaped '$'), which expanded to a no-op at runtime so LOG was never
    # defaulted; now ': \${LOG:=stdio}' matches the upstream wrapdocker script.
    cat <<EOF >"$build_files_dir/wrapdocker"
#!/bin/bash

# Ensure that all nodes in /dev/mapper correspond to mapped devices currently loaded by the device-mapper kernel driver
dmsetup mknodes

# First, make sure that cgroups are mounted correctly.
CGROUP=/sys/fs/cgroup
: \${LOG:=stdio}

[ -d \$CGROUP ] ||
    mkdir \$CGROUP

mountpoint -q \$CGROUP ||
    mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup \$CGROUP || {
        echo "Could not make a tmpfs mount. Did you use --privileged?"
        exit 1
    }

if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
then
    mount -t securityfs none /sys/kernel/security || {
        echo "Could not mount /sys/kernel/security."
        echo "AppArmor detection and --privileged mode might break."
    }
fi

# Mount the cgroup hierarchies exactly as they are in the parent system.
for SUBSYS in \$(cut -d: -f2 /proc/1/cgroup)
do
    [ -d \$CGROUP/\$SUBSYS ] || mkdir \$CGROUP/\$SUBSYS
    mountpoint -q \$CGROUP/\$SUBSYS ||
        mount -n -t cgroup -o \$SUBSYS cgroup \$CGROUP/\$SUBSYS

    # The two following sections address a bug which manifests itself
    # by a cryptic "lxc-start: no ns_cgroup option specified" when
    # trying to start containers withina container.
    # The bug seems to appear when the cgroup hierarchies are not
    # mounted on the exact same directories in the host, and in the
    # container.

    # Named, control-less cgroups are mounted with "-o name=foo"
    # (and appear as such under /proc/<pid>/cgroup) but are usually
    # mounted on a directory named "foo" (without the "name=" prefix).
    # Systemd and OpenRC (and possibly others) both create such a
    # cgroup. To avoid the aforementioned bug, we symlink "foo" to
    # "name=foo". This shouldn't have any adverse effect.
    echo \$SUBSYS | grep -q ^name= && {
        NAME=\$(echo \$SUBSYS | sed s/^name=//)
        ln -s \$SUBSYS \$CGROUP/\$NAME
    }

    # Likewise, on at least one system, it has been reported that
    # systemd would mount the CPU and CPU accounting controllers
    # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
    # but on a directory called "cpu,cpuacct" (note the inversion
    # in the order of the groups). This tries to work around it.
    [ \$SUBSYS = cpuacct,cpu ] && ln -s \$SUBSYS \$CGROUP/cpu,cpuacct
done

# Note: as I write those lines, the LXC userland tools cannot setup
# a "sub-container" properly if the "devices" cgroup is not in its
# own hierarchy. Let's detect this and issue a warning.
grep -q :devices: /proc/1/cgroup ||
    echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
grep -qw devices /proc/1/cgroup ||
    echo "WARNING: it looks like the 'devices' cgroup is not mounted."

# Now, close extraneous file descriptors.
pushd /proc/self/fd >/dev/null
for FD in *
do
    case "\$FD" in
        # Keep stdin/stdout/stderr
        [012])
            ;;
        # Nuke everything else
        *)
            eval exec "\$FD>&-"
            ;;
    esac
done
popd >/dev/null


# If a pidfile is still around (for example after a container restart),
# delete it so that docker can start.
rm -rf /var/run/docker.pid

# If we were given a PORT environment variable, start as a simple daemon;
# otherwise, spawn a shell as well
if [ "\$PORT" ]
then
    exec dockerd -H 0.0.0.0:\$PORT -H unix:///var/run/docker.sock \
        \$DOCKER_DAEMON_ARGS
else
    if [ "\$LOG" == "file" ]
    then
        dockerd \$DOCKER_DAEMON_ARGS &>/var/log/docker.log &
    else
        dockerd \$DOCKER_DAEMON_ARGS &
    fi
    (( timeout = 60 + SECONDS ))
    until docker info >/dev/null 2>&1
    do
        if (( SECONDS >= timeout )); then
            echo 'Timed out trying to connect to internal docker host.' >&2
            break
        fi
        sleep 1
    done
    [[ \$1 ]] && exec "\$@"
    exec bash --login
fi
EOF
}
diff --git a/docker/scripts/lib_vpp.sh b/docker/scripts/lib_vpp.sh
new file mode 100644
index 000000000..39e2ca192
--- /dev/null
+++ b/docker/scripts/lib_vpp.sh
@@ -0,0 +1,102 @@
# lib_vpp.sh - Docker build script VPP library.
# For import only.

# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Don't import more than once.
+if [ -n "$(alias lib_vpp_imported 2> /dev/null)" ] ; then + return 0 +fi +alias lib_vpp_imported=true + +export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname $BASH_SOURCE)"} +. $CIMAN_DOCKER_SCRIPTS/lib_common.sh + + +VPP_SUPPORTED_EXECUTOR_CLASSES="builder" +vpp_supported_executor_class() { + if ! grep -q "${1:-}" <<< $VPP_SUPPORTED_EXECUTOR_CLASSES ; then + return 1 + fi + return 0 +} + +make_vpp() { + local target=$1 + local branch=${2:-"master"} + local branchname="$(echo $branch | sed -e 's,/,_,')" + local bld_log="$DOCKER_BUILD_LOG_DIR" + bld_log="${bld_log}/$FDIOTOOLS_IMAGENAME-$branchname" + bld_log="${bld_log}-make_vpp_${target}-bld.log" + + makefile_target="^${target}:" + if [ -z "$(grep $makefile_target Makefile)" ] ; then + echo "Make target '$target' does not exist in VPP branch '$branch'!" + return + fi + git clean -qfdx + description="'make UNATTENDED=yes $target' in $(pwd) ($branch)" + echo_log -e " Starting $description..." + make UNATTENDED=yes $target 2>&1 | tee -a "$bld_log" + git checkout -q -- . + echo_log " Completed $description!" +} + +make_vpp_test() { + local target=$1 + local branch=${2:-"master"} + local branchname="$(echo $branch | sed -e 's,/,_,')" + local bld_log="$DOCKER_BUILD_LOG_DIR" + bld_log="${bld_log}/$FDIOTOOLS_IMAGENAME-$branchname" + bld_log="${bld_log}-make_vpp_test_${target}-bld.log" + + makefile_target="^${target}:" + if [ -z "$(grep -e $makefile_target test/Makefile)" ] ; then + echo "Make test target '$target' does not exist in VPP branch '$branch'!" + return + fi + git clean -qfdx + description="'make -C test $target' in $(pwd) ($branch)" + echo_log " Starting $description..." + make WS_ROOT="$DOCKER_VPP_DIR" BR="$DOCKER_VPP_DIR/build-root" \ + TEST_DIR="$DOCKER_VPP_DIR/test" -C test $target 2>&1 | tee -a $bld_log + remove_pyc_files_and_pycache_dirs + git checkout -q -- . + echo_log " Completed $description!" 
+} + +docker_build_setup_vpp() { + if vpp_supported_executor_class "$EXECUTOR_CLASS" ; then + if [ ! -d "$DOCKER_VPP_DIR" ] ; then + echo_log "Cloning VPP into $DOCKER_VPP_DIR..." + git clone -q https://gerrit.fd.io/r/vpp $DOCKER_VPP_DIR + fi + clean_git_repo $DOCKER_VPP_DIR + fi +} + +# Branches must be listed in chronological order -- oldest stable branch +# first and master last. +# +# Note: CI Jobs for each architecture are maintained in +# .../ci-management/jjb/vpp/vpp.yaml +# All OS's and branches are included in the 'os' and 'stream' +# definitions respectively, then the exclude list maintained +# to create an enumerated set of jobs jobs that match the +# definitions here. +declare -A VPP_BRANCHES +VPP_BRANCHES["debian-11"]="stable/2310 stable/2402 master" +VPP_BRANCHES["ubuntu-20.04"]="stable/2310 stable/2402 master" +VPP_BRANCHES["ubuntu-22.04"]="stable/2310 stable/2402 master" +export VPP_BRANCHES diff --git a/docker/scripts/update_dockerhub_prod_tags.sh b/docker/scripts/update_dockerhub_prod_tags.sh new file mode 100755 index 000000000..01dac644c --- /dev/null +++ b/docker/scripts/update_dockerhub_prod_tags.sh @@ -0,0 +1,402 @@ +#! /bin/bash + +# Copyright (c) 2022 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

set -euo pipefail
shopt -s extglob    # enables the r?(evert)-style patterns in the action 'case'

# Log all output to stdout & stderr to a log file
logname="/tmp/$(basename "$0").$(date -u +%Y_%m_%d_%H%M%S).log"
echo -e "\n*** Logging output to $logname ***\n"
exec > >(tee -a "$logname") 2>&1

export CIMAN_DOCKER_SCRIPTS=${CIMAN_DOCKER_SCRIPTS:-"$(dirname "${BASH_SOURCE[0]}")"}
. "$CIMAN_DOCKER_SCRIPTS/lib_common.sh"

# Global variables
long_bar="################################################################"
short_bar="-----"
image_not_found=""
image_user=""
image_repo=""
image_version=""
image_arch=""
image_name_prod=""
image_name_prev=""
image_name_new=""
image_realname=""
image_realname_prod=""
image_realname_prev=""
image_tags=""
image_tags_prod=""
image_tags_prev=""
image_tags_new=""
docker_id_prod=""
docker_id_prev=""
docker_id_new=""
digest_prod=""
digest_prev=""
digest_new=""
restore_cmd=""

# Print usage and exit with an error status.
usage() {
    local script
    script="$(basename "$0")"
    echo
    echo "Usage: $script r[evert] <prod image>"
    echo "       $script p[romote] <new image> [<new image>]"
    echo "       $script i[nspect] <prod image>"
    echo
    echo "  revert:  swaps 'prod-<arch>' and 'prod-prev-<arch>' images"
    echo "    <prod image>: e.g. fdiotools/builder-ubuntu2204:prod-x86_64"
    echo
    echo "  promote: moves 'prod-<arch>' image to 'prod-prev-<arch>' tag and"
    echo "           tags <new image> with 'prod-<arch>'"
    echo "    <new image>: e.g. fdiotools/builder-ubuntu2204:2022_07_23_151655-x86_64"
    echo "  inspect: prints out all tags for prod-<arch> and prod-prev-<arch>"
    echo
    exit 1
}

# Print the command that restores the tags to their pre-run state.
echo_restore_cmd() {
    echo -e "\n$long_bar\n"
    echo "To restore tags to original state, issue the following command:"
    echo -e "\n$restore_cmd\n\n$long_bar\n"
}

# Push each image name in "$@" to docker hub; exit on the first failure.
# NOTE(review): 'set +e' is never re-enabled here (matching the original
# flow); the explicit 'if !' check handles push failures.
push_to_dockerhub() {
    echo_restore_cmd
    for image in "$@" ; do
        set +e
        echo "Pushing '$image' to docker hub..."
        if ! docker push "$image" ; then
            echo "ERROR: 'docker push $image' failed!"
            exit 1
        fi
    done
}

# Split a full image name 'user/repo:version-arch' ($1) into the globals
# image_user, image_repo, image_version and image_arch, and derive
# image_name_new / image_name_prod / image_name_prev from them.
# image_name_new is cleared when $1 is already a prod/prod-prev tag.
parse_image_name() {
    image_user="$(echo "$1" | cut -d'/' -f1)"
    image_repo="$(echo "$1" | cut -d'/' -f2 | cut -d':' -f1)"
    local tag
    tag="$(echo "$1" | cut -d':' -f2)"
    image_version="$(echo "$tag" | cut -d'-' -f1)"
    image_arch="$(echo "$tag" | sed -e "s/$image_version-//")"
    image_name_new="${image_user}/${image_repo}:${image_version}-${image_arch}"
    # Round-trip check: re-assembling the parsed fields must reproduce $1.
    if [ "$1" != "$image_name_new" ] ; then
        echo "ERROR: Image name parsing failed: $1 != '$image_name_new'"
        usage
    fi
    # A prod/prod-prev tag is not a "new" image to promote.
    if [[ "$image_version" =~ "prod" ]] ; then
        image_name_new=""
    fi
    image_name_prod="${image_user}/${image_repo}:prod-${image_arch}"
    image_name_prev="${image_user}/${image_repo}:prod-prev-${image_arch}"
}

# Gather all local tags ($image_tags) and the non-prod "real" name
# ($image_realname) for the docker image id in $1.
#
# Note: 'grep $image_arch' & grep -v 'prod-curr' is required due to a
#       bug in docker hub which returns old tags which were deleted via
#       the webUI, but are still retrieved by 'docker pull -a'
format_image_tags() {
    image_tags="$(docker images | grep "$1" | grep "$image_arch" | grep -v prod-curr | sort -r | mawk '{print $1":"$2}' | tr '\n' ' ')"
    image_realname="$(docker images | grep "$1" | grep "$image_arch" | sort -r | grep -v prod | mawk '{print $1":"$2}' || true)"
    if [ -z "${image_realname:-}" ] ; then
        image_realname="$image_tags"
    fi
}

# Collect docker ids, digests and tag lists for the new/prod/prod-prev
# images into the corresponding globals, and compute $restore_cmd on the
# first invocation.
get_image_id_tags() {
    for image in "$image_name_new" "$image_name_prod" "$image_name_prev" ; do
        if [ -z "$image" ] ; then
            continue
        fi
        # ensure image exists
        set +e
        local image_found
        image_found="$(docker images | mawk '{print $1":"$2}' | grep "$image")"
        set -e
        if [ -z "$image_found" ] ; then
            if [ "$image" = "$image_name_prev" ] ; then
                if [ "$action" = "revert" ] ; then
                    echo "ERROR: Image '$image' not found!"
                    echo "Unable to revert production image '$image_name_prod'!"
                    usage
                else
                    # Missing prod-prev is OK when promoting or inspecting.
                    continue
                fi
            else
                echo "ERROR: Image '$image' not found!"
                usage
            fi
        fi
        set +e
        # Declare and assign separately so $? reflects the pipeline, not
        # the 'local' builtin (which always succeeds).
        local id digest retval
        id="$(docker image inspect "$image" | mawk -F':' '/Id/{print $3}')"
        digest="$(docker image inspect "$image" | grep -A1 RepoDigests | grep -v RepoDigests | mawk -F':' '{print $2}')"
        retval="$?"
        set -e
        if [ "$retval" -ne "0" ] ; then
            echo "ERROR: Docker ID not found for '$image'!"
            usage
        fi
        if [ "$image" = "$image_name_prod" ] ; then
            docker_id_prod="${id::12}"
            digest_prod="${digest::12}"
            format_image_tags "$docker_id_prod"
            image_tags_prod="$image_tags"
            if [ -z "$image_realname_prod" ] ; then
                image_realname_prod="$image_realname"
            fi
        elif [ "$image" = "$image_name_prev" ] ; then
            docker_id_prev="${id::12}"
            digest_prev="${digest::12}"
            format_image_tags "$docker_id_prev"
            image_tags_prev="$image_tags"
            if [ -z "$image_realname_prev" ] ; then
                image_realname_prev="$image_realname"
            fi
        else
            docker_id_new="${id::12}"
            digest_new="${digest::12}"
            format_image_tags "$docker_id_new"
            image_tags_new="$image_tags"
        fi
    done
    if [ -z "$restore_cmd" ] ; then
        restore_cmd="sudo $0 p $image_realname_prev $image_realname_prod"
    fi
}

# Pull every tag of the current repo from docker hub so local state
# matches the registry before any tag manipulation.
get_all_tags_from_dockerhub() {
    local dh_repo="$image_user/$image_repo"
    echo -e "Pulling all tags from docker hub repo '$dh_repo':\n$long_bar"
    if ! docker pull -a "$dh_repo" ; then
        echo "ERROR: Repository '$dh_repo' not found on docker hub!"
        usage
    fi
    echo "$long_bar"
}

# Return 0 if $1 matches the image datestamp format
# YYYY_MM_DD_HHMMSS_UTC, else 1.
verify_image_version_date_format() {
    local version="$1"
    local ver_regex="^[0-9]{4}_[0-1][0-9]_[0-3][0-9]_[0-2][0-9][0-5][0-9][0-5][0-9]_UTC$"
    if [[ "$version" =~ $ver_regex ]]; then
        return 0
    fi
    return 1
}

# Validate the parsed image name components (user, version, arch);
# exits via usage() on any invalid component.
verify_image_name() {
    image_not_found=""
    # Invalid user
    if [ "$image_user" != "fdiotools" ] ; then
        image_not_found="true"
        echo "ERROR: invalid user '$image_user' in '$image_name_new'!"
    fi
    # Invalid version (must be 'prod' or a UTC datestamp).
    # Fix: dropped a stray ']]' that was being passed as a bogus extra
    # argument to verify_image_version_date_format.
    if [ -z "$image_not_found" ] \
           && [ "$image_version" != "prod" ] \
           && ! verify_image_version_date_format "$image_version" ; then
        image_not_found="true"
        echo "ERROR: invalid version '$image_version' in '$image_name_new'!"
    fi
    # Invalid arch
    if [ -z "$image_not_found" ] \
           && ! [[ "$EXECUTOR_ARCHS" =~ .*"$image_arch".* ]] ; then
        image_not_found="true"
        echo "ERROR: invalid arch '$image_arch' in '$image_name_new'!"
    fi
    if [ -n "$image_not_found" ] ; then
        echo "ERROR: Invalid image '$image_name_new'!"
        usage
    fi
}

# 'docker tag $1 $2' with the failure downgraded to a warning.
docker_tag_image() {
    echo ">>> docker tag $1 $2"
    set +e
    docker tag "$1" "$2"
    local retval="$?"
    set -e
    if [ "$retval" -ne "0" ] ; then
        echo "WARNING: 'docker tag $1 $2' failed!"
    fi
}

# 'docker rmi $1' with the failure downgraded to a warning.
docker_rmi_tag() {
    set +e
    echo ">>> docker rmi $1"
    docker rmi "$1"
    local retval="$?"
    set -e
    if [ "$retval" -ne "0" ] ; then
        echo "WARNING: 'docker rmi $1' failed!"
    fi
}

# Print label $1, id $2, digest $3 and the whitespace-separated tag list $4.
print_image_list() {
    if [ -z "$2" ] ; then
        echo "$1 Image Not Found"
        return
    fi
    echo "$1 (Id $2, Digest $3):"
    for image in $4 ; do
        echo -e "\t$image"
    done
}

# Print a summary of the NEW/prod/prod-prev images; $1 is a state prefix
# for the heading (e.g. "EXISTING ", "PROMOTED ").
inspect_images() {
    echo -e "\n${1}Production Docker Images:"
    echo "$short_bar"
    if [ -n "$image_tags_new" ] ; then
        print_image_list "NEW" "$docker_id_new" "$digest_new" "$image_tags_new"
        echo
    fi
    print_image_list "prod-$image_arch" "$docker_id_prod" "$digest_prod" \
                     "$image_tags_prod"
    echo
    print_image_list "prod-prev-$image_arch" "$docker_id_prev" "$digest_prev" \
                     "$image_tags_prev"
    echo -e "$short_bar\n"
}

# Swap the prod-<arch> and prod-prev-<arch> tags locally, confirm
# interactively, then push the swapped tags to docker hub.
revert_prod_image() {
    inspect_images "EXISTING "
    docker_tag_image "$docker_id_prod" "$image_name_prev"
    docker_tag_image "$docker_id_prev" "$image_name_prod"
    get_image_id_tags
    inspect_images "REVERTED "

    local yn=""
    while true; do
        read -r -p "Push Reverted tags to '$image_user/$image_repo' (yes/no)? " yn
        case "${yn:0:1}" in
            y|Y )
                break ;;
            n|N )
                echo -e "\nABORTING REVERT!\n"
                # Undo the local tag swap before bailing out.
                docker_tag_image "$docker_id_prev" "$image_name_prod"
                docker_tag_image "$docker_id_prod" "$image_name_prev"
                get_image_id_tags
                inspect_images "RESTORED LOCAL "
                exit 1 ;;
            * )
                echo "Please answer yes or no." ;;
        esac
    done
    echo
    push_to_dockerhub "$image_name_prev" "$image_name_prod"
    inspect_images ""
    echo_restore_cmd
}

# Tag the current prod image as prod-prev and the new image as prod,
# confirm interactively, then push all three tags to docker hub.
promote_new_image() {
    inspect_images "EXISTING "
    docker_tag_image "$docker_id_prod" "$image_name_prev"
    docker_tag_image "$docker_id_new" "$image_name_prod"
    get_image_id_tags
    inspect_images "PROMOTED "

    local yn=""
    while true; do
        read -r -p "Push promoted tags to '$image_user/$image_repo' (yes/no)? " yn
        case "${yn:0:1}" in
            y|Y )
                break ;;
            n|N )
                echo -e "\nABORTING PROMOTION!\n"
                docker_tag_image "$docker_id_prev" "$image_name_prod"
                # $restore_cmd has a 5th field only when a prod-prev image
                # existed before this run; otherwise remove the new tag.
                local restore_both
                restore_both="$(echo "$restore_cmd" | mawk '{print $5}')"
                if [[ -n "$restore_both" ]] ; then
                    docker_tag_image "$image_realname_prev" "$image_name_prev"
                else
                    docker_rmi_tag "$image_name_prev"
                    image_name_prev=""
                    docker_id_prev=""
                fi
                get_image_id_tags
                inspect_images "RESTORED "
                exit 1 ;;
            * )
                echo "Please answer yes or no." ;;
        esac
    done
    echo
    push_to_dockerhub "$image_name_new" "$image_name_prev" "$image_name_prod"
    inspect_images ""
    echo_restore_cmd
}

must_be_run_as_root_or_docker_group

# Validate arguments
num_args="$#"
if [ "$num_args" -lt "1" ] ; then
    usage
fi
action=""
case "$1" in
    r?(evert))
        action="revert"
        if [ "$num_args" -ne "2" ] ; then
            echo "ERROR: Invalid number of arguments: $#"
            usage
        fi ;;
    p?(romote))
        if [ "$num_args" -eq "2" ] || [ "$num_args" -eq "3" ] ; then
            action="promote"
        else
            echo "ERROR: Invalid number of arguments: $#"
            usage
        fi ;;
    i?(nspect))
        action="inspect"
        if [ "$num_args" -ne "2" ] ; then
            echo "ERROR: Invalid number of arguments: $#"
            usage
        fi ;;
    *)
        echo "ERROR: Invalid option '$1'!"
        usage ;;
esac
shift
# Refresh docker hub credentials before any pull/push operations.
docker login >& /dev/null

# Update local tags
tags_to_push=""    # NOTE(review): currently unused; kept for compatibility
for image in "$@" ; do
    parse_image_name "$image"
    verify_image_name "$image"
    get_all_tags_from_dockerhub
    get_image_id_tags
    if [ "$action" = "promote" ] ; then
        if [ -n "$image_name_new" ] ; then
            promote_new_image
        else
            echo "ERROR: No new image specified to promote!"
            usage
        fi
    elif [ "$action" = "revert" ] ; then
        if [ "$image_version" = "prod" ] ; then
            revert_prod_image
        else
            echo "ERROR: Non-production image '$image' specified!"
            usage
        fi
    else
        if [ "$image_version" = "prod" ] ; then
            inspect_images ""
        else
            echo "ERROR: Non-production image '$image' specified!"
            usage
        fi
    fi
done