Diffstat (limited to 'resources/libraries/bash/function')
-rw-r--r--   resources/libraries/bash/function/README.txt      20
-rw-r--r--   resources/libraries/bash/function/ansible.sh       54
-rw-r--r--   resources/libraries/bash/function/artifacts.sh     83
-rw-r--r--   resources/libraries/bash/function/branch.sh         6
-rw-r--r--   resources/libraries/bash/function/common.sh       705
-rw-r--r--   resources/libraries/bash/function/device.sh       354
-rw-r--r--   resources/libraries/bash/function/dpdk.sh         101
-rw-r--r--   resources/libraries/bash/function/eb_version.sh   159
-rw-r--r--   resources/libraries/bash/function/gather.sh        32
-rw-r--r--   resources/libraries/bash/function/hugo.sh         113
-rwxr-xr-x   resources/libraries/bash/function/nginx.sh        136
-rw-r--r--   resources/libraries/bash/function/per_patch.sh    216
-rw-r--r--   resources/libraries/bash/function/terraform.sh    183
13 files changed, 1603 insertions(+), 559 deletions(-)
diff --git a/resources/libraries/bash/function/README.txt b/resources/libraries/bash/function/README.txt
deleted file mode 100644
index 5215e5085b..0000000000
--- a/resources/libraries/bash/function/README.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Files in this directory system are to be executed indirectly,
-sourced from other scripts.
-
-In fact, the files should only define functions,
-except perhaps some minimal logic needed to import dependencies.
-The originating function calls should be executed from elsewhere,
-typically from entry scripts.
diff --git a/resources/libraries/bash/function/ansible.sh b/resources/libraries/bash/function/ansible.sh
index 1263412dd5..6cf4d16f43 100644
--- a/resources/libraries/bash/function/ansible.sh
+++ b/resources/libraries/bash/function/ansible.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -22,10 +22,23 @@ function ansible_adhoc () {
#
# Variable read:
# - ${WORKING_TOPOLOGY} - Reserved working topology.
- # - ${TOOLS_DIR} - CSIT tools directory, where testbed-setup is located.
+ # - ${CSIT_DIR} - CSIT main directory, where ansible playbooks are located.
+ # - ${FLAVOR} - Node flavor string, see common.sh
set -exuo pipefail
+ case "$FLAVOR" in
+ "aws" | "c6in" | "c6gn" | "c7gn")
+ INVENTORY_PATH="cloud_inventory"
+ ;;
+ "x-"*)
+ INVENTORY_PATH="external_inventory"
+ ;;
+ *)
+ INVENTORY_PATH="lf_inventory"
+ ;;
+ esac
+
if ! installed sshpass; then
die "Please install sshpass!"
fi
@@ -33,13 +46,14 @@ function ansible_adhoc () {
hosts=($(fgrep host "${WORKING_TOPOLOGY}" | cut -d ":" -f 2)) || {
die "Failed to read hosts from working topology!"
}
- pushd "${TOOLS_DIR}"/testbed-setup/ansible || die "Pushd failed!"
- ANSIBLE_STDOUT_CALLBACK=yaml \
- ANSIBLE_PIPELINING=true \
- ansible \
+ pushd "${CSIT_DIR}"/fdio.infra.ansible || die "Pushd failed!"
+ export ANSIBLE_HOST_KEY_CHECKING=False
+ export ANSIBLE_STDOUT_CALLBACK=yaml
+ export ANSIBLE_PIPELINING=true
+ ansible-playbook \
--vault-password-file=vault_pass \
--extra-vars '@vault.yml' \
- --inventory inventories/lf_inventory/hosts site.yaml \
+ --inventory inventories/$INVENTORY_PATH/hosts site.yaml \
--limit "$(echo ${hosts[@]//\"})" \
--module-name shell \
--args \"$(echo $@)\" || die "Failed to run ansible on host!"
@@ -53,10 +67,23 @@ function ansible_playbook () {
#
# Variable read:
# - ${WORKING_TOPOLOGY} - Reserved working topology.
- # - ${TOOLS_DIR} - CSIT tools directory, where testbed-setup is located.
+ # - ${CSIT_DIR} - CSIT main directory, where ansible playbooks are located.
+ # - ${FLAVOR} - Node flavor string, see common.sh
set -exuo pipefail
+ case "$FLAVOR" in
+ "aws" | "c6in" | "c6gn" | "c7gn")
+ INVENTORY_PATH="cloud_inventory"
+ ;;
+ "x-"*)
+ INVENTORY_PATH="external_inventory"
+ ;;
+ *)
+ INVENTORY_PATH="lf_inventory"
+ ;;
+ esac
+
if ! installed sshpass; then
die "Please install sshpass!"
fi
@@ -64,13 +91,14 @@ function ansible_playbook () {
hosts=($(fgrep host "${WORKING_TOPOLOGY}" | cut -d ":" -f 2)) || {
die "Failed to read hosts from working topology!"
}
- pushd "${TOOLS_DIR}"/testbed-setup/ansible || die "Pushd failed!"
- ANSIBLE_STDOUT_CALLBACK=yaml \
- ANSIBLE_PIPELINING=true \
- ansible-playbook \
+ pushd "${CSIT_DIR}"/fdio.infra.ansible || die "Pushd failed!"
+ export ANSIBLE_HOST_KEY_CHECKING=False
+ export ANSIBLE_STDOUT_CALLBACK=yaml
+ export ANSIBLE_PIPELINING=true
+ ansible-playbook \
--vault-password-file=vault_pass \
--extra-vars '@vault.yml' \
- --inventory inventories/lf_inventory/hosts site.yaml \
+ --inventory inventories/$INVENTORY_PATH/hosts site.yaml \
--limit "$(echo ${hosts[@]//\"})" \
--tags "$(echo $@)" || die "Failed to run ansible on host!"
popd || die "Popd failed!"
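
For context, a minimal sketch of how the updated helper is meant to be invoked from an entry script (the FLAVOR value and topology path below are illustrative, not taken from this change):

    # Illustrative only; these functions are sourced, never executed directly.
    export CSIT_DIR="/home/testuser/csit"
    export FLAVOR="c6in"    # cloud flavor -> inventories/cloud_inventory
    export WORKING_TOPOLOGY="${CSIT_DIR}/topologies/available/2n_c6in.yaml"
    ansible_playbook "cleanup, calibration" || die "Playbook run failed!"
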
diff --git a/resources/libraries/bash/function/artifacts.sh b/resources/libraries/bash/function/artifacts.sh
index 0a08d7311e..e4d5dd6fc6 100644
--- a/resources/libraries/bash/function/artifacts.sh
+++ b/resources/libraries/bash/function/artifacts.sh
@@ -1,7 +1,6 @@
#!/usr/bin/env bash
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Copyright (c) 2019 PANTHEON.tech and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -26,8 +25,6 @@ function download_artifacts () {
# - REPO_URL - FD.io Packagecloud repository.
# Functions conditionally called (see their documentation for side effects):
# - download_ubuntu_artifacts
- # - download_centos_artifacts
- # - download_opensuse_artifacts
set -exuo pipefail
@@ -46,10 +43,6 @@ function download_artifacts () {
if [ "${os_id}" == "ubuntu" ]; then
download_ubuntu_artifacts || die
- elif [ "${os_id}" == "centos" ]; then
- download_centos_artifacts || die
- elif [ "${os_id}" == "opensuse" ]; then
- download_opensuse_artifacts || die
else
die "${os_id} is not yet supported."
fi
@@ -86,7 +79,7 @@ function download_ubuntu_artifacts () {
repository installation was not successful."
fi
- packages=$(apt-cache -o Dir::Etc::SourceList=${apt_fdio_repo_file} \
+ pkgs=$(apt-cache -o Dir::Etc::SourceList=${apt_fdio_repo_file} \
-o Dir::Etc::SourceParts=${apt_fdio_repo_file} dumpavail \
| grep Package: | cut -d " " -f 2 | grep vpp) || {
die "Retrieval of available VPP packages failed."
@@ -102,7 +95,7 @@ function download_ubuntu_artifacts () {
fi
set +x
- for package in ${packages}; do
+ for package in ${pkgs}; do
# Filter packages with given version
pkg_info=$(apt-cache show -- ${package}) || {
die "apt-cache show on ${package} failed."
@@ -129,73 +122,3 @@ function download_ubuntu_artifacts () {
}
fi
}
-
-function download_centos_artifacts () {
-
- # Download or install CentOS VPP artifacts from packagecloud.io.
- #
- # Variables read:
- # - REPO_URL - FD.io Packagecloud repository.
- # - VPP_VERSION - VPP version.
- # - INSTALL - Whether install packages (if set to "true") or download only.
- # Default: "false".
-
- set -exuo pipefail
-
- curl -s "${REPO_URL}"/script.rpm.sh | sudo -E bash || {
- die "Packagecloud FD.io repo fetch failed."
- }
- # If version is set we will add suffix.
- artifacts=()
- packages=(vpp vpp-selinux-policy vpp-devel vpp-lib vpp-plugins vpp-api-python)
- if [ -z "${VPP_VERSION-}" ]; then
- artifacts+=(${packages[@]})
- else
- artifacts+=(${packages[@]/%/-${VPP_VERSION-}})
- fi
-
- if [[ "${INSTALL:-false}" == "true" ]]; then
- sudo yum -y install "${artifacts[@]}" || {
- die "Install VPP artifact failed."
- }
- else
- sudo yum -y install --downloadonly --downloaddir=. "${artifacts[@]}" || {
- die "Download VPP artifacts failed."
- }
- fi
-}
-
-function download_opensuse_artifacts () {
-
- # Download or install OpenSuSE VPP artifacts from packagecloud.io.
- #
- # Variables read:
- # - REPO_URL - FD.io Packagecloud repository.
- # - VPP_VERSION - VPP version.
- # - INSTALL - Whether install packages (if set to "true") or download only.
- # Default: "false".
-
- set -exuo pipefail
-
- curl -s "${REPO_URL}"/script.rpm.sh | sudo -E bash || {
- die "Packagecloud FD.io repo fetch failed."
- }
- # If version is set we will add suffix.
- artifacts=()
- packages=(vpp vpp-devel vpp-lib vpp-plugins libvpp0)
- if [ -z "${VPP_VERSION-}" ]; then
- artifacts+=(${packages[@]})
- else
- artifacts+=(${packages[@]/%/-${VPP_VERSION-}})
- fi
-
- if [[ "${INSTALL:-false}" == "true" ]]; then
- sudo yum -y install "${artifacts[@]}" || {
- die "Install VPP artifact failed."
- }
- else
- sudo yum -y install --downloadonly --downloaddir=. "${artifacts[@]}" || {
- die "Download VPP artifacts failed."
- }
- fi
-}
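
With the CentOS and openSUSE paths removed, the flow reduces to the Ubuntu branch; a minimal usage sketch (REPO_URL and the other values are illustrative assumptions, not defined in this change):

    # Illustrative values; download_artifacts dispatches on ID in /etc/os-release.
    export REPO_URL="https://packagecloud.io/install/repositories/fdio/release"
    export VPP_VERSION=""      # empty -> newest packages available in the repo
    export INSTALL="false"     # "false" -> download only, "true" -> install too
    download_artifacts || die
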
diff --git a/resources/libraries/bash/function/branch.sh b/resources/libraries/bash/function/branch.sh
index ba9cc39c67..37900eab01 100644
--- a/resources/libraries/bash/function/branch.sh
+++ b/resources/libraries/bash/function/branch.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -57,10 +57,6 @@ function checkout_csit_for_vpp () {
set -exuo pipefail
case "${1}" in
- "stable/2009")
- # LTS branch
- branch_id="origin/${1/stable\//oper-rls}_lts"
- ;;
"stable/"*)
branch_id="origin/${1/stable\//oper-rls}"
;;
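
The surviving "stable/"* branch mapping is a plain parameter expansion; a standalone sketch (branch name illustrative):

    ref="stable/2310"                            # illustrative input
    branch_id="origin/${ref/stable\//oper-rls}"  # "stable/" -> "oper-rls"
    echo "${branch_id}"                          # prints: origin/oper-rls2310
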
diff --git a/resources/libraries/bash/function/common.sh b/resources/libraries/bash/function/common.sh
index b0eaf6cd25..4f104dbfd3 100644
--- a/resources/libraries/bash/function/common.sh
+++ b/resources/libraries/bash/function/common.sh
@@ -1,5 +1,5 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Copyright (c) 2020 PANTHEON.tech and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Copyright (c) 2024 PANTHEON.tech and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -45,7 +45,7 @@ function activate_docker_topology () {
device_image="$(< ${CSIT_DIR}/${IMAGE_VER_FILE})"
case_text="${NODENESS}_${FLAVOR}"
case "${case_text}" in
- "1n_skx" | "1n_tx2")
+ "1n_skx" | "1n_alt" | "1n_spr")
# We execute reservation over csit-shim-dcr (ssh) which runs sourced
# script's functions. Env variables are read from ssh output
# back to localhost for further processing.
@@ -79,18 +79,26 @@ function activate_docker_topology () {
die "Trap attempt failed, please cleanup manually. Aborting!"
}
+ parse_env_variables || die "Parse of environment variables failed!"
+
# Replace all variables in template with those in environment.
source <(echo 'cat <<EOF >topo.yml'; cat ${TOPOLOGIES[0]}; echo EOF;) || {
die "Topology file create failed!"
}
- WORKING_TOPOLOGY="/tmp/topology.yaml"
+ WORKING_TOPOLOGY="${CSIT_DIR}/topologies/available/vpp_device.yaml"
mv topo.yml "${WORKING_TOPOLOGY}" || {
die "Topology move failed!"
}
cat ${WORKING_TOPOLOGY} | grep -v password || {
die "Topology read failed!"
}
+
+ # Subfunctions to update data that may depend on topology reserved.
+ set_environment_variables || die
+ select_tags || die
+ compose_robot_arguments || die
+
}
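
The template substitution above uses a here-document trick worth spelling out: wrapping the topology template in a generated "cat <<EOF" and sourcing the result makes the shell expand every ${VAR} reference from the current environment. A self-contained sketch (file name and value are illustrative):

    # template.yml contains lines such as:  mac_address: "${CSIT_TG_PORT_MAC}"
    export CSIT_TG_PORT_MAC="08:00:27:ae:31:4b"   # illustrative value
    source <(echo 'cat <<EOF >topo.yml'; cat template.yml; echo EOF;)
    # topo.yml now holds the template with all ${...} references expanded.
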
@@ -116,7 +124,7 @@ function activate_virtualenv () {
env_dir="${root_path}/env"
req_path=${2-$CSIT_DIR/requirements.txt}
rm -rf "${env_dir}" || die "Failed to clean previous virtualenv."
- pip3 install virtualenv==20.0.20 || {
+ pip3 install virtualenv==20.15.1 || {
die "Virtualenv package install failed."
}
virtualenv --no-download --python=$(which python3) "${env_dir}" || {
@@ -135,19 +143,19 @@ function activate_virtualenv () {
function archive_tests () {
- # Create .tar.xz of generated/tests for archiving.
+ # Create .tar.gz of generated/tests for archiving.
# To be run after generate_tests, kept separate to offer more flexibility.
# Directory read:
# - ${GENERATED_DIR}/tests - Tree of executed suites to archive.
    # File rewritten:
- # - ${ARCHIVE_DIR}/tests.tar.xz - Archive of generated tests.
+ # - ${ARCHIVE_DIR}/generated_tests.tar.gz - Archive of generated tests.
set -exuo pipefail
- tar c "${GENERATED_DIR}/tests" | xz -3 > "${ARCHIVE_DIR}/tests.tar.xz" || {
- die "Error creating archive of generated tests."
- }
+ pushd "${ARCHIVE_DIR}" || die
+ tar czf "generated_tests.tar.gz" "${GENERATED_DIR}/tests" || true
+ popd || die
}
@@ -156,7 +164,7 @@ function check_download_dir () {
# Fail if there are no files visible in ${DOWNLOAD_DIR}.
#
# Variables read:
- # - DOWNLOAD_DIR - Path to directory pybot takes the build to test from.
+ # - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
# Directories read:
# - ${DOWNLOAD_DIR} - Has to be non-empty to proceed.
# Functions called:
@@ -225,7 +233,7 @@ function common_dirs () {
TOPOLOGIES_DIR=$(readlink -e "${CSIT_DIR}/topologies/available") || {
die "Readlink failed."
}
- JOB_SPECS_DIR=$(readlink -e "${CSIT_DIR}/docs/job_specs") || {
+ JOB_SPECS_DIR=$(readlink -e "${CSIT_DIR}/resources/job_specs") || {
die "Readlink failed."
}
RESOURCES_DIR=$(readlink -e "${CSIT_DIR}/resources") || {
@@ -234,9 +242,6 @@ function common_dirs () {
TOOLS_DIR=$(readlink -e "${RESOURCES_DIR}/tools") || {
die "Readlink failed."
}
- DOC_GEN_DIR=$(readlink -e "${TOOLS_DIR}/doc_gen") || {
- die "Readlink failed."
- }
PYTHON_SCRIPTS_DIR=$(readlink -e "${TOOLS_DIR}/scripts") || {
die "Readlink failed."
}
@@ -256,8 +261,12 @@ function common_dirs () {
}
-function compose_pybot_arguments () {
+function compose_robot_arguments () {
+ # This function is called by run_tests function.
+ # The reason is that some jobs (bisect) perform reservation multiple times,
+ # so WORKING_TOPOLOGY can be different each time.
+ #
# Variables read:
# - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
# - DUT - CSIT test/ subdirectory, set while processing tags.
@@ -266,21 +275,23 @@ function compose_pybot_arguments () {
# - TEST_CODE - The test selection string from environment or argument.
# - SELECTION_MODE - Selection criteria [test, suite, include, exclude].
# Variables set:
- # - PYBOT_ARGS - String holding part of all arguments for pybot.
- # - EXPANDED_TAGS - Array of strings pybot arguments compiled from tags.
+ # - ROBOT_ARGS - String holding part of all arguments for robot.
+ # - EXPANDED_TAGS - Array of strings robot arguments compiled from tags.
set -exuo pipefail
# No explicit check needed with "set -u".
- PYBOT_ARGS=("--loglevel" "TRACE")
- PYBOT_ARGS+=("--variable" "TOPOLOGY_PATH:${WORKING_TOPOLOGY}")
+ ROBOT_ARGS=("--loglevel" "TRACE")
+ ROBOT_ARGS+=("--variable" "TOPOLOGY_PATH:${WORKING_TOPOLOGY}")
+ # TODO: The rest does not need to be recomputed on each reservation.
+ # Refactor TEST_CODE so this part can be called only once.
case "${TEST_CODE}" in
*"device"*)
- PYBOT_ARGS+=("--suite" "tests.${DUT}.device")
+ ROBOT_ARGS+=("--suite" "tests.${DUT}.device")
;;
- *"perf"*)
- PYBOT_ARGS+=("--suite" "tests.${DUT}.perf")
+ *"perf"* | *"bisect"*)
+ ROBOT_ARGS+=("--suite" "tests.${DUT}.perf")
;;
*)
die "Unknown specification: ${TEST_CODE}"
@@ -317,7 +328,7 @@ function deactivate_docker_topology () {
case_text="${NODENESS}_${FLAVOR}"
case "${case_text}" in
- "1n_skx" | "1n_tx2")
+ "1n_skx" | "1n_alt" | "1n_spr")
ssh="ssh root@172.17.0.1 -p 6022"
env_vars=$(env | grep CSIT_ | tr '\n' ' ' ) || die
# The "declare -f" output is long and boring.
@@ -358,19 +369,19 @@ function die () {
}
-function die_on_pybot_error () {
+function die_on_robot_error () {
# Source this fragment if you want to abort on any failed test case.
#
# Variables read:
- # - PYBOT_EXIT_STATUS - Set by a pybot running fragment.
+ # - ROBOT_EXIT_STATUS - Set by a robot running fragment.
# Functions called:
# - die - Print to stderr and exit.
set -exuo pipefail
- if [[ "${PYBOT_EXIT_STATUS}" != "0" ]]; then
- die "Test failures are present!" "${PYBOT_EXIT_STATUS}"
+ if [[ "${ROBOT_EXIT_STATUS}" != "0" ]]; then
+ die "Test failures are present!" "${ROBOT_EXIT_STATUS}"
fi
}
@@ -383,7 +394,7 @@ function generate_tests () {
# within any subdirectory after copying.
# This is a separate function, because this code is called
- # both by autogen checker and entries calling run_pybot.
+ # both by autogen checker and entries calling run_robot.
# Directories read:
# - ${CSIT_DIR}/tests - Used as templates for the generated tests.
@@ -429,54 +440,109 @@ function get_test_code () {
fi
case "${TEST_CODE}" in
- *"1n-vbox"*)
+ *"1n-vbox")
NODENESS="1n"
FLAVOR="vbox"
;;
- *"1n-skx"*)
+ *"1n-skx")
NODENESS="1n"
FLAVOR="skx"
;;
- *"1n-tx2"*)
+ *"1n-spr")
NODENESS="1n"
- FLAVOR="tx2"
+ FLAVOR="spr"
+ ;;
+ *"1n-alt")
+ NODENESS="1n"
+ FLAVOR="alt"
;;
- *"2n-skx"*)
+ *"1n-aws")
+ NODENESS="1n"
+ FLAVOR="aws"
+ ;;
+ *"2n-aws")
NODENESS="2n"
- FLAVOR="skx"
+ FLAVOR="aws"
;;
- *"2n-zn2"*)
+ *"3n-aws")
+ NODENESS="3n"
+ FLAVOR="aws"
+ ;;
+ *"2n-c7gn")
NODENESS="2n"
- FLAVOR="zn2"
+ FLAVOR="c7gn"
;;
- *"3n-skx"*)
+ *"3n-c7gn")
NODENESS="3n"
- FLAVOR="skx"
+ FLAVOR="c7gn"
;;
- *"2n-clx"*)
+ *"1n-c6in")
+ NODENESS="1n"
+ FLAVOR="c6in"
+ ;;
+ *"2n-c6in")
+ NODENESS="2n"
+ FLAVOR="c6in"
+ ;;
+ *"3n-c6in")
+ NODENESS="3n"
+ FLAVOR="c6in"
+ ;;
+ *"2n-zn2")
+ NODENESS="2n"
+ FLAVOR="zn2"
+ ;;
+ *"2n-clx")
NODENESS="2n"
FLAVOR="clx"
;;
- *"2n-dnv"*)
+ *"2n-icx")
+ NODENESS="2n"
+ FLAVOR="icx"
+ ;;
+ *"2n-spr")
NODENESS="2n"
- FLAVOR="dnv"
+ FLAVOR="spr"
+ ;;
+ *"3n-icx")
+ NODENESS="3n"
+ FLAVOR="icx"
+ ;;
+ *"3na-spr")
+ NODENESS="3na"
+ FLAVOR="spr"
+ ;;
+ *"3nb-spr")
+ NODENESS="3nb"
+ FLAVOR="spr"
+ ;;
+ *"3n-snr")
+ NODENESS="3n"
+ FLAVOR="snr"
;;
- *"3n-dnv"*)
+ *"3n-icxd")
NODENESS="3n"
- FLAVOR="dnv"
+ FLAVOR="icxd"
;;
- *"2n-tx2"*)
+ *"2n-tx2")
NODENESS="2n"
FLAVOR="tx2"
;;
- *"3n-tsh"*)
+ *"3n-tsh")
NODENESS="3n"
FLAVOR="tsh"
;;
- *)
- # Fallback to 3-node Haswell by default (backward compatibility)
+ *"3n-alt")
NODENESS="3n"
- FLAVOR="hsw"
+ FLAVOR="alt"
+ ;;
+ *"2n-x-"*)
+ NODENESS="2n"
+ FLAVOR="${TEST_CODE#*2n-}"
+ ;;
+ *"3n-x-"*)
+ NODENESS="3n"
+ FLAVOR="${TEST_CODE#*3n-}"
;;
esac
}
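
Note the case patterns are now anchored at the end of the job name (the trailing * is gone), and external testbeds keep their full "x-..." suffix as the flavor. Worked examples (job names are hypothetical):

    TEST_CODE="csit-vpp-perf-mrr-daily-master-2n-spr"  # -> NODENESS=2n FLAVOR=spr
    TEST_CODE="vpp-csit-verify-perf-3n-x-testbed1"     # hypothetical external job
    echo "${TEST_CODE#*3n-}"   # prints: x-testbed1 (becomes FLAVOR via *"3n-x-"*)
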
@@ -491,6 +557,10 @@ function get_test_tag_string () {
# Variables set:
# - TEST_TAG_STRING - The string following trigger word in gerrit comment.
# May be empty, or even not set on event types not adding comment.
+ # - GIT_BISECT_FROM - If bisecttest, the commit hash to bisect from.
+ # Else not set.
+ # Variables exported optionally:
+ # - GRAPH_NODE_VARIANT - Node variant to test with, set if found in trigger.
# TODO: ci-management scripts no longer need to perform this.
@@ -498,6 +568,10 @@ function get_test_tag_string () {
if [[ "${GERRIT_EVENT_TYPE-}" == "comment-added" ]]; then
case "${TEST_CODE}" in
+ # Order matters, bisect job contains "perf" in its name.
+ *"bisect"*)
+ trigger="bisecttest"
+ ;;
*"device"*)
trigger="devicetest"
;;
@@ -518,10 +592,23 @@ function get_test_tag_string () {
TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
if [[ -z "${TEST_TAG_STRING-}" ]]; then
# Probably we got a base64 encoded comment.
- comment=$(base64 --decode <<< "${GERRIT_EVENT_COMMENT_TEXT}" || true)
+ comment="${GERRIT_EVENT_COMMENT_TEXT}"
+ comment=$(base64 --decode <<< "${comment}" || true)
comment=$(fgrep "${trigger}" <<< "${comment}" || true)
TEST_TAG_STRING=$("${cmd[@]}" <<< "${comment}" || true)
fi
+ if [[ "${trigger}" == "bisecttest" ]]; then
+ # Intentionally without quotes, so spaces delimit elements.
+ test_tag_array=(${TEST_TAG_STRING}) || die "How could this fail?"
+ # First "argument" of bisecttest is a commit hash.
+ GIT_BISECT_FROM="${test_tag_array[0]}" || {
+ die "Bisect job requires commit hash."
+ }
+ # Update the tag string (tag expressions only, no commit hash).
+ TEST_TAG_STRING="${test_tag_array[@]:1}" || {
+ die "Bisect job needs a single test, no default."
+ }
+ fi
if [[ -n "${TEST_TAG_STRING-}" ]]; then
test_tag_array=(${TEST_TAG_STRING})
if [[ "${test_tag_array[0]}" == "icl" ]]; then
@@ -530,9 +617,6 @@ function get_test_tag_string () {
elif [[ "${test_tag_array[0]}" == "skx" ]]; then
export GRAPH_NODE_VARIANT="skx"
TEST_TAG_STRING="${test_tag_array[@]:1}" || true
- elif [[ "${test_tag_array[0]}" == "hsw" ]]; then
- export GRAPH_NODE_VARIANT="hsw"
- TEST_TAG_STRING="${test_tag_array[@]:1}" || true
fi
fi
fi
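
A worked example of the bisect trigger parsing (comment text is hypothetical):

    # Gerrit comment: "bisecttest 0123abc mrrAND1cAND64bANDethip4-ip4base"
    TEST_TAG_STRING="0123abc mrrAND1cAND64bANDethip4-ip4base"
    test_tag_array=(${TEST_TAG_STRING})       # intentional word splitting
    GIT_BISECT_FROM="${test_tag_array[0]}"    # -> 0123abc
    TEST_TAG_STRING="${test_tag_array[@]:1}"  # -> the single test expression
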
@@ -598,12 +682,65 @@ function move_archives () {
}
+function prepare_topology () {
+
+ # Prepare virtual testbed topology if needed based on flavor.
+
+ # Variables read:
+ # - TEST_CODE - String affecting test selection, usually jenkins job name.
+ # - NODENESS - Node multiplicity of testbed, either "2n" or "3n".
+ # - FLAVOR - Node flavor string, e.g. "clx" or "skx".
+ # Variables set:
+ # - TERRAFORM_MODULE_DIR - Terraform module directory.
+ # Functions called:
+ # - die - Print to stderr and exit.
+ # - terraform_init - Terraform init topology.
+ # - terraform_apply - Terraform apply topology.
+
+ set -exuo pipefail
+
+ case_text="${NODENESS}_${FLAVOR}"
+ case "${case_text}" in
+ "1n_aws" | "2n_aws" | "3n_aws")
+ export TF_VAR_testbed_name="${TEST_CODE}"
+ TERRAFORM_MODULE_DIR="terraform-aws-${NODENESS}-${FLAVOR}-c5n"
+ terraform_init || die "Failed to call terraform init."
+ trap "terraform_destroy" ERR EXIT || {
+ die "Trap attempt failed, please cleanup manually. Aborting!"
+ }
+ terraform_apply || die "Failed to call terraform apply."
+ ;;
+ "2n_c7gn" | "3n_c7gn")
+ export TF_VAR_testbed_name="${TEST_CODE}"
+ TERRAFORM_MODULE_DIR="terraform-aws-${NODENESS}-c7gn"
+ terraform_init || die "Failed to call terraform init."
+ trap "terraform_destroy" ERR EXIT || {
+ die "Trap attempt failed, please cleanup manually. Aborting!"
+ }
+ terraform_apply || die "Failed to call terraform apply."
+ ;;
+ "1n_c6in" | "2n_c6in" | "3n_c6in")
+ export TF_VAR_testbed_name="${TEST_CODE}"
+ TERRAFORM_MODULE_DIR="terraform-aws-${NODENESS}-c6in"
+ terraform_init || die "Failed to call terraform init."
+ trap "terraform_destroy" ERR EXIT || {
+ die "Trap attempt failed, please cleanup manually. Aborting!"
+ }
+ terraform_apply || die "Failed to call terraform apply."
+ ;;
+ esac
+}
+
+
function reserve_and_cleanup_testbed () {
# Reserve physical testbed, perform cleanup, register trap to unreserve.
# When cleanup fails, remove from topologies and keep retrying
# until all topologies are removed.
#
+ # Multiple other functions are called from here,
+ # as they set variables that depend on reserved topology data.
+ #
# Variables read:
# - TOPOLOGIES - Array of paths to topology yaml to attempt reservation on.
# - PYTHON_SCRIPTS_DIR - Path to directory holding the reservation script.
@@ -641,7 +778,7 @@ function reserve_and_cleanup_testbed () {
}
die "Trap attempt failed, unreserve succeeded. Aborting."
}
- # Cleanup + calibration checks.
+ # Cleanup + calibration checks
set +e
ansible_playbook "cleanup, calibration"
result="$?"
@@ -672,42 +809,50 @@ function reserve_and_cleanup_testbed () {
echo "Sleeping ${sleep_time}"
sleep "${sleep_time}" || die "Sleep failed."
done
+
+ # Subfunctions to update data that may depend on topology reserved.
+ set_environment_variables || die
+ select_tags || die
+ compose_robot_arguments || die
}
-function run_pybot () {
+function run_robot () {
- # Run pybot with options based on input variables. Create output_info.xml
+ # Run robot with options based on input variables.
+ #
+ # Testbed has to be reserved already,
+ # as some data may have changed between reservations,
+ # for example excluded NICs.
#
# Variables read:
# - CSIT_DIR - Path to existing root of local CSIT git repository.
# - ARCHIVE_DIR - Path to store robot result files in.
- # - PYBOT_ARGS, EXPANDED_TAGS - See compose_pybot_arguments.sh
+ # - ROBOT_ARGS, EXPANDED_TAGS - See compose_robot_arguments.sh
# - GENERATED_DIR - Tests are assumed to be generated under there.
+ # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
+ # - DUT - CSIT test/ subdirectory, set while processing tags.
+ # - TAGS - Array variable holding selected tag boolean expressions.
+ # - TOPOLOGIES_TAGS - Tag boolean expression filtering tests for topology.
+ # - TEST_CODE - The test selection string from environment or argument.
# Variables set:
- # - PYBOT_EXIT_STATUS - Exit status of most recent pybot invocation.
+ # - ROBOT_ARGS - String holding part of all arguments for robot.
+ # - EXPANDED_TAGS - Array of string robot arguments compiled from tags.
+ # - ROBOT_EXIT_STATUS - Exit status of most recent robot invocation.
# Functions called:
# - die - Print to stderr and exit.
set -exuo pipefail
- all_options=("--outputdir" "${ARCHIVE_DIR}" "${PYBOT_ARGS[@]}")
- all_options+=("--noncritical" "EXPECTED_FAILING")
+ all_options=("--outputdir" "${ARCHIVE_DIR}" "${ROBOT_ARGS[@]}")
all_options+=("${EXPANDED_TAGS[@]}")
pushd "${CSIT_DIR}" || die "Change directory operation failed."
set +e
robot "${all_options[@]}" "${GENERATED_DIR}/tests/"
- PYBOT_EXIT_STATUS="$?"
+ ROBOT_EXIT_STATUS="$?"
set -e
- # Generate INFO level output_info.xml for post-processing.
- all_options=("--loglevel" "INFO")
- all_options+=("--log" "none")
- all_options+=("--report" "none")
- all_options+=("--output" "${ARCHIVE_DIR}/output_info.xml")
- all_options+=("${ARCHIVE_DIR}/output.xml")
- rebot "${all_options[@]}" || true
popd || die "Change directory operation failed."
}
@@ -723,23 +868,23 @@ function select_arch_os () {
set -exuo pipefail
- os_id=$(grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g') || {
- die "Get OS release failed."
- }
+ source /etc/os-release || die "Get OS release failed."
- case "${os_id}" in
+ case "${ID}" in
"ubuntu"*)
- IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU"
- VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_BIONIC"
- PKG_SUFFIX="deb"
- ;;
- "centos"*)
- IMAGE_VER_FILE="VPP_DEVICE_IMAGE_CENTOS"
- VPP_VER_FILE="VPP_STABLE_VER_CENTOS"
- PKG_SUFFIX="rpm"
+ case "${VERSION}" in
+ *"LTS (Jammy Jellyfish)"*)
+ IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU_JAMMY"
+ VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_JAMMY"
+ PKG_SUFFIX="deb"
+ ;;
+ *)
+ die "Unsupported Ubuntu version!"
+ ;;
+ esac
;;
*)
- die "Unable to identify distro or os from ${os_id}"
+ die "Unsupported distro or OS!"
;;
esac
@@ -759,6 +904,9 @@ function select_arch_os () {
function select_tags () {
+ # Only to be called from the reservation function,
+ # as resulting tags may change based on topology data.
+ #
# Variables read:
# - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
# - TEST_CODE - String affecting test selection, usually jenkins job name.
@@ -774,35 +922,69 @@ function select_tags () {
set -exuo pipefail
# NIC SELECTION
- start_pattern='^ TG:'
+ case "${TEST_CODE}" in
+ *"1n-aws"* | *"1n-c6in"*)
+ start_pattern='^ SUT:'
+ ;;
+ *)
+ start_pattern='^ TG:'
+ ;;
+ esac
end_pattern='^ \? \?[A-Za-z0-9]\+:'
- # Remove the TG section from topology file
+ # Remove the sections from topology file
sed_command="/${start_pattern}/,/${end_pattern}/d"
- # All topologies DUT NICs
+ # All topologies NICs
available=$(sed "${sed_command}" "${TOPOLOGIES_DIR}"/* \
| grep -hoP "model: \K.*" | sort -u)
- # Selected topology DUT NICs
+ # Selected topology NICs
reserved=$(sed "${sed_command}" "${WORKING_TOPOLOGY}" \
| grep -hoP "model: \K.*" | sort -u)
- # All topologies DUT NICs - Selected topology DUT NICs
+ # All topologies NICs - Selected topology NICs
exclude_nics=($(comm -13 <(echo "${reserved}") <(echo "${available}"))) || {
die "Computation of excluded NICs failed."
}
# Select default NIC tag.
case "${TEST_CODE}" in
- *"3n-dnv"* | *"2n-dnv"*)
- default_nic="nic_intel-x553"
+ *"3n-snr")
+ default_nic="nic_intel-e822cq"
;;
- *"3n-tsh"*)
+ *"3n-icxd")
+ default_nic="nic_intel-e823c"
+ ;;
+ *"3n-tsh")
default_nic="nic_intel-x520-da2"
;;
- *"3n-skx"* | *"2n-skx"* | *"2n-clx"* | *"2n-zn2"*)
+ *"3n-icx" | *"2n-icx")
+ default_nic="nic_intel-e810cq"
+ ;;
+ *"3na-spr")
+ default_nic="nic_mellanox-cx7veat"
+ ;;
+ *"3nb-spr")
+ default_nic="nic_intel-e810cq"
+ ;;
+ *"2n-spr")
+ default_nic="nic_intel-e810cq"
+ ;;
+ *"2n-clx" | *"2n-zn2")
default_nic="nic_intel-xxv710"
;;
- *"3n-hsw"* | *"2n-tx2"* | *"mrr-daily-master")
+ *"2n-tx2" | *"3n-alt")
default_nic="nic_intel-xl710"
;;
+ *"1n-aws" | *"2n-aws" | *"3n-aws")
+ default_nic="nic_amazon-nitro-50g"
+ ;;
+ *"2n-c7gn" | *"3n-c7gn")
+ default_nic="nic_amazon-nitro-100g"
+ ;;
+ *"1n-c6in" | *"2n-c6in" | *"3n-c6in")
+ default_nic="nic_amazon-nitro-200g"
+ ;;
+ *"2n-x-"* | *"3n-x-"*)
+ default_nic="nic_intel-e810cq"
+ ;;
*)
default_nic="nic_intel-x710"
;;
@@ -814,22 +996,47 @@ function select_tags () {
awk_nics_sub_cmd+='gsub("x710","10ge2p1x710");'
awk_nics_sub_cmd+='gsub("xl710","40ge2p1xl710");'
awk_nics_sub_cmd+='gsub("x520-da2","10ge2p1x520");'
- awk_nics_sub_cmd+='gsub("x553","10ge2p1x553");'
- awk_nics_sub_cmd+='gsub("cx556a","10ge2p1cx556a");'
+ awk_nics_sub_cmd+='gsub("cx556a","100ge2p1cx556a");'
+ awk_nics_sub_cmd+='gsub("2p1cx7veat","200ge2p1cx7veat");'
+ awk_nics_sub_cmd+='gsub("6p3cx7veat","200ge6p3cx7veat");'
+ awk_nics_sub_cmd+='gsub("cx6dx","100ge2p1cx6dx");'
+ awk_nics_sub_cmd+='gsub("e810cq","100ge2p1e810cq");'
+ awk_nics_sub_cmd+='gsub("e822cq","25ge2p1e822cq");'
+ awk_nics_sub_cmd+='gsub("e823c","25ge2p1e823c");'
awk_nics_sub_cmd+='gsub("vic1227","10ge2p1vic1227");'
- awk_nics_sub_cmd+='gsub("vic1385","10ge2p1vic1385");'
+ awk_nics_sub_cmd+='gsub("vic1385","40ge2p1vic1385");'
+ awk_nics_sub_cmd+='gsub("nitro-50g","50ge1p1ENA");'
+ awk_nics_sub_cmd+='gsub("nitro-100g","100ge1p1ENA");'
+ awk_nics_sub_cmd+='gsub("nitro-200g","200ge1p1ENA");'
+ awk_nics_sub_cmd+='gsub("virtual","1ge1p82540em");'
awk_nics_sub_cmd+='if ($9 =="drv_avf") drv="avf-";'
awk_nics_sub_cmd+='else if ($9 =="drv_rdma_core") drv ="rdma-";'
+ awk_nics_sub_cmd+='else if ($9 =="drv_mlx5_core") drv ="mlx5-";'
+ awk_nics_sub_cmd+='else if ($9 =="drv_af_xdp") drv ="af-xdp-";'
awk_nics_sub_cmd+='else drv="";'
- awk_nics_sub_cmd+='print "*"$7"-" drv $11"-"$5"."$3"-"$1"-" drv $11"-"$5'
+ awk_nics_sub_cmd+='if ($1 =="-") cores="";'
+ awk_nics_sub_cmd+='else cores=$1;'
+ awk_nics_sub_cmd+='print "*"$7"-" drv $11"-"$5"."$3"-" cores "-" drv $11"-"$5'
# Tag file directory shorthand.
tfd="${JOB_SPECS_DIR}"
case "${TEST_CODE}" in
# Select specific performance tests based on jenkins job type variable.
+ *"device"* )
+ readarray -t test_tag_array <<< $(grep -v "#" \
+ ${tfd}/vpp_device/${DUT}-${NODENESS}-${FLAVOR}.md |
+ awk {"$awk_nics_sub_cmd"} || echo "devicetest") || die
+ SELECTION_MODE="--test"
+ ;;
+ *"hoststack-daily"* )
+ readarray -t test_tag_array <<< $(grep -v "#" \
+ ${tfd}/hoststack_daily/${DUT}-${NODENESS}-${FLAVOR}.md |
+ awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+ SELECTION_MODE="--test"
+ ;;
*"ndrpdr-weekly"* )
readarray -t test_tag_array <<< $(grep -v "#" \
- ${tfd}/mlr_weekly/${DUT}-${NODENESS}-${FLAVOR}.md |
+ ${tfd}/ndrpdr_weekly/${DUT}-${NODENESS}-${FLAVOR}.md |
awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
SELECTION_MODE="--test"
;;
@@ -845,6 +1052,12 @@ function select_tags () {
awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
SELECTION_MODE="--test"
;;
+ *"soak-weekly"* )
+ readarray -t test_tag_array <<< $(grep -v "#" \
+ ${tfd}/soak_weekly/${DUT}-${NODENESS}-${FLAVOR}.md |
+ awk {"$awk_nics_sub_cmd"} || echo "perftest") || die
+ SELECTION_MODE="--test"
+ ;;
*"report-iterative"* )
test_sets=(${TEST_TAG_STRING//:/ })
# Run only one test set per run
@@ -867,11 +1080,11 @@ function select_tags () {
if [[ -z "${TEST_TAG_STRING-}" ]]; then
# If nothing is specified, we will run pre-selected tests by
# following tags.
- test_tag_array=("mrrAND${default_nic}AND1cAND64bANDip4base"
- "mrrAND${default_nic}AND1cAND78bANDip6base"
- "mrrAND${default_nic}AND1cAND64bANDl2bdbase"
- "mrrAND${default_nic}AND1cAND64bANDl2xcbase"
- "!dot1q" "!drv_avf")
+ test_tag_array=("mrrAND${default_nic}AND1cAND64bANDethip4-ip4base"
+ "mrrAND${default_nic}AND1cAND78bANDethip6-ip6base"
+ "mrrAND${default_nic}AND1cAND64bANDeth-l2bdbasemaclrn"
+ "mrrAND${default_nic}AND1cAND64bANDeth-l2xcbase"
+ "!drv_af_xdp" "!drv_avf")
else
# If trigger contains tags, split them into array.
test_tag_array=(${TEST_TAG_STRING//:/ })
@@ -884,56 +1097,58 @@ function select_tags () {
#
# Reasons for blacklisting:
# - ipsechw - Blacklisted on testbeds without crypto hardware accelerator.
- # TODO: Add missing reasons here (if general) or where used (if specific).
case "${TEST_CODE}" in
- *"2n-skx"*)
- test_tag_array+=("!ipsec")
+ *"1n-vbox")
+ test_tag_array+=("!avf")
+ test_tag_array+=("!vhost")
+ test_tag_array+=("!flow")
;;
- *"3n-skx"*)
+ *"1n-alt")
+ test_tag_array+=("!flow")
+ ;;
+ *"2n-clx")
test_tag_array+=("!ipsechw")
- # Not enough nic_intel-xxv710 to support double link tests.
- test_tag_array+=("!3_node_double_link_topoANDnic_intel-xxv710")
;;
- *"2n-clx"*)
- test_tag_array+=("!ipsec")
+ *"2n-icx")
+ test_tag_array+=("!ipsechw")
;;
- *"2n-zn2"*)
- test_tag_array+=("!ipsec")
+ *"2n-spr")
;;
- *"2n-dnv"*)
+ *"2n-tx2")
test_tag_array+=("!ipsechw")
- test_tag_array+=("!memif")
- test_tag_array+=("!srv6_proxy")
- test_tag_array+=("!vhost")
- test_tag_array+=("!vts")
- test_tag_array+=("!drv_avf")
;;
- *"2n-tx2"*)
+ *"2n-zn2")
test_tag_array+=("!ipsechw")
;;
- *"3n-dnv"*)
- test_tag_array+=("!memif")
- test_tag_array+=("!srv6_proxy")
- test_tag_array+=("!vhost")
- test_tag_array+=("!vts")
- test_tag_array+=("!drv_avf")
+ *"3n-alt")
+ test_tag_array+=("!ipsechw")
;;
- *"3n-tsh"*)
- # 3n-tsh only has x520 NICs which don't work with AVF
- test_tag_array+=("!drv_avf")
+ *"3n-icx")
test_tag_array+=("!ipsechw")
+ test_tag_array+=("!3_node_double_link_topoANDnic_intel-xxv710")
;;
- *"3n-hsw"*)
- test_tag_array+=("!drv_avf")
- # All cards have access to QAT. But only one card (xl710)
- # resides in same NUMA as QAT. Other cards must go over QPI
- # which we do not want to even run.
- test_tag_array+=("!ipsechwNOTnic_intel-xl710")
+ *"3n-snr")
;;
- *)
- # Default to 3n-hsw due to compatibility.
+ *"3n-icxd")
+ ;;
+ *"3na-spr")
+ ;;
+ *"3nb-spr")
+ ;;
+ *"3n-tsh")
test_tag_array+=("!drv_avf")
- test_tag_array+=("!ipsechwNOTnic_intel-xl710")
+ test_tag_array+=("!ipsechw")
+ ;;
+ *"1n-aws" | *"2n-aws" | *"3n-aws")
+ test_tag_array+=("!ipsechw")
+ ;;
+ *"2n-c7gn" | *"3n-c7gn")
+ test_tag_array+=("!ipsechw")
+ ;;
+ *"1n-c6in" | *"2n-c6in" | *"3n-c6in")
+ test_tag_array+=("!ipsechw")
+ ;;
+ *"2n-x-"* | *"3n-x-"*)
;;
esac
@@ -942,17 +1157,17 @@ function select_tags () {
TAGS=()
prefix=""
-
- set +x
- if [[ "${TEST_CODE}" == "vpp-"* ]]; then
- # Automatic prefixing for VPP jobs to limit the NIC used and
- # traffic evaluation to MRR.
- if [[ "${TEST_TAG_STRING-}" == *"nic_"* ]]; then
- prefix="${prefix}mrrAND"
- else
- prefix="${prefix}mrrAND${default_nic}AND"
+ if [[ "${TEST_CODE}" != *"daily"* ]]; then
+ if [[ "${TEST_CODE}" == "vpp-"* ]]; then
+ if [[ "${TEST_CODE}" != *"device"* ]]; then
+ # Automatic prefixing for VPP perf jobs to limit the NIC used.
+ if [[ "${TEST_TAG_STRING-}" != *"nic_"* ]]; then
+ prefix="${default_nic}AND"
+ fi
+ fi
fi
fi
+ set +x
for tag in "${test_tag_array[@]}"; do
if [[ "${tag}" == "!"* ]]; then
# Exclude tags are not prefixed.
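
The prefixing rule that the loop above implements can be sketched standalone (tag values illustrative):

    default_nic="nic_intel-e810cq"; prefix="${default_nic}AND"
    test_tag_array=("mrrAND1cAND64bANDethip4-ip4base" "!drv_avf")
    TAGS=()
    for tag in "${test_tag_array[@]}"; do
        if [[ "${tag}" == "!"* ]]; then
            TAGS+=("${tag}")             # exclude tags stay unprefixed
        else
            TAGS+=("${prefix}${tag}")
        fi
    done
    # TAGS: "nic_intel-e810cqANDmrrAND1cAND64bANDethip4-ip4base" "!drv_avf"
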
@@ -984,7 +1199,7 @@ function select_topology () {
# Variables read:
# - NODENESS - Node multiplicity of testbed, either "2n" or "3n".
- # - FLAVOR - Node flavor string, currently either "hsw" or "skx".
+ # - FLAVOR - Node flavor string, e.g. "clx" or "skx".
# - CSIT_DIR - Path to existing root of local CSIT git repository.
# - TOPOLOGIES_DIR - Path to existing directory with available topologies.
# Variables set:
@@ -997,54 +1212,105 @@ function select_topology () {
case_text="${NODENESS}_${FLAVOR}"
case "${case_text}" in
- # TODO: Move tags to "# Blacklisting certain tags per topology" section.
- # TODO: Double link availability depends on NIC used.
- "1n_vbox")
+ "1n_aws")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*1n-aws*.yaml )
+ TOPOLOGIES_TAGS="1_node_single_link_topo"
+ ;;
+ "1n_c6in")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*1n-c6in*.yaml )
+ TOPOLOGIES_TAGS="1_node_single_link_topo"
+ ;;
+ "1n_alt" | "1n_spr")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "1n_skx" | "1n_tx2")
+ "1n_vbox")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "2n_skx")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_skx*.yaml )
- TOPOLOGIES_TAGS="2_node_*_link_topo"
+ "2n_aws")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n-aws*.yaml )
+ TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "2n_zn2")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_zn2*.yaml )
- TOPOLOGIES_TAGS="2_node_*_link_topo"
+ "2n_c7gn")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n-c7gn*.yaml )
+ TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "3n_skx")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_skx*.yaml )
- TOPOLOGIES_TAGS="3_node_*_link_topo"
+ "2n_c6in")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n-c6in*.yaml )
+ TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
"2n_clx")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_clx*.yaml )
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_clx_*.yaml )
+ TOPOLOGIES_TAGS="2_node_*_link_topo"
+ ;;
+ "2n_icx")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_icx_*.yaml )
+ TOPOLOGIES_TAGS="2_node_*_link_topo"
+ ;;
+ "2n_spr")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_spr_*.yaml )
TOPOLOGIES_TAGS="2_node_*_link_topo"
;;
- "2n_dnv")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_dnv*.yaml )
+ "2n_tx2")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_tx2_*.yaml )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "3n_dnv")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_dnv*.yaml )
+ "2n_zn2")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_zn2_*.yaml )
+ TOPOLOGIES_TAGS="2_node_*_link_topo"
+ ;;
+ "3n_alt")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_alt_*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
- "3n_hsw")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_hsw*.yaml )
+ "3n_aws")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n-aws*.yaml )
+ TOPOLOGIES_TAGS="3_node_single_link_topo"
+ ;;
+ "3n_c7gn")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n-c7gn*.yaml )
+ TOPOLOGIES_TAGS="3_node_single_link_topo"
+ ;;
+ "3n_c6in")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n-c6in*.yaml )
+ TOPOLOGIES_TAGS="3_node_single_link_topo"
+ ;;
+ "3n_icx")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_icx_*.yaml )
+ # Trailing underscore is needed to distinguish from 3n_icxd.
+ TOPOLOGIES_TAGS="3_node_*_link_topo"
+ ;;
+ "3n_icxd")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_icxd_*.yaml )
+ TOPOLOGIES_TAGS="3_node_single_link_topo"
+ ;;
+ "3n_snr")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_snr_*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
"3n_tsh")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_tsh*.yaml )
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_tsh_*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
- "2n_tx2")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_tx2*.yaml )
+ "3na_spr")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3na_spr_*.yaml )
+ TOPOLOGIES_TAGS="3_node_*_link_topo"
+ ;;
+ "3nb_spr")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3nb_spr_*.yaml )
+ TOPOLOGIES_TAGS="3_node_*_link_topo"
+ ;;
+ "2n_x"*)
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_"${FLAVOR}"*.yaml )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
+ "3n_x"*)
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_"${FLAVOR}"*.yaml )
+ TOPOLOGIES_TAGS="3_node_single_link_topo"
+ ;;
*)
- # No falling back to 3n_hsw default, that should have been done
+ # No falling back to default, that should have been done
# by the function which has set NODENESS and FLAVOR.
die "Unknown specification: ${case_text}"
esac
@@ -1055,66 +1321,54 @@ function select_topology () {
}
-function select_vpp_device_tags () {
+function set_environment_variables () {
+ # Depending on testbed topology, overwrite defaults set in the
+ # resources/libraries/python/Constants.py file
+ #
+ # Only to be called from the reservation function,
+ # as resulting values may change based on topology data.
+ #
# Variables read:
# - TEST_CODE - String affecting test selection, usually jenkins job name.
- # - TEST_TAG_STRING - String selecting tags, from gerrit comment.
- # Can be unset.
# Variables set:
- # - TAGS - Array of processed tag boolean expressions.
+ # See specific cases
set -exuo pipefail
case "${TEST_CODE}" in
- # Select specific device tests based on jenkins job type variable.
- * )
- if [[ -z "${TEST_TAG_STRING-}" ]]; then
- # If nothing is specified, we will run pre-selected tests by
- # following tags. Items of array will be concatenated by OR
- # in Robot Framework.
- test_tag_array=()
- else
- # If trigger contains tags, split them into array.
- test_tag_array=(${TEST_TAG_STRING//:/ })
- fi
- SELECTION_MODE="--include"
+ *"1n-aws" | *"2n-aws" | *"3n-aws")
+ export TREX_RX_DESCRIPTORS_COUNT=1024
+ export TREX_EXTRA_CMDLINE="--mbuf-factor 19"
+ export TREX_CORE_COUNT=6
+ # Settings to prevent duration stretching.
+ export PERF_TRIAL_STL_DELAY=0.1
;;
- esac
-
- # Blacklisting certain tags per topology.
- #
- # Reasons for blacklisting:
- # - avf - AVF is not possible to run on enic driver of VirtualBox.
- # - vhost - VirtualBox does not support nesting virtualization on Intel CPU.
- case "${TEST_CODE}" in
- *"1n-vbox"*)
- test_tag_array+=("!avf")
- test_tag_array+=("!vhost")
+ *"2n-c7gn" | *"3n-c7gn")
+ export TREX_RX_DESCRIPTORS_COUNT=1024
+ export TREX_EXTRA_CMDLINE="--mbuf-factor 19"
+ export TREX_CORE_COUNT=6
+ # Settings to prevent duration stretching.
+ export PERF_TRIAL_STL_DELAY=0.1
;;
- *)
+ *"1n-c6in" | *"2n-c6in" | *"3n-c6in")
+ export TREX_RX_DESCRIPTORS_COUNT=1024
+ export TREX_EXTRA_CMDLINE="--mbuf-factor 19"
+ export TREX_CORE_COUNT=6
+ # Settings to prevent duration stretching.
+ export PERF_TRIAL_STL_DELAY=0.1
+ ;;
+ *"2n-zn2")
+ # Maciek's workaround for Zen2 with lower amount of cores.
+ export TREX_CORE_COUNT=14
+ ;;
+ *"2n-x-"* | *"3n-x-"* )
+ export TREX_CORE_COUNT=2
;;
esac
-
- TAGS=()
-
- # We will prefix with devicetest to prevent running other tests
- # (e.g. Functional).
- prefix="devicetestAND"
- if [[ "${TEST_CODE}" == "vpp-"* ]]; then
- # Automatic prefixing for VPP jobs to limit testing.
- prefix="${prefix}"
- fi
- for tag in "${test_tag_array[@]}"; do
- if [[ ${tag} == "!"* ]]; then
- # Exclude tags are not prefixed.
- TAGS+=("${tag}")
- else
- TAGS+=("${prefix}${tag}")
- fi
- done
}
+
function untrap_and_unreserve_testbed () {
# Use this as a trap function to ensure testbed does not remain reserved.
@@ -1129,7 +1383,8 @@ function untrap_and_unreserve_testbed () {
# Variables read (by inner function):
# - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
# - PYTHON_SCRIPTS_DIR - Path to directory holding Python scripts.
- # Variables written:
+ # Variables set:
+ # - TERRAFORM_MODULE_DIR - Terraform module directory.
# - WORKING_TOPOLOGY - Set to empty string on successful unreservation.
# Trap unregistered:
# - EXIT - Failure to untrap is reported, but ignored otherwise.
@@ -1149,6 +1404,22 @@ function untrap_and_unreserve_testbed () {
python3 "${PYTHON_SCRIPTS_DIR}/topo_reservation.py" -c -t "${wt}" || {
die "${1:-FAILED TO UNRESERVE, FIX MANUALLY.}" 2
}
+ case "${TEST_CODE}" in
+ *"1n-aws" | *"2n-aws" | *"3n-aws")
+ TERRAFORM_MODULE_DIR="terraform-aws-${NODENESS}-${FLAVOR}-c5n"
+ terraform_destroy || die "Failed to call terraform destroy."
+ ;;
+ *"2n-c7gn" | *"3n-c7gn")
+ TERRAFORM_MODULE_DIR="terraform-aws-${NODENESS}-${FLAVOR}"
+ terraform_destroy || die "Failed to call terraform destroy."
+ ;;
+ *"1n-c6in" | *"2n-c6in" | *"3n-c6in")
+ TERRAFORM_MODULE_DIR="terraform-aws-${NODENESS}-${FLAVOR}"
+ terraform_destroy || die "Failed to call terraform destroy."
+ ;;
+ *)
+ ;;
+ esac
WORKING_TOPOLOGY=""
set -eu
fi
diff --git a/resources/libraries/bash/function/device.sh b/resources/libraries/bash/function/device.sh
index cd987cafd2..4d39cd2de6 100644
--- a/resources/libraries/bash/function/device.sh
+++ b/resources/libraries/bash/function/device.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -30,6 +30,7 @@ function activate_wrapper () {
enter_mutex || die
get_available_interfaces "${1}" "${2}" || die
+ bind_dut_interfaces_to_vpp_driver || die
start_topology_containers "${3}" || die
bind_interfaces_to_containers || die
set_env_variables || die
@@ -38,6 +39,29 @@ function activate_wrapper () {
}
+function bind_dut_interfaces_to_vpp_driver () {
+
+    # Bind DUT network interfaces to the driver that VPP will use.
+ #
+ # Variables read:
+ # - DUT1_NETDEVS - List of network devices allocated to DUT1 container.
+ # Variables set:
+ # - NETDEV - Linux network interface.
+ # - DRIVER - Kernel driver to bind the interface to.
+ # - KRN_DRIVER - The original kernel driver of the network interface.
+
+ for NETDEV in "${DUT1_NETDEVS[@]}"; do
+ get_pci_addr || die
+ get_krn_driver || die
+ if [[ ${KRN_DRIVER} == "iavf" ]]; then
+ DRIVER="vfio-pci"
+ ADDR=${PCI_ADDR}
+ bind_interfaces_to_driver || die
+ fi
+ done
+}
+
+
function bind_interfaces_to_containers () {
# Bind linux network interface to container and create symlink for PCI
@@ -51,36 +75,42 @@ function bind_interfaces_to_containers () {
# - TG_NETDEVS - List of network devices allocated to TG container.
# Variables set:
# - NETDEV - Linux network interface.
+ # - KRN_DRIVER - Kernel driver of network device.
set -exuo pipefail
- for NETDEV in "${TG_NETDEVS[@]}"; do
- get_pci_addr || die
+ for PCI_ADDR in "${TG_PCIDEVS[@]}"; do
+ get_netdev_name || die
link_target=$(readlink -f /sys/bus/pci/devices/"${PCI_ADDR}") || {
die "Reading symlink for PCI address failed!"
}
cmd="ln -s ${link_target} /sys/bus/pci/devices/${PCI_ADDR}"
- sudo ip link set ${NETDEV} netns ${DCR_CPIDS[tg]} || {
- die "Moving interface to ${DCR_CPIDS[tg]} namespace failed!"
- }
docker exec "${DCR_UUIDS[tg]}" ${cmd} || {
die "Linking PCI address in container failed!"
}
+
+ sudo ip link set ${NETDEV} netns ${DCR_CPIDS[tg]} || {
+ die "Moving interface to ${DCR_CPIDS[tg]} namespace failed!"
+ }
done
- for NETDEV in "${DUT1_NETDEVS[@]}"; do
- get_pci_addr || die
+ for PCI_ADDR in "${DUT1_PCIDEVS[@]}"; do
link_target=$(readlink -f /sys/bus/pci/devices/"${PCI_ADDR}") || {
die "Reading symlink for PCI address failed!"
}
cmd="ln -s ${link_target} /sys/bus/pci/devices/${PCI_ADDR}"
- sudo ip link set ${NETDEV} netns ${DCR_CPIDS[dut1]} || {
- die "Moving interface to ${DCR_CPIDS[dut1]} namespace failed!"
- }
- docker exec "${DCR_UUIDS[dut1]}" ${cmd} || {
+ docker exec "${DCR_UUIDS[dut1]}" ${cmd} || {
die "Linking PCI address in container failed!"
}
+
+ get_krn_driver
+ if [[ ${KRN_DRIVER} != "vfio-pci" ]]; then
+ get_netdev_name || die
+ sudo ip link set ${NETDEV} netns ${DCR_CPIDS[dut1]} || {
+ die "Moving interface to ${DCR_CPIDS[dut1]} namespace failed!"
+ }
+ fi
done
}
@@ -99,13 +129,22 @@ function bind_interfaces_to_driver () {
pci_path="/sys/bus/pci/devices/${ADDR}"
drv_path="/sys/bus/pci/drivers/${DRIVER}"
if [ -d "${pci_path}/driver" ]; then
- echo ${ADDR} | sudo tee ${pci_path}/driver/unbind || {
+ echo ${ADDR} | sudo tee ${pci_path}/driver/unbind > /dev/null || {
die "Failed to unbind interface ${ADDR}!"
}
fi
- echo ${ADDR} | sudo tee ${drv_path}/bind || {
+
+ echo ${DRIVER} | sudo tee /sys/bus/pci/devices/${ADDR}/driver_override \
+ > /dev/null || {
+ die "Failed to override driver to ${DRIVER} for ${ADDR}!"
+ }
+
+ echo ${ADDR} | sudo tee ${drv_path}/bind > /dev/null || {
die "Failed to bind interface ${ADDR}!"
}
+
+ echo | sudo tee /sys/bus/pci/devices/${ADDR}/driver_override > /dev/null \
+ || die "Failed to reset driver override for ${ADDR}!"
}
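
The bind now goes through driver_override, the kernel mechanism for forcing one device onto a specific driver regardless of its ID tables; the override is reset afterwards so later unbinds fall back to normal matching. A standalone sketch of the same sequence (PCI address illustrative):

    ADDR="0000:3b:02.0"; DRIVER="vfio-pci"     # illustrative values
    [ -d "/sys/bus/pci/devices/${ADDR}/driver" ] && \
        echo "${ADDR}" | sudo tee "/sys/bus/pci/devices/${ADDR}/driver/unbind" > /dev/null
    echo "${DRIVER}" | sudo tee "/sys/bus/pci/devices/${ADDR}/driver_override" > /dev/null
    echo "${ADDR}" | sudo tee "/sys/bus/pci/drivers/${DRIVER}/bind" > /dev/null
    echo | sudo tee "/sys/bus/pci/devices/${ADDR}/driver_override" > /dev/null
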
@@ -134,13 +173,17 @@ function clean_environment () {
}
# Rebind interfaces back to kernel drivers.
+ i=0
for ADDR in ${TG_PCIDEVS[@]}; do
- DRIVER="${TG_DRIVERS[0]}"
+ DRIVER="${TG_DRIVERS[${i}]}"
bind_interfaces_to_driver || die
+ ((i++))
done
+ i=0
for ADDR in ${DUT1_PCIDEVS[@]}; do
- DRIVER="${DUT1_DRIVERS[0]}"
+ DRIVER="${DUT1_DRIVERS[${i}]}"
bind_interfaces_to_driver || die
+ ((i++))
done
}
@@ -245,16 +288,20 @@ function get_available_interfaces () {
# - DUT1_PCIDEVS - List of PCI addresses allocated to DUT1 container.
# - DUT1_NETMACS - List of MAC addresses allocated to DUT1 container.
# - DUT1_DRIVERS - List of interface drivers to DUT1 container.
+    # - DUT1_VLANS - List of interface vlans to DUT1 container.
+    # - DUT1_MODEL - List of interface models to DUT1 container.
# - TG_NETDEVS - List of network devices allocated to TG container.
# - TG_PCIDEVS - List of PCI addresses allocated to TG container.
# - TG_NETMACS - List of MAC addresses allocated to TG container.
# - TG_DRIVERS - List of interface drivers to TG container.
+ # - TG_VLANS - List of interface vlans to TG container.
+ # - TG_MODEL - List of interface models to TG container.
set -exuo pipefail
- # Following code is specifing VFs ID based on nodeness and flavor.
+ # Following code is specifying VFs ID based on nodeness and flavor.
# As there is great variability in hardware configuration outside LF,
- # from bootstrap architecure point of view these are considered as flavors.
+ # from bootstrap architecture point of view these are considered as flavors.
# Anyone can override flavor for its own machine and add condition here.
# See http://pci-ids.ucw.cz/v2.2/pci.ids for more info.
case_text="${1}_${2}"
@@ -262,22 +309,30 @@ function get_available_interfaces () {
"1n_skx")
# Add Intel Corporation XL710/X710 Virtual Function to the
# whitelist.
- pci_id="0x154c"
- tg_netdev=(enp24)
- dut1_netdev=(enp59)
+ # Add Intel Corporation E810 Virtual Function to the
+ # whitelist.
+ pci_id="0x154c\|0x1889"
+ tg_netdev=(ens1 enp134)
+ dut1_netdev=(ens5 enp175)
+ ports_per_nic=2
;;
- "1n_tx2")
+ "1n_alt")
# Add Intel Corporation XL710/X710 Virtual Function to the
# whitelist.
- pci_id="0x154c"
- tg_netdev=(enp5s2 enp5s3 enp5s4 enp5s5
- enp5s6 enp5s7 enp5s8 enp5s9)
- tg_netdev+=(enp5s10 enp5s11 enp5s12 enp5s13
- enp5s14 enp5s15 enp5s16 enp5s17)
- dut1_netdev=(enp145s2 enp145s3 enp145s4 enp145s5
- enp145s6 enp145s7 enp145s8 enp145s9)
- dut1_netdev+=(enp145s10 enp145s11 enp145s12 enp145s13
- enp145s14 enp145s15 enp145s16 enp145s17)
+ # Add MT2892 Family [ConnectX-6 Dx] Virtual Function to the
+ # whitelist.
+ pci_id="0x154c\|0x101e"
+ tg_netdev=(enp1s0f0 enp1s0f1 enP1p1s0f0)
+ dut1_netdev=(enP3p2s0f0 enP3p2s0f1 enP1p1s0f1)
+ ports_per_nic=2
+ ;;
+ "1n_spr")
+ # Add Intel Corporation E810 Virtual Function to the
+ # whitelist.
+ pci_id="0x1889"
+ tg_netdev=(enp42s0 enp44s0)
+ dut1_netdev=(enp63s0 enp61s0)
+ ports_per_nic=1
;;
"1n_vbox")
# Add Intel Corporation 82545EM Gigabit Ethernet Controller to the
@@ -285,43 +340,47 @@ function get_available_interfaces () {
pci_id="0x100f"
tg_netdev=(enp0s8 enp0s9)
dut1_netdev=(enp0s16 enp0s17)
+ ports_per_nic=1
;;
*)
die "Unknown specification: ${case_text}!"
esac
- device_count=2
-
# TG side of connections.
TG_NETDEVS=()
TG_PCIDEVS=()
TG_NETMACS=()
TG_DRIVERS=()
TG_VLANS=()
+ TG_MODEL=()
# DUT1 side of connections.
DUT1_NETDEVS=()
DUT1_PCIDEVS=()
DUT1_NETMACS=()
DUT1_DRIVERS=()
DUT1_VLANS=()
+ DUT1_MODEL=()
# Find the first ${device_count} number of available TG Linux network
# VF device names. Only allowed VF PCI IDs are filtered.
for netdev in ${tg_netdev[@]}
do
+ ports=0
for netdev_path in $(grep -l "${pci_id}" \
/sys/class/net/${netdev}*/device/device \
2> /dev/null)
do
- if [[ ${#TG_NETDEVS[@]} -lt ${device_count} ]]; then
+ if [[ ${ports} -lt ${ports_per_nic} ]]; then
tg_netdev_name=$(dirname ${netdev_path})
tg_netdev_name=$(dirname ${tg_netdev_name})
TG_NETDEVS+=($(basename ${tg_netdev_name}))
+ ((ports++))
else
break
fi
done
- if [[ ${#TG_NETDEVS[@]} -eq ${device_count} ]]; then
+ ports_per_device=$((${ports_per_nic}*${#tg_netdev[@]}))
+ if [[ ${#TG_NETDEVS[@]} -eq ${ports_per_device} ]]; then
break
fi
done
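
Discovery now collects ports_per_nic VFs per parent device instead of a flat count of two; for example (values taken from the 1n_skx case above):

    tg_netdev=(ens1 enp134); ports_per_nic=2
    ports_per_device=$((ports_per_nic * ${#tg_netdev[@]}))   # = 4 expected VFs
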
@@ -345,29 +404,30 @@ function get_available_interfaces () {
get_mac_addr
get_krn_driver
get_vlan_filter
+ get_csit_model
TG_PCIDEVS+=(${PCI_ADDR})
TG_NETMACS+=(${MAC_ADDR})
TG_DRIVERS+=(${KRN_DRIVER})
TG_VLANS+=(${VLAN_ID})
+ TG_MODELS+=(${MODEL})
done
for NETDEV in "${DUT1_NETDEVS[@]}"; do
get_pci_addr
get_mac_addr
get_krn_driver
get_vlan_filter
+ get_csit_model
DUT1_PCIDEVS+=(${PCI_ADDR})
DUT1_NETMACS+=(${MAC_ADDR})
DUT1_DRIVERS+=(${KRN_DRIVER})
DUT1_VLANS+=(${VLAN_ID})
+ DUT1_MODELS+=(${MODEL})
done
# We need at least two interfaces for TG/DUT1 for building topology.
- if [ "${#TG_NETDEVS[@]}" -ne 2 ] || [ "${#DUT1_NETDEVS[@]}" -ne 2 ]; then
+ if [ "${#TG_NETDEVS[@]}" -lt 2 ] || [ "${#DUT1_NETDEVS[@]}" -lt 2 ]; then
die "Not enough linux network interfaces found!"
fi
- if [ "${#TG_PCIDEVS[@]}" -ne 2 ] || [ "${#DUT1_PCIDEVS[@]}" -ne 2 ]; then
- die "Not enough pci interfaces found!"
- fi
}
@@ -408,6 +468,57 @@ function get_mac_addr () {
}
+function get_netdev_name () {
+
+ # Get Linux network device name.
+ #
+ # Variables read:
+ # - PCI_ADDR - PCI address of the device.
+ # Variables set:
+ # - NETDEV - Linux network device name.
+
+ set -exuo pipefail
+
+ if [ -d /sys/bus/pci/devices/${PCI_ADDR}/net ]; then
+ NETDEV="$(basename /sys/bus/pci/devices/${PCI_ADDR}/net/*)" || {
+ die "Failed to get Linux interface name of ${PCI_ADDR}"
+ }
+ fi
+}
+
+
+function get_csit_model () {
+
+ # Get CSIT model name from linux network device name.
+ #
+ # Variables read:
+ # - NETDEV - Linux network device name.
+ # Variables set:
+ # - MODEL - CSIT model name of network device.
+
+ set -exuo pipefail
+
+ if [ -d /sys/class/net/${NETDEV}/device ]; then
+ ID="$(</sys/class/net/${NETDEV}/device/device)" || {
+ die "Failed to get device id of linux network interface!"
+ }
+ case "${ID}" in
+ "0x1592"|"0x1889")
+ MODEL="Intel-E810CQ"
+ ;;
+ "0x1572"|"0x154c")
+ MODEL="Intel-X710"
+ ;;
+ "0x101e")
+ MODEL="Mellanox-CX6DX"
+ ;;
+ *)
+ MODEL="virtual"
+ esac
+ fi
+}
+
+
function get_pci_addr () {
# Get PCI address in <domain>:<bus:<device>.<func> format from linux network
@@ -424,13 +535,33 @@ function get_pci_addr () {
PCI_ADDR=$(basename $(readlink /sys/class/net/${NETDEV}/device)) || {
die "Failed to get PCI address of linux network interface!"
}
- fi
- if [ ! -d /sys/bus/pci/devices/${PCI_ADDR} ]; then
- die "PCI device ${NETDEV} doesn't exist!"
+ if [ ! -d /sys/bus/pci/devices/${PCI_ADDR} ]; then
+ die "PCI device ${PCI_ADDR} doesn't exist!"
+ fi
+ else
+ die "Can't get device info of interface ${NETDEV}!"
fi
}
+function get_vfio_group () {
+
+ # Get the VFIO group of a pci device.
+ #
+ # Variables read:
+ # - PCI_ADDR - PCI address of a device.
+ # Variables set:
+ # - VFIO_GROUP - The VFIO group of the PCI device.
+
+ if [[ -d /sys/bus/pci/devices/${PCI_ADDR}/iommu_group ]]; then
+ VFIO_GROUP="$(basename\
+ $(readlink /sys/bus/pci/devices/${PCI_ADDR}/iommu_group)\
+ )" || {
+ die "PCI device ${PCI_ADDR} does not have an iommu group!"
+ }
+ fi
+}
+
function get_vlan_filter () {
    # Get VLAN stripping filter from PF searched by mac address.
@@ -467,6 +598,55 @@ function installed () {
}
+function parse_env_variables () {
+
+ # Parse environment variables.
+ #
+ # Variables read, set or exported: Multiple,
+ # see the code for the current list.
+
+ set -exuo pipefail
+
+ IFS=@ read -a TG_NETMACS <<< "${CSIT_TG_INTERFACES_PORT_MAC}"
+ IFS=@ read -a TG_PCIDEVS <<< "${CSIT_TG_INTERFACES_PORT_PCI}"
+ IFS=@ read -a TG_DRIVERS <<< "${CSIT_TG_INTERFACES_PORT_DRV}"
+ IFS=@ read -a TG_VLANS <<< "${CSIT_TG_INTERFACES_PORT_VLAN}"
+ IFS=@ read -a TG_MODELS <<< "${CSIT_TG_INTERFACES_PORT_MODEL}"
+ IFS=@ read -a DUT1_NETMACS <<< "${CSIT_DUT1_INTERFACES_PORT_MAC}"
+ IFS=@ read -a DUT1_PCIDEVS <<< "${CSIT_DUT1_INTERFACES_PORT_PCI}"
+ IFS=@ read -a DUT1_DRIVERS <<< "${CSIT_DUT1_INTERFACES_PORT_DRV}"
+ IFS=@ read -a DUT1_VLANS <<< "${CSIT_DUT1_INTERFACES_PORT_VLAN}"
+ IFS=@ read -a DUT1_MODELS <<< "${CSIT_DUT1_INTERFACES_PORT_MODEL}"
+
+ for port in $(seq "${#TG_NETMACS[*]}"); do
+ CSIT_TG_INTERFACES+=$(cat << EOF
+ port$((port-1)):
+ mac_address: "${TG_NETMACS[$((port-1))]}"
+ pci_address: "${TG_PCIDEVS[$((port-1))]}"
+ link: "link$((port-1))"
+ model: ${TG_MODELS[$((port-1))]}
+ driver: "${TG_DRIVERS[$((port-1))]}"
+ vlan: ${TG_VLANS[$((port-1))]}
+EOF
+ )
+ CSIT_TG_INTERFACES+=$'\n'
+ done
+ for port in $(seq "${#DUT1_NETMACS[*]}"); do
+ CSIT_DUT1_INTERFACES+=$(cat << EOF
+ port$((port-1)):
+ mac_address: "${DUT1_NETMACS[$((port-1))]}"
+ pci_address: "${DUT1_PCIDEVS[$((port-1))]}"
+ link: "link$((port-1))"
+ model: ${DUT1_MODELS[$((port-1))]}
+ driver: "${DUT1_DRIVERS[$((port-1))]}"
+ vlan: ${DUT1_VLANS[$((port-1))]}
+EOF
+ )
+ CSIT_DUT1_INTERFACES+=$'\n'
+ done
+}
+
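As a sketch, two TG ports would arrive as @-separated scalars like the following (values hypothetical; the remaining CSIT_TG_INTERFACES_PORT_* and CSIT_DUT1_* variables are set analogously), and parse_env_variables renders one "portN:" YAML block per port:

    export CSIT_TG_INTERFACES_PORT_MAC="ba:dc:0f:fe:00:01@ba:dc:0f:fe:00:02"
    export CSIT_TG_INTERFACES_PORT_PCI="0000:3b:00.0@0000:3b:00.1"
    parse_env_variables
    echo "${CSIT_TG_INTERFACES}"    # port0: ... port1: ... blocks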
+
function print_env_variables () {
# Get environment variables prefixed by CSIT_.
@@ -485,7 +665,6 @@ function read_env_variables () {
# - ${@} - Variables passed as an argument.
# Variables read, set or exported: Multiple,
# see the code for the current list.
- # TODO: Do we need to list them and their meanings?
set -exuo pipefail
@@ -495,18 +674,17 @@ function read_env_variables () {
declare -gA DCR_UUIDS
DCR_UUIDS+=([tg]="${CSIT_TG_UUID}")
DCR_UUIDS+=([dut1]="${CSIT_DUT1_UUID}")
- TG_PCIDEVS=("${CSIT_TG_INTERFACES_PORT1_PCI}")
- TG_DRIVERS=("${CSIT_TG_INTERFACES_PORT1_DRV}")
- TG_VLANS+=("${CSIT_TG_INTERFACES_PORT1_VLAN}")
- TG_PCIDEVS+=("${CSIT_TG_INTERFACES_PORT2_PCI}")
- TG_DRIVERS+=("${CSIT_TG_INTERFACES_PORT2_DRV}")
- TG_VLANS+=("${CSIT_TG_INTERFACES_PORT2_VLAN}")
- DUT1_PCIDEVS=("${CSIT_DUT1_INTERFACES_PORT1_PCI}")
- DUT1_DRIVERS=("${CSIT_DUT1_INTERFACES_PORT1_DRV}")
- DUT1_VLANS+=("${CSIT_DUT1_INTERFACES_PORT1_VLAN}")
- DUT1_PCIDEVS+=("${CSIT_DUT1_INTERFACES_PORT2_PCI}")
- DUT1_DRIVERS+=("${CSIT_DUT1_INTERFACES_PORT2_DRV}")
- DUT1_VLANS+=("${CSIT_DUT1_INTERFACES_PORT2_VLAN}")
+
+ IFS=@ read -a TG_NETMACS <<< "${CSIT_TG_INTERFACES_PORT_MAC}"
+ IFS=@ read -a TG_PCIDEVS <<< "${CSIT_TG_INTERFACES_PORT_PCI}"
+ IFS=@ read -a TG_DRIVERS <<< "${CSIT_TG_INTERFACES_PORT_DRV}"
+ IFS=@ read -a TG_VLANS <<< "${CSIT_TG_INTERFACES_PORT_VLAN}"
+ IFS=@ read -a TG_MODELS <<< "${CSIT_TG_INTERFACES_PORT_MODEL}"
+ IFS=@ read -a DUT1_NETMACS <<< "${CSIT_DUT1_INTERFACES_PORT_MAC}"
+ IFS=@ read -a DUT1_PCIDEVS <<< "${CSIT_DUT1_INTERFACES_PORT_PCI}"
+ IFS=@ read -a DUT1_DRIVERS <<< "${CSIT_DUT1_INTERFACES_PORT_DRV}"
+ IFS=@ read -a DUT1_VLANS <<< "${CSIT_DUT1_INTERFACES_PORT_VLAN}"
+ IFS=@ read -a DUT1_MODELS <<< "${CSIT_DUT1_INTERFACES_PORT_MODEL}"
}
@@ -517,13 +695,18 @@ function set_env_variables () {
# Variables read:
# - DCR_UUIDS - Docker Container UUIDs.
# - DCR_PORTS - Docker Container's SSH ports.
- # - DUT1_NETMACS - List of network devices MAC addresses of DUT1 container.
- # - DUT1_PCIDEVS - List of PCI addresses of devices of DUT1 container.
+ # - DUT1_NETDEVS - List of network devices allocated to DUT1 container.
+ # - DUT1_PCIDEVS - List of PCI addresses allocated to DUT1 container.
+ # - DUT1_NETMACS - List of MAC addresses allocated to DUT1 container.
# - DUT1_DRIVERS - List of interface drivers to DUT1 container.
- # - TG_NETMACS - List of network devices MAC addresses of TG container.
- # - TG_PCIDEVS - List of PCI addresses of devices of TG container.
+    # - DUT1_VLANS - List of interface vlans to DUT1 container.
+    # - DUT1_MODELS - List of interface models to DUT1 container.
+ # - TG_NETDEVS - List of network devices allocated to TG container.
+ # - TG_PCIDEVS - List of PCI addresses allocated to TG container.
+ # - TG_NETMACS - List of MAC addresses allocated to TG container.
# - TG_DRIVERS - List of interface drivers to TG container.
- # Variables set: TODO.
+ # - TG_VLANS - List of interface vlans to TG container.
+    # - TG_MODELS - List of interface models to TG container.
+    # Variables set or exported: CSIT_* variables, see the code.
set -exuo pipefail
@@ -531,7 +714,7 @@ function set_env_variables () {
CSIT_TG_HOST="$(hostname --all-ip-addresses | awk '{print $1}')" || {
die "Reading hostname IP address failed!"
}
- CSIT_TG_PORT="${DCR_PORTS[tg]#*:}"
+ CSIT_TG_PORT="${DCR_PORTS[tg]##*:}"
CSIT_TG_UUID="${DCR_UUIDS[tg]}"
CSIT_TG_ARCH="$(uname -i)" || {
die "Reading machine architecture failed!"
@@ -539,28 +722,25 @@ function set_env_variables () {
CSIT_DUT1_HOST="$(hostname --all-ip-addresses | awk '{print $1}')" || {
die "Reading hostname IP address failed!"
}
- CSIT_DUT1_PORT="${DCR_PORTS[dut1]#*:}"
+ CSIT_DUT1_PORT="${DCR_PORTS[dut1]##*:}"
CSIT_DUT1_UUID="${DCR_UUIDS[dut1]}"
CSIT_DUT1_ARCH="$(uname -i)" || {
die "Reading machine architecture failed!"
}
- CSIT_TG_INTERFACES_PORT1_MAC="${TG_NETMACS[0]}"
- CSIT_TG_INTERFACES_PORT1_PCI="${TG_PCIDEVS[0]}"
- CSIT_TG_INTERFACES_PORT1_DRV="${TG_DRIVERS[0]}"
- CSIT_TG_INTERFACES_PORT1_VLAN="${TG_VLANS[0]}"
- CSIT_TG_INTERFACES_PORT2_MAC="${TG_NETMACS[1]}"
- CSIT_TG_INTERFACES_PORT2_PCI="${TG_PCIDEVS[1]}"
- CSIT_TG_INTERFACES_PORT2_DRV="${TG_DRIVERS[1]}"
- CSIT_TG_INTERFACES_PORT2_VLAN="${TG_VLANS[1]}"
- CSIT_DUT1_INTERFACES_PORT1_MAC="${DUT1_NETMACS[0]}"
- CSIT_DUT1_INTERFACES_PORT1_PCI="${DUT1_PCIDEVS[0]}"
- CSIT_DUT1_INTERFACES_PORT1_DRV="${DUT1_DRIVERS[0]}"
- CSIT_DUT1_INTERFACES_PORT1_VLAN="${DUT1_VLANS[0]}"
- CSIT_DUT1_INTERFACES_PORT2_MAC="${DUT1_NETMACS[1]}"
- CSIT_DUT1_INTERFACES_PORT2_PCI="${DUT1_PCIDEVS[1]}"
- CSIT_DUT1_INTERFACES_PORT2_DRV="${DUT1_DRIVERS[1]}"
- CSIT_DUT1_INTERFACES_PORT2_VLAN="${DUT1_VLANS[1]}"
+ OIFS="$IFS" IFS=@
+ set -a
+ CSIT_TG_INTERFACES_PORT_MAC="${TG_NETMACS[*]}"
+ CSIT_TG_INTERFACES_PORT_PCI="${TG_PCIDEVS[*]}"
+ CSIT_TG_INTERFACES_PORT_DRV="${TG_DRIVERS[*]}"
+ CSIT_TG_INTERFACES_PORT_VLAN="${TG_VLANS[*]}"
+ CSIT_TG_INTERFACES_PORT_MODEL="${TG_MODELS[*]}"
+ CSIT_DUT1_INTERFACES_PORT_MAC="${DUT1_NETMACS[*]}"
+ CSIT_DUT1_INTERFACES_PORT_PCI="${DUT1_PCIDEVS[*]}"
+ CSIT_DUT1_INTERFACES_PORT_DRV="${DUT1_DRIVERS[*]}"
+ CSIT_DUT1_INTERFACES_PORT_VLAN="${DUT1_VLANS[*]}"
+ CSIT_DUT1_INTERFACES_PORT_MODEL="${DUT1_MODELS[*]}"
set +a
+ IFS="$OIFS"
}
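The @-joined scalars exported here are the exact inverse of the "IFS=@ read -a" splitting used in read_env_variables above; a round-trip sketch:

    arr=("0000:3b:00.0" "0000:3b:00.1")    # hypothetical PCI list
    OIFS="$IFS" IFS=@
    joined="${arr[*]}"                     # "0000:3b:00.0@0000:3b:00.1"
    IFS="$OIFS"
    IFS=@ read -a back <<< "${joined}"     # back to a two-element array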
@@ -598,9 +778,21 @@ function start_topology_containers () {
# Override access to PCI bus by attaching a filesystem mount to the
# container.
dcr_stc_params+="--mount type=tmpfs,destination=/sys/bus/pci/devices "
- # Mount vfio to be able to bind to see bound interfaces. We cannot use
- # --device=/dev/vfio as this does not see newly bound interfaces.
- dcr_stc_params+="--volume /dev/vfio:/dev/vfio "
+ # Mount vfio devices to be able to use VFs inside the container.
+ vfio_bound="false"
+ for PCI_ADDR in ${DUT1_PCIDEVS[@]}; do
+ get_krn_driver
+ if [[ ${KRN_DRIVER} == "vfio-pci" ]]; then
+ get_vfio_group
+ dcr_stc_params+="--device /dev/vfio/${VFIO_GROUP} "
+ vfio_bound="true"
+ fi
+ done
+ if ! ${vfio_bound}; then
+ dcr_stc_params+="--volume /dev/vfio:/dev/vfio "
+ fi
+ # Disable manipulation with hugepages by VPP.
+ dcr_stc_params+="--volume /dev/null:/etc/sysctl.d/80-vpp.conf "
    # Mount docker.sock to be able to use the docker daemon of the host.
dcr_stc_params+="--volume /var/run/docker.sock:/var/run/docker.sock "
# Mount /opt/boot/ where VM kernel and initrd are located.
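With, say, one DUT1 device bound to vfio-pci in IOMMU group 42 (hypothetical), dcr_stc_params would now carry a fragment along these lines:

    --device /dev/vfio/42 --volume /dev/null:/etc/sysctl.d/80-vpp.conf ...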
diff --git a/resources/libraries/bash/function/dpdk.sh b/resources/libraries/bash/function/dpdk.sh
index 0b148aa83d..86abb84a02 100644
--- a/resources/libraries/bash/function/dpdk.sh
+++ b/resources/libraries/bash/function/dpdk.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -86,24 +86,18 @@ function dpdk_compile () {
pushd "${DPDK_DIR}" || die "Pushd failed"
- # Patch ARM.
- sed_cmd="s/'RTE_MAX_LCORE', [0-9]*/'RTE_MAX_LCORE', $(nproc --all)/"
- sed_file="config/arm/meson.build"
- sed -i "${sed_cmd}" "${sed_file}" || die "Patch failed"
+    # Enable the l3fwd example.
+ meson_options="-Dexamples=l3fwd "
- # Patch L3FWD.
- sed_rxd="s/^#define RTE_TEST_RX_DESC_DEFAULT 128/#define RTE_TEST_RX_DESC_DEFAULT 1024/g"
- sed_txd="s/^#define RTE_TEST_TX_DESC_DEFAULT 512/#define RTE_TEST_TX_DESC_DEFAULT 1024/g"
- sed_file="./main.c"
- pushd examples/l3fwd || die "Pushd failed"
- sed -i "${sed_rxd}" "${sed_file}" || die "Patch failed"
- sed -i "${sed_txd}" "${sed_file}" || die "Patch failed"
- popd || die "Popd failed"
+ # i40e specific options
+ meson_options="${meson_options} \
+ -Dc_args=-DRTE_LIBRTE_I40E_16BYTE_RX_DESC=y"
+
+    # Configure a generic build, the same as used by VPP.
+ meson_options="${meson_options} -Dplatform=generic"
# Compile using Meson and Ninja.
- export CFLAGS=""
- CFLAGS+="-DRTE_LIBRTE_I40E_16BYTE_RX_DESC=y"
- meson -Dexamples=l3fwd build || {
+ meson setup ${meson_options} build || {
die "Failed to compile DPDK!"
}
ninja -C build || die "Failed to compile DPDK!"
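Expanded, the build above is equivalent to running (from ${DPDK_DIR}):

    meson setup -Dexamples=l3fwd \
        -Dc_args=-DRTE_LIBRTE_I40E_16BYTE_RX_DESC=y \
        -Dplatform=generic build
    ninja -C build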
@@ -196,7 +190,6 @@ function dpdk_l3fwd_compile () {
#
# Variables read:
# - DPDK_DIR - Path to DPDK framework.
- # - CSIT_DIR - Path to CSIT framework.
# Functions called:
# - die - Print to stderr and exit.
@@ -204,12 +197,7 @@ function dpdk_l3fwd_compile () {
pushd "${DPDK_DIR}" || die "Pushd failed"
# Patch L3FWD.
- sed_rxd="s/^#define RTE_TEST_RX_DESC_DEFAULT 128/#define RTE_TEST_RX_DESC_DEFAULT 2048/g"
- sed_txd="s/^#define RTE_TEST_TX_DESC_DEFAULT 512/#define RTE_TEST_TX_DESC_DEFAULT 2048/g"
- sed_file="./main.c"
pushd examples/l3fwd || die "Pushd failed"
- sed -i "${sed_rxd}" "${sed_file}" || die "Patch failed"
- sed -i "${sed_txd}" "${sed_file}" || die "Patch failed"
chmod +x ${1} && source ${1} || die "Patch failed"
popd || die "Popd failed"
@@ -238,6 +226,28 @@ function dpdk_l3fwd () {
for attempt in {1..60}; do
echo "Checking if l3fwd is alive, attempt nr ${attempt}"
if fgrep "L3FWD: entering main loop on lcore" screenlog.0; then
+ cat screenlog.0
+ exit 0
+ fi
+ sleep 1
+ done
+ cat screenlog.0
+
+ exit 1
+}
+
+
+function dpdk_l3fwd_check () {
+
+    # Check the DPDK l3fwd state.
+
+ set -exuo pipefail
+
+ for attempt in {1..60}; do
+ echo "Checking if l3fwd state is ok, attempt nr ${attempt}"
+ if fgrep "Link up" screenlog.0; then
+ cat screenlog.0
+ dpdk_l3fwd_pid
exit 0
fi
sleep 1
@@ -248,6 +258,16 @@ function dpdk_l3fwd () {
}
+function dpdk_l3fwd_pid () {
+
+    # Print the PID of the running dpdk-l3fwd process, if any.
+
+    l3fwd_pid="$(pidof dpdk-l3fwd)"
+ if [ ! -z "${l3fwd_pid}" ]; then
+ echo "L3fwd process ID: ${l3fwd_pid}"
+ else
+ echo "L3fwd not running!"
+ fi
+}
+
+
function dpdk_precheck () {
# Precheck system settings (nr_hugepages, max_map_count).
@@ -292,9 +312,10 @@ function dpdk_testpmd () {
for attempt in {1..60}; do
echo "Checking if testpmd is alive, attempt nr ${attempt}"
- if fgrep "Press enter to exit" screenlog.0; then
- cat screenlog.0
- exit 0
+ if fgrep "Press enter to exit" screenlog.0; then
+ cat screenlog.0
+ dpdk_testpmd_pid
+ exit 0
fi
sleep 1
done
@@ -302,3 +323,33 @@ function dpdk_testpmd () {
exit 1
}
+
+
+function dpdk_testpmd_check () {
+
+    # Check the DPDK testpmd link state.
+
+ set -exuo pipefail
+
+ for attempt in {1..60}; do
+ echo "Checking if testpmd links state changed, attempt nr ${attempt}"
+ if fgrep "link state change event" screenlog.0; then
+ cat screenlog.0
+ exit 0
+ fi
+ sleep 1
+ done
+ cat screenlog.0
+
+ exit 1
+}
+
+
+function dpdk_testpmd_pid () {
+
+    # Print the PID of the running dpdk-testpmd process, if any.
+
+    testpmd_pid="$(pidof dpdk-testpmd)"
+ if [ ! -z "${testpmd_pid}" ]; then
+ echo "Testpmd process ID: ${testpmd_pid}"
+ else
+ echo "Testpmd not running!"
+ fi
+}
diff --git a/resources/libraries/bash/function/eb_version.sh b/resources/libraries/bash/function/eb_version.sh
new file mode 100644
index 0000000000..0393030065
--- /dev/null
+++ b/resources/libraries/bash/function/eb_version.sh
@@ -0,0 +1,159 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+
+function die_on_error () {
+
+ # Source this fragment if you want to abort on any failure.
+ #
+ # Variables read:
+ # - ${CODE_EXIT_STATUS} - Exit status of report generation.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ set -exuo pipefail
+
+ if [[ "${CODE_EXIT_STATUS}" != "0" ]]; then
+ die "Failed to generate docs!" "${CODE_EXIT_STATUS}"
+ fi
+}
+
+
+function eb_version_deploy () {
+
+ # Deploy Elastic Beanstalk CDash content.
+ #
+ # Variable read:
+ # - ${CSIT_DIR} - CSIT main directory.
+ # - ${TERRAFORM_OUTPUT_VAL} - Terraform output value.
+ # Variables set:
+ # - ${CODE_EXIT_STATUS} - Exit status of report generation.
+ # - ${TERRAFORM_OUTPUT_VAR} - Register Terraform output variable name.
+ # Functions called:
+    # - eb_version_build_verify - Build and verify EB version.
+ # - terraform_apply - Apply EB version by Terraform.
+ # - terraform_output - Get the application name string from Terraform.
+ # - die - Print to stderr and exit.
+
+ set -exuo pipefail
+
+ eb_version_build_verify || die "Failed to call Elastic Beanstalk verify!"
+ terraform_apply || die "Failed to call Terraform apply!"
+
+ TERRAFORM_OUTPUT_VAR="application_version"
+ terraform_output || die "Failed to call Terraform output!"
+
+ #aws --region eu-central-1 elasticbeanstalk update-environment \
+ # --environment-name fdio-csit-dash-env \
+ # --version-label "${TERRAFORM_OUTPUT_VAL}"
+}
+
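An entry script would drive the deployment roughly as follows (a sketch; BUILD_ID is expected to come from CI):

    source "${CSIT_DIR}/resources/libraries/bash/function/eb_version.sh"
    source "${CSIT_DIR}/resources/libraries/bash/function/terraform.sh"
    source "${CSIT_DIR}/resources/libraries/bash/function/hugo.sh"
    eb_version_deploy    # build site, apply Terraform, read output value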
+
+function eb_version_build_verify () {
+
+ # Build and verify Elastic Beanstalk CDash integrity.
+ #
+ # Variable read:
+    # - ${CSIT_DIR} - CSIT main directory.
+    # - ${BUILD_ID} - CI build identifier, used as the application version.
+ # Variables set:
+ # - ${TERRAFORM_MODULE_DIR} - Terraform module sub-directory.
+ # Functions called:
+ # - hugo_init_modules - Initialize Hugo modules.
+ # - hugo_build_site - Build static site with Hugo.
+ # - terraform_init - Initialize Terraform modules.
+ # - terraform_validate - Validate Terraform code.
+ # - die - Print to stderr and exit.
+
+ set -exuo pipefail
+
+ if ! installed zip; then
+ die "Please install zip!"
+ fi
+
+ hugo_init_modules || die "Failed to call Hugo initialize!"
+ hugo_build_site || die "Failed to call Hugo build!"
+
+ pushd "${CSIT_DIR}"/csit.infra.dash || die "Pushd failed!"
+ pushd app || die "Pushd failed!"
+ find . -type d -name "__pycache__" -exec rm -rf "{}" \;
+ find . -type d -name ".webassets-cache" -exec rm -rf "{}" \;
+ zip -r ../app.zip . || die "Compress failed!"
+ popd || die "Popd failed!"
+ popd || die "Popd failed!"
+
+ TERRAFORM_MODULE_DIR="terraform-aws-fdio-csit-dash-app-base"
+
+ export TF_VAR_application_version="${BUILD_ID}"
+ terraform_init || die "Failed to call Terraform init!"
+ terraform_validate || die "Failed to call Terraform validate!"
+}
+
+
+function generate_report () {
+
+ # Generate report content.
+ #
+ # Variable read:
+ # - ${TOOLS_DIR} - Path to existing resources subdirectory "tools".
+ # - ${GERRIT_BRANCH} - Gerrit branch used for release tagging.
+ # Variables set:
+ # - ${CODE_EXIT_STATUS} - Exit status of report generation.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ set -exuo pipefail
+
+ pushd "${TOOLS_DIR}"/presentation || die "Pushd failed!"
+
+ # Set default values in config array.
+ typeset -A CFG
+ typeset -A DIR
+
+ DIR[WORKING]="_tmp"
+
+ # Create working directories.
+ mkdir "${DIR[WORKING]}" || die "Mkdir failed!"
+
+    export PYTHONPATH="$(pwd):$(pwd)/../../../" || die "Export failed!"
+
+ all_options=("pal.py")
+ all_options+=("--specification" "specifications/report")
+ all_options+=("--release" "${GERRIT_BRANCH:-master}")
+ all_options+=("--week" $(date "+%V"))
+ all_options+=("--logging" "INFO")
+ all_options+=("--force")
+
+ set +e
+ python "${all_options[@]}"
+ CODE_EXIT_STATUS="$?"
+ set -e
+}
+
+
+function installed () {
+
+ # Check if the given utility is installed. Fail if not installed.
+ #
+ # Arguments:
+ # - ${1} - Utility to check.
+ # Returns (implicitly):
+ # - 0 - If command is installed.
+ # - 1 - If command is not installed.
+
+ set -exuo pipefail
+
+ command -v "${1}"
+}
diff --git a/resources/libraries/bash/function/gather.sh b/resources/libraries/bash/function/gather.sh
index e0829400b0..e432777e32 100644
--- a/resources/libraries/bash/function/gather.sh
+++ b/resources/libraries/bash/function/gather.sh
@@ -1,5 +1,5 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Copyright (c) 2019 PANTHEON.tech and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2023 PANTHEON.tech and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -26,7 +26,7 @@ function gather_build () {
# Variables read:
# - TEST_CODE - String affecting test selection, usually jenkins job name.
- # - DOWNLOAD_DIR - Path to directory pybot takes the build to test from.
+ # - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
# Variables set:
# - DUT - CSIT test/ subdirectory containing suites to execute.
# Directories updated:
@@ -57,6 +57,10 @@ function gather_build () {
DUT="dpdk"
gather_dpdk || die "The function should have died on error."
;;
+ *"trex"*)
+ DUT="trex"
+ gather_trex || die "The function should have died on error."
+ ;;
*)
die "Unable to identify DUT type from: ${TEST_CODE}"
;;
@@ -88,7 +92,8 @@ function gather_dpdk () {
then
echo "Downloading latest DPDK packages from repo..."
# URL is not in quotes, calling command from variable keeps them.
- wget_command=("wget" "--no-check-certificate" "-nv" "-O" "-")
+ wget_command=("wget" "--no-check-certificate" "--compression=auto")
+ wget_command+=("-nv" "-O" "-")
wget_command+=("${dpdk_repo}")
dpdk_stable_ver="$("${wget_command[@]}" | grep -v "2015"\
| grep -Eo 'dpdk-[^\"]+xz' | tail -1)" || {
@@ -110,13 +115,23 @@ function gather_dpdk () {
fi
}
+function gather_trex () {
+
+    # This function is required to bypass the download dir check.
+    # Currently it only creates an empty file in the download dir.
+    # TODO: Add the required packages.
+
+ set -exuo pipefail
+
+ touch trex-download-to-be-added.txt
+}
function gather_vpp () {
# Variables read:
# - BASH_FUNCTION_DIR - Bash directory with functions.
# - TEST_CODE - The test selection string from environment or argument.
- # - DOWNLOAD_DIR - Path to directory pybot takes the build to test from.
+ # - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
# - CSIT_DIR - Path to existing root of local CSIT git repository.
# Variables set:
# - VPP_VERSION - VPP stable version under test.
@@ -124,7 +139,8 @@ function gather_vpp () {
# - ${CSIT_DIR}/DPDK_STABLE_VER - DPDK version to use
# by csit-vpp not-timed jobs.
# - ${CSIT_DIR}/${VPP_VER_FILE} - Ubuntu VPP version to use.
- # - ../*vpp*.deb|rpm - Relative to ${DOWNLOAD_DIR}, copied for vpp-csit jobs.
+ # - ../*vpp*.deb|rpm - Relative to ${DOWNLOAD_DIR},
+ # copied for vpp-csit jobs.
# Directories updated:
# - ${DOWNLOAD_DIR}, vpp-*.deb files are copied here for vpp-csit jobs.
# - ./ - Assumed ${DOWNLOAD_DIR}, *vpp*.deb|rpm files
@@ -157,8 +173,10 @@ function gather_vpp () {
download_artifacts || die
;;
"vpp-csit-"*)
+ # Shorten line.
+ pkgs="${PKG_SUFFIX}"
# Use locally built packages.
- mv "${DOWNLOAD_DIR}"/../*vpp*."${PKG_SUFFIX}" "${DOWNLOAD_DIR}"/ || {
+ mv "${DOWNLOAD_DIR}"/../*vpp*."${pkgs}" "${DOWNLOAD_DIR}"/ || {
die "Move command failed."
}
;;
diff --git a/resources/libraries/bash/function/hugo.sh b/resources/libraries/bash/function/hugo.sh
new file mode 100644
index 0000000000..052e8333fb
--- /dev/null
+++ b/resources/libraries/bash/function/hugo.sh
@@ -0,0 +1,113 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+
+function go_install () {
+
+ # Install Go.
+
+ OS_ARCH=$(uname -m) || die "Failed to get arch."
+ case "${OS_ARCH}" in
+ x86_64) architecture="amd64" ;;
+ aarch64) architecture="arm64" ;;
+ esac
+
+ go_version="go1.20.2.linux-${architecture}.tar.gz"
+ go_url="https://go.dev/dl"
+    wget "${go_url}/${go_version}" || die "Failed to download Go!"
+    rm -rf "/usr/local/go"
+    tar -C "/usr/local" -xzf "${go_version}" || die "Failed to extract Go!"
+    rm "${go_version}"
+ export PATH=$PATH:/usr/local/go/bin
+}
+
+
+function hugo_build_site () {
+
+ # Build site via Hugo.
+ #
+ # Variable read:
+ # - ${CSIT_DIR} - CSIT main directory.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ if ! installed hugo; then
+ die "Please install Hugo!"
+ fi
+
+ pushd "${CSIT_DIR}"/docs || die "Pushd failed!"
+ hugo || die "Failed to run Hugo build!"
+ popd || die "Popd failed!"
+}
+
+
+function hugo_init_modules () {
+
+ # Initialize Hugo modules.
+ #
+ # Variable read:
+ # - ${CSIT_DIR} - CSIT main directory.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ if ! installed hugo; then
+ die "Please install Hugo!"
+ fi
+
+ hugo_book_url="github.com/alex-shpak/hugo-book"
+ hugo_book_version="v0.0.0-20230424134111-d86d5e70c7c0"
+ hugo_book_link="${hugo_book_url}@${hugo_book_version}"
+ pushd "${CSIT_DIR}"/docs || die "Pushd failed!"
+ export PATH=$PATH:/usr/local/go/bin
+ hugo mod get "${hugo_book_link}" || die "Failed to run Hugo mod!"
+ popd || die "Popd failed!"
+}
+
+
+function hugo_install () {
+
+ # Install Hugo Extended.
+
+ OS_ARCH=$(uname -m) || die "Failed to get arch."
+ case "${OS_ARCH}" in
+ x86_64) architecture="amd64" ;;
+ aarch64) architecture="arm64" ;;
+ esac
+
+ hugo_version="v0.111.3/hugo_extended_0.111.3_linux-${architecture}.deb"
+ hugo_url="https://github.com/gohugoio/hugo/releases/download"
+ hugo_link="${hugo_url}/${hugo_version}"
+ wget -O "hugo.deb" "${hugo_link}" || die "Failed to install Hugo!"
+ dpkg -i "hugo.deb" || die "Failed to install Hugo!"
+ rm "hugo.deb" || die "Failed to install Hugo!"
+}
+
+
+function installed () {
+
+ # Check if the given utility is installed. Fail if not installed.
+ #
+ # Arguments:
+ # - ${1} - Utility to check.
+ # Returns (implicitly):
+ # - 0 - If command is installed.
+ # - 1 - If command is not installed.
+
+ set -exuo pipefail
+
+ command -v "${1}"
+}
diff --git a/resources/libraries/bash/function/nginx.sh b/resources/libraries/bash/function/nginx.sh
new file mode 100755
index 0000000000..a2cf8e6514
--- /dev/null
+++ b/resources/libraries/bash/function/nginx.sh
@@ -0,0 +1,136 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2023 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+
+function gather_nginx () {
+
+ # Ensure stable NGINX archive is downloaded.
+ #
+ # Variables read:
+ # - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
+    # - NGINX_VER - Version number of Nginx.
+
+    set -exuo pipefail
+
+    pushd "${DOWNLOAD_DIR}" || die "Pushd failed."
+    nginx_repo="http://nginx.org/download/"
+    # Use a downloaded package with the specific version.
+    echo "Downloading NGINX package of specific version from repo ..."
+    # The NGINX version is based on what VPP is using. Currently there
+    # is no easy way to detect it from the VPP version automatically.
+    nginx_stable_ver="${NGINX_VER}".tar.gz
+
+ if [[ ! -f "${nginx_stable_ver}" ]]; then
+ wget -nv --no-check-certificate \
+ "${nginx_repo}/${nginx_stable_ver}" || {
+ die "Failed to get NGINX package from: ${nginx_repo}."
+ }
+ fi
+ popd || die "Popd failed."
+}
+
+
+function common_dirs () {
+
+ # Set global variables, create some directories (without touching content).
+    # This function assumes it runs on a remote testbed. It may override
+    # functions of the same name if sourced after common.sh.
+
+ # Arguments:
+ # - ${1} - Version number of Nginx.
+ # Variables set:
+ # - BASH_FUNCTION_DIR - Path to existing directory this file is located in.
+ # - CSIT_DIR - Path to CSIT framework.
+ # - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
+ # - NGINX_DIR - Path to NGINX framework.
+ # - NGINX_VER - Version number of Nginx.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ set -exuo pipefail
+ NGINX_VER="${1}"
+ this_file=$(readlink -e "${BASH_SOURCE[0]}") || {
+ die "Some error during locating of this source file."
+ }
+ BASH_FUNCTION_DIR=$(dirname "${this_file}") || {
+ die "Some error during dirname call."
+ }
+ CSIT_DIR=$(readlink -e "/tmp/openvpp-testing") || {
+ die "Readlink failed."
+ }
+ DOWNLOAD_DIR=$(readlink -f "${CSIT_DIR}/download_dir") || {
+ die "Readlink failed."
+ }
+ mkdir -p "${CSIT_DIR}/${NGINX_VER}" || die "Mkdir failed."
+ NGINX_DIR=$(readlink -e "${CSIT_DIR}/${NGINX_VER}") || {
+ die "Readlink failed."
+ }
+}
+
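Typical call order from an entry script (the version string is hypothetical):

    source "/tmp/openvpp-testing/resources/libraries/bash/function/nginx.sh"
    common_dirs "nginx-1.21.5"    # sets NGINX_VER, NGINX_DIR, DOWNLOAD_DIR
    gather_nginx                  # downloads the tarball if missing
    nginx_extract                 # unpacks into ${NGINX_DIR}
    nginx_compile                 # configures and installs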
+
+function nginx_compile () {
+
+ # Compile NGINX archive.
+ #
+ # Variables read:
+ # - NGINX_DIR - Path to NGINX framework.
+    # - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
+    # - NGINX_VER - Version number of Nginx.
+    # Variables set:
+    # - NGINX_INS_PATH - Path to the NGINX install directory.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ set -exuo pipefail
+ NGINX_INS_PATH="${DOWNLOAD_DIR}/${NGINX_VER}"
+ pushd "${NGINX_DIR}" || die "Pushd failed."
+
+ # Set installation prefix.
+ param="--prefix=${NGINX_INS_PATH} "
+ # Set nginx binary pathname.
+ param+="--sbin-path=${NGINX_INS_PATH}/sbin/nginx "
+ # Set nginx.conf pathname.
+ param+="--conf-path=${NGINX_INS_PATH}/conf/nginx.conf "
+ # Enable ngx_http_stub_status_module.
+ param+="--with-http_stub_status_module "
+ # Force PCRE library usage.
+ param+="--with-pcre "
+ # Enable ngx_http_realip_module.
+ param+="--with-http_realip_module "
+ params=(${param})
+ ./configure "${params[@]}" || die "Failed to configure NGINX!"
+    make -j 16 && make install || die "Failed to compile NGINX!"
+}
+
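Expanded, the configure call above is equivalent to:

    ./configure --prefix="${NGINX_INS_PATH}" \
        --sbin-path="${NGINX_INS_PATH}/sbin/nginx" \
        --conf-path="${NGINX_INS_PATH}/conf/nginx.conf" \
        --with-http_stub_status_module \
        --with-pcre \
        --with-http_realip_module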
+
+function nginx_extract () {
+
+ # Extract NGINX framework.
+ #
+ # Variables read:
+ # - NGINX_DIR - Path to NGINX framework.
+ # - CSIT_DIR - Path to CSIT framework.
+ # - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
+ # - NGINX_VER - Version number of Nginx.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ set -exuo pipefail
+
+ pushd "${CSIT_DIR}" || die "Pushd failed."
+ tar -xvf ${DOWNLOAD_DIR}/${NGINX_VER}.tar.gz --strip=1 \
+ --directory "${NGINX_DIR}" || {
+ die "Failed to extract NGINX!"
+ }
+}
diff --git a/resources/libraries/bash/function/per_patch.sh b/resources/libraries/bash/function/per_patch.sh
index 43a3f971bf..44bd57da80 100644
--- a/resources/libraries/bash/function/per_patch.sh
+++ b/resources/libraries/bash/function/per_patch.sh
@@ -1,5 +1,5 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Copyright (c) 2020 PANTHEON.tech s.r.o.
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2023 PANTHEON.tech s.r.o.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -18,59 +18,14 @@ set -exuo pipefail
# Generally, the functions assume "common.sh" library has been sourced already.
# Keep functions ordered alphabetically, please.
-function archive_test_results () {
- # Arguments:
- # - ${1}: Directory to archive to. Required. Parent has to exist.
- # Variable set:
- # - TARGET - Target directory.
- # Variables read:
- # - ARCHIVE_DIR - Path to where robot result files are created in.
- # - VPP_DIR - Path to existing directory, root for to relative paths.
- # Directories updated:
- # - ${1} - Created, and robot and parsing files are moved/created there.
- # Functions called:
- # - die - Print to stderr and exit, defined in common.sh
-
- set -exuo pipefail
-
- cd "${VPP_DIR}" || die "Change directory command failed."
- TARGET="$(readlink -f "$1")"
- mkdir -p "${TARGET}" || die "Directory creation failed."
- for filename in "output.xml" "log.html" "report.html"; do
- mv "${ARCHIVE_DIR}/${filename}" "${TARGET}/${filename}" || {
- die "Attempt to move '${filename}' failed."
- }
- done
-}
-
-
-function archive_parse_test_results () {
-
- # Arguments:
- # - ${1}: Directory to archive to. Required. Parent has to exist.
- # Variables read:
- # - TARGET - Target directory.
- # Functions called:
- # - die - Print to stderr and exit, defined in common.sh
- # - archive_test_results - Archiving results.
- # - parse_bmrr_results - See definition in this file.
-
- set -exuo pipefail
-
- archive_test_results "$1" || die
- parse_bmrr_results "${TARGET}" || {
- die "The function should have died on error."
- }
-}
-
-
-function build_vpp_ubuntu_amd64 () {
+function build_vpp_ubuntu () {
# This function is using make pkg-verify to build VPP with all dependencies
# that is ARCH/OS aware. VPP repo is SSOT for building mechanics and CSIT
# is consuming artifacts. This way if VPP will introduce change in building
# mechanics they will not be blocked by CSIT repo.
+ #
# Arguments:
# - ${1} - String identifier for echo, can be unset.
# Variables read:
@@ -96,7 +51,7 @@ function build_vpp_ubuntu_amd64 () {
"using build default ($(grep -c ^processor /proc/cpuinfo))."
fi
- make UNATTENDED=y pkg-verify || die "VPP build using make pkg-verify failed."
+ make UNATTENDED=y pkg-verify || die "VPP build with make pkg-verify failed."
echo "* VPP ${1-} BUILD SUCCESSFULLY COMPLETED" || {
die "Argument not found."
}
@@ -114,7 +69,6 @@ function compare_test_results () {
# of parent build.
# Functions called:
# - die - Print to stderr and exit, defined in common.sh
- # - parse_bmrr_results - See definition in this file.
# Exit code:
# - 0 - If the comparison utility sees no regression (nor data error).
# - 1 - If the comparison utility sees a regression (or data error).
@@ -135,50 +89,109 @@ function initialize_csit_dirs () {
# Variables read:
# - VPP_DIR - Path to WORKSPACE, parent of created directories.
# Directories created:
- # - csit_current - Holding test results of the patch under test (PUT).
- # - csit_parent - Holding test results of parent of PUT.
+    # - csit_{part} - See the caller for what each directory is used for.
# Functions called:
# - die - Print to stderr and exit, defined in common.sh
set -exuo pipefail
cd "${VPP_DIR}" || die "Change directory operation failed."
- rm -rf "csit_current" "csit_parent" || {
- die "Directory deletion failed."
- }
- mkdir -p "csit_current" "csit_parent" || {
- die "Directory creation failed."
- }
+ while true; do
+        if [[ "${#}" -lt "1" ]]; then
+ # All directories created.
+ break
+ fi
+ name_part="${1}" || die
+ shift || die
+ dir_name="csit_${name_part}" || die
+ rm -rf "${dir_name}" || die "Directory deletion failed."
+ mkdir -p "${dir_name}" || die "Directory creation failed."
+ done
}
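Callers now pass one name per directory to create; e.g. a per-patch entry script could recreate the two result directories the old code hardcoded with (a sketch):

    initialize_csit_dirs "current" "parent" || die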
-function parse_bmrr_results () {
+function main_bisect_loop () {
- # Currently "parsing" is just two greps.
- # TODO: Re-use PAL parsing code, make parsing more general and centralized.
+    # Perform the iterative part of the bisect entry script.
+ #
+ # The logic is too complex to remain in the entry script.
#
+ # At the start, the loop assumes git bisect old/new has just been executed,
+ # and verified more iterations are needed.
+ # The iteration cleans the build directory and builds the new mid commit.
+ # Then, testbed is reserved, tests run, and testbed unreserved.
+ # Results are moved from default to archive location
+ # (indexed by iteration number) and analyzed.
+ # The new adjective ("old" or "new") is selected,
+ # and git bisect with the adjective is executed.
+ # The symlinks csit_early and csit_late are updated to tightest bounds.
+ # The git.log file is examined and if the bisect is finished, loop ends.
+
+    set -exuo pipefail
+
+    iteration=0
+ while true
+ do
+ let iteration+=1
+ git clean -dffx "build"/ "build-root"/ || die
+ build_vpp_ubuntu "MIDDLE" || die
+ select_build "build-root" || die
+ check_download_dir || die
+ reserve_and_cleanup_testbed || die
+ run_robot || die
+ move_test_results "csit_middle/${iteration}" || die
+ untrap_and_unreserve_testbed || die
+ rm -vf "csit_mid" || die
+ ln -s -T "csit_middle/${iteration}" "csit_mid" || die
+ set +e
+ python3 "${TOOLS_DIR}/integrated/compare_bisect.py"
+ bisect_rc="${?}"
+ set -e
+ if [[ "${bisect_rc}" == "3" ]]; then
+ adjective="new"
+ rm -v "csit_late" || die
+ ln -s -T "csit_middle/${iteration}" "csit_late" || die
+ elif [[ "${bisect_rc}" == "0" ]]; then
+ adjective="old"
+ rm -v "csit_early" || die
+ ln -s -T "csit_middle/${iteration}" "csit_early" || die
+ else
+ die "Unexpected return code: ${bisect_rc}"
+ fi
+ git bisect "${adjective}" | tee "git.log" || die
+ git describe || die
+ git status || die
+ if head -n 1 "git.log" | cut -b -11 | fgrep -q "Bisecting:"; then
+ echo "Still bisecting..."
+ else
+ echo "Bisecting done."
+ break
+ fi
+ done
+}
+
+
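The calling entry script is expected to have started the bisect before invoking this loop; a sketch of that handoff (commit variables hypothetical):

    cd "${VPP_DIR}" || die
    git bisect start || die
    git bisect new "${BAD_COMMIT}" | tee "git.log" || die
    git bisect old "${GOOD_COMMIT}" | tee "git.log" || die
    main_bisect_loop || die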
+function move_test_results () {
+
# Arguments:
- # - ${1} - Path to (existing) directory holding robot output.xml result.
- # Files read:
- # - output.xml - From argument location.
- # Files updated:
- # - results.txt - (Re)created, in argument location.
+ # - ${1}: Directory to archive to. Required. Parent has to exist.
+ # Variable set:
+ # - TARGET - Target archival directory, equivalent to the argument.
+ # Variables read:
+ # - ARCHIVE_DIR - Path to where robot result files are created in.
+    # - VPP_DIR - Path to existing directory, root of the relative paths.
+ # Directories updated:
+ # - ${1} - Created, and robot and parsing files are moved/created there.
# Functions called:
# - die - Print to stderr and exit, defined in common.sh
set -exuo pipefail
- rel_dir="$(readlink -e "${1}")" || die "Readlink failed."
- in_file="${rel_dir}/output.xml"
- out_file="${rel_dir}/results.txt"
- # TODO: Do we need to check echo exit code explicitly?
- echo "Parsing ${in_file} putting results into ${out_file}"
- echo "TODO: Re-use parts of PAL when they support subsample test parsing."
- pattern='Maximum Receive Rate trial results in packets'
- pattern+=' per second: .*\]</status>'
- grep -o "${pattern}" "${in_file}" | grep -o '\[.*\]' > "${out_file}" || {
- die "Some parsing grep command has failed."
- }
+ cd "${VPP_DIR}" || die "Change directory command failed."
+ TARGET="$(readlink -f "$1")"
+ mkdir -p "${TARGET}" || die "Directory creation failed."
+ file_list=("output.xml" "log.html" "report.html" "tests")
+ for filename in "${file_list[@]}"; do
+ mv "${ARCHIVE_DIR}/${filename}" "${TARGET}/${filename}" || die
+ done
}
@@ -207,56 +220,37 @@ function select_build () {
}
-function set_aside_commit_build_artifacts () {
+function set_aside_build_artifacts () {
- # Function is copying VPP built artifacts from actual checkout commit for
- # further use and clean git.
+    # Save VPP .deb artifacts from the currently finished build.
+ #
+ # After the artifacts are copied to the target directory,
+ # the main git tree is cleaned up to not interfere with next build.
+ #
+ # Arguments:
+ # - ${1} - String to derive the target directory name from. Required.
# Variables read:
# - VPP_DIR - Path to existing directory, parent to accessed directories.
# Directories read:
# - build-root - Existing directory with built VPP artifacts (also DPDK).
# Directories updated:
# - ${VPP_DIR} - A local git repository, parent commit gets checked out.
- # - build_current - Old contents removed, content of build-root copied here.
+ # - build_${1} - Old contents removed, content of build-root copied here.
# Functions called:
# - die - Print to stderr and exit, defined in common.sh
set -exuo pipefail
cd "${VPP_DIR}" || die "Change directory operation failed."
- rm -rf "build_current" || die "Remove operation failed."
- mkdir -p "build_current" || die "Directory creation failed."
- mv "build-root"/*".deb" "build_current"/ || die "Move operation failed."
+ dir_name="build_${1}" || die
+ rm -rf "${dir_name}" || die "Remove operation failed."
+ mkdir -p "${dir_name}" || die "Directory creation failed."
+ mv "build-root"/*".deb" "${dir_name}"/ || die "Move operation failed."
# The previous build could have left some incompatible leftovers,
# e.g. DPDK artifacts of different version (in build/external).
# Also, there usually is a copy of dpdk artifact in build-root.
git clean -dffx "build"/ "build-root"/ || die "Git clean operation failed."
- # Finally, check out the parent commit.
- git checkout HEAD~ || die "Git checkout operation failed."
- # Display any other leftovers.
- git status || die "Git status operation failed."
-}
-
-
-function set_aside_parent_build_artifacts () {
-
- # Function is copying VPP built artifacts from parent checkout commit for
- # further use. Checkout to parent is not part of this function.
- # Variables read:
- # - VPP_DIR - Path to existing directory, parent of accessed directories.
- # Directories read:
- # - build-root - Existing directory with built VPP artifacts (also DPDK).
- # Directories updated:
- # - build_parent - Old directory removed, build-root debs moved here.
- # Functions called:
- # - die - Print to stderr and exit, defined in common.sh
-
- set -exuo pipefail
-
- cd "${VPP_DIR}" || die "Change directory operation failed."
- rm -rf "build_parent" || die "Remove failed."
- mkdir -p "build_parent" || die "Directory creation operation failed."
- mv "build-root"/*".deb" "build_parent"/ || die "Move operation failed."
+ git status || die
}
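In a per-patch job the function is typically called once per built commit, with the checkout now done by the caller (a sketch):

    build_vpp_ubuntu "CURRENT" || die
    set_aside_build_artifacts "current" || die
    git checkout HEAD~ || die    # checkout is no longer inside the function
    build_vpp_ubuntu "PARENT" || die
    set_aside_build_artifacts "parent" || die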
diff --git a/resources/libraries/bash/function/terraform.sh b/resources/libraries/bash/function/terraform.sh
new file mode 100644
index 0000000000..2a0e0ed2be
--- /dev/null
+++ b/resources/libraries/bash/function/terraform.sh
@@ -0,0 +1,183 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+
+function terraform_apply () {
+
+ # Run terraform apply command to prepare topology.
+ #
+ # Variable read:
+ # - ${CSIT_DIR} - CSIT main directory, where terraform modules are located.
+ # - ${TERRAFORM_MODULE_DIR} - Terraform module directory.
+
+ set -exuo pipefail
+
+ if ! installed terraform; then
+ die "Please install terraform!"
+ fi
+
+ pushd "${CSIT_DIR}"/fdio.infra.terraform || die "Pushd failed!"
+ pushd "${TERRAFORM_MODULE_DIR}" || die "Pushd failed!"
+ export TF_LOG=INFO
+ terraform apply -no-color -auto-approve || die "Terraform apply failed!"
+ popd || die "Popd failed!"
+ popd || die "Popd failed!"
+}
+
+
+function terraform_destroy () {
+
+    # Run terraform destroy command to tear down the module resources.
+ #
+ # Variable read:
+ # - ${CSIT_DIR} - CSIT main directory, where terraform modules are located.
+ # - ${TERRAFORM_MODULE_DIR} - Terraform module directory.
+
+ set -exuo pipefail
+
+ if ! installed terraform; then
+ die "Please install terraform!"
+ fi
+
+ pushd "${CSIT_DIR}"/fdio.infra.terraform || die "Pushd failed!"
+ pushd "${TERRAFORM_MODULE_DIR}" || die "Pushd failed!"
+ export TF_LOG=INFO
+ terraform destroy -auto-approve -no-color || die "Terraform destroy failed!"
+ popd || die "Popd failed!"
+ popd || die "Popd failed!"
+}
+
+
+function terraform_init () {
+
+ # Run terraform init command to prepare module.
+ #
+ # Variable read:
+ # - ${CSIT_DIR} - CSIT main directory, where terraform modules are located.
+ # - ${TERRAFORM_MODULE_DIR} - Terraform module directory.
+
+ set -exuo pipefail
+
+ if ! installed terraform; then
+ die "Please install terraform!"
+ fi
+
+ pushd "${CSIT_DIR}"/fdio.infra.terraform || die "Pushd failed!"
+ pushd "${TERRAFORM_MODULE_DIR}" || die "Pushd failed!"
+
+ #plugin_url="https://github.com/radekg/terraform-provisioner-ansible/"
+ #plugin_url+="releases/download/v2.5.0/"
+ #plugin_url+="terraform-provisioner-ansible-linux-amd64_v2.5.0"
+ #plugin_dir="${HOME}/.terraform.d/plugins/"
+ #plugin_path+="${plugin_dir}terraform-provisioner-ansible_v2.5.0"
+
+ #mkdir -p "${plugin_dir}" || die "Failed to create dir!"
+ #wget -O "${plugin_path}" "${plugin_url}" || die "Failed to download plugin!"
+ #chmod +x "${plugin_path}" || die "Failed to add execute rights!"
+
+ rm -f terraform.tfstate || die "Failed to clear terraform state!"
+ export TF_LOG=INFO
+ terraform init || die "Failed to run terraform init!"
+ popd || die "Popd failed!"
+ popd || die "Popd failed!"
+}
+
+
+function terraform_install () {
+
+ # Install terraform.
+
+ OS_ARCH=$(uname -m) || die "Failed to get arch."
+ case "${OS_ARCH}" in
+ x86_64) architecture="amd64" ;;
+ aarch64) architecture="arm64" ;;
+ esac
+
+ terraform_version="1.4.2/terraform_1.4.2_linux_${architecture}.zip"
+ terraform_url="https://releases.hashicorp.com/terraform"
+ terraform_link="${terraform_url}/${terraform_version}"
+ wget "${terraform_link}" || die "Failed to install Terraform!"
+ unzip "terraform_1.4.2_linux_${architecture}.zip" || {
+ die "Failed to install Terraform!"
+ }
+ mv "terraform" "/usr/local/bin" || die "Failed to install Terraform!"
+ rm "terraform_1.4.2_linux_${architecture}.zip" || {
+ die "Failed to install Terraform!"
+ }
+}
+
+
+function terraform_output () {
+
+    # Run terraform output command to read a module output value.
+ #
+ # Variable read:
+ # - ${CSIT_DIR} - CSIT main directory, where terraform modules are located.
+ # - ${TERRAFORM_MODULE_DIR} - Terraform module directory.
+ # - ${TERRAFORM_OUTPUT_VAR} - Terraform variable to export.
+
+ set -exuo pipefail
+
+ if ! installed terraform; then
+ die "Please install terraform!"
+ fi
+
+ pushd "${CSIT_DIR}"/fdio.infra.terraform || die "Pushd failed!"
+ pushd "${TERRAFORM_MODULE_DIR}" || die "Pushd failed!"
+ TERRAFORM_OUTPUT_VAL=$(terraform output --raw "${TERRAFORM_OUTPUT_VAR}")
+ popd || die "Popd failed!"
+ popd || die "Popd failed!"
+}
+
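Usage sketch, matching eb_version_deploy above:

    TERRAFORM_OUTPUT_VAR="application_version"
    terraform_output || die "Failed to call Terraform output!"
    echo "${TERRAFORM_OUTPUT_VAL}"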
+
+function terraform_validate () {
+
+    # Run terraform validate command to validate the module code.
+ #
+ # Variable read:
+ # - ${CSIT_DIR} - CSIT main directory, where terraform modules are located.
+ # - ${TERRAFORM_MODULE_DIR} - Terraform module directory.
+
+ set -exuo pipefail
+
+ if ! installed terraform; then
+ die "Please install terraform!"
+ fi
+
+ pushd "${CSIT_DIR}"/fdio.infra.terraform || die "Pushd failed!"
+ pushd "${TERRAFORM_MODULE_DIR}" || die "Pushd failed!"
+ export TF_LOG=INFO
+ terraform validate || die "Terraform validate failed!"
+ popd || die "Popd failed!"
+ popd || die "Popd failed!"
+}
+
+
+function installed () {
+
+ # Check if the given utility is installed. Fail if not installed.
+ #
+ # Arguments:
+ # - ${1} - Utility to check.
+ # Returns (implicitly):
+ # - 0 - If command is installed.
+ # - 1 - If command is not installed.
+
+ set -exuo pipefail
+
+ command -v "${1}"
+}