path: root/resources/libraries/bash/function
authorVratko Polak <vrpolak@cisco.com>2018-08-10 10:20:30 +0200
committerVratko Polak <vrpolak@cisco.com>2018-08-30 16:48:56 +0200
commit7db6faf25da39820d321222f7f8fcb191585add9 (patch)
tree15f7a9ac56a48d820cc5aca337cffc2caaaf1e3f /resources/libraries/bash/function
parente105ab722bbc6c98bf76a863539c971be506859a (diff)
CSIT-1135: Scripts for VPP per-patch testing
CSIT-1135: Scripts for VPP per-patch testing

+ Scripts do not rely on (other) bootstraps.
+ Perf verify bootstrap is also a new-style script now.
+ Scripts are divided into functions for better re-use.
+ Functions are sourced from a small number of large "library" files.
- Still using jumpavg from PyPI.
- Perpatch has specific simplified parsing (instead of the PAL one).
- Bash style document is in a separate Change.

Change-Id: If88fa528ce155ea86b614e3d77c0550b91bbdf11
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
Diffstat (limited to 'resources/libraries/bash/function')
-rw-r--r--  resources/libraries/bash/function/README.txt   |   7
-rw-r--r--  resources/libraries/bash/function/common.sh    | 562
-rw-r--r--  resources/libraries/bash/function/gather.sh    | 307
-rw-r--r--  resources/libraries/bash/function/per_patch.sh | 314
4 files changed, 1190 insertions, 0 deletions
diff --git a/resources/libraries/bash/function/README.txt b/resources/libraries/bash/function/README.txt
new file mode 100644
index 0000000000..055ebb4cdc
--- /dev/null
+++ b/resources/libraries/bash/function/README.txt
@@ -0,0 +1,7 @@
+Files in this directory are to be executed only indirectly,
+sourced from other scripts.
+
+In fact, the files should only define functions,
+except perhaps some minimal logic needed to import dependencies.
+The top-level function calls should be executed from elsewhere,
+typically from entry scripts.
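
For illustration (an assumption-laden sketch, not part of this change): an
entry script consuming these libraries might look as follows; the relative
path and the particular function calls are hypothetical.

    #!/usr/bin/env bash
    # Hypothetical entry script; locations and call order are illustrative.
    set -exuo pipefail
    # Locate the function libraries relative to this script and source them.
    here="$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")"
    source "${here}/../function/common.sh" || exit 1
    source "${here}/../function/gather.sh" || exit 1
    # Only after sourcing are the functions called.
    common_dirs
    get_test_code "${1-}"
    gather_build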
diff --git a/resources/libraries/bash/function/common.sh b/resources/libraries/bash/function/common.sh
new file mode 100644
index 0000000000..b3a06d497b
--- /dev/null
+++ b/resources/libraries/bash/function/common.sh
@@ -0,0 +1,562 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+# This library defines functions used by multiple entry scripts.
+# Keep functions ordered alphabetically, please.
+
+# TODO: Add a link to bash style guide.
+# TODO: Consider putting every die into a {} block,
+# the code might become more readable (but longer).
+
+
+function activate_virtualenv () {
+
+ set -exuo pipefail
+
+ # Arguments:
+ # - ${1} - Non-empty path to existing directory for creating virtualenv in.
+ # Variables read:
+ # - CSIT_DIR - Path to existing root of local CSIT git repository.
+ # Variables set:
+ # - ENV_DIR - Path to the created virtualenv subdirectory.
+ # Variables exported:
+ # - PYTHONPATH - CSIT_DIR, as CSIT Python scripts usually need this.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ # TODO: Do we really need to have ENV_DIR available as a global variable?
+
+ if [[ "${1-}" == "" ]]; then
+ die "Root location of virtualenv to create is not specified."
+ fi
+ ENV_DIR="${1}/env"
+ rm -rf "${ENV_DIR}" || die "Failed to clean previous virtualenv."
+
+ pip install --upgrade virtualenv || {
+ die "Virtualenv package install failed."
+ }
+ virtualenv --system-site-packages "${ENV_DIR}" || {
+ die "Virtualenv creation failed."
+ }
+ set +u
+ source "${ENV_DIR}/bin/activate" || die "Virtualenv activation failed."
+ set -u
+ pip install -r "${CSIT_DIR}/requirements.txt" || {
+ die "CSIT requirements installation failed."
+ }
+
+ # Most CSIT Python scripts assume PYTHONPATH is set and exported.
+ export PYTHONPATH="${CSIT_DIR}" || die "Export failed."
+}
+
+
+function check_download_dir () {
+
+ set -exuo pipefail
+
+ # Fail if there are no files visible in ${DOWNLOAD_DIR}.
+ # TODO: Do we need this as a function, if it is (almost) a one-liner?
+ #
+ # Variables read:
+ # - DOWNLOAD_DIR - Path to directory pybot takes the build to test from.
+ # Directories read:
+ # - ${DOWNLOAD_DIR} - Has to be non-empty to proceed.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ if [[ ! "$(ls -A "${DOWNLOAD_DIR}")" ]]; then
+ die "No artifacts downloaded!"
+ fi
+}
+
+
+function common_dirs () {
+
+ set -exuo pipefail
+
+ # Variables set:
+ # - BASH_FUNCTION_DIR - Path to existing directory this file is located in.
+ # - CSIT_DIR - Path to existing root of local CSIT git repository.
+    # - TOPOLOGIES_DIR - Path to existing directory with available topologies.
+ # - RESOURCES_DIR - Path to existing CSIT subdirectory "resources".
+ # - TOOLS_DIR - Path to existing resources subdirectory "tools".
+ # - PYTHON_SCRIPTS_DIR - Path to existing tools subdirectory "scripts".
+ # - ARCHIVE_DIR - Path to created CSIT subdirectory "archive".
+ # - DOWNLOAD_DIR - Path to created CSIT subdirectory "download_dir".
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ BASH_FUNCTION_DIR="$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")" || {
+ die "Some error during localizing this source directory."
+ }
+ # Current working directory could be in a different repo, e.g. VPP.
+ pushd "${BASH_FUNCTION_DIR}" || die "Pushd failed"
+ CSIT_DIR="$(readlink -e "$(git rev-parse --show-toplevel)")" || {
+ die "Readlink or git rev-parse failed."
+ }
+ popd || die "Popd failed."
+ TOPOLOGIES_DIR="$(readlink -e "${CSIT_DIR}/topologies/available")" || {
+ die "Readlink failed."
+ }
+ RESOURCES_DIR="$(readlink -e "${CSIT_DIR}/resources")" || {
+ die "Readlink failed."
+ }
+ TOOLS_DIR="$(readlink -e "${RESOURCES_DIR}/tools")" || {
+ die "Readlink failed."
+ }
+ PYTHON_SCRIPTS_DIR="$(readlink -e "${TOOLS_DIR}/scripts")" || {
+ die "Readlink failed."
+ }
+
+ ARCHIVE_DIR="$(readlink -f "${CSIT_DIR}/archive")" || {
+ die "Readlink failed."
+ }
+ mkdir -p "${ARCHIVE_DIR}" || die "Mkdir failed."
+ DOWNLOAD_DIR="$(readlink -f "${CSIT_DIR}/download_dir")" || {
+ die "Readlink failed."
+ }
+ mkdir -p "${DOWNLOAD_DIR}" || die "Mkdir failed."
+}
+
+
+function compose_pybot_arguments () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
+ # - DUT - CSIT test/ subdirectory, set while processing tags.
+ # - TAGS - Array variable holding selected tag boolean expressions.
+ # - TOPOLOGIES_TAGS - Tag boolean expression filtering tests for topology.
+ # Variables set:
+ # - PYBOT_ARGS - String holding part of all arguments for pybot.
+ # - EXPANDED_TAGS - Array of strings pybot arguments compiled from tags.
+
+ # No explicit check needed with "set -u".
+ PYBOT_ARGS=("--loglevel" "TRACE" "--variable" "TOPOLOGY_PATH:${WORKING_TOPOLOGY}")
+ PYBOT_ARGS+=("--suite" "tests.${DUT}.perf")
+
+ EXPANDED_TAGS=()
+ for tag in "${TAGS[@]}"; do
+ if [[ ${tag} == "!"* ]]; then
+ EXPANDED_TAGS+=("--exclude" "${tag#$"!"}")
+ else
+ EXPANDED_TAGS+=("--include" "${TOPOLOGIES_TAGS}AND${tag}")
+ fi
+ done
+}
+
+
+function copy_archives () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - WORKSPACE - Jenkins workspace, copy only if the value is not empty.
+ # Can be unset, then it speeds up manual testing.
+ # - ARCHIVE_DIR - Path to directory with content to be copied.
+ # Directories updated:
+ # - ${WORKSPACE}/archives/ - Created if does not exist.
+ # Content of ${ARCHIVE_DIR}/ is copied here.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+    # We will create an additional archive if the workspace variable is set.
+    # This way, if the script is running in Jenkins, everything will be
+    # automatically archived to logs.fd.io.
+ if [[ -n "${WORKSPACE-}" ]]; then
+ mkdir -p "${WORKSPACE}/archives/" || die "Archives dir create failed."
+ cp -r "${ARCHIVE_DIR}"/* "${WORKSPACE}/archives" || die "Copy failed."
+ fi
+}
+
+
+function die () {
+    # Print the message to standard error and exit with the error code
+    # specified by the second argument.
+ #
+ # Hardcoded values:
+ # - The default error message.
+ # Arguments:
+    # - ${1} - The whole error message, be sure to quote. Optional.
+ # - ${2} - the code to exit with, default: 1.
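+    # Example usage (hypothetical): die "Unable to continue." 2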
+
+ set -x
+ set +eu
+ warn "${1:-Unspecified run-time error occurred!}"
+ exit "${2:-1}"
+}
+
+
+function die_on_pybot_error () {
+
+ set -exuo pipefail
+
+ # Source this fragment if you want to abort on any failed test case.
+ #
+ # Variables read:
+ # - PYBOT_EXIT_STATUS - Set by a pybot running fragment.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ if [[ "${PYBOT_EXIT_STATUS}" != "0" ]]; then
+ die "${PYBOT_EXIT_STATUS}" "Test failures are present!"
+ fi
+}
+
+
+function get_test_code () {
+
+ set -exuo pipefail
+
+ # Arguments:
+ # - ${1} - Optional, argument of entry script (or empty as unset).
+ # Test code value to override job name from environment.
+ # Variables read:
+    # - JOB_NAME - String affecting test selection, used if no argument given.
+ # Variables set:
+ # - TEST_CODE - The test selection string from environment or argument.
+ # - NODENESS - Node multiplicity of desired testbed.
+ # - FLAVOR - Node flavor string, usually describing the processor.
+
+ TEST_CODE="${1-}" || die "Reading optional argument failed, somehow."
+ if [[ -z "${TEST_CODE}" ]]; then
+ TEST_CODE="${JOB_NAME-}" || die "Reading job name failed, somehow."
+ fi
+
+ case "${TEST_CODE}" in
+ *"2n-skx"*)
+ NODENESS="2n"
+ FLAVOR="skx"
+ ;;
+ *"3n-skx"*)
+ NODENESS="3n"
+ FLAVOR="skx"
+ ;;
+ *)
+ # Fallback to 3-node Haswell by default (backward compatibility)
+ NODENESS="3n"
+ FLAVOR="hsw"
+ ;;
+ esac
+}
+
+
+function get_test_tag_string () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - GERRIT_EVENT_TYPE - Event type set by gerrit, can be unset.
+ # - GERRIT_EVENT_COMMENT_TEXT - Comment text, read for "comment-added" type.
+ # Variables set:
+ # - TEST_TAG_STRING - The string following "perftest" in gerrit comment,
+ # or empty.
+
+ # TODO: ci-management scripts no longer need to perform this.
+
+ trigger=""
+ if [[ "${GERRIT_EVENT_TYPE-}" == "comment-added" ]]; then
+ # On parsing error, ${trigger} stays empty.
+ trigger="$(echo "${GERRIT_EVENT_COMMENT_TEXT}" \
+ | grep -oE '(perftest$|perftest[[:space:]].+$)')" || true
+ fi
+ # Set test tags as string.
+ TEST_TAG_STRING="${trigger#$"perftest"}"
+}
+
+
+function reserve_testbed () {
+
+ set -exuo pipefail
+
+ # Reserve physical testbed, perform cleanup, register trap to unreserve.
+ #
+ # Variables read:
+ # - TOPOLOGIES - Array of paths to topology yaml to attempt reservation on.
+ # - PYTHON_SCRIPTS_DIR - Path to directory holding the reservation script.
+ # Variables set:
+ # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
+ # Functions called:
+ # - die - Print to stderr and exit.
+ # Traps registered:
+ # - EXIT - Calls cancel_all for ${WORKING_TOPOLOGY}.
+
+ while true; do
+ for topo in "${TOPOLOGIES[@]}"; do
+ set +e
+ python "${PYTHON_SCRIPTS_DIR}/topo_reservation.py" -t "${topo}"
+ result="$?"
+ set -e
+ if [[ "${result}" == "0" ]]; then
+ WORKING_TOPOLOGY="${topo}"
+ echo "Reserved: ${WORKING_TOPOLOGY}"
+ python "${PYTHON_SCRIPTS_DIR}/topo_cleanup.py" -t "${topo}" || {
+ die "Testbed cleanup failed."
+ }
+ trap "untrap_and_unreserve_testbed" EXIT || {
+ message="TRAP ATTEMPT AND UNRESERVE FAILED, FIX MANUALLY."
+ untrap_and_unreserve_testbed "${message}" || {
+ die "Teardown should have died, not failed."
+ }
+ die "Trap attempt failed, unreserve succeeded. Aborting."
+ }
+ break
+ fi
+ done
+
+ if [[ -n "${WORKING_TOPOLOGY-}" ]]; then
+ # Exit the infinite while loop if we made a reservation.
+ break
+ fi
+
+        # Wait ~3 minutes before the next try.
+        sleep_time="$(( ( RANDOM % 20 ) + 180 ))s" || {
+ die "Sleep time calculation failed."
+ }
+ echo "Sleeping ${sleep_time}"
+ sleep "${sleep_time}" || die "Sleep failed."
+ done
+}
+
+
+function run_pybot () {
+
+ set -exuo pipefail
+
+ # Currently, VPP-1361 causes occasional test failures.
+    # If a real result is more important than time, we can retry a few times.
+ # TODO: We should be retrying on test case level instead.
+
+ # Arguments:
+ # - ${1} - Optional number of pybot invocations to try to avoid failures.
+ # Default: 1.
+ # Variables read:
+ # - CSIT_DIR - Path to existing root of local CSIT git repository.
+ # - ARCHIVE_DIR - Path to store robot result files in.
+    # - PYBOT_ARGS, EXPANDED_TAGS - See compose_pybot_arguments.
+ # Variables set:
+ # - PYBOT_EXIT_STATUS - Exit status of most recent pybot invocation.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ # Set ${tries} as an integer variable, to fail on non-numeric input.
+ local -i "tries" || die "Setting type of variable failed."
+ tries="${1:-1}" || die "Argument evaluation failed."
+ all_options=("--outputdir" "${ARCHIVE_DIR}" "${PYBOT_ARGS[@]}")
+ all_options+=("${EXPANDED_TAGS[@]}")
+
+ while true; do
+ if [[ "${tries}" -le 0 ]]; then
+ break
+ else
+ tries="$((${tries} - 1))"
+ fi
+ pushd "${CSIT_DIR}" || die "Change directory operation failed."
+ set +e
+ # TODO: Make robot tests not require "$(pwd)" == "${CSIT_DIR}".
+ pybot "${all_options[@]}" "${CSIT_DIR}/tests/"
+ PYBOT_EXIT_STATUS="$?"
+ set -e
+ popd || die "Change directory operation failed."
+ if [[ "${PYBOT_EXIT_STATUS}" == "0" ]]; then
+ break
+ fi
+ done
+}
+
+
+function select_tags () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
+ # - TEST_CODE - String affecting test selection, usually jenkins job name.
+ # - TEST_TAG_STRING - String selecting tags, from gerrit comment.
+ # Can be unset.
+    # - TOPOLOGIES_DIR - Path to existing directory with available topologies.
+ # Variables set:
+ # - TAGS - Array of processed tag boolean expressions.
+
+    # TODO: Empty exclude_nics (with failing grep) is expected,
+    # but other possible errors could be checked explicitly.
+ # NIC SELECTION
+ # All topologies NICs
+ available=$(grep -hoPR "model: \K.*" "${TOPOLOGIES_DIR}"/* | sort -u)
+ # Selected topology NICs
+ reserved=$(grep -hoPR "model: \K.*" "${WORKING_TOPOLOGY}" | sort -u)
+ # All topologies NICs - Selected topology NICs
+ exclude_nics=($(comm -13 <(echo "${reserved}") <(echo "${available}")))
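+    # Example (illustrative): if only x710 NICs are reserved while xl710
+    # models exist elsewhere, exclude_nics could be ("Intel-XL710"),
+    # later expanded to the exclude tag "!NIC_Intel-XL710".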
+
+ case "${TEST_CODE}" in
+ # Select specific performance tests based on jenkins job type variable.
+ *"ndrpdr-weekly"* )
+ test_tag_array=("ndrpdrAND64bAND1c"
+ "ndrpdrAND78bAND1c")
+ ;;
+ *"mrr-daily"* | *"mrr-weekly"* )
+ test_tag_array=("mrrAND64bAND1c"
+ "mrrAND64bAND2c"
+ "mrrAND64bAND4c"
+ "mrrAND78bAND1c"
+ "mrrAND78bAND2c"
+ "mrrAND78bAND4c"
+ "mrrANDimixAND1cANDvhost"
+ "mrrANDimixAND2cANDvhost"
+ "mrrANDimixAND4cANDvhost"
+ "mrrANDimixAND1cANDmemif"
+ "mrrANDimixAND2cANDmemif"
+ "mrrANDimixAND4cANDmemif")
+ ;;
+ * )
+ if [[ -z "${TEST_TAG_STRING-}" ]]; then
+ # If nothing is specified, we will run pre-selected tests by
+ # following tags. Items of array will be concatenated by OR
+ # in Robot Framework.
+ test_tag_array=("mrrANDnic_intel-x710AND1cAND64bANDip4base"
+ "mrrANDnic_intel-x710AND1cAND78bANDip6base"
+ "mrrANDnic_intel-x710AND1cAND64bANDl2bdbase"
+ "mrrANDnic_intel-x710AND1cAND64bANDl2xcbase")
+ else
+ # If trigger contains tags, split them into array.
+ test_tag_array=(${TEST_TAG_STRING//:/ })
+ fi
+ ;;
+ esac
+
+ # We will add excluded NICs.
+ test_tag_array+=("${exclude_nics[@]/#/!NIC_}")
+
+ TAGS=()
+
+ # We will prefix with perftest to prevent running other tests
+ # (e.g. Functional).
+ prefix="perftestAND"
+ if [[ "${TEST_CODE}" == "vpp-"* ]]; then
+ # Automatic prefixing for VPP jobs to limit the NIC used and
+ # traffic evaluation to MRR.
+ prefix="${prefix}mrrANDnic_intel-x710AND"
+ fi
+ for tag in "${test_tag_array[@]}"; do
+ if [[ ${tag} == "!"* ]]; then
+ # Exclude tags are not prefixed.
+ TAGS+=("${tag}")
+ else
+ TAGS+=("${prefix}${tag}")
+ fi
+ done
+}
+
+
+function select_topology () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - NODENESS - Node multiplicity of testbed, either "2n" or "3n".
+ # - FLAVOR - Node flavor string, currently either "hsw" or "skx".
+ # - CSIT_DIR - Path to existing root of local CSIT git repository.
+    # - TOPOLOGIES_DIR - Path to existing directory with available topologies.
+ # Variables set:
+ # - TOPOLOGIES - Array of paths to suitable topology yaml files.
+ # - TOPOLOGIES_TAGS - Tag expression selecting tests for the topology.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ case_text="${NODENESS}_${FLAVOR}"
+ case "${case_text}" in
+ "3n_hsw")
+ TOPOLOGIES=(
+ "${TOPOLOGIES_DIR}/lf_3n_hsw_testbed1.yaml"
+ "${TOPOLOGIES_DIR}/lf_3n_hsw_testbed2.yaml"
+ "${TOPOLOGIES_DIR}/lf_3n_hsw_testbed3.yaml"
+ )
+ TOPOLOGIES_TAGS="3_node_*_link_topo"
+ ;;
+ "2n_skx")
+ TOPOLOGIES=(
+ "${TOPOLOGIES_DIR}/lf_2n_skx_testbed21.yaml"
+ #"${TOPOLOGIES_DIR}/lf_2n_skx_testbed22.yaml"
+ #"${TOPOLOGIES_DIR}/lf_2n_skx_testbed23.yaml"
+ "${TOPOLOGIES_DIR}/lf_2n_skx_testbed24.yaml"
+ )
+ TOPOLOGIES_TAGS="2_node_*_link_topo"
+ ;;
+ "3n_skx")
+ TOPOLOGIES=(
+ "${TOPOLOGIES_DIR}/lf_3n_skx_testbed31.yaml"
+ "${TOPOLOGIES_DIR}/lf_3n_skx_testbed32.yaml"
+ )
+ TOPOLOGIES_TAGS="3_node_*_link_topo"
+ ;;
+ *)
+            # No fallback to the 3n_hsw default here; that should have been
+            # done by the function which set NODENESS and FLAVOR.
+ die "Unknown specification: ${case_text}"
+ esac
+
+ if [[ -z "${TOPOLOGIES-}" ]]; then
+ die "No applicable topology found!"
+ fi
+}
+
+
+function untrap_and_unreserve_testbed () {
+ # Use this as a trap function to ensure testbed does not remain reserved.
+ # Perhaps call directly before script exit, to free testbed for other jobs.
+    # This function is smart enough to avoid multiple unreservations, so it
+    # is safe to call repeatedly.
+    # Topology cleanup is executed as a best practice; failures are ignored.
+ #
+ # Hardcoded values:
+ # - default message to die with if testbed might remain reserved.
+ # Arguments:
+ # - ${1} - Message to die with if unreservation fails. Default hardcoded.
+ # Variables read (by inner function):
+ # - WORKING_TOPOLOGY - Path to topology yaml file of the reserved testbed.
+ # - PYTHON_SCRIPTS_DIR - Path to directory holding Python scripts.
+ # Variables written:
+ # - WORKING_TOPOLOGY - Set to empty string on successful unreservation.
+ # Trap unregistered:
+ # - EXIT - Failure to untrap is reported, but ignored otherwise.
+ # Functions called:
+ # - die - Print to stderr and exit.
+
+ set -xo pipefail
+ set +eu # We do not want to exit early in a "teardown" function.
+ trap - EXIT || echo "Trap deactivation failed, continuing anyway."
+ wt="${WORKING_TOPOLOGY}" # Just to avoid too long lines.
+ if [[ -z "${wt-}" ]]; then
+ set -eu
+ echo "Testbed looks unreserved already. Trap removal failed before?"
+ else
+ python "${PYTHON_SCRIPTS_DIR}/topo_cleanup.py" -t "${wt}" || true
+ python "${PYTHON_SCRIPTS_DIR}/topo_reservation.py" -c -t "${wt}" || {
+ die "${1:-FAILED TO UNRESERVE, FIX MANUALLY.}" 2
+ }
+ WORKING_TOPOLOGY=""
+ set -eu
+ fi
+}
+
+
+function warn () {
+ # Print the message to standard error.
+ #
+ # Arguments:
+ # - ${@} - The text of the message.
+
+ echo "$@" >&2
+}
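
A worked example (inputs assumed, for illustration only) of how the tag
processing in this file composes pybot arguments:

    # Assumed gerrit comment:  perftest l2bdbase !nic_intel-xl710
    # get_test_tag_string sets:
    #   TEST_TAG_STRING=" l2bdbase !nic_intel-xl710"
    # select_tags (for a "vpp-"* TEST_CODE) prefixes non-exclude tags:
    #   TAGS=("perftestANDmrrANDnic_intel-x710ANDl2bdbase" "!nic_intel-xl710")
    # (select_tags also appends !NIC_* excludes for NIC models absent
    # from the reserved topology.)
    # compose_pybot_arguments (for a 3-node topology) then expands:
    #   EXPANDED_TAGS=("--include"
    #       "3_node_*_link_topoANDperftestANDmrrANDnic_intel-x710ANDl2bdbase"
    #       "--exclude" "nic_intel-xl710")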
diff --git a/resources/libraries/bash/function/gather.sh b/resources/libraries/bash/function/gather.sh
new file mode 100644
index 0000000000..f490c80110
--- /dev/null
+++ b/resources/libraries/bash/function/gather.sh
@@ -0,0 +1,307 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+# This library defines functions used mainly by "bootstrap" entry scripts.
+# The functions generally assume the "common.sh" library was sourced already.
+
+# Keep functions ordered alphabetically, please.
+
+# TODO: Add a link to bash style guide.
+
+
+function gather_build () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - TEST_CODE - String affecting test selection, usually jenkins job name.
+ # - DOWNLOAD_DIR - Path to directory pybot takes the build to test from.
+ # Variables set:
+ # - DUT - CSIT test/ subdirectory containing suites to execute.
+ # Directories updated:
+ # - ${DOWNLOAD_DIR} - Files needed by tests are gathered here.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+ # - gather_dpdk, gather_vpp, gather_ligato - See their definitions.
+ # Multiple other side effects are possible,
+ # see functions called from here for their current description.
+
+ # TODO: Separate DUT-from-TEST_CODE from gather-for-DUT,
+ # when the first one becomes relevant for per_patch.
+
+ pushd "${DOWNLOAD_DIR}" || die "Pushd failed."
+ case "${TEST_CODE}" in
+ *"hc2vpp"*)
+ DUT="hc2vpp"
+ # FIXME: Avoid failing on empty ${DOWNLOAD_DIR}.
+ ;;
+ *"vpp"*)
+ DUT="vpp"
+ gather_vpp || die "The function should have died on error."
+ ;;
+ *"ligato"*)
+ DUT="kubernetes"
+ gather_ligato || die "The function should have died on error."
+ ;;
+ *"dpdk"*)
+ DUT="dpdk"
+ gather_dpdk || die "The function should have died on error."
+ ;;
+ *)
+ die "Unable to identify DUT type from: ${TEST_CODE}"
+ ;;
+ esac
+ popd || die "Popd failed."
+}
+
+
+function gather_dpdk () {
+
+ set -exuo pipefail
+
+ # Ensure latest DPDK archive is downloaded.
+ #
+ # Variables read:
+ # - TEST_CODE - The test selection string from environment or argument.
+ # Hardcoded:
+ # - dpdk archive name to download if TEST_CODE is not time based.
+ # Directories updated:
+ # - ./ - Assumed ${DOWNLOAD_DIR}, dpdk-*.tar.xz is downloaded if not there.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+
+ dpdk_repo="https://fast.dpdk.org/rel"
+    # Download either the latest or a pinned DPDK version, based on job type.
+ if [[ "${TEST_CODE}" == *"daily"* ]] || \
+ [[ "${TEST_CODE}" == *"weekly"* ]] || \
+ [[ "${TEST_CODE}" == *"timed"* ]];
+ then
+ echo "Downloading latest DPDK packages from repo..."
+        # The command is built as an array, so argument quoting is preserved.
+ wget_command=("wget" "--no-check-certificate" "-nv" "-O" "-")
+ wget_command+=("${dpdk_repo}")
+ dpdk_stable_ver="$("${wget_command[@]}" | grep -v "2015"\
+ | grep -Eo 'dpdk-[^\"]+xz' | tail -1)" || {
+ die "Composite piped command failed."
+ }
+ else
+ echo "Downloading DPDK packages of specific version from repo..."
+ # TODO: Can we autodetect this based on what CSIT-stable VPP uses?
+ dpdk_stable_ver="dpdk-18.08.tar.xz"
+ fi
+ # TODO: Use "wget -N" instead checking for file presence?
+ if [[ ! -f "${dpdk_stable_ver}" ]]; then
+ wget -nv --no-check-certificate "${dpdk_repo}/${dpdk_stable_ver}" || {
+ die "Failed to get DPDK package from: ${dpdk_repo}"
+ }
+ fi
+}
+
+
+function gather_ligato () {
+
+ set -exuo pipefail
+
+ # Build docker image (with vpp, ligato and vpp-agent),
+ # and put it to ${DOWNLOAD_DIR}/.
+ #
+ # Access rights needed for:
+ # - "wget", "git clone", "dpdk -x", "cd" above ${CSIT_DIR}.
+ # - "sudo" without password.
+ # - With sudo:
+    #   - "dpkg -i" is allowed.
+    #   - "docker" commands have everything they need.
+ # Variables read:
+ # - DOWNLOAD_DIR - Path to directory pybot takes the build to test from.
+ # - CSIT_DIR - Path to existing root of local CSIT git repository.
+ # Files read:
+ # - ${CSIT_DIR}/VPP_AGENT_STABLE_VER - Vpp agent version to use.
+ # Directories updated:
+ # - ${DOWNLOAD_DIR} - Docker image stored, VPP *.deb stored and deleted.
+ # - /tmp/vpp - VPP is unpacked there, not cleaned afterwards.
+    # - ${CSIT_DIR}/vpp-agent - Created, the vpp-agent git repo is cloned there.
+ # - Also, various temporary files are stored there.
+ # System consequences:
+ # - Docker package is installed.
+ # - Presumably dockerd process is started.
+ # - The ligato/dev-vpp-agent docker image is downloaded.
+ # - Results of subsequent image manipulation are probably left lingering.
+ # Other hardcoded values:
+ # - Docker .deb file name to download and install.
+ # Functions called:
+    # - die - Print to stderr and exit, defined in common.sh
+    # - gather_vpp - See the eponymous function, assumed to be defined already.
+ # TODO: What is the best order of description items?
+
+ # TODO: Many of the following comments act as abstraction.
+ # But the abstracted blocks are mostly one-liners (plus "|| die"),
+ # so maybe it is not worth introducing fragments/functions for the blocks.
+ # TODO: This fragment is too long anyway, split it up.
+
+ gather_vpp || die "The function should have died on error."
+
+ # Extract VPP API to specific folder
+ # FIXME: Make sure /tmp/vpp/ exists. Should we clean it?
+ dpkg -x "${DOWNLOAD_DIR}/vpp_"*".deb" "/tmp/vpp" || {
+ die "Failed to extract VPP packages for kubernetes!"
+ }
+
+ ligato_repo_url="https://github.com/ligato/"
+ vpp_agent_stable_ver="$(cat "${CSIT_DIR}/VPP_AGENT_STABLE_VER")" || {
+ die "Cat failed."
+ }
+ docker_deb="docker-ce_18.03.0~ce-0~ubuntu_amd64.deb"
+
+ # Clone & checkout stable vpp-agent
+ cd "${CSIT_DIR}" || die "Change directory failed."
+ git clone -b "${vpp_agent_stable_ver}" --single-branch \
+ "${ligato_repo_url}/vpp-agent" "vpp-agent" || {
+ die "Failed to run: git clone ${ligato_repo_url}/vpp-agent!"
+ }
+ cd "vpp-agent" || die "Change directory failed."
+
+ # Install Docker
+ url_prefix="https://download.docker.com/linux/ubuntu/dists/xenial/pool"
+    # The command is built as an array, so argument quoting is preserved.
+ wget_command=("wget" "-nv" "${url_prefix}/stable/amd64/${docker_deb}")
+ "${wget_command[@]}" || die "Failed to download Docker package!"
+
+ sudo dpkg -i "${docker_deb}" || die "Failed to install Docker!"
+
+ # Pull ligato/dev_vpp_agent docker image and re-tag as local
+ sudo docker pull "ligato/dev-vpp-agent:${vpp_agent_stable_ver}" || {
+ die "Failed to pull Docker image!"
+ }
+
+ first_arg="ligato/dev-vpp-agent:${vpp_agent_stable_ver}"
+ sudo docker tag "${first_arg}" "dev_vpp_agent:latest" || {
+ die "Failed to tag Docker image!"
+ }
+
+ # Start dev_vpp_agent container as daemon
+ sudo docker run --rm -itd --name "agentcnt" "dev_vpp_agent" bash || {
+ die "Failed to run Docker image!"
+ }
+
+ # Copy latest vpp api into running container
+    sudo docker exec agentcnt rm -rf "/usr/share/vpp/api" || {
+ die "Failed to remove previous API!"
+ }
+ sudo docker cp "/tmp/vpp/usr/share/vpp/api" "agentcnt:/usr/share/vpp" || {
+ die "Failed to copy files Docker image!"
+ }
+
+ # Recompile vpp-agent
+ script_arg=". ~/.bashrc; cd /go/src/github.com/ligato/vpp-agent"
+ script_arg+=" && make generate && make install"
+ sudo docker exec -i agentcnt script -qec "${script_arg}" || {
+ die "Failed to recompile vpp-agent in Docker image!"
+ }
+ # Make sure .deb files of other version are not present.
+ rm_cmd="rm -vf /opt/vpp-agent/dev/vpp/build-root/vpp*.deb /opt/vpp/*.deb"
+ sudo docker exec agentcnt bash -c "${rm_cmd}" || {
+ die "Failed to remove VPP debian packages!"
+ }
+ for f in "${DOWNLOAD_DIR}"/*; do
+ sudo docker cp "$f" "agentcnt:/opt/vpp-agent/dev/vpp/build-root"/ || {
+ die "Failed to copy files Docker image!"
+ }
+ done
+ # Save container state
+ sudo docker commit "$(sudo docker ps -q)" "dev_vpp_agent:latest" || {
+ die "Failed to commit state of Docker image!"
+ }
+
+ # Build prod_vpp_agent docker image
+ cd "docker/prod" || die "Change directory failed."
+ sudo docker build --tag "prod_vpp_agent" --no-cache "." || {
+ die "Failed to build Docker image!"
+ }
+ # Export Docker image
+ sudo docker save "prod_vpp_agent" | gzip > "prod_vpp_agent.tar.gz" || {
+ die "Failed to save Docker image!"
+ }
+ docker_image="$(readlink -e "prod_vpp_agent.tar.gz")" || {
+ die "Readlink failed."
+ }
+ rm -r "${DOWNLOAD_DIR}/vpp"* || die "Rm failed."
+ mv "${docker_image}" "${DOWNLOAD_DIR}"/ || die "Mv failed."
+}
+
+
+function gather_vpp () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - TEST_CODE - The test selection string from environment or argument.
+ # - DOWNLOAD_DIR - Path to directory pybot takes the build to test from.
+ # - CSIT_DIR - Path to existing root of local CSIT git repository.
+ # Files read:
+ # - ${CSIT_DIR}/DPDK_STABLE_VER - DPDK version to use
+ # by csit-vpp not-timed jobs.
+ # - ${CSIT_DIR}/VPP_STABLE_VER_UBUNTU - VPP version to use by those.
+ # - ../vpp*.deb - Relative to ${DOWNLOAD_DIR}, copied for vpp-csit jobs.
+ # Directories updated:
+ # - ${DOWNLOAD_DIR}, vpp-*.deb files are copied here for vpp-csit jobs.
+ # - ./ - Assumed ${DOWNLOAD_DIR}, vpp-*.deb files
+ # are downloaded here for csit-vpp.
+ # Functions called:
+    # - die - Print to stderr and exit, defined in common.sh
+ # Bash scripts executed:
+ # - ${CSIT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh
+ # - Should download and extract requested files to ./.
+
+ case "${TEST_CODE}" in
+        # Not "csit-vpp-"* only, as this code is re-used by ligato gathering.
+ "csit-"*)
+ install_script="${CSIT_DIR}/resources/tools/scripts/"
+ install_script+="download_install_vpp_pkgs.sh"
+            # Download either latest or stable-version packages, by job type.
+ if [[ "${TEST_CODE}" == *"daily"* ]] || \
+ [[ "${TEST_CODE}" == *"weekly"* ]] || \
+ [[ "${TEST_CODE}" == *"timed"* ]];
+ then
+ echo "Downloading latest VPP packages from NEXUS..."
+ # TODO: Can we source?
+ bash "${install_script}" --skip-install || {
+ die "Failed to get VPP packages!"
+ }
+ else
+ echo "Downloading VPP packages of specific version from NEXUS."
+ dpdk_stable_ver="$(cat "${CSIT_DIR}/DPDK_STABLE_VER")" || {
+ die "Cat failed."
+ }
+ vpp_stable_ver="$(cat "${CSIT_DIR}/VPP_STABLE_VER_UBUNTU")" || {
+ die "Cat failed."
+ }
+ install_args=("--skip-install" "--vpp" "${vpp_stable_ver}")
+ install_args+=("--dkms" "${dpdk_stable_ver}")
+ bash "${install_script}" "${install_args[@]}" || {
+ die "Failed to get VPP packages!"
+ }
+ fi
+ ;;
+ "vpp-csit-"*)
+ # Use local built packages.
+ mv "${DOWNLOAD_DIR}"/../"vpp"*".deb" "${DOWNLOAD_DIR}"/ || {
+ die "Move command failed."
+ }
+ ;;
+ *)
+ die "Unable to identify job type from: ${TEST_CODE}"
+ ;;
+ esac
+}
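
A hypothetical manual invocation (a sketch assuming common.sh is sourced
first; the job name is illustrative):

    source resources/libraries/bash/function/common.sh
    source resources/libraries/bash/function/gather.sh
    common_dirs                                 # sets CSIT_DIR, DOWNLOAD_DIR, ...
    TEST_CODE="csit-vpp-perf-mrr-daily-master"  # normally set by get_test_code
    gather_build                                # sets DUT, gathers vpp *.deb files
    check_download_dir                          # dies if nothing was gathered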
diff --git a/resources/libraries/bash/function/per_patch.sh b/resources/libraries/bash/function/per_patch.sh
new file mode 100644
index 0000000000..15eaf028ee
--- /dev/null
+++ b/resources/libraries/bash/function/per_patch.sh
@@ -0,0 +1,314 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+# This library defines functions used mainly by "per_patch_perf.sh" entry script.
+# Generally, the functions assume "common.sh" library has been sourced already.
+
+# Keep functions ordered alphabetically, please.
+
+# TODO: Add a link to bash style guide.
+
+
+function build_vpp_ubuntu_amd64 () {
+
+ set -exuo pipefail
+
+    # TODO: Check whether this works on other distros/archs too.
+
+ # Arguments:
+ # - ${1} - String identifier for echo, can be unset.
+ # Variables read:
+ # - VPP_DIR - Path to existing directory, parent to accessed directories.
+ # Directories updated:
+ # - ${VPP_DIR} - Whole subtree, many files (re)created by the build process.
+ # - ${VPP_DIR}/build-root - Final build artifacts for CSIT end up here.
+ # - ${VPP_DIR}/dpdk - The dpdk artifact is built, but moved to build-root/.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+
+ cd "${VPP_DIR}" || die "Change directory command failed."
+ echo 'Building using "make build-root/vagrant/build.sh"'
+ # TODO: Do we want to support "${DRYRUN}" == "True"?
+ make UNATTENDED=yes install-dep || die "Make install-dep failed."
+    # The per_patch script calls this function twice: first for the new
+    # commit, then for its parent commit. On Jenkins no dpdk is installed
+    # at first (locally it might have been), and by the second call
+    # the new dpdk has been installed. If make detects an installed
+    # vpp-dpdk-dev with a matching version, it skips building
+    # vpp-dpdk-dkms entirely, but we need that file.
+    # On the other hand, if the parent uses a different dpdk version,
+    # the new vpp-dpdk-dkms is built, but the old one is not removed
+ # from the build directory if present. (Further functions move both,
+ # and during test dpkg decides on its own which version gets installed.)
+ # As per_patch is too dumb (yet) to detect any of that,
+ # the only safe solution is to clean build directory and force rebuild.
+ # TODO: Make this function smarter and skip DPDK rebuilds if possible.
+ cmd=("dpkg-query" "--showformat='$${Version}'" "--show" "vpp-dpdk-dev")
+ installed_deb_ver="$(sudo "${cmd[@]}" || true)"
+ if [[ -n "${installed_deb_ver}" ]]; then
+ sudo dpkg --purge "vpp-dpdk-dev" || {
+ die "Dpdk package uninstalation failed."
+ }
+ fi
+ make UNATTENDED=yes dpdk-install-dev || {
+ die "Make dpdk-install-dev failed."
+ }
+ build-root/vagrant/"build.sh" || die "Vagrant VPP build script failed."
+    # CSIT also needs the DPDK artifacts, which are not in build-root.
+ mv -v "dpdk/vpp-dpdk-dkms"*".deb" "build-root"/ || {
+ die "*.deb move failed."
+ }
+
+ echo "*******************************************************************"
+ echo "* VPP ${1-} BUILD SUCCESSFULLY COMPLETED" || {
+ die "Argument not found."
+ }
+ echo "*******************************************************************"
+}
+
+
+function compare_test_results () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - VPP_DIR - Path to directory with VPP git repo (at least built parts).
+ # - ARCHIVE_DIR - Path to where robot result files are created in.
+ # - PYTHON_SCRIPTS_DIR - Path to directory holding comparison utility.
+ # Directories recreated:
+ # - csit_parent - Sibling to csit directory, for holding results
+ # of parent build.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+ # - parse_bmrr_results - See definition in this file.
+ # Exit code:
+ # - 0 - If the comparison utility sees no regression (nor data error).
+ # - 1 - If the comparison utility sees a regression (or data error).
+
+ cd "${VPP_DIR}" || die "Change directory operation failed."
+ rm -rf "csit_parent" || die "Remove operation failed."
+ mkdir -p "csit_parent" || die "Directory creation failed."
+ for filename in "output.xml" "log.html" "report.html"; do
+ mv "${ARCHIVE_DIR}/${filename}" "csit_parent/${filename}" || {
+ die "Attempt to move '${filename}' failed."
+ }
+ done
+ parse_bmrr_results "csit_parent" || {
+ die "The function should have died on error."
+ }
+
+ # Reusing CSIT main virtualenv.
+ pip install -r "${PYTHON_SCRIPTS_DIR}/perpatch_requirements.txt" || {
+ die "Perpatch Python requirements installation failed."
+ }
+ python "${PYTHON_SCRIPTS_DIR}/compare_perpatch.py"
+ # The exit code determines the vote result.
+}
+
+
+function download_builds () {
+
+ set -exuo pipefail
+
+    # This is mostly useful for sandbox testing, to avoid recompilation.
+ #
+ # Arguments:
+ # - ${1} - URL to download VPP builds from.
+ # Variables read:
+ # - VPP_DIR - Path to WORKSPACE, parent of created directories.
+ # - DOWNLOAD_DIR - Path to directory pybot takes the build to test from.
+ # Directories created:
+ # - archive - Ends up empty, not to be confused with ${ARCHIVE_DIR}.
+ # - build_new - Holding built artifacts of the patch under test (PUT).
+    # - build_parent - Holding built artifacts of the parent of PUT.
+ # - csit_new - (Re)set to a symlink to archive robot results on failure.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+
+ cd "${VPP_DIR}" || die "Change directory operation failed."
+ rm -rf "build-root" "build_parent" "build_new" "archive" "csit_new" || {
+ die "Directory removal failed."
+ }
+ wget -N --progress=dot:giga "${1}" || die "Wget download failed."
+ unzip "archive.zip" || die "Archive extraction failed."
+ mv "archive/build_parent" ./ || die "Move operation failed."
+ mv "archive/build_new" ./ || die "Move operation failed."
+ cp -r "build_new"/*".deb" "${DOWNLOAD_DIR}" || {
+ die "Copy operation failed."
+ }
+ # Create symlinks so that if job fails on robot, results can be archived.
+ ln -s "${ARCHIVE_DIR}" "csit_new" || die "Symbolic link creation failed."
+}
+
+
+function parse_bmrr_results () {
+
+ set -exuo pipefail
+
+ # Currently "parsing" is just two greps.
+ # TODO: Re-use PAL parsing code, make parsing more general and centralized.
+ #
+ # Arguments:
+ # - ${1} - Path to (existing) directory holding robot output.xml result.
+ # Files read:
+ # - output.xml - From argument location.
+ # Files updated:
+ # - results.txt - (Re)created, in argument location.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+
+ rel_dir="$(readlink -e "${1}")" || die "Readlink failed."
+ in_file="${rel_dir}/output.xml"
+ out_file="${rel_dir}/results.txt"
+
+ # TODO: Do we need to check echo exit code explicitly?
+ echo "Parsing ${in_file} putting results into ${out_file}"
+ echo "TODO: Re-use parts of PAL when they support subsample test parsing."
+
+ pattern='Maximum Receive Rate trial results in packets'
+ pattern+=' per second: .*\]</status>'
+ grep -o "${pattern}" "${in_file}" | grep -o '\[.*\]' > "${out_file}" || {
+ die "Some parsing grep command has failed."
+ }
+}
+
+
+function prepare_build_parent () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - VPP_DIR - Path to existing directory, parent to accessed directories.
+ # Directories read:
+ # - build-root - Existing directory with built VPP artifacts (also DPDK).
+ # Directories updated:
+ # - ${VPP_DIR} - A local git repository, parent commit gets checked out.
+    # - build_new - Old contents removed, build-root *.deb files moved here.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+
+ cd "${VPP_DIR}" || die "Change directory operation failed."
+ rm -rf "build_new" || die "Remove operation failed."
+ mkdir -p "build_new" || die "Directory creation failed."
+ mv "build-root"/*".deb" "build_new"/ || die "Move operation failed."
+ # The previous build could have left some incompatible leftovers,
+ # e.g. DPDK artifacts of different version.
+ # "make -C dpdk clean" does not actually remove such .deb file.
+ # Also, there usually is a copy of dpdk artifact in build-root.
+ git clean -dffx "dpdk"/ "build-root"/ || die "Git clean operation failed."
+ # Finally, check out the parent commit.
+ git checkout HEAD~ || die "Git checkout operation failed."
+ # Display any other leftovers.
+ git status || die "Git status operation failed."
+}
+
+
+function prepare_test_new () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - VPP_DIR - Path to existing directory, parent of accessed directories.
+ # - DOWNLOAD_DIR - Path to directory where Robot takes builds to test from.
+ # - ARCHIVE_DIR - Path to where robot result files are created in.
+ # Directories read:
+ # - build-root - Existing directory with built VPP artifacts (also DPDK).
+ # Directories updated:
+    # - build_parent - Old directory removed, build-root *.deb files moved here.
+ # - ${DOWNLOAD_DIR} - Old content removed, files from build_new copied here.
+    # - csit_new - Currently a symlink, to archive robot results on failure.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+
+ cd "${VPP_DIR}" || die "Change directory operationf failed."
+ rm -rf "build_parent" "csit_new" "${DOWNLOAD_DIR}"/* || die "Remove failed."
+ mkdir -p "build_parent" || die "Directory creation operation failed."
+ mv "build-root"/*".deb" "build_parent"/ || die "Move operation failed."
+ cp "build_new"/*".deb" "${DOWNLOAD_DIR}" || die "Copy operation failed."
+ # Create symlinks so that if job fails on robot, results can be archived.
+ ln -s "${ARCHIVE_DIR}" "csit_new" || die "Symbolic link creation failed."
+}
+
+
+function prepare_test_parent () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - VPP_DIR - Path to existing directory, parent of accessed directories.
+ # - CSIT_DIR - Path to existing root of local CSIT git repository.
+ # - ARCHIVE_DIR and DOWNLOAD_DIR - Paths to directories to update.
+ # Directories read:
+ # - build_parent - Build artifacts (to test next) are copied from here.
+ # Directories updated:
+ # - csit_new - Deleted, then recreated and latest robot results copied here.
+ # - ${CSIT_DIR} - Subjected to git reset and git clean.
+ # - ${ARCHIVE_DIR} - Created if not existing (if deleted by git clean).
+ # - ${DOWNLOAD_DIR} - Created after git clean, parent build copied here.
+    # - csit_parent - Currently a symlink, to archive robot results.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+ # - parse_bmrr_results - See definition in this file.
+
+ cd "${VPP_DIR}" || die "Change directory operation failed."
+ rm -rf "csit_new" "csit_parent" || die "Remove operation failed."
+ mkdir -p "csit_new" || die "Create directory operation failed."
+ for filename in "output.xml" "log.html" "report.html"; do
+ mv "${ARCHIVE_DIR}/${filename}" "csit_new/${filename}" || {
+ die "Move operation of '${filename}' failed."
+ }
+ done
+ parse_bmrr_results "csit_new" || {
+ die "The function should have died on error."
+ }
+
+ pushd "${CSIT_DIR}" || die "Change directory operation failed."
+ git reset --hard HEAD || die "Git reset operation failed."
+ git clean -dffx || die "Git clean operation failed."
+ popd || die "Change directory operation failed."
+ mkdir -p "${ARCHIVE_DIR}" "${DOWNLOAD_DIR}" || die "Dir creation failed."
+
+ cp "build_parent"/*".deb" "${DOWNLOAD_DIR}"/ || die "Copy failed."
+ # Create symlinks so that if job fails on robot, results can be archived.
+ ln -s "${ARCHIVE_DIR}" "csit_parent" || die "Symlink creation failed."
+}
+
+
+function set_perpatch_dut () {
+
+ set -exuo pipefail
+
+ # Variables set:
+ # - DUT - CSIT test/ subdirectory containing suites to execute.
+
+ # TODO: Detect DUT from job name, when we have more than just VPP perpatch.
+
+ DUT="vpp"
+}
+
+
+function set_perpatch_vpp_dir () {
+
+ set -exuo pipefail
+
+ # Variables read:
+ # - CSIT_DIR - Path to existing root of local CSIT git repository.
+ # Variables set:
+ # - VPP_DIR - Path to existing root of local VPP git repository.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+
+ # In perpatch, CSIT is cloned inside VPP clone.
+ VPP_DIR="$(readlink -e "${CSIT_DIR}/..")" || die "Readlink failed."
+}