Diffstat (limited to 'resources/libraries')
-rw-r--r--  resources/libraries/bash/function/ansible.sh | 24
-rw-r--r--  resources/libraries/bash/function/common.sh | 143
-rw-r--r--  resources/libraries/bash/function/device.sh | 4
-rw-r--r--  resources/libraries/bash/function/gather.sh | 136
-rw-r--r--  resources/libraries/bash/function/gather_dpdk.sh | 66
-rw-r--r--  resources/libraries/bash/function/gather_trex.sh | 30
-rw-r--r--  resources/libraries/bash/function/gather_vpp.sh | 80
-rw-r--r--  resources/libraries/bash/function/hugo.sh | 24
-rw-r--r--  resources/libraries/python/Constants.py | 9
-rw-r--r--  resources/libraries/python/ContainerUtils.py | 3
-rw-r--r--  resources/libraries/python/IPsecUtil.py | 8
-rw-r--r--  resources/libraries/python/PapiExecutor.py | 8
-rw-r--r--  resources/libraries/python/TrafficGenerator.py | 20
-rw-r--r--  resources/libraries/robot/l2/l2_bridge_domain.robot | 81
-rw-r--r--  resources/libraries/robot/l2/l2_xconnect.robot | 48
-rw-r--r--  resources/libraries/robot/performance/performance_actions.robot | 32
16 files changed, 346 insertions(+), 370 deletions(-)
diff --git a/resources/libraries/bash/function/ansible.sh b/resources/libraries/bash/function/ansible.sh
index 6cf4d16f43..587c59cba7 100644
--- a/resources/libraries/bash/function/ansible.sh
+++ b/resources/libraries/bash/function/ansible.sh
@@ -27,11 +27,17 @@ function ansible_adhoc () {
set -exuo pipefail
- case "$FLAVOR" in
- "aws" | "c6in" | "c6gn" | "c7gn")
+ case "${TEST_CODE}" in
+ *"1n-aws" | *"2n-aws" | *"3n-aws")
INVENTORY_PATH="cloud_inventory"
;;
- "x-"*)
+ *"2n-c7gn" | *"3n-c7gn")
+ INVENTORY_PATH="cloud_inventory"
+ ;;
+ *"1n-c6in" | *"2n-c6in" | *"3n-c6in")
+ INVENTORY_PATH="cloud_inventory"
+ ;;
+ *"-x-2n"* | *"-x-3n"*)
INVENTORY_PATH="external_inventory"
;;
*)
@@ -72,11 +78,17 @@ function ansible_playbook () {
set -exuo pipefail
- case "$FLAVOR" in
- "aws" | "c6in" | "c6gn" | "c7gn")
+ case "${TEST_CODE}" in
+ *"1n-aws" | *"2n-aws" | *"3n-aws")
+ INVENTORY_PATH="cloud_inventory"
+ ;;
+ *"2n-c7gn" | *"3n-c7gn")
+ INVENTORY_PATH="cloud_inventory"
+ ;;
+ *"1n-c6in" | *"2n-c6in" | *"3n-c6in")
INVENTORY_PATH="cloud_inventory"
;;
- "x-"*)
+ *"-x-2n"* | *"-x-3n"*)
INVENTORY_PATH="external_inventory"
;;
*)
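
The inventory selection above now keys on the full TEST_CODE job name rather than the FLAVOR variable. A minimal sketch of how the glob patterns resolve, using hypothetical job names (the real names come from the CI):

    # Hypothetical TEST_CODE values, for illustration only.
    for TEST_CODE in "csit-vpp-perf-mrr-daily-master-2n-aws" \
                     "csit-vpp-perf-report-iterative-x-2n-emr"; do
        case "${TEST_CODE}" in
            *"1n-aws" | *"2n-aws" | *"3n-aws")
                echo "${TEST_CODE} -> cloud_inventory" ;;
            *"-x-2n"* | *"-x-3n"*)
                echo "${TEST_CODE} -> external_inventory" ;;
            *)
                echo "${TEST_CODE} -> default inventory" ;;
        esac
    done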
diff --git a/resources/libraries/bash/function/common.sh b/resources/libraries/bash/function/common.sh
index 8a46fc329d..f6cf378167 100644
--- a/resources/libraries/bash/function/common.sh
+++ b/resources/libraries/bash/function/common.sh
@@ -17,11 +17,6 @@ set -exuo pipefail
# This library defines functions used by multiple entry scripts.
# Keep functions ordered alphabetically, please.
-# TODO: Add a link to bash style guide.
-# TODO: Consider putting every die into a {} block,
-# the code might become more readable (but longer).
-
-
function activate_docker_topology () {
# Create virtual vpp-device topology. Output of the function is topology
@@ -124,9 +119,9 @@ function activate_virtualenv () {
env_dir="${root_path}/env"
req_path=${2-$CSIT_DIR/requirements.txt}
rm -rf "${env_dir}" || die "Failed to clean previous virtualenv."
- pip3 install virtualenv==20.15.1 || {
- die "Virtualenv package install failed."
- }
+ #pip3 install virtualenv==20.26.3 || {
+ # die "Virtualenv package install failed."
+ #}
virtualenv --no-download --python=$(which python3) "${env_dir}" || {
die "Virtualenv creation for $(which python3) failed."
}
@@ -492,10 +487,6 @@ function get_test_code () {
NODENESS="2n"
FLAVOR="zn2"
;;
- *"2n-clx")
- NODENESS="2n"
- FLAVOR="clx"
- ;;
*"2n-icx")
NODENESS="2n"
FLAVOR="icx"
@@ -524,25 +515,23 @@ function get_test_code () {
NODENESS="3n"
FLAVOR="icxd"
;;
- *"2n-tx2")
- NODENESS="2n"
- FLAVOR="tx2"
- ;;
- *"3n-tsh")
- NODENESS="3n"
- FLAVOR="tsh"
- ;;
*"3n-alt")
NODENESS="3n"
FLAVOR="alt"
;;
- *"2n-x-"*)
+ *"2n-grc")
NODENESS="2n"
- FLAVOR="${TEST_CODE#*2n-}"
+ FLAVOR="grc"
;;
- *"3n-x-"*)
- NODENESS="3n"
- FLAVOR="${TEST_CODE#*3n-}"
+ *"-x-2n"*)
+ TESTBED="${TEST_CODE#${TEST_CODE%2n*}}"
+ NODENESS="${TESTBED%-${TEST_CODE#*-x-2n*-}}"
+ FLAVOR="${TEST_CODE#*-x-2n*-}"
+ ;;
+ *"-x-3n"*)
+ TESTBED="${TEST_CODE#${TEST_CODE%3n*}}"
+ NODENESS="${TESTBED%-${TEST_CODE#*-x-3n*-}}"
+ FLAVOR="${TEST_CODE#*-x-3n*-}"
;;
esac
}
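
The external-testbed branches derive TESTBED, NODENESS and FLAVOR from TEST_CODE via nested parameter expansions. A worked sketch, assuming a hypothetical job name:

    TEST_CODE="csit-vpp-perf-report-iterative-x-2n-emr"  # hypothetical
    # "%2n*" drops the shortest suffix starting at the last "2n";
    # "#" then strips that prefix, leaving TESTBED="2n-emr".
    TESTBED="${TEST_CODE#${TEST_CODE%2n*}}"
    # Remove "-<flavor>" from the testbed name, leaving NODENESS="2n".
    NODENESS="${TESTBED%-${TEST_CODE#*-x-2n*-}}"
    # Everything after the "-x-2n...-" marker is the flavor, here "emr".
    FLAVOR="${TEST_CODE#*-x-2n*-}"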
@@ -878,6 +867,11 @@ function select_arch_os () {
VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_JAMMY"
PKG_SUFFIX="deb"
;;
+ *"LTS (Noble Numbat)"*)
+ IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU_NOBLE"
+ VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_NOBLE"
+ PKG_SUFFIX="deb"
+ ;;
*)
die "Unsupported Ubuntu version!"
;;
@@ -952,9 +946,6 @@ function select_tags () {
*"3n-icxd")
default_nic="nic_intel-e823c"
;;
- *"3n-tsh")
- default_nic="nic_intel-x520-da2"
- ;;
*"3n-icx" | *"2n-icx")
default_nic="nic_intel-e810cq"
;;
@@ -967,12 +958,15 @@ function select_tags () {
*"2n-spr")
default_nic="nic_intel-e810cq"
;;
- *"2n-clx" | *"2n-zn2")
+ *"2n-zn2")
default_nic="nic_intel-xxv710"
;;
- *"2n-tx2" | *"3n-alt")
+ *"3n-alt")
default_nic="nic_intel-xl710"
;;
+ *"2n-grc")
+ default_nic="nic_mellanox-cx7veat"
+ ;;
*"1n-aws" | *"2n-aws" | *"3n-aws")
default_nic="nic_amazon-nitro-50g"
;;
@@ -982,7 +976,7 @@ function select_tags () {
*"1n-c6in" | *"2n-c6in" | *"3n-c6in")
default_nic="nic_amazon-nitro-200g"
;;
- *"2n-x-"* | *"3n-x-"*)
+ *"-x-2n"* | *"-x-3n"*)
default_nic="nic_intel-e810cq"
;;
*)
@@ -995,7 +989,6 @@ function select_tags () {
awk_nics_sub_cmd+='gsub("xxv710","25ge2p1xxv710");'
awk_nics_sub_cmd+='gsub("x710","10ge2p1x710");'
awk_nics_sub_cmd+='gsub("xl710","40ge2p1xl710");'
- awk_nics_sub_cmd+='gsub("x520-da2","10ge2p1x520");'
awk_nics_sub_cmd+='gsub("cx556a","100ge2p1cx556a");'
awk_nics_sub_cmd+='gsub("2p1cx7veat","200ge2p1cx7veat");'
awk_nics_sub_cmd+='gsub("6p3cx7veat","200ge6p3cx7veat");'
@@ -1106,23 +1099,20 @@ function select_tags () {
*"1n-alt")
test_tag_array+=("!flow")
;;
- *"2n-clx")
- test_tag_array+=("!ipsechw")
- ;;
*"2n-icx")
test_tag_array+=("!ipsechw")
;;
*"2n-spr")
;;
- *"2n-tx2")
- test_tag_array+=("!ipsechw")
- ;;
*"2n-zn2")
test_tag_array+=("!ipsechw")
;;
*"3n-alt")
test_tag_array+=("!ipsechw")
;;
+ *"2n-grc")
+ test_tag_array+=("!ipsechw")
+ ;;
*"3n-icx")
test_tag_array+=("!ipsechw")
test_tag_array+=("!3_node_double_link_topoANDnic_intel-xxv710")
@@ -1135,10 +1125,6 @@ function select_tags () {
;;
*"3nb-spr")
;;
- *"3n-tsh")
- test_tag_array+=("!drv_avf")
- test_tag_array+=("!ipsechw")
- ;;
*"1n-aws" | *"2n-aws" | *"3n-aws")
test_tag_array+=("!ipsechw")
;;
@@ -1148,7 +1134,7 @@ function select_tags () {
*"1n-c6in" | *"2n-c6in" | *"3n-c6in")
test_tag_array+=("!ipsechw")
;;
- *"2n-x-"* | *"3n-x-"*)
+ *"-x-2n"* | *"-x-3n"*)
;;
esac
@@ -1210,103 +1196,94 @@ function select_topology () {
set -exuo pipefail
- case_text="${NODENESS}_${FLAVOR}"
- case "${case_text}" in
- "1n_aws")
+ case "${TEST_CODE}" in
+ *"1n-aws")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*1n-aws*.yaml )
TOPOLOGIES_TAGS="1_node_single_link_topo"
;;
- "1n_c6in")
+ *"1n-c6in")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*1n-c6in*.yaml )
TOPOLOGIES_TAGS="1_node_single_link_topo"
;;
- "1n_alt" | "1n_spr")
+ *"1n-alt" | *"1n-spr")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "1n_vbox")
+ *"1n-vbox")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*vpp_device*.template )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "2n_aws")
+ *"2n-aws")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n-aws*.yaml )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "2n_c7gn")
+ *"2n-c7gn")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n-c7gn*.yaml )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "2n_c6in")
+ *"2n-c6in")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n-c6in*.yaml )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "2n_clx")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_clx_*.yaml )
- TOPOLOGIES_TAGS="2_node_*_link_topo"
- ;;
- "2n_icx")
+ *"2n-icx")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_icx_*.yaml )
TOPOLOGIES_TAGS="2_node_*_link_topo"
;;
- "2n_spr")
+ *"2n-spr")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_spr_*.yaml )
TOPOLOGIES_TAGS="2_node_*_link_topo"
;;
- "2n_tx2")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_tx2_*.yaml )
- TOPOLOGIES_TAGS="2_node_single_link_topo"
- ;;
- "2n_zn2")
+ *"2n-zn2")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_zn2_*.yaml )
TOPOLOGIES_TAGS="2_node_*_link_topo"
;;
- "3n_alt")
+ *"3n-alt")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_alt_*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
- "3n_aws")
+ *"2n-grc")
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_grc_*.yaml )
+ TOPOLOGIES_TAGS="2_node_single_link_topo"
+ ;;
+ *"3n-aws")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n-aws*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
- "3n_c7gn")
+ *"3n-c7gn")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n-c7gn*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
- "3n_c6in")
+ *"3n-c6in")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n-c6in*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
- "3n_icx")
+ *"3n-icx")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_icx_*.yaml )
# Trailing underscore is needed to distinguish from 3n_icxd.
TOPOLOGIES_TAGS="3_node_*_link_topo"
;;
- "3n_icxd")
+ *"3n-icxd")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_icxd_*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
- "3n_snr")
+ *"3n-snr")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_snr_*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
- "3n_tsh")
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_tsh_*.yaml )
- TOPOLOGIES_TAGS="3_node_single_link_topo"
- ;;
- "3na_spr")
+ *"3na-spr")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3na_spr_*.yaml )
TOPOLOGIES_TAGS="3_node_*_link_topo"
;;
- "3nb_spr")
+ *"3nb-spr")
TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3nb_spr_*.yaml )
TOPOLOGIES_TAGS="3_node_*_link_topo"
;;
- "2n_x"*)
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*2n_"${FLAVOR}"*.yaml )
+ *"-x-2n"*)
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*_x_"${NODENESS}_${FLAVOR}"*.yaml )
TOPOLOGIES_TAGS="2_node_single_link_topo"
;;
- "3n_x"*)
- TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*3n_"${FLAVOR}"*.yaml )
+ *"-x-3n"*)
+ TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*_x_"${NODENESS}_${FLAVOR}"*.yaml )
TOPOLOGIES_TAGS="3_node_single_link_topo"
;;
*)
@@ -1362,9 +1339,11 @@ function set_environment_variables () {
# Maciek's workaround for Zen2 with lower amount of cores.
export TREX_CORE_COUNT=14
;;
- *"2n-x-"* | *"3n-x-"* )
- export TREX_CORE_COUNT=6
+ *"-x-2n"* | *"-x-3n"* )
+ export TREX_CORE_COUNT=14
export TREX_PORT_MTU=9000
+ # Settings to prevent duration stretching.
+ export PERF_TRIAL_STL_DELAY=0.12
;;
esac
}
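
With NODENESS and FLAVOR parsed as above, select_topology globs the external-testbed topology file directly. A sketch with the hypothetical values from the previous example (the directory and file name are assumptions, not actual files):

    TOPOLOGIES_DIR="${CSIT_DIR}/topologies/available"  # assumed location
    NODENESS="2n"; FLAVOR="emr"                        # hypothetical values
    # Expands to e.g. .../available/<lab>_x_2n_emr*.yaml, if such a file exists.
    TOPOLOGIES=( "${TOPOLOGIES_DIR}"/*_x_"${NODENESS}_${FLAVOR}"*.yaml )
    TOPOLOGIES_TAGS="2_node_single_link_topo"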
diff --git a/resources/libraries/bash/function/device.sh b/resources/libraries/bash/function/device.sh
index a0ba5a67a9..7a6f5aaa13 100644
--- a/resources/libraries/bash/function/device.sh
+++ b/resources/libraries/bash/function/device.sh
@@ -332,8 +332,8 @@ function get_available_interfaces () {
# Add Intel Corporation E810 Virtual Function to the
# whitelist.
pci_id="0x154c\|0x1889"
- tg_netdev=(enp42s0 ens5)
- dut1_netdev=(enp61s0 ens7)
+ tg_netdev=(ens2 ens5)
+ dut1_netdev=(ens4 ens7)
ports_per_nic=2
;;
"1n_vbox")
diff --git a/resources/libraries/bash/function/gather.sh b/resources/libraries/bash/function/gather.sh
index e432777e32..a5820a5a61 100644
--- a/resources/libraries/bash/function/gather.sh
+++ b/resources/libraries/bash/function/gather.sh
@@ -1,5 +1,5 @@
-# Copyright (c) 2023 Cisco and/or its affiliates.
-# Copyright (c) 2023 PANTHEON.tech and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Copyright (c) 2024 PANTHEON.tech and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -19,14 +19,12 @@ set -exuo pipefail
# Keep functions ordered alphabetically, please.
-# TODO: Add a link to bash style guide.
-
-
function gather_build () {
# Variables read:
# - TEST_CODE - String affecting test selection, usually jenkins job name.
# - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
+ # - BASH_FUNCTION_DIR = Path to Bash script directory.
# Variables set:
# - DUT - CSIT test/ subdirectory containing suites to execute.
# Directories updated:
@@ -38,27 +36,23 @@ function gather_build () {
# Multiple other side effects are possible,
# see functions called from here for their current description.
- # TODO: Separate DUT-from-TEST_CODE from gather-for-DUT,
- # when the first one becomes relevant for per_patch.
-
set -exuo pipefail
pushd "${DOWNLOAD_DIR}" || die "Pushd failed."
case "${TEST_CODE}" in
- *"hc2vpp"*)
- DUT="hc2vpp"
- # FIXME: Avoid failing on empty ${DOWNLOAD_DIR}.
- ;;
*"vpp"*)
DUT="vpp"
+ source "${BASH_FUNCTION_DIR}/gather_${DUT}.sh" || die "Source fail."
gather_vpp || die "The function should have died on error."
;;
*"dpdk"*)
DUT="dpdk"
+ source "${BASH_FUNCTION_DIR}/gather_${DUT}.sh" || die "Source fail."
gather_dpdk || die "The function should have died on error."
;;
*"trex"*)
DUT="trex"
+ source "${BASH_FUNCTION_DIR}/gather_${DUT}.sh" || die "Source fail."
gather_trex || die "The function should have died on error."
;;
*)
@@ -67,121 +61,3 @@ function gather_build () {
esac
popd || die "Popd failed."
}
-
-
-function gather_dpdk () {
-
- # Ensure latest DPDK archive is downloaded.
- #
- # Variables read:
- # - TEST_CODE - The test selection string from environment or argument.
- # Hardcoded:
- # - dpdk archive name to download if TEST_CODE is not time based.
- # Directories updated:
- # - ./ - Assumed ${DOWNLOAD_DIR}, dpdk-*.tar.xz is downloaded if not there.
- # Functions called:
- # - die - Print to stderr and exit, defined in common.sh
-
- set -exuo pipefail
-
- dpdk_repo="https://fast.dpdk.org/rel"
- # Use downloaded packages with specific version
- if [[ "${TEST_CODE}" == *"daily"* ]] || \
- [[ "${TEST_CODE}" == *"weekly"* ]] || \
- [[ "${TEST_CODE}" == *"timed"* ]];
- then
- echo "Downloading latest DPDK packages from repo..."
- # URL is not in quotes, calling command from variable keeps them.
- wget_command=("wget" "--no-check-certificate" "--compression=auto")
- wget_command+=("-nv" "-O" "-")
- wget_command+=("${dpdk_repo}")
- dpdk_stable_ver="$("${wget_command[@]}" | grep -v "2015"\
- | grep -Eo 'dpdk-[^\"]+xz' | tail -1)" || {
- die "Composite piped command failed."
- }
- else
- echo "Downloading DPDK package of specific version from repo ..."
- # Downloading DPDK version based on what VPP is using. Currently
- # it is not easy way to detect from VPP version automatically.
- dpdk_stable_ver="$(< "${CSIT_DIR}/DPDK_VPP_VER")".tar.xz || {
- die "Failed to read DPDK VPP version!"
- }
- fi
- # TODO: Use "wget -N" instead checking for file presence?
- if [[ ! -f "${dpdk_stable_ver}" ]]; then
- wget -nv --no-check-certificate "${dpdk_repo}/${dpdk_stable_ver}" || {
- die "Failed to get DPDK package from: ${dpdk_repo}"
- }
- fi
-}
-
-function gather_trex () {
-
- # This function is required to bypass download dir check.
- # Currently it creates empty file in download dir.
- # TODO: Add required packages
-
- set -exuo pipefail
-
- touch trex-download-to-be-added.txt
-}
-
-function gather_vpp () {
-
- # Variables read:
- # - BASH_FUNCTION_DIR - Bash directory with functions.
- # - TEST_CODE - The test selection string from environment or argument.
- # - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
- # - CSIT_DIR - Path to existing root of local CSIT git repository.
- # Variables set:
- # - VPP_VERSION - VPP stable version under test.
- # Files read:
- # - ${CSIT_DIR}/DPDK_STABLE_VER - DPDK version to use
- # by csit-vpp not-timed jobs.
- # - ${CSIT_DIR}/${VPP_VER_FILE} - Ubuntu VPP version to use.
- # - ../*vpp*.deb|rpm - Relative to ${DOWNLOAD_DIR},
- # copied for vpp-csit jobs.
- # Directories updated:
- # - ${DOWNLOAD_DIR}, vpp-*.deb files are copied here for vpp-csit jobs.
- # - ./ - Assumed ${DOWNLOAD_DIR}, *vpp*.deb|rpm files
- # are downloaded here for csit-vpp.
- # Functions called:
- # - die - Print to stderr and exit, defined in common_functions.sh
- # Bash scripts executed:
- # - ${CSIT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh
- # - Should download and extract requested files to ./.
-
- set -exuo pipefail
-
- case "${TEST_CODE}" in
- "csit-"*)
- # Use downloaded packages with specific version.
- if [[ "${TEST_CODE}" == *"daily"* ]] || \
- { [[ "${TEST_CODE}" == *"weekly"* ]] && \
- [[ "${TEST_CODE}" != *"device"* ]]; } || \
- [[ "${TEST_CODE}" == *"semiweekly"* ]] || \
- [[ "${TEST_CODE}" == *"hourly"* ]];
- then
- warn "Downloading latest VPP packages from Packagecloud."
- else
- warn "Downloading stable VPP packages from Packagecloud."
- VPP_VERSION="$(<"${CSIT_DIR}/${VPP_VER_FILE}")" || {
- die "Read VPP stable version failed."
- }
- fi
- source "${BASH_FUNCTION_DIR}/artifacts.sh" || die "Source failed."
- download_artifacts || die
- ;;
- "vpp-csit-"*)
- # Shorten line.
- pkgs="${PKG_SUFFIX}"
- # Use locally built packages.
- mv "${DOWNLOAD_DIR}"/../*vpp*."${pkgs}" "${DOWNLOAD_DIR}"/ || {
- die "Move command failed."
- }
- ;;
- *)
- die "Unable to identify job type from: ${TEST_CODE}"
- ;;
- esac
-}
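
gather_build now dispatches to a per-DUT script instead of defining all three gather functions itself; the removed bodies move to the new gather_dpdk.sh, gather_trex.sh and gather_vpp.sh files below. A minimal sketch of the new flow for a VPP job, assuming common.sh (for die) is already sourced and the usual variables are exported:

    pushd "${DOWNLOAD_DIR}" || die "Pushd failed."
    DUT="vpp"
    # Source only the gather script this job actually needs.
    source "${BASH_FUNCTION_DIR}/gather_${DUT}.sh" || die "Source fail."
    gather_vpp || die "The function should have died on error."
    popd || die "Popd failed."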
diff --git a/resources/libraries/bash/function/gather_dpdk.sh b/resources/libraries/bash/function/gather_dpdk.sh
new file mode 100644
index 0000000000..da3fa930e6
--- /dev/null
+++ b/resources/libraries/bash/function/gather_dpdk.sh
@@ -0,0 +1,66 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Copyright (c) 2024 PANTHEON.tech and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+# This library defines functions used mainly by "bootstrap" entry scripts.
+# Generally, the functions assume "common.sh" library has been sourced already.
+
+# Keep functions ordered alphabetically, please.
+
+function gather_dpdk () {
+
+ # Ensure latest DPDK archive is downloaded.
+ #
+ # Variables read:
+ # - TEST_CODE - The test selection string from environment or argument.
+ # Hardcoded:
+ # - dpdk archive name to download if TEST_CODE is not time based.
+ # Directories updated:
+ # - ./ - Assumed ${DOWNLOAD_DIR}, dpdk-*.tar.xz is downloaded if not there.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common.sh
+
+ set -exuo pipefail
+
+ dpdk_repo="https://fast.dpdk.org/rel"
+ # Use downloaded packages with specific version
+ if [[ "${TEST_CODE}" == *"daily"* ]] || \
+ [[ "${TEST_CODE}" == *"weekly"* ]] || \
+ [[ "${TEST_CODE}" == *"timed"* ]];
+ then
+ echo "Downloading latest DPDK packages from repo..."
+ # URL is not in quotes, calling command from variable keeps them.
+ wget_command=("wget" "--no-check-certificate" "--compression=auto")
+ wget_command+=("-nv" "-O" "-")
+ wget_command+=("${dpdk_repo}")
+ dpdk_stable_ver="$("${wget_command[@]}" | grep -v "2015"\
+ | grep -Eo 'dpdk-[^\"]+xz' | tail -1)" || {
+ die "Composite piped command failed."
+ }
+ else
+ echo "Downloading DPDK package of specific version from repo ..."
+ # Downloading DPDK version based on what VPP is using. Currently
+ # it is not easy way to detect from VPP version automatically.
+ dpdk_stable_ver="$(< "${CSIT_DIR}/DPDK_VPP_VER")".tar.xz || {
+ die "Failed to read DPDK VPP version!"
+ }
+ fi
+ # TODO: Use "wget -N" instead checking for file presence?
+ if [[ ! -f "${dpdk_stable_ver}" ]]; then
+ wget -nv --no-check-certificate "${dpdk_repo}/${dpdk_stable_ver}" || {
+ die "Failed to get DPDK package from: ${dpdk_repo}"
+ }
+ fi
+}
diff --git a/resources/libraries/bash/function/gather_trex.sh b/resources/libraries/bash/function/gather_trex.sh
new file mode 100644
index 0000000000..99591affc3
--- /dev/null
+++ b/resources/libraries/bash/function/gather_trex.sh
@@ -0,0 +1,30 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Copyright (c) 2024 PANTHEON.tech and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+# This library defines functions used mainly by "bootstrap" entry scripts.
+# Generally, the functions assume "common.sh" library has been sourced already.
+
+# Keep functions ordered alphabetically, please.
+
+function gather_trex () {
+
+ # This function is required to bypass download dir check.
+ # Currently it creates empty file in download dir.
+
+ set -exuo pipefail
+
+ touch trex-download-to-be-added.txt
+}
diff --git a/resources/libraries/bash/function/gather_vpp.sh b/resources/libraries/bash/function/gather_vpp.sh
new file mode 100644
index 0000000000..6ed4aa981c
--- /dev/null
+++ b/resources/libraries/bash/function/gather_vpp.sh
@@ -0,0 +1,80 @@
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Copyright (c) 2024 PANTHEON.tech and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -exuo pipefail
+
+# This library defines functions used mainly by "bootstrap" entry scripts.
+# Generally, the functions assume "common.sh" library has been sourced already.
+
+# Keep functions ordered alphabetically, please.
+
+function gather_vpp () {
+
+ # Variables read:
+ # - BASH_FUNCTION_DIR - Bash directory with functions.
+ # - TEST_CODE - The test selection string from environment or argument.
+ # - DOWNLOAD_DIR - Path to directory robot takes the build to test from.
+ # - CSIT_DIR - Path to existing root of local CSIT git repository.
+ # Variables set:
+ # - VPP_VERSION - VPP stable version under test.
+ # Files read:
+ # - ${CSIT_DIR}/DPDK_STABLE_VER - DPDK version to use
+ # by csit-vpp not-timed jobs.
+ # - ${CSIT_DIR}/${VPP_VER_FILE} - Ubuntu VPP version to use.
+ # - ../*vpp*.deb|rpm - Relative to ${DOWNLOAD_DIR},
+ # copied for vpp-csit jobs.
+ # Directories updated:
+ # - ${DOWNLOAD_DIR}, vpp-*.deb files are copied here for vpp-csit jobs.
+ # - ./ - Assumed ${DOWNLOAD_DIR}, *vpp*.deb|rpm files
+ # are downloaded here for csit-vpp.
+ # Functions called:
+ # - die - Print to stderr and exit, defined in common_functions.sh
+ # Bash scripts executed:
+ # - ${CSIT_DIR}/resources/tools/scripts/download_install_vpp_pkgs.sh
+ # - Should download and extract requested files to ./.
+
+ set -exuo pipefail
+
+ case "${TEST_CODE}" in
+ "csit-"*)
+ # Use downloaded packages with specific version.
+ if [[ "${TEST_CODE}" == *"daily"* ]] || \
+ { [[ "${TEST_CODE}" == *"weekly"* ]] && \
+ [[ "${TEST_CODE}" != *"device"* ]]; } || \
+ [[ "${TEST_CODE}" == *"semiweekly"* ]] || \
+ [[ "${TEST_CODE}" == *"hourly"* ]];
+ then
+ warn "Downloading latest VPP packages from Packagecloud."
+ else
+ warn "Downloading stable VPP packages from Packagecloud."
+ VPP_VERSION="$(<"${CSIT_DIR}/${VPP_VER_FILE}")" || {
+ die "Read VPP stable version failed."
+ }
+ fi
+ source "${BASH_FUNCTION_DIR}/artifacts.sh" || die "Source failed."
+ download_artifacts || die
+ ;;
+ "vpp-csit-"*)
+ # Shorten line.
+ pkgs="${PKG_SUFFIX}"
+ # Use locally built packages.
+ mv "${DOWNLOAD_DIR}"/../*vpp*."${pkgs}" "${DOWNLOAD_DIR}"/ || {
+ die "Move command failed."
+ }
+ ;;
+ *)
+ die "Unable to identify job type from: ${TEST_CODE}"
+ ;;
+ esac
+}
diff --git a/resources/libraries/bash/function/hugo.sh b/resources/libraries/bash/function/hugo.sh
index 052e8333fb..4d0e3eccc7 100644
--- a/resources/libraries/bash/function/hugo.sh
+++ b/resources/libraries/bash/function/hugo.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -26,12 +26,12 @@ function go_install () {
aarch64) architecture="arm64" ;;
esac
- go_version="go1.20.2.linux-${architecture}.tar.gz"
+ go_version="go1.22.5.linux-${architecture}.tar.gz"
go_url="https://go.dev/dl"
wget "${go_url}/${go_version}"
- rm -rf "/usr/local/go"
- tar -C "/usr/local" -xzf "go1.20.2.linux-${architecture}.tar.gz"
- rm "go1.20.2.linux-${architecture}.tar.gz"
+ sudo rm -rf "/usr/local/go"
+ sudo tar -C "/usr/local" -xzf "go1.22.5.linux-${architecture}.tar.gz"
+ rm "go1.22.5.linux-${architecture}.tar.gz"
export PATH=$PATH:/usr/local/go/bin
}
@@ -45,8 +45,12 @@ function hugo_build_site () {
# Functions called:
# - die - Print to stderr and exit.
+ if ! install go; then
+ go_install || die "Please install Go!"
+ fi
+
if ! installed hugo; then
- die "Please install Hugo!"
+ hugo_install || die "Please install Hugo!"
fi
pushd "${CSIT_DIR}"/docs || die "Pushd failed!"
@@ -64,8 +68,12 @@ function hugo_init_modules () {
# Functions called:
# - die - Print to stderr and exit.
+ if ! install go; then
+ go_install || die "Please install Go!"
+ fi
+
if ! installed hugo; then
- die "Please install Hugo!"
+ hugo_install || die "Please install Hugo!"
fi
hugo_book_url="github.com/alex-shpak/hugo-book"
@@ -92,7 +100,7 @@ function hugo_install () {
hugo_url="https://github.com/gohugoio/hugo/releases/download"
hugo_link="${hugo_url}/${hugo_version}"
wget -O "hugo.deb" "${hugo_link}" || die "Failed to install Hugo!"
- dpkg -i "hugo.deb" || die "Failed to install Hugo!"
+ sudo dpkg -i "hugo.deb" || die "Failed to install Hugo!"
rm "hugo.deb" || die "Failed to install Hugo!"
}
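
hugo_build_site and hugo_init_modules now install Go and Hugo on demand instead of dying when they are missing. A sketch of the intended guard pattern, assuming the installed helper from common.sh simply wraps command -v:

    function installed () {
        # Assumed helper: succeeds when the utility is on PATH.
        command -v "${1}"
    }

    if ! installed go; then
        go_install || die "Please install Go!"
    fi
    if ! installed hugo; then
        hugo_install || die "Please install Hugo!"
    fi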
diff --git a/resources/libraries/python/Constants.py b/resources/libraries/python/Constants.py
index 7e6d70b6cb..3fabe0141b 100644
--- a/resources/libraries/python/Constants.py
+++ b/resources/libraries/python/Constants.py
@@ -318,7 +318,6 @@ class Constants:
# Mapping from NIC name to its bps limit.
NIC_NAME_TO_BPS_LIMIT = {
- "Intel-X520-DA2": 10000000000,
"Intel-X710": 10000000000,
"Intel-XL710": 24500000000,
"Intel-XXV710": 24500000000,
@@ -337,7 +336,6 @@ class Constants:
# Mapping from NIC name to its pps limit.
NIC_NAME_TO_PPS_LIMIT = {
- "Intel-X520-DA2": 14880952,
"Intel-X710": 14880952,
"Intel-XL710": 18750000,
"Intel-XXV710": 18750000,
@@ -356,7 +354,6 @@ class Constants:
# Suite file names use codes for NICs.
NIC_NAME_TO_CODE = {
- "Intel-X520-DA2": "10ge2p1x520",
"Intel-X710": "10ge2p1x710",
"Intel-XL710": "40ge2p1xl710",
"Intel-XXV710": "25ge2p1xxv710",
@@ -374,7 +371,6 @@ class Constants:
"virtual": "1ge1p82540em",
}
NIC_CODE_TO_NAME = {
- "10ge2p1x520": "Intel-X520-DA2",
"10ge2p1x710": "Intel-X710",
"40ge2p1xl710": "Intel-XL710",
"25ge2p1xxv710": "Intel-XXV710",
@@ -394,7 +390,6 @@ class Constants:
# Shortened lowercase NIC model name, useful for presentation.
NIC_CODE_TO_SHORT_NAME = {
- "10ge2p1x520": "x520",
"10ge2p1x710": "x710",
"40ge2p1xl710": "xl710",
"25ge2p1xxv710": "xxv710",
@@ -414,7 +409,6 @@ class Constants:
# Not each driver is supported by each NIC.
NIC_NAME_TO_DRIVER = {
- "Intel-X520-DA2": ["vfio-pci", "af_xdp"],
"Intel-X710": ["vfio-pci", "avf", "af_xdp"],
"Intel-XL710": ["vfio-pci", "avf", "af_xdp"],
"Intel-XXV710": ["vfio-pci", "avf", "af_xdp"],
@@ -469,7 +463,6 @@ class Constants:
# Number of physical interfaces of physical nic.
NIC_CODE_TO_PFS = {
- "10ge2p1x520": "nic_pfs}= | 2",
"10ge2p1x710": "nic_pfs}= | 2",
"40ge2p1xl710": "nic_pfs}= | 2",
"25ge2p1xxv710": "nic_pfs}= | 2",
@@ -488,7 +481,6 @@ class Constants:
}
NIC_CODE_TO_CORESCALE = {
- "10ge2p1x520": 1,
"10ge2p1x710": 1,
"40ge2p1xl710": 1,
"25ge2p1xxv710": 1,
@@ -508,7 +500,6 @@ class Constants:
# Not each driver is supported by each NIC.
DPDK_NIC_NAME_TO_DRIVER = {
- "Intel-X520-DA2": ["vfio-pci"],
"Intel-X710": ["vfio-pci"],
"Intel-XL710": ["vfio-pci"],
"Intel-XXV710": ["vfio-pci"],
diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py
index ef08317420..7d6d4d8673 100644
--- a/resources/libraries/python/ContainerUtils.py
+++ b/resources/libraries/python/ContainerUtils.py
@@ -939,8 +939,7 @@ class LXC(ContainerEngine):
image = self.container.image if self.container.image \
else f"-d ubuntu -r jammy -a {target_arch}"
- cmd = f"lxc-create -t download --name {self.container.name} " \
- f"-- {image} --no-validate"
+ cmd = f"lxc-create -t download --name {self.container.name} -- {image}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
if int(ret) != 0:
diff --git a/resources/libraries/python/IPsecUtil.py b/resources/libraries/python/IPsecUtil.py
index 1abfee2cec..e93ae28142 100644
--- a/resources/libraries/python/IPsecUtil.py
+++ b/resources/libraries/python/IPsecUtil.py
@@ -348,13 +348,7 @@ class IPsecUtil:
cmd = "crypto_set_async_dispatch_v2"
err_msg = "Failed to set dispatch mode."
args = dict(mode=0, adaptive=False)
- try:
- papi_exec.add(cmd, **args).get_reply(err_msg)
- except (AttributeError, RuntimeError):
- # Expected when VPP build does not have the _v2 yet
- # (after and before the first CRC check).
- # TODO: Fail here when testing of pre-23.10 builds is over.
- pass
+ papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
def vpp_ipsec_crypto_sw_scheduler_set_worker(
diff --git a/resources/libraries/python/PapiExecutor.py b/resources/libraries/python/PapiExecutor.py
index a55638ab7c..e685f87a10 100644
--- a/resources/libraries/python/PapiExecutor.py
+++ b/resources/libraries/python/PapiExecutor.py
@@ -1051,6 +1051,10 @@ class PapiSocketExecutor:
PapiSocketExecutor._drain(vpp_instance, err_msg)
# Process replies for this command.
for reply in replies:
+ if reply is None:
+ raise RuntimeError(
+ f"{err_msg}\nNo reply to sync call. VPP crashed?"
+ )
self.crc_checker.check_api_name(reply.__class__.__name__)
dictized_reply = dictize_and_check_retval(reply, err_msg)
ret_list.append(dictized_reply)
@@ -1061,6 +1065,8 @@ class PapiSocketExecutor:
The messages were already sent by .add() in this mode,
local_list is used just so we know how many replies to read.
+ Similarly to async .add, we do not check connection status here,
+ thus we avoid needless logging.
Beware: It is not clear what to do when socket read fails
in the middle of async processing.
@@ -1084,7 +1090,7 @@ class PapiSocketExecutor:
:rtype: List[UserDict]
:raises RuntimeError: If the replies are not all correct.
"""
- vpp_instance = self.get_connected_client()
+ vpp_instance = self.get_connected_client(check_connected=False)
ret_list = list()
try:
for index, _ in enumerate(local_list):
diff --git a/resources/libraries/python/TrafficGenerator.py b/resources/libraries/python/TrafficGenerator.py
index 936cb3a06d..ae01f8d3a6 100644
--- a/resources/libraries/python/TrafficGenerator.py
+++ b/resources/libraries/python/TrafficGenerator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -284,13 +284,13 @@ class TrafficGenerator(AbstractMeasurer):
def initialize_traffic_generator(self, osi_layer, pfs=2):
"""TG initialization.
- :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
+ :param osi_layer: OSI Layer testing type.
:param pfs: Number of physical interfaces to configure.
:type osi_layer: str
:type pfs: int
:raises ValueError: If OSI layer is unknown.
"""
- if osi_layer not in ("L2", "L3", "L7"):
+ if osi_layer not in ("L2", "L3", "L3_1", "L7"):
raise ValueError("Unknown OSI layer!")
topology = BuiltIn().get_variable_value("&{topology_info}")
@@ -304,7 +304,9 @@ class TrafficGenerator(AbstractMeasurer):
for link in range(1, pfs, 2):
tg_if1_adj_addr = topology[f"TG_pf{link+1}_mac"][0]
tg_if2_adj_addr = topology[f"TG_pf{link}_mac"][0]
- if osi_layer in ("L3", "L7") and "DUT1" in topology.keys():
+ skip = 0 if osi_layer in ("L3_1",) else 1
+ if osi_layer in ("L3", "L3_1", "L7") and "DUT1" \
+ in topology.keys():
ifl = BuiltIn().get_variable_value("${int}")
last = topology["duts_count"]
tg_if1_adj_addr = Topology().get_interface_mac(
@@ -316,7 +318,7 @@ class TrafficGenerator(AbstractMeasurer):
tg_if2_adj_addr = Topology().get_interface_mac(
topology[f"DUT{last}"],
BuiltIn().get_variable_value(
- f"${{DUT{last}_{ifl}{link+1}}}[0]"
+ f"${{DUT{last}_{ifl}{link+skip}}}[0]"
)
)
@@ -365,7 +367,7 @@ class TrafficGenerator(AbstractMeasurer):
"""Startup sequence for the TRex traffic generator.
:param tg_node: Traffic generator node.
- :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
+ :param osi_layer: OSI Layer testing type.
:param subtype: Traffic generator sub-type.
:type tg_node: dict
:type osi_layer: str
@@ -429,7 +431,7 @@ class TrafficGenerator(AbstractMeasurer):
# Test T-Rex API responsiveness.
cmd = f"python3 {Constants.REMOTE_FW_DIR}/GPL/tools/trex/"
- if osi_layer in ("L2", "L3"):
+ if osi_layer in ("L2", "L3", "L3_1"):
cmd += "trex_stl_assert.py"
elif osi_layer == "L7":
cmd += "trex_astf_assert.py"
@@ -524,7 +526,7 @@ class TrafficGenerator(AbstractMeasurer):
command_line.add(f"'{value}'")
stdout, _ = exec_cmd_no_error(
node, command_line,
- message="T-Rex STL runtime error!"
+ message="T-Rex STL runtime error!", include_reason=True
)
self._parse_traffic_results(stdout)
@@ -763,7 +765,7 @@ class TrafficGenerator(AbstractMeasurer):
self._rate = float(rate[:-3]) if "pps" in rate else float(rate)
stdout, _ = exec_cmd_no_error(
self._node, command_line, timeout=int(duration) + 60,
- message="T-Rex STL runtime error"
+ message="T-Rex STL runtime error", include_reason=True
)
if async_call:
diff --git a/resources/libraries/robot/l2/l2_bridge_domain.robot b/resources/libraries/robot/l2/l2_bridge_domain.robot
index 00044e1253..347dd708a5 100644
--- a/resources/libraries/robot/l2/l2_bridge_domain.robot
+++ b/resources/libraries/robot/l2/l2_bridge_domain.robot
@@ -1,5 +1,5 @@
-# Copyright (c) 2023 Cisco and/or its affiliates.
-# Copyright (c) 2023 PANTHEON.tech and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
+# Copyright (c) 2024 PANTHEON.tech and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -458,83 +458,6 @@
| | ... | Add interface to bridge domain | ${dut2} | ${DUT2_${int}2}[0]
| | ... | ${bd_id2}
-| Initialize L2 bridge domains with Vhost-User and VLAN with VPP link bonding in a 3-node circular topology
-| | [Documentation]
-| | ... | Create two Vhost-User interfaces on all defined VPP nodes. Create one
-| | ... | link bonding (BondEthernet) interface on both VPP nodes. Add one
-| | ... | physical interface towards next DUT as a member of BondEthernet
-| | ... | interface. Setup VLAN on BondEthernet interfaces between DUTs. Add one
-| | ... | Vhost-User interface into L2 bridge domains with learning enabled with
-| | ... | physical interface towards TG and other Vhost-User interface into L2
-| | ... | bridge domains with learning enabled with VLAN sub-interface. All
-| | ... | interfaces are brought up.
-| |
-| | ... | *Arguments:*
-| | ... | - bd_id1 - Bridge domain ID. Type: integer
-| | ... | - bd_id2 - Bridge domain ID. Type: integer
-| | ... | - subid - ID of the sub-interface to be created. Type: string
-| | ... | - tag_rewrite - Method of tag rewrite. Type: string
-| | ... | - bond_mode - Link bonding mode. Type: string
-| | ... | - lb_mode - Load balance mode. Type: string
-| | ... | - virtio_feature_mask - Enabled Virtio features (Optional).
-| | ... | Type: integer
-| |
-| | ... | *Example:*
-| |
-| | ... | \| Initialize L2 bridge domains with Vhost-User and VLAN with VPP\
-| | ... | link bonding in a 3-node circular topology \| 1 \| 2 \
-| | ... | \| 10 \| pop-1 \| xor \| l34 \|
-| |
-| | [Arguments] | ${bd_id1} | ${bd_id2} | ${subid} | ${tag_rewrite}
-| | ... | ${bond_mode} | ${lb_mode} | ${virtio_feature_mask}=${None}
-| |
-| | Set interfaces in path up
-| | ${dut1_eth_bond_if1}= | VPP Create Bond Interface
-| | ... | ${dut1} | ${bond_mode} | ${lb_mode}
-| | FOR | ${pf} | IN RANGE | 1 | ${nic_pfs} + 1
-| | | ${_even}= | Evaluate | ${pf} % 2
-| | | Run Keyword If | not ${even}
-| | | ... | VPP Add Bond Member
-| | | ... | ${dut1} | ${DUT1_${int}${pf}}[0] | ${dut1_eth_bond_if1}
-| | END
-| | ${dut2_eth_bond_if1}= | VPP Create Bond Interface
-| | ... | ${dut2} | ${bond_mode} | ${lb_mode}
-| | FOR | ${pf} | IN RANGE | 1 | ${nic_pfs} + 1
-| | | ${_even}= | Evaluate | ${pf} % 2
-| | | Run Keyword If | ${even}
-| | | ... | VPP Add Bond Member
-| | | ... | ${dut2} | ${DUT2_${int}${pf}}[0] | ${dut2_eth_bond_if1}
-| | END
-| | VPP Show Bond Data On All Nodes | ${nodes} | verbose=${TRUE}
-| | Initialize VLAN dot1q sub-interfaces in circular topology
-| | ... | ${dut1} | ${dut1_eth_bond_if1}
-| | ... | ${dut2} | ${dut2_eth_bond_if1} | ${subid}
-| | Configure L2 tag rewrite method on interfaces
-| | ... | ${dut1} | ${subif_index_1}
-| | ... | ${dut2} | ${subif_index_2} | ${tag_rewrite}
-| | Configure vhost interfaces | ${dut1}
-| | ... | /var/run/vpp/sock-1-${bd_id1} | /var/run/vpp/sock-1-${bd_id2}
-| | ... | virtio_feature_mask=${virtio_feature_mask}
-| | Add interface to bridge domain
-| | ... | ${dut1} | ${DUT1_${int}1}[0] | ${bd_id1}
-| | Add interface to bridge domain
-| | ... | ${dut1} | ${vhost_if1} | ${bd_id1}
-| | Add interface to bridge domain
-| | ... | ${dut1} | ${vhost_if2} | ${bd_id2}
-| | Add interface to bridge domain
-| | ... | ${dut1} | ${subif_index_1} | ${bd_id2}
-| | Configure vhost interfaces | ${dut2}
-| | ... | /var/run/vpp/sock-1-${bd_id1} | /var/run/vpp/sock-1-${bd_id2}
-| | ... | virtio_feature_mask=${virtio_feature_mask}
-| | Add interface to bridge domain
-| | ... | ${dut2} | ${subif_index_2} | ${bd_id1}
-| | Add interface to bridge domain
-| | ... | ${dut2} | ${vhost_if1} | ${bd_id1}
-| | Add interface to bridge domain
-| | ... | ${dut2} | ${vhost_if2} | ${bd_id2}
-| | Add interface to bridge domain
-| | ... | ${dut2} | ${DUT2_${int}2}[0] | ${bd_id2}
-
| Initialize L2 Bridge Domain with memif pairs on DUT node
| | [Documentation]
| | ... | Create pairs of Memif interfaces on DUT node. Put each Memif interface
diff --git a/resources/libraries/robot/l2/l2_xconnect.robot b/resources/libraries/robot/l2/l2_xconnect.robot
index b782602827..604d3eba9b 100644
--- a/resources/libraries/robot/l2/l2_xconnect.robot
+++ b/resources/libraries/robot/l2/l2_xconnect.robot
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -214,7 +214,7 @@
| | Run Keyword If | '${dut2_status}' == 'PASS'
| | ... | Configure L2XC | ${dut2} | ${DUT2_${int}2}[0] | ${vhost_if2}
-| Initialize L2 xconnect with Vhost-User and VLAN with VPP link bonding in 3-node circular topology
+| Initialize L2 xconnect with link bonding in 3-node circular topology
| | [Documentation]
| | ... | Create two Vhost-User interfaces on all defined VPP nodes. Create one
| | ... | link bonding (BondEthernet) interface on both VPP nodes. Add one
@@ -224,60 +224,38 @@
| | ... | other Vhost interface with VLAN sub-interface. All interfaces are
| | ... | brought up.
| |
-| | ... | *Arguments:*
-| | ... | - subid - ID of the sub-interface to be created. Type: string
-| | ... | - tag_rewrite - Method of tag rewrite. Type: string
+| | ... | *Required arguments read from test variables:*
| | ... | - bond_mode - Link bonding mode. Type: string
| | ... | - lb_mode - Load balance mode. Type: string
-| | ... | - virtio_feature_mask - Enabled Virtio features (Optional).
+| | ... | *Optional argument read from test variable:*
+| | ... | - dut_dut_links - Number of parallel DUT1-DUT2 links. Type: int
| | ... | Type: integer
| |
| | ... | *Example:*
| |
-| | ... | \| Initialize L2 xconnect with Vhost-User and VLAN with VPP link\
-| | ... | bonding in 3-node circular topology \| 10 \| pop-1 \| xor \| l34 \|
+| | ... | \| Initialize L2 xconnect with link bonding in 3-node circular topology
| |
-| | [Arguments] | ${subid} | ${tag_rewrite} | ${bond_mode} | ${lb_mode}
-| | ... | ${virtio_feature_mask}=${None}
+| | ${dut_dut_links}= | Get Variable Value | \${dut_dut_links} | ${1}
| |
-| | Set interfaces in path up
| | ${dut1_eth_bond_if1}= | VPP Create Bond Interface
| | ... | ${dut1} | ${bond_mode} | ${lb_mode}
-| | FOR | ${pf} | IN RANGE | 1 | ${nic_pfs} + 1
-| | | ${_even}= | Evaluate | ${pf} % 2
-| | | Run Keyword If | not ${even}
+| | FOR | ${pf} | IN RANGE | 1 | ${dut_dut_links} * 2 + 1
+| | | Run Keyword If | not ${pf} % 2
| | | ... | VPP Add Bond Member
| | | ... | ${dut1} | ${DUT1_${int}${pf}}[0] | ${dut1_eth_bond_if1}
| | END
| | ${dut2_eth_bond_if1}= | VPP Create Bond Interface
| | ... | ${dut2} | ${bond_mode} | ${lb_mode}
-| | FOR | ${pf} | IN RANGE | 1 | ${nic_pfs} + 1
-| | | ${_even}= | Evaluate | ${pf} % 2
-| | | Run Keyword If | ${even}
+| | FOR | ${pf} | IN RANGE | 1 | ${dut_dut_links} * 2 + 1
+| | | Run Keyword If | ${pf} % 2
| | | ... | VPP Add Bond Member
| | | ... | ${dut2} | ${DUT2_${int}${pf}}[0] | ${dut2_eth_bond_if1}
| | END
| | VPP Show Bond Data On All Nodes | ${nodes} | verbose=${TRUE}
-| | Initialize VLAN dot1q sub-interfaces in circular topology
-| | ... | ${dut1} | ${dut1_eth_bond_if1}
-| | ... | ${dut2} | ${dut2_eth_bond_if1} | ${subid}
-| | Configure L2 tag rewrite method on interfaces
-| | ... | ${dut1} | ${subif_index_1}
-| | ... | ${dut2} | ${subif_index_2} | ${tag_rewrite}
-| | Configure vhost interfaces
-| | ... | ${dut1} | /run/vpp/sock-1-1 | /run/vpp/sock-1-2
-| | ... | virtio_feature_mask=${virtio_feature_mask}
-| | Configure L2XC
-| | ... | ${dut1} | ${DUT1_${int}1}[0] | ${vhost_if1}
-| | Configure L2XC
-| | ... | ${dut1} | ${subif_index_1} | ${vhost_if2}
-| | Configure vhost interfaces
-| | ... | ${dut2} | /run/vpp/sock-1-1 | /run/vpp/sock-1-2
-| | ... | virtio_feature_mask=${virtio_feature_mask}
| | Configure L2XC
-| | ... | ${dut2} | ${subif_index_2} | ${vhost_if1}
+| | ... | ${dut1} | ${DUT1_${int}1}[0] | ${dut1_eth_bond_if1}
| | Configure L2XC
-| | ... | ${dut2} | ${DUT2_${int}2}[0] | ${vhost_if2}
+| | ... | ${dut2} | ${DUT2_${int}2}[0] | ${dut2_eth_bond_if1}
| Initialize L2 xconnect with memif pairs on DUT node
| | [Documentation]
diff --git a/resources/libraries/robot/performance/performance_actions.robot b/resources/libraries/robot/performance/performance_actions.robot
index 1d2ea52274..d588bc9f83 100644
--- a/resources/libraries/robot/performance/performance_actions.robot
+++ b/resources/libraries/robot/performance/performance_actions.robot
@@ -61,6 +61,38 @@
| | Sleep | 1s
| | Stop traffic on tg
+| Additional Statistics Action For infra-warmup
+| | [Documentation]
+| | ... | Additional Statistics Action for infra warmup.
+| |
+| | ... | See documentation of the called keyword for required test variables.
+| |
+| | ${ppta} = | Get Packets Per Transaction Aggregated
+| | ${ramp_up_duration} = | Get Ramp Up Duration
+| | ${ramp_up_rate} = | Get Ramp Up Rate
+| | ${runtime_duration} = | Get Runtime Duration
+| | ${runtime_rate} = | Get Runtime Rate
+| | ${traffic_directions} = | Get Traffic Directions
+| | ${transaction_duration} = | Get Transaction Duration
+| | ${transaction_scale} = | Get Transaction Scale
+| | ${transaction_type} = | Get Transaction Type
+| | ${use_latency} = | Get Use Latency
+| | Send traffic on tg
+| | ... | duration=${5}
+| | ... | rate=${253}
+| | ... | frame_size=${frame_size}
+| | ... | traffic_profile=${traffic_profile}
+| | ... | async_call=${False}
+| | ... | ppta=${ppta}
+| | ... | use_latency=${False}
+| | ... | traffic_directions=${traffic_directions}
+| | ... | transaction_duration=${transaction_duration}
+| | ... | transaction_scale=${transaction_scale}
+| | ... | transaction_type=${transaction_type}
+| | ... | duration_limit=${0.0}
+| | ... | ramp_up_duration=${ramp_up_duration}
+| | ... | ramp_up_rate=${ramp_up_rate}
+
| Additional Statistics Action For vpp-runtime
| | [Documentation]
| | ... | Additional Statistics Action for clear and show runtime counters with