author    Vratko Polak <vrpolak@cisco.com>    2019-07-17 12:40:49 +0200
committer Vratko Polak <vrpolak@cisco.com>    2019-07-17 12:40:49 +0200
commit    f88a3d9178dfbd73d0479f9aa2f5224e0c89ca1f (patch)
tree      9963cb06a7f089d815f9ebf5e5ba8d8f1f26a209
parent    248d1a52e06622dc9eb1dfdd6ca9f6670b4c0bc3 (diff)
Use PapiSocketProvider for most PAPI calls
Ticket: CSIT-1541
Ticket: VPP-1722
Ticket: CSIT-1546

+ Increase timeout to hide x520 slowness of show hardware detail.
- Install sshpass and update ssh client in virl bootstrap.
+ Added TODOs to remove when CSIT-1546 is fixed.
+ Enable default socksvr on any startup conf.
+ Improve OptionString init and repr.
- The non-socket executor still kept for stats.
+ Remove everything unrelated to stats from non-socket executor.
- Remove some debug-looking calls to avoid failures.
  TODO: Introduce proper parsing to the affected keywords.
+ Reduce logging from PAPI code to level INFO.
- Needs https://gerrit.fd.io/r/20660 to fully work.
+ Change default values for LocalExecution.run():
  + Return code check enabled by default.
    Code is more readable when rc!=0 is allowed explicitly,
    and the test code will now detect unexpected failures.
  + Logging disabled by default. Output XML is large already.
    Important logging can be enabled explicitly.
+ Restore alphabetical order in common.sh functions.

Change-Id: I05882cb6b620ad14638f7404b5ad38c7a5de9e6c
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
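For orientation, below is a minimal sketch of the call pattern this change migrates keyword libraries to, mirroring the examples in the PapiSocketExecutor docstring; the get_vpp_version wrapper and its error message are illustrative only, not part of this commit.

    from resources.libraries.python.PapiExecutor import PapiSocketExecutor

    def get_vpp_version(node):
        """Return the version string reported by VPP on the given topology node."""
        err_msg = "Failed to get VPP version on host {host}".format(
            host=node['host'])
        # The context manager forwards the remote /run/vpp-api.sock over SSH
        # and connects a cached VPPApiClient instance to the local end.
        with PapiSocketExecutor(node) as papi_exec:
            reply = papi_exec.add('show_version').get_reply(err_msg)
        return reply['version']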
-rwxr-xr-x  bootstrap.sh                                                6
-rw-r--r--  docs/report/vpp_functional_tests/test_environment.rst       3
-rw-r--r--  docs/report/vpp_performance_tests/test_environment.rst      3
-rw-r--r--  resources/libraries/bash/function/common.sh               183
-rw-r--r--  resources/libraries/python/Classify.py                     26
-rw-r--r--  resources/libraries/python/ContainerUtils.py                2
-rw-r--r--  resources/libraries/python/FilteredLogger.py               95
-rw-r--r--  resources/libraries/python/IPUtil.py                       68
-rw-r--r--  resources/libraries/python/IPsecUtil.py                     6
-rw-r--r--  resources/libraries/python/IPv6Util.py                      6
-rw-r--r--  resources/libraries/python/InterfaceUtil.py                46
-rw-r--r--  resources/libraries/python/KubernetesUtils.py               2
-rw-r--r--  resources/libraries/python/L2Util.py                       32
-rw-r--r--  resources/libraries/python/LocalExecution.py                2
-rw-r--r--  resources/libraries/python/Memif.py                         8
-rw-r--r--  resources/libraries/python/NATUtil.py                      12
-rw-r--r--  resources/libraries/python/OptionString.py                 17
-rw-r--r--  resources/libraries/python/PapiExecutor.py                674
-rw-r--r--  resources/libraries/python/ProxyArp.py                      4
-rw-r--r--  resources/libraries/python/QemuUtils.py                     1
-rw-r--r--  resources/libraries/python/SetupFramework.py                2
-rw-r--r--  resources/libraries/python/TestConfig.py                    8
-rw-r--r--  resources/libraries/python/Trace.py                         8
-rw-r--r--  resources/libraries/python/VPPUtil.py                      19
-rw-r--r--  resources/libraries/python/VhostUser.py                     6
-rw-r--r--  resources/libraries/python/VppConfigGenerator.py           12
-rw-r--r--  resources/libraries/python/VppCounters.py                  16
-rw-r--r--  resources/libraries/python/telemetry/SPAN.py                4
-rw-r--r--  resources/libraries/robot/honeycomb/performance.robot       1
-rw-r--r--  resources/libraries/robot/shared/default.robot              3
30 files changed, 797 insertions, 478 deletions
diff --git a/bootstrap.sh b/bootstrap.sh
index 44b0dc02ae..04cefee40c 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -26,13 +26,15 @@ OS_VERSION_ID=$(grep '^VERSION_ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\
if [ "$OS_ID" == "centos" ]; then
DISTRO="CENTOS"
PACKAGE="rpm"
- sudo yum install -y python-devel python-virtualenv
+ # TODO: Remove when corresponding part of CSIT-1546 is addressed.
+ sudo yum install -y python-devel python-virtualenv openssh-clients sshpass
elif [ "$OS_ID" == "ubuntu" ]; then
DISTRO="UBUNTU"
PACKAGE="deb"
+ # TODO: Remove when corresponding part of CSIT-1546 is addressed.
export DEBIAN_FRONTEND=noninteractive
sudo apt-get -y update
- sudo apt-get -y install libpython2.7-dev python-virtualenv
+ sudo apt-get -y install libpython2.7-dev python-virtualenv sshpass
else
echo "$OS_ID is not yet supported."
exit 1
diff --git a/docs/report/vpp_functional_tests/test_environment.rst b/docs/report/vpp_functional_tests/test_environment.rst
index d8f2abff55..b8a6b16f88 100644
--- a/docs/report/vpp_functional_tests/test_environment.rst
+++ b/docs/report/vpp_functional_tests/test_environment.rst
@@ -402,6 +402,9 @@ There is used the default startup configuration as defined in `VPP startup.conf`
{
gid vpp
}
+ socksvr {
+ default
+ }
dpdk
{
vdev cryptodev_aesni_gcm_pmd,socket_id=0
diff --git a/docs/report/vpp_performance_tests/test_environment.rst b/docs/report/vpp_performance_tests/test_environment.rst
index 57e797339e..3c179e1f7a 100644
--- a/docs/report/vpp_performance_tests/test_environment.rst
+++ b/docs/report/vpp_performance_tests/test_environment.rst
@@ -64,6 +64,9 @@ below:
log /tmp/vpe.log
nodaemon
}
+ socksvr {
+ default
+ }
ip6
{
heap-size 4G
diff --git a/resources/libraries/bash/function/common.sh b/resources/libraries/bash/function/common.sh
index b0b97e0040..549688f7bd 100644
--- a/resources/libraries/bash/function/common.sh
+++ b/resources/libraries/bash/function/common.sh
@@ -202,37 +202,41 @@ function common_dirs () {
set -exuo pipefail
- BASH_FUNCTION_DIR="$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")" || {
- die "Some error during localizing this source directory."
+ this_file=$(readlink -e "${BASH_SOURCE[0]}") || {
+ die "Some error during locating of this source file."
+ }
+ BASH_FUNCTION_DIR=$(dirname "${this_file}") || {
+ die "Some error during dirname call."
}
# Current working directory could be in a different repo, e.g. VPP.
pushd "${BASH_FUNCTION_DIR}" || die "Pushd failed"
- CSIT_DIR="$(readlink -e "$(git rev-parse --show-toplevel)")" || {
- die "Readlink or git rev-parse failed."
+ relative_csit_dir=$(git rev-parse --show-toplevel) || {
+ die "Git rev-parse failed."
}
+ CSIT_DIR=$(readlink -e "${relative_csit_dir}") || die "Readlink failed."
popd || die "Popd failed."
- TOPOLOGIES_DIR="$(readlink -e "${CSIT_DIR}/topologies/available")" || {
+ TOPOLOGIES_DIR=$(readlink -e "${CSIT_DIR}/topologies/available") || {
die "Readlink failed."
}
- RESOURCES_DIR="$(readlink -e "${CSIT_DIR}/resources")" || {
+ RESOURCES_DIR=$(readlink -e "${CSIT_DIR}/resources") || {
die "Readlink failed."
}
- TOOLS_DIR="$(readlink -e "${RESOURCES_DIR}/tools")" || {
+ TOOLS_DIR=$(readlink -e "${RESOURCES_DIR}/tools") || {
die "Readlink failed."
}
- PYTHON_SCRIPTS_DIR="$(readlink -e "${TOOLS_DIR}/scripts")" || {
+ PYTHON_SCRIPTS_DIR=$(readlink -e "${TOOLS_DIR}/scripts") || {
die "Readlink failed."
}
- ARCHIVE_DIR="$(readlink -f "${CSIT_DIR}/archive")" || {
+ ARCHIVE_DIR=$(readlink -f "${CSIT_DIR}/archive") || {
die "Readlink failed."
}
mkdir -p "${ARCHIVE_DIR}" || die "Mkdir failed."
- DOWNLOAD_DIR="$(readlink -f "${CSIT_DIR}/download_dir")" || {
+ DOWNLOAD_DIR=$(readlink -f "${CSIT_DIR}/download_dir") || {
die "Readlink failed."
}
mkdir -p "${DOWNLOAD_DIR}" || die "Mkdir failed."
- GENERATED_DIR="$(readlink -f "${CSIT_DIR}/generated")" || {
+ GENERATED_DIR=$(readlink -f "${CSIT_DIR}/generated") || {
die "Readlink failed."
}
mkdir -p "${GENERATED_DIR}" || die "Mkdir failed."
@@ -619,6 +623,42 @@ function run_pybot () {
}
+function select_os () {
+
+ # Populate variables related to local operating system.
+ #
+ # Also install any missing prerequisites CSIT tests need.
+ # TODO: Move the installation to a separate function?
+ #
+ # Variables set:
+ # - VPP_VER_FILE - Name of file in CSIT dir containing the vpp stable version.
+ # - IMAGE_VER_FILE - Name of file in CSIT dir containing the image name.
+ # - PKG_SUFFIX - Suffix of OS package file name, "rpm" or "deb".
+
+ set -exuo pipefail
+
+ os_id=$(grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g') || {
+ die "Get OS release failed."
+ }
+
+ case "${os_id}" in
+ "ubuntu"*)
+ IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU"
+ VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_BIONIC"
+ PKG_SUFFIX="deb"
+ ;;
+ "centos"*)
+ IMAGE_VER_FILE="VPP_DEVICE_IMAGE_CENTOS"
+ VPP_VER_FILE="VPP_STABLE_VER_CENTOS"
+ PKG_SUFFIX="rpm"
+ ;;
+ *)
+ die "Unable to identify distro or os from ${OS}"
+ ;;
+ esac
+}
+
+
function select_tags () {
# Variables read:
@@ -778,82 +818,6 @@ function select_tags () {
}
-function select_vpp_device_tags () {
-
- # Variables read:
- # - TEST_CODE - String affecting test selection, usually jenkins job name.
- # - TEST_TAG_STRING - String selecting tags, from gerrit comment.
- # Can be unset.
- # Variables set:
- # - TAGS - Array of processed tag boolean expressions.
-
- set -exuo pipefail
-
- case "${TEST_CODE}" in
- # Select specific performance tests based on jenkins job type variable.
- * )
- if [[ -z "${TEST_TAG_STRING-}" ]]; then
- # If nothing is specified, we will run pre-selected tests by
- # following tags. Items of array will be concatenated by OR
- # in Robot Framework.
- test_tag_array=()
- else
- # If trigger contains tags, split them into array.
- test_tag_array=(${TEST_TAG_STRING//:/ })
- fi
- ;;
- esac
-
- TAGS=()
-
- # We will prefix with devicetest to prevent running other tests
- # (e.g. Functional).
- prefix="devicetestAND"
- if [[ "${TEST_CODE}" == "vpp-"* ]]; then
- # Automatic prefixing for VPP jobs to limit testing.
- prefix="${prefix}"
- fi
- for tag in "${test_tag_array[@]}"; do
- if [[ ${tag} == "!"* ]]; then
- # Exclude tags are not prefixed.
- TAGS+=("${tag}")
- else
- TAGS+=("${prefix}${tag}")
- fi
- done
-}
-
-function select_os () {
-
- # Variables set:
- # - VPP_VER_FILE - Name of File in CSIT dir containing vpp stable version.
- # - IMAGE_VER_FILE - Name of File in CSIT dir containing the image name.
- # - PKG_SUFFIX - Suffix of OS package file name, "rpm" or "deb."
-
- set -exuo pipefail
-
- os_id=$(grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g') || {
- die "Get OS release failed."
- }
-
- case "${os_id}" in
- "ubuntu"*)
- IMAGE_VER_FILE="VPP_DEVICE_IMAGE_UBUNTU"
- VPP_VER_FILE="VPP_STABLE_VER_UBUNTU_BIONIC"
- PKG_SUFFIX="deb"
- ;;
- "centos"*)
- IMAGE_VER_FILE="VPP_DEVICE_IMAGE_CENTOS"
- VPP_VER_FILE="VPP_STABLE_VER_CENTOS"
- PKG_SUFFIX="rpm"
- ;;
- *)
- die "Unable to identify distro or os from ${OS}"
- ;;
- esac
-}
-
-
function select_topology () {
# Variables read:
@@ -917,6 +881,51 @@ function select_topology () {
}
+function select_vpp_device_tags () {
+
+ # Variables read:
+ # - TEST_CODE - String affecting test selection, usually jenkins job name.
+ # - TEST_TAG_STRING - String selecting tags, from gerrit comment.
+ # Can be unset.
+ # Variables set:
+ # - TAGS - Array of processed tag boolean expressions.
+
+ set -exuo pipefail
+
+ case "${TEST_CODE}" in
+ # Select specific performance tests based on jenkins job type variable.
+ * )
+ if [[ -z "${TEST_TAG_STRING-}" ]]; then
+ # If nothing is specified, we will run pre-selected tests by
+ # following tags. Items of array will be concatenated by OR
+ # in Robot Framework.
+ test_tag_array=()
+ else
+ # If trigger contains tags, split them into array.
+ test_tag_array=(${TEST_TAG_STRING//:/ })
+ fi
+ ;;
+ esac
+
+ TAGS=()
+
+ # We will prefix with devicetest to prevent running other tests
+ # (e.g. Functional).
+ prefix="devicetestAND"
+ if [[ "${TEST_CODE}" == "vpp-"* ]]; then
+ # Automatic prefixing for VPP jobs to limit testing.
+ prefix="${prefix}"
+ fi
+ for tag in "${test_tag_array[@]}"; do
+ if [[ ${tag} == "!"* ]]; then
+ # Exclude tags are not prefixed.
+ TAGS+=("${tag}")
+ else
+ TAGS+=("${prefix}${tag}")
+ fi
+ done
+}
+
function untrap_and_unreserve_testbed () {
# Use this as a trap function to ensure testbed does not remain reserved.
diff --git a/resources/libraries/python/Classify.py b/resources/libraries/python/Classify.py
index b2cc3a6420..62508e1a49 100644
--- a/resources/libraries/python/Classify.py
+++ b/resources/libraries/python/Classify.py
@@ -21,7 +21,7 @@ from ipaddress import ip_address
from robot.api import logger
from resources.libraries.python.topology import Topology
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
class Classify(object):
@@ -289,7 +289,7 @@ class Classify(object):
err_msg = "Failed to create a classify table on host {host}".format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)
return int(reply["new_table_index"]), int(reply["skip_n_vectors"]),\
@@ -355,7 +355,7 @@ class Classify(object):
err_msg = "Failed to create a classify session on host {host}".format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -379,7 +379,7 @@ class Classify(object):
err_msg = "Failed to create a classify session on host {host}".format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -407,7 +407,7 @@ class Classify(object):
err_msg = "Failed to set acl list for interface {idx} on host {host}".\
format(idx=sw_if_index, host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -434,7 +434,7 @@ class Classify(object):
err_msg = "Failed to add/replace acls on host {host}".format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -732,7 +732,7 @@ class Classify(object):
args = dict(
table_id=int(table_index)
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)
return reply
@@ -751,7 +751,7 @@ class Classify(object):
args = dict(
table_id=int(table_index)
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details()
return details
@@ -764,7 +764,7 @@ class Classify(object):
:param node: VPP node.
:type node: dict
"""
- PapiExecutor.dump_and_log(node, ["acl_dump", ])
+ PapiSocketExecutor.dump_and_log(node, ["acl_dump", ])
@staticmethod
def vpp_log_plugin_acl_interface_assignment(node):
@@ -774,7 +774,7 @@ class Classify(object):
:param node: VPP node.
:type node: dict
"""
- PapiExecutor.dump_and_log(node, ["acl_interface_list_dump", ])
+ PapiSocketExecutor.dump_and_log(node, ["acl_interface_list_dump", ])
@staticmethod
def set_acl_list_for_interface(node, interface, acl_type, acl_idx=None):
@@ -902,7 +902,7 @@ class Classify(object):
:param node: VPP node.
:type node: dict
"""
- PapiExecutor.dump_and_log(node, ["macip_acl_dump", ])
+ PapiSocketExecutor.dump_and_log(node, ["macip_acl_dump", ])
@staticmethod
def add_del_macip_acl_interface(node, interface, action, acl_idx):
@@ -933,7 +933,7 @@ class Classify(object):
sw_if_index=int(sw_if_index),
acl_index=int(acl_idx)
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -946,6 +946,6 @@ class Classify(object):
cmd = 'macip_acl_interface_get'
err_msg = "Failed to get 'macip_acl_interface' on host {host}".format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd).get_reply(err_msg)
logger.info(reply)
diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py
index 228648921a..a324465eaf 100644
--- a/resources/libraries/python/ContainerUtils.py
+++ b/resources/libraries/python/ContainerUtils.py
@@ -449,6 +449,7 @@ class ContainerEngine(object):
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
vpp_config.add_unix_exec('/tmp/running.exec')
+ vpp_config.add_socksvr()
# We will pop the first core from the list to be a main core
vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
# If more cores in the list, the rest will be used as workers.
@@ -498,6 +499,7 @@ class ContainerEngine(object):
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
vpp_config.add_unix_exec('/tmp/running.exec')
+ vpp_config.add_socksvr()
vpp_config.add_plugin('disable', 'dpdk_plugin.so')
# Apply configuration
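A hedged sketch of how the new add_socksvr() call fits the existing VppConfigGenerator flow and of the stanza it is expected to render (matching the docs change above); `node` stands for a topology node dict and the surrounding calls are taken from this diff:

    from resources.libraries.python.VppConfigGenerator import VppConfigGenerator

    vpp_config = VppConfigGenerator()
    vpp_config.set_node(node)    # node: topology node dict (assumed available)
    vpp_config.add_unix_cli_listen()
    vpp_config.add_unix_nodaemon()
    vpp_config.add_socksvr()     # new in this change; no argument -> default socket
    # Expected fragment in the generated startup.conf:
    #   socksvr {
    #     default
    #   }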
diff --git a/resources/libraries/python/FilteredLogger.py b/resources/libraries/python/FilteredLogger.py
new file mode 100644
index 0000000000..a04eb67476
--- /dev/null
+++ b/resources/libraries/python/FilteredLogger.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2019 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Python library for customizing robot.api.logger
+
+As robot.api.logger is a module, it is not easy to copy, edit or inherit from.
+This module offers a class to wrap it.
+The main point of the class is to lower verbosity of Robot logging,
+especially when injected to third party code (such as vpp_papi.VPPApiClient).
+
+Also, string formatting using the '%' operator is supported.
+
+Logger.console() is not supported.
+"""
+
+import logging
+
+_LEVELS = {
+ "TRACE": logging.DEBUG // 2,
+ "DEBUG": logging.DEBUG,
+ "INFO": logging.INFO,
+ "HTML": logging.INFO,
+ "WARN": logging.WARN,
+ "ERROR": logging.ERROR,
+ "CRITICAL": logging.CRITICAL,
+ "NONE": logging.CRITICAL,
+}
+
+class FilteredLogger(object):
+ """Instances of this class have the similar API to robot.api.logger.
+
+ TODO: Support html argument?
+ TODO: Support console with a filtering switch?
+ """
+
+ def __init__(self, logger_module, min_level="INFO"):
+ """Remember the values, check min_level is known.
+
+ Use min_level of "CRITICAL" or "NONE" to disable logging entirely.
+
+ :param logger_module: robot.api.logger, or a compatible object.
+ :param min_level: Minimal level to log, lower levels are ignored.
+ :type logger_module: Object with .write(msg, level="INFO") signature.
+ :type min_level: str
+ :raises KeyError: If given min_level is not supported.
+ """
+ self.logger_module = logger_module
+ self.min_level_num = _LEVELS[min_level.upper()]
+
+ def write(self, message, farg=None, level="INFO"):
+ """Forwards the message to logger if min_level is reached.
+
+ Formatting using '%' operator is used when farg argument is suplied.
+
+ :param message: Message to log.
+ :param farg: Value for '%' operator, or None.
+ :param level: Level to possibly log with.
+ :type message: str
+ :type farg: NoneType, or whatever '%' accepts: str, int, float, dict...
+ :type level: str
+ """
+ if _LEVELS[level.upper()] >= self.min_level_num:
+ if farg is not None:
+ message = message % farg
+ self.logger_module.write(message, level=level)
+
+ def trace(self, message, farg=None):
+ """Forward the message using the ``TRACE`` level."""
+ self.write(message, farg=farg, level="TRACE")
+
+ def debug(self, message, farg=None):
+ """Forward the message using the ``DEBUG`` level."""
+ self.write(message, farg=farg, level="DEBUG")
+
+ def info(self, message, farg=None):
+ """Forward the message using the ``INFO`` level."""
+ self.write(message, farg=farg, level="INFO")
+
+ def warn(self, message, farg=None):
+ """Forward the message using the ``WARN`` level."""
+ self.write(message, farg=farg, level="WARN")
+
+ def error(self, message, farg=None):
+ """Forward the message using the ``ERROR`` level."""
+ self.write(message, farg=farg, level="ERROR")
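A short usage sketch of the wrapper defined above, in the spirit of how PapiSocketExecutor later injects it into vpp_papi (see the PapiExecutor.py hunk below); the messages are illustrative:

    from robot.api import logger
    from resources.libraries.python.FilteredLogger import FilteredLogger

    # Only INFO and above reach the Robot log; DEBUG and TRACE are dropped.
    quiet_logger = FilteredLogger(logger, min_level="INFO")
    quiet_logger.debug("Chatty vpp_papi detail, filtered out.")
    quiet_logger.info("Connected to %s", farg="socket")  # '%' formatting via farg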
diff --git a/resources/libraries/python/IPUtil.py b/resources/libraries/python/IPUtil.py
index 6a8e1a2401..0212ead1d7 100644
--- a/resources/libraries/python/IPUtil.py
+++ b/resources/libraries/python/IPUtil.py
@@ -16,11 +16,11 @@
import re
from enum import IntEnum
-from ipaddress import ip_address, IPv4Network, IPv6Network
+from ipaddress import ip_address
from resources.libraries.python.Constants import Constants
from resources.libraries.python.InterfaceUtil import InterfaceUtil
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import exec_cmd_no_error, exec_cmd
from resources.libraries.python.topology import Topology
from resources.libraries.python.VatExecutor import VatTerminal
@@ -112,28 +112,22 @@ class IPUtil(object):
"""
sw_if_index = InterfaceUtil.get_interface_index(node, interface)
- if sw_if_index:
- is_ipv6 = 1 if ip_version == 'ipv6' else 0
-
- cmd = 'ip_address_dump'
- args = dict(sw_if_index=sw_if_index,
- is_ipv6=is_ipv6)
- err_msg = 'Failed to get L2FIB dump on host {host}'.format(
- host=node['host'])
-
- with PapiExecutor(node) as papi_exec:
- details = papi_exec.add(cmd, **args).get_details(err_msg)
-
- for item in details:
- item['ip'] = item['prefix'].split('/')[0]
- item['prefix_length'] = int(item['prefix'].split('/')[1])
- item['is_ipv6'] = is_ipv6
- item['netmask'] = \
- str(IPv6Network(unicode('::/{pl}'.format(
- pl=item['prefix_length']))).netmask) \
- if is_ipv6 \
- else str(IPv4Network(unicode('0.0.0.0/{pl}'.format(
- pl=item['prefix_length']))).netmask)
+ if not sw_if_index:
+ return list()
+
+ is_ipv6 = 1 if ip_version == 'ipv6' else 0
+
+ cmd = 'ip_address_dump'
+ args = dict(sw_if_index=sw_if_index,
+ is_ipv6=is_ipv6)
+ err_msg = 'Failed to get L2FIB dump on host {host}'.format(
+ host=node['host'])
+
+ with PapiSocketExecutor(node) as papi_exec:
+ details = papi_exec.add(cmd, **args).get_details(err_msg)
+
+ # TODO: CSIT currently looks only at whether the list is empty.
+ # Add proper value processing if values become important.
return details
@@ -145,10 +139,10 @@ class IPUtil(object):
:type node: dict
"""
- PapiExecutor.run_cli_cmd(node, 'show ip fib')
- PapiExecutor.run_cli_cmd(node, 'show ip fib summary')
- PapiExecutor.run_cli_cmd(node, 'show ip6 fib')
- PapiExecutor.run_cli_cmd(node, 'show ip6 fib summary')
+ PapiSocketExecutor.run_cli_cmd(node, 'show ip fib')
+ PapiSocketExecutor.run_cli_cmd(node, 'show ip fib summary')
+ PapiSocketExecutor.run_cli_cmd(node, 'show ip6 fib')
+ PapiSocketExecutor.run_cli_cmd(node, 'show ip6 fib summary')
@staticmethod
def vpp_get_ip_tables_prefix(node, address):
@@ -159,7 +153,7 @@ class IPUtil(object):
"""
addr = ip_address(unicode(address))
- PapiExecutor.run_cli_cmd(
+ PapiSocketExecutor.run_cli_cmd(
node, 'show {ip_ver} fib {addr}/{addr_len}'.format(
ip_ver='ip6' if addr.version == 6 else 'ip',
addr=addr,
@@ -188,7 +182,7 @@ class IPUtil(object):
err_msg = 'Failed to get VRF id assigned to interface {ifc}'.format(
ifc=interface)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)
return reply['vrf_id']
@@ -209,7 +203,7 @@ class IPUtil(object):
loose=0)
err_msg = 'Failed to enable source check on interface {ifc}'.format(
ifc=if_name)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -230,7 +224,7 @@ class IPUtil(object):
err_msg = 'VPP ip probe {dev} {ip} failed on {h}'.format(
dev=interface, ip=addr, h=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -407,7 +401,7 @@ class IPUtil(object):
address=ip_addr.packed)
err_msg = 'Failed to add IP address on interface {ifc}'.format(
ifc=interface)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -436,7 +430,7 @@ class IPUtil(object):
neighbor=neighbor)
err_msg = 'Failed to add IP neighbor on interface {ifc}'.format(
ifc=iface_key)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -583,7 +577,7 @@ class IPUtil(object):
err_msg = 'Failed to add route(s) on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
for i in xrange(kwargs.get('count', 1)):
args['route']['prefix']['address']['un'] = \
IPUtil.union_addr(net_addr + i)
@@ -608,7 +602,7 @@ class IPUtil(object):
del_all=1)
err_msg = 'Failed to flush IP address on interface {ifc}'.format(
ifc=interface)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -631,5 +625,5 @@ class IPUtil(object):
is_add=1)
err_msg = 'Failed to add FIB table on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
diff --git a/resources/libraries/python/IPsecUtil.py b/resources/libraries/python/IPsecUtil.py
index 78239f9950..f28605fc77 100644
--- a/resources/libraries/python/IPsecUtil.py
+++ b/resources/libraries/python/IPsecUtil.py
@@ -20,7 +20,7 @@ from ipaddress import ip_network, ip_address
from enum import Enum, IntEnum
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import Topology
from resources.libraries.python.VatExecutor import VatExecutor
from resources.libraries.python.VatJsonUtil import VatJsonUtil
@@ -260,7 +260,7 @@ class IPsecUtil(object):
err_msg = 'Failed to select IPsec backend on host {host}'.format(
host=node['host'])
args = dict(protocol=protocol, index=index)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -273,7 +273,7 @@ class IPsecUtil(object):
err_msg = 'Failed to dump IPsec backends on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add('ipsec_backend_dump').get_details(err_msg)
@staticmethod
diff --git a/resources/libraries/python/IPv6Util.py b/resources/libraries/python/IPv6Util.py
index 9138c09a20..aacf0fb5f7 100644
--- a/resources/libraries/python/IPv6Util.py
+++ b/resources/libraries/python/IPv6Util.py
@@ -15,7 +15,7 @@
from resources.libraries.python.InterfaceUtil import InterfaceUtil
from resources.libraries.python.IPUtil import IPUtil
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import NodeType
@@ -38,7 +38,7 @@ class IPv6Util(object):
err_msg = 'Failed to suppress ICMPv6 router advertisement message on ' \
'interface {ifc}'.format(ifc=interface)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -60,7 +60,7 @@ class IPv6Util(object):
err_msg = 'Failed to set router advertisement interval on ' \
'interface {ifc}'.format(ifc=interface)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
diff --git a/resources/libraries/python/InterfaceUtil.py b/resources/libraries/python/InterfaceUtil.py
index 0b1f06f9bf..6de17d10d9 100644
--- a/resources/libraries/python/InterfaceUtil.py
+++ b/resources/libraries/python/InterfaceUtil.py
@@ -24,7 +24,7 @@ from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.L2Util import L2Util
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.parsers.JsonParser import JsonParser
from resources.libraries.python.ssh import SSH, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
@@ -138,7 +138,7 @@ class InterfaceUtil(object):
host=node['host'])
args = dict(sw_if_index=sw_if_index,
admin_up_down=admin_up_down)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
cmd = 'ip link set {ifc} {state}'.format(
@@ -210,7 +210,7 @@ class InterfaceUtil(object):
args = dict(sw_if_index=sw_if_index,
mtu=int(mtu))
try:
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
except AssertionError as err:
# TODO: Make failure tolerance optional.
@@ -321,7 +321,7 @@ class InterfaceUtil(object):
name_filter='')
err_msg = 'Failed to get interface dump on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
def process_if_dump(if_dump):
@@ -729,7 +729,7 @@ class InterfaceUtil(object):
vlan_id=int(vlan))
err_msg = 'Failed to create VLAN sub-interface on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
if_key = Topology.add_new_port(node, 'vlan_subif')
@@ -771,7 +771,7 @@ class InterfaceUtil(object):
vni=int(vni))
err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\
format(host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
if_key = Topology.add_new_port(node, 'vxlan_tunnel')
@@ -805,7 +805,7 @@ class InterfaceUtil(object):
args = dict(sw_if_index=sw_if_index)
err_msg = 'Failed to get VXLAN dump on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
def process_vxlan_dump(vxlan_dump):
@@ -853,7 +853,7 @@ class InterfaceUtil(object):
cmd = 'sw_interface_vhost_user_dump'
err_msg = 'Failed to get vhost-user dump on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd).get_details(err_msg)
def process_vhost_dump(vhost_dump):
@@ -896,7 +896,7 @@ class InterfaceUtil(object):
cmd = 'sw_interface_tap_v2_dump'
err_msg = 'Failed to get TAP dump on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd).get_details(err_msg)
def process_tap_dump(tap_dump):
@@ -972,7 +972,7 @@ class InterfaceUtil(object):
inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0)
err_msg = 'Failed to create sub-interface on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
if_key = Topology.add_new_port(node, 'subinterface')
@@ -1007,7 +1007,7 @@ class InterfaceUtil(object):
tunnel=tunnel)
err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
if_key = Topology.add_new_port(node, 'gre_tunnel')
@@ -1032,7 +1032,7 @@ class InterfaceUtil(object):
args = dict(mac_address=0)
err_msg = 'Failed to create loopback interface on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
if_key = Topology.add_new_port(node, 'loopback')
@@ -1071,7 +1071,7 @@ class InterfaceUtil(object):
lb=load_balance.upper())).value)
err_msg = 'Failed to create bond interface on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
InterfaceUtil.add_eth_interface(node, sw_if_index=sw_if_index,
@@ -1128,7 +1128,7 @@ class InterfaceUtil(object):
txq_size=0)
err_msg = 'Failed to create AVF interface on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
InterfaceUtil.add_eth_interface(node, sw_if_index=sw_if_index,
@@ -1160,7 +1160,7 @@ class InterfaceUtil(object):
'interface {bond} on host {host}'.format(ifc=interface,
bond=bond_if,
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -1177,7 +1177,7 @@ class InterfaceUtil(object):
host=node['host'])
data = ('Bond data on node {host}:\n'.format(host=node['host']))
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd).get_details(err_msg)
for bond in details:
@@ -1220,7 +1220,7 @@ class InterfaceUtil(object):
err_msg = 'Failed to get slave dump on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
def process_slave_dump(slave_dump):
@@ -1281,7 +1281,7 @@ class InterfaceUtil(object):
is_add=1)
err_msg = 'Failed to enable input acl on interface {ifc}'.format(
ifc=interface)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -1306,7 +1306,7 @@ class InterfaceUtil(object):
args = dict(sw_if_index=sw_if_index)
err_msg = 'Failed to get classify table name by interface {ifc}'.format(
ifc=interface)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)
return reply
@@ -1350,7 +1350,7 @@ class InterfaceUtil(object):
args = dict(sw_if_index=sw_if_index)
err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
def process_vxlan_gpe_dump(vxlan_dump):
@@ -1405,7 +1405,7 @@ class InterfaceUtil(object):
vrf_id=int(table_id))
err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
ifc=interface)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -1563,7 +1563,7 @@ class InterfaceUtil(object):
cmd = 'sw_interface_rx_placement_dump'
err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
cmd=cmd, host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
for ifc in node['interfaces'].values():
if ifc['vpp_sw_index'] is not None:
papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
@@ -1591,7 +1591,7 @@ class InterfaceUtil(object):
"{host}!".format(host=node['host'])
args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
worker_id=worker_id)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
diff --git a/resources/libraries/python/KubernetesUtils.py b/resources/libraries/python/KubernetesUtils.py
index 60e12863b4..029d635c72 100644
--- a/resources/libraries/python/KubernetesUtils.py
+++ b/resources/libraries/python/KubernetesUtils.py
@@ -483,6 +483,7 @@ class KubernetesUtils(object):
vpp_config.set_node(kwargs['node'])
vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
vpp_config.add_unix_nodaemon()
+ vpp_config.add_socksvr()
vpp_config.add_heapsize('4G')
vpp_config.add_ip_heap_size('4G')
vpp_config.add_ip6_heap_size('4G')
@@ -528,6 +529,7 @@ class KubernetesUtils(object):
vpp_config.set_node(kwargs['node'])
vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
vpp_config.add_unix_nodaemon()
+ vpp_config.add_socksvr()
# We will pop first core from list to be main core
vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
# if this is not only core in list, the rest will be used as workers.
diff --git a/resources/libraries/python/L2Util.py b/resources/libraries/python/L2Util.py
index 7c575a290e..4ca0c47308 100644
--- a/resources/libraries/python/L2Util.py
+++ b/resources/libraries/python/L2Util.py
@@ -19,7 +19,7 @@ from textwrap import wrap
from enum import IntEnum
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import Topology
from resources.libraries.python.ssh import exec_cmd_no_error
@@ -129,7 +129,7 @@ class L2Util(object):
static_mac=int(static_mac),
filter_mac=int(filter_mac),
bvi_mac=int(bvi_mac))
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -168,7 +168,7 @@ class L2Util(object):
learn=int(learn),
arp_term=int(arp_term),
is_add=1)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -200,7 +200,7 @@ class L2Util(object):
shg=int(shg),
port_type=int(port_type),
enable=1)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -248,7 +248,7 @@ class L2Util(object):
err_msg = 'Failed to add L2 bridge domain with 2 interfaces on host' \
' {host}'.format(host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd1, **args1).add(cmd2, **args2).add(cmd2, **args3)
papi_exec.get_replies(err_msg)
@@ -285,7 +285,7 @@ class L2Util(object):
err_msg = 'Failed to add L2 cross-connect between two interfaces on' \
' host {host}'.format(host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg)
@staticmethod
@@ -321,7 +321,7 @@ class L2Util(object):
err_msg = 'Failed to add L2 patch between two interfaces on' \
' host {host}'.format(host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg)
@staticmethod
@@ -391,18 +391,14 @@ class L2Util(object):
args = dict(bd_id=int(bd_id))
err_msg = 'Failed to get L2FIB dump on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
- bd_data = list() if bd_id == Constants.BITWISE_NON_ZERO else dict()
+ if bd_id == Constants.BITWISE_NON_ZERO:
+ return details
for bridge_domain in details:
- if bd_id == Constants.BITWISE_NON_ZERO:
- bd_data.append(bridge_domain)
- else:
- if bridge_domain['bd_id'] == bd_id:
- return bridge_domain
-
- return bd_data
+ if bridge_domain['bd_id'] == bd_id:
+ return bridge_domain
@staticmethod
def l2_vlan_tag_rewrite(node, interface, tag_rewrite_method,
@@ -444,7 +440,7 @@ class L2Util(object):
tag2=tag2_id)
err_msg = 'Failed to set VLAN TAG rewrite on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -463,7 +459,7 @@ class L2Util(object):
args = dict(bd_id=int(bd_id))
err_msg = 'Failed to get L2FIB dump on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
for fib_item in details:
diff --git a/resources/libraries/python/LocalExecution.py b/resources/libraries/python/LocalExecution.py
index bb4cf794a1..f9a7b94d8e 100644
--- a/resources/libraries/python/LocalExecution.py
+++ b/resources/libraries/python/LocalExecution.py
@@ -40,7 +40,7 @@ __all__ = ["run"]
MESSAGE_TEMPLATE = "Command {com} ended with RC {ret} and output:\n{out}"
-def run(command, msg="", check=False, log=True, console=False):
+def run(command, msg="", check=True, log=False, console=False):
"""Wrapper around subprocess.check_output that can tolerates nonzero RCs.
Stderr is redirected to stdout, so it is part of output
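A hedged sketch of what the changed defaults mean for callers; the exact return value with check=True is not shown in this hunk, so only behaviour visible elsewhere in this diff (the (rc, output) tuple with check=False) is relied on, and the example commands are illustrative:

    from resources.libraries.python.LocalExecution import run

    # New defaults: a nonzero RC raises, and nothing is logged.
    run(["true"])

    # Callers that tolerate failure must now say so explicitly
    # and inspect the returned RC themselves.
    ret_code, _ = run(["ls", "/tmp/some-socket"], check=False)

    # Logging of the command and its output is now opt-in.
    run(["uname", "-a"], log=True)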
diff --git a/resources/libraries/python/Memif.py b/resources/libraries/python/Memif.py
index 5c38ec46f5..34cf6ce9c3 100644
--- a/resources/libraries/python/Memif.py
+++ b/resources/libraries/python/Memif.py
@@ -18,7 +18,7 @@ from enum import IntEnum
from robot.api import logger
from resources.libraries.python.topology import NodeType, Topology
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.L2Util import L2Util
@@ -43,7 +43,7 @@ class Memif(object):
:returns: List of memif interfaces extracted from Papi response.
:rtype: list
"""
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add("memif_dump").get_details()
for memif in details:
@@ -78,7 +78,7 @@ class Memif(object):
socket_id=int(sid),
socket_filename=str('/tmp/' + filename)
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
return papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
@@ -110,7 +110,7 @@ class Memif(object):
socket_id=int(sid),
id=int(mid)
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
return papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
@staticmethod
diff --git a/resources/libraries/python/NATUtil.py b/resources/libraries/python/NATUtil.py
index 5c0278db90..f018d38335 100644
--- a/resources/libraries/python/NATUtil.py
+++ b/resources/libraries/python/NATUtil.py
@@ -21,7 +21,7 @@ from enum import IntEnum
from robot.api import logger
from resources.libraries.python.InterfaceUtil import InterfaceUtil
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
class NATConfigFlags(IntEnum):
@@ -65,7 +65,7 @@ class NATUtil(object):
is_add=1,
flags=getattr(NATConfigFlags, "NAT_IS_INSIDE").value
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args_in).get_reply(err_msg)
int_out_idx = InterfaceUtil.get_sw_if_index(node, int_out)
@@ -76,7 +76,7 @@ class NATUtil(object):
is_add=1,
flags=getattr(NATConfigFlags, "NAT_IS_OUTSIDE").value
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args_in).get_reply(err_msg)
@staticmethod
@@ -105,7 +105,7 @@ class NATUtil(object):
out_addr=inet_pton(AF_INET, str(ip_out)),
out_plen=int(subnet_out)
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args_in).get_reply(err_msg)
@staticmethod
@@ -131,7 +131,7 @@ class NATUtil(object):
cmd = 'nat_show_config'
err_msg = 'Failed to get NAT configuration on host {host}'.\
format(host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd).get_reply(err_msg)
logger.debug("NAT Configuration:\n{reply}".format(reply=pformat(reply)))
@@ -145,4 +145,4 @@ class NATUtil(object):
"nat44_user_session_dump",
"nat_det_map_dump"
]
- PapiExecutor.dump_and_log(node, cmds)
+ PapiSocketExecutor.dump_and_log(node, cmds)
diff --git a/resources/libraries/python/OptionString.py b/resources/libraries/python/OptionString.py
index 7c8b2d066a..7163d057ec 100644
--- a/resources/libraries/python/OptionString.py
+++ b/resources/libraries/python/OptionString.py
@@ -36,19 +36,21 @@ class OptionString(object):
the best fitting one, without much logic near the call site.
"""
- def __init__(self, prefix="", *args):
+ def __init__(self, parts=tuple(), prefix=""):
"""Create instance with listed strings as parts to use.
Prefix will be converted to string and stripped.
The typical (nonempty) prefix values are "-" and "--".
+ TODO: Support users calling with parts being a string?
+
+ :param parts: List of stringifiable objects to become parts.
:param prefix: Substring to prepend to every parameter (not value).
- :param args: List of positional arguments to become parts.
+ :type parts: Iterable of object
:type prefix: object
- :type args: list of object
"""
+ self.parts = [str(part) for part in parts]
self.prefix = str(prefix).strip() # Not worth to call change_prefix.
- self.parts = list(args)
def __repr__(self):
"""Return string executable as Python constructor call.
@@ -56,12 +58,11 @@ class OptionString(object):
:returns: Executable constructor call as string.
:rtype: str
"""
- return "".join([
- "OptionString(prefix=", repr(self.prefix), ",",
- repr(self.parts)[1:-1], ")"])
+ return "OptionString(parts={parts!r},prefix={prefix!r})".format(
+ parts=self.parts, prefix=self.prefix)
# TODO: Would we ever need a copy() method?
- # Currently, supersting "master" is mutable but unique,
+ # Currently, superstring "master" is mutable but unique,
# substring "slave" can be used to extend, but does not need to be mutated.
def change_prefix(self, prefix):
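A small sketch of the reworked constructor and repr; apart from what this hunk shows, the rest of the OptionString API is assumed unchanged and the argument values are illustrative:

    from resources.libraries.python.OptionString import OptionString

    # Parts now come first, as an iterable; prefix is a keyword argument.
    opts = OptionString(parts=["arg1", "arg2"], prefix="--")
    # repr() now round-trips as a constructor call:
    #   OptionString(parts=['arg1', 'arg2'],prefix='--')
    print(repr(opts))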
diff --git a/resources/libraries/python/PapiExecutor.py b/resources/libraries/python/PapiExecutor.py
index 9ca34d88ae..0a009b3720 100644
--- a/resources/libraries/python/PapiExecutor.py
+++ b/resources/libraries/python/PapiExecutor.py
@@ -15,32 +15,90 @@
"""
import binascii
+import glob
import json
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
from pprint import pformat
from robot.api import logger
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.PapiHistory import PapiHistory
+from resources.libraries.python.LocalExecution import run
+from resources.libraries.python.FilteredLogger import FilteredLogger
from resources.libraries.python.PythonThree import raise_from
-from resources.libraries.python.ssh import SSH, SSHTimeout
+from resources.libraries.python.PapiHistory import PapiHistory
+from resources.libraries.python.ssh import (
+ SSH, SSHTimeout, exec_cmd_no_error, scp_node)
-__all__ = ["PapiExecutor"]
+__all__ = ["PapiExecutor", "PapiSocketExecutor"]
-class PapiExecutor(object):
- """Contains methods for executing VPP Python API commands on DUTs.
+def dictize(obj):
+ """A helper method, to make namedtuple-like object accessible as dict.
+
+ If the object is namedtuple-like, its _asdict() form is returned,
+ but in the returned object __getitem__ method is wrapped
+ to dictize also any items returned.
+ If the object does not have _asdict, it will be returned without any change.
+ Integer keys still access the object as tuple.
+
+ A more useful version would be to keep obj mostly as a namedtuple,
+ just add getitem for string keys. Unfortunately, namedtuple inherits
+ from tuple, including its read-only __getitem__ attribute,
+ so we cannot monkey-patch it.
+
+ TODO: Create a proxy for namedtuple to allow that.
+
+ :param obj: Arbitrary object to dictize.
+ :type obj: object
+ :returns: Dictized object.
+ :rtype: same as obj type or collections.OrderedDict
+ """
+ if not hasattr(obj, "_asdict"):
+ return obj
+ ret = obj._asdict()
+ old_get = ret.__getitem__
+ new_get = lambda self, key: dictize(old_get(self, key))
+ ret.__getitem__ = new_get
+ return ret
+
+class PapiSocketExecutor(object):
+ """Methods for executing VPP Python API commands on forwarded socket.
+
+ The current implementation connects for the duration of resource manager.
+ Delay for accepting connection is 10s, and disconnect is explicit.
+ TODO: Decrease 10s to value that is long enough for creating connection
+ and short enough to not affect performance.
+
+ The current implementation downloads and parses .api.json files only once
+ and stores a VPPApiClient instance (disconnected) as a class variable.
+ Accessing multiple nodes with different APIs is therefore not supported.
+
+ The current implementation seems to run into a read error occasionally.
+ It is not clear whether the error is in the Python code on the Robot side,
+ in the ssh forwarding, or in the socket handling on the VPP side.
+ Reconnecting after a short sleep seems to help, hoping repeated command
+ execution does not lead to surprises.
+ The reconnection is logged at WARN level, so it is prominently shown
+ in log.html and we can see how frequently it happens.
+
+ TODO: Support sockets in NFs somehow.
+ TODO: Support handling of retval!=0 without try/except in caller.
Note: Use only with "with" statement, e.g.:
- with PapiExecutor(node) as papi_exec:
- replies = papi_exec.add('show_version').get_replies(err_msg)
+ with PapiSocketExecutor(node) as papi_exec:
+ reply = papi_exec.add('show_version').get_reply(err_msg)
- This class processes three classes of VPP PAPI methods:
- 1. simple request / reply: method='request',
- 2. dump functions: method='dump',
- 3. vpp-stats: method='stats'.
+ This class processes two classes of VPP PAPI methods:
+ 1. Simple request / reply: method='request'.
+ 2. Dump functions: method='dump'.
+
+ Note that access to VPP stats over socket is not supported yet.
The recommended ways of use are (examples):
@@ -48,88 +106,214 @@ class PapiExecutor(object):
a. One request with no arguments:
- with PapiExecutor(node) as papi_exec:
- reply = papi_exec.add('show_version').get_reply()
+ with PapiSocketExecutor(node) as papi_exec:
+ reply = papi_exec.add('show_version').get_reply(err_msg)
b. Three requests with arguments, the second and the third ones are the same
but with different arguments.
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
replies = papi_exec.add(cmd1, **args1).add(cmd2, **args2).\
add(cmd2, **args3).get_replies(err_msg)
2. Dump functions
cmd = 'sw_interface_rx_placement_dump'
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index']).\
get_details(err_msg)
+ """
- 3. vpp-stats
-
- path = ['^/if', '/err/ip4-input', '/sys/node/ip4-input']
-
- with PapiExecutor(node) as papi_exec:
- stats = papi_exec.add(api_name='vpp-stats', path=path).get_stats()
-
- print('RX interface core 0, sw_if_index 0:\n{0}'.\
- format(stats[0]['/if/rx'][0][0]))
+ # Class cache for reuse between instances.
+ cached_vpp_instance = None
- or
+ def __init__(self, node, remote_vpp_socket="/run/vpp-api.sock"):
+ """Store the given arguments, declare managed variables.
- path_1 = ['^/if', ]
- path_2 = ['^/if', '/err/ip4-input', '/sys/node/ip4-input']
+ :param node: Node to connect to and forward unix domain socket from.
+ :param remote_vpp_socket: Path to remote socket to tunnel to.
+ :type node: dict
+ :type remote_vpp_socket: str
+ """
+ self._node = node
+ self._remote_vpp_socket = remote_vpp_socket
+ # The list of PAPI commands to be executed on the node.
+ self._api_command_list = list()
+ # The following values are set on enter, reset on exit.
+ self._temp_dir = None
+ self._ssh_control_socket = None
+ self._local_vpp_socket = None
- with PapiExecutor(node) as papi_exec:
- stats = papi_exec.add('vpp-stats', path=path_1).\
- add('vpp-stats', path=path_2).get_stats()
+ @property
+ def vpp_instance(self):
+ """Return VPP instance with bindings to all API calls.
- print('RX interface core 0, sw_if_index 0:\n{0}'.\
- format(stats[1]['/if/rx'][0][0]))
+ The returned instance is initialized for unix domain socket access;
+ it has all the bindings initialized, but it is not connected
+ (to the local socket) yet.
- Note: In this case, when PapiExecutor method 'add' is used:
- - its parameter 'csit_papi_command' is used only to keep information
- that vpp-stats are requested. It is not further processed but it is
- included in the PAPI history this way:
- vpp-stats(path=['^/if', '/err/ip4-input', '/sys/node/ip4-input'])
- Always use csit_papi_command="vpp-stats" if the VPP PAPI method
- is "stats".
- - the second parameter must be 'path' as it is used by PapiExecutor
- method 'add'.
- """
+ The first invocation downloads .api.json files from self._node
+ into a temporary directory.
- def __init__(self, node):
- """Initialization.
+ After the first invocation, the result is cached, so other calls are quick.
+ A class variable is used as the cache, but this property is defined as
+ an instance method, so that _node (for api files) is known.
- :param node: Node to run command(s) on.
- :type node: dict
+ :returns: Initialized but not connected VPP instance.
+ :rtype: vpp_papi.VPPApiClient
"""
+ cls = self.__class__
+ if cls.cached_vpp_instance is not None:
+ return cls.cached_vpp_instance
+ tmp_dir = tempfile.mkdtemp(dir="/tmp")
+ package_path = "Not set yet."
+ try:
+ # Pack, copy and unpack Python part of VPP installation from _node.
+ # TODO: Use rsync or recursive version of ssh.scp_node instead?
+ node = self._node
+ exec_cmd_no_error(node, ["rm", "-rf", "/tmp/papi.txz"])
+ # Papi python version depends on OS (and time).
+ # Python 2.7 or 3.4, site-packages or dist-packages.
+ installed_papi_glob = "/usr/lib/python*/*-packages/vpp_papi"
+ # We need to wrap this command in bash, in order to expand globs,
+ # and as ssh does join, the inner command has to be quoted.
+ inner_cmd = " ".join([
+ "tar", "cJf", "/tmp/papi.txz", "--exclude=*.pyc",
+ installed_papi_glob, "/usr/share/vpp/api"])
+ exec_cmd_no_error(node, ["bash", "-c", "'" + inner_cmd + "'"])
+ scp_node(node, tmp_dir + "/papi.txz", "/tmp/papi.txz", get=True)
+ run(["tar", "xf", tmp_dir + "/papi.txz", "-C", tmp_dir])
+ # When present locally, we finally can find the installation path.
+ package_path = glob.glob(tmp_dir + installed_papi_glob)[0]
+ # Package path has to be one level above the vpp_papi directory.
+ package_path = package_path.rsplit('/', 1)[0]
+ sys.path.append(package_path)
+ from vpp_papi.vpp_papi import VPPApiClient as vpp_class
+ vpp_class.apidir = tmp_dir + "/usr/share/vpp/api"
+ # We need to create instance before removing from sys.path.
+ cls.cached_vpp_instance = vpp_class(
+ use_socket=True, server_address="TBD", async_thread=False,
+ read_timeout=6, logger=FilteredLogger(logger, "INFO"))
+ # Cannot use loglevel parameter, robot.api.logger lacks support.
+ # TODO: Stop overriding read_timeout when VPP-1722 is fixed.
+ finally:
+ shutil.rmtree(tmp_dir)
+ if sys.path[-1] == package_path:
+ sys.path.pop()
+ return cls.cached_vpp_instance
- # Node to run command(s) on.
- self._node = node
-
- # The list of PAPI commands to be executed on the node.
- self._api_command_list = list()
+ def __enter__(self):
+ """Create a tunnel, connect VPP instance.
- self._ssh = SSH()
+ Only at this point are the local socket names created
+ in a temporary directory, because VIRL runs 3 pybots at once,
+ so hardcoding local filenames does not work.
- def __enter__(self):
- try:
- self._ssh.connect(self._node)
- except IOError:
- raise RuntimeError("Cannot open SSH connection to host {host} to "
- "execute PAPI command(s)".
- format(host=self._node["host"]))
+ :returns: self
+ :rtype: PapiSocketExecutor
+ """
+ # Parsing takes longer than connecting, prepare instance before tunnel.
+ vpp_instance = self.vpp_instance
+ node = self._node
+ self._temp_dir = tempfile.mkdtemp(dir="/tmp")
+ self._local_vpp_socket = self._temp_dir + "/vpp-api.sock"
+ self._ssh_control_socket = self._temp_dir + "/ssh.sock"
+ ssh_socket = self._ssh_control_socket
+ # Cleanup possibilities.
+ ret_code, _ = run(["ls", ssh_socket], check=False)
+ if ret_code != 2:
+ # This branch never seems to be hit in CI,
+ # but may be useful when testing manually.
+ run(["ssh", "-S", ssh_socket, "-O", "exit", "0.0.0.0"],
+ check=False, log=True)
+ # TODO: Is any sleep necessary? How to prove if not?
+ run(["sleep", "0.1"])
+ run(["rm", "-vrf", ssh_socket])
+ # Even if ssh can perhaps reuse this file,
+ # we need to remove it for readiness detection to work correctly.
+ run(["rm", "-rvf", self._local_vpp_socket])
+ # On VIRL, the ssh user is not added to "vpp" group,
+ # so we need to change remote socket file access rights.
+ exec_cmd_no_error(
+ node, "chmod o+rwx " + self._remote_vpp_socket, sudo=True)
+ # We use the sleep command. The ssh command will exit in 10 seconds,
+ # unless a local socket connection is established,
+ # in which case the ssh command will exit only when
+ # the ssh connection is closed again (via the control socket).
+ # The log level is to suppress "Warning: Permanently added" messages.
+ ssh_cmd = [
+ "ssh", "-S", ssh_socket, "-M",
+ "-o", "LogLevel=ERROR", "-o", "UserKnownHostsFile=/dev/null",
+ "-o", "StrictHostKeyChecking=no", "-o", "ExitOnForwardFailure=yes",
+ "-L", self._local_vpp_socket + ':' + self._remote_vpp_socket,
+ "-p", str(node['port']), node['username'] + "@" + node['host'],
+ "sleep", "10"]
+ priv_key = node.get("priv_key")
+ if priv_key:
+ # This is tricky. We need a file to pass the value to the ssh command.
+ # And we need the ssh command, because paramiko does not support sockets
+ # (neither ssh_socket, nor _remote_vpp_socket).
+ key_file = tempfile.NamedTemporaryFile()
+ key_file.write(priv_key)
+ # Make sure the content is written, but do not close yet.
+ key_file.flush()
+ ssh_cmd[1:1] = ["-i", key_file.name]
+ password = node.get("password")
+ if password:
+ # Prepend sshpass command to set password.
+ ssh_cmd[:0] = ["sshpass", "-p", password]
+ time_stop = time.time() + 10.0
+ # subprocess.Popen seems to be the best way to run commands
+ # in the background. Other ways (shell=True with "&" and ssh with -f)
+ # seem to be too dependent on shell behavior.
+ # In particular, -f does NOT return values for run().
+ subprocess.Popen(ssh_cmd)
+ # Check socket presence on local side.
+ while time.time() < time_stop:
+ # It can take a moment for ssh to create the socket file.
+ ret_code, _ = run(["ls", "-l", self._local_vpp_socket], check=False)
+ if not ret_code:
+ break
+ time.sleep(0.1)
+ else:
+ raise RuntimeError("Local side socket has not appeared.")
+ if priv_key:
+ # Socket up means the key has been read. Delete file by closing it.
+ key_file.close()
+ # Everything is ready, set the local socket address and connect.
+ vpp_instance.transport.server_address = self._local_vpp_socket
+ # It seems we can get a read error even if every preceding check passed.
+ # A single retry seems to help.
+ for _ in xrange(2):
+ try:
+ vpp_instance.connect_sync("csit_socket")
+ except IOError as err:
+ logger.warn("Got initial connect error {err!r}".format(err=err))
+ vpp_instance.disconnect()
+ else:
+ break
+ else:
+ raise RuntimeError("Failed to connect to VPP over a socket.")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
- self._ssh.disconnect(self._node)
+ """Disconnect the vpp instance, tear down the SHH tunnel.
- def add(self, csit_papi_command="vpp-stats", history=True, **kwargs):
+ Also remove the local sockets by deleting the temporary directory.
+ Arguments related to a possible exception are entirely ignored.
+ """
+ self.vpp_instance.disconnect()
+ run(["ssh", "-S", self._ssh_control_socket, "-O", "exit", "0.0.0.0"],
+ check=False)
+ shutil.rmtree(self._temp_dir)
+ return
+
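# A minimal usage sketch of the socket executor as a context manager,
# mirroring the callers updated later in this change; "node" is assumed
# to be a standard CSIT topology dict (host, port, username, socket paths).
from resources.libraries.python.PapiExecutor import PapiSocketExecutor

def example_show_version(node):
    """Open the tunnel, send one request, return the parsed reply."""
    with PapiSocketExecutor(node) as papi_exec:
        # __enter__ builds the SSH tunnel and connects over the local socket,
        # __exit__ disconnects and removes the temporary directory.
        return papi_exec.add("show_version").get_reply()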
+ def add(self, csit_papi_command, history=True, **kwargs):
"""Add next command to internal command list; return self.
The argument name 'csit_papi_command' must be unique enough as it cannot
be repeated in kwargs.
+ Unless disabled, a new entry is also added to the PAPI history at this point.
:param csit_papi_command: VPP API command.
:param history: Enable/disable adding command to PAPI command history.
@@ -138,51 +322,30 @@ class PapiExecutor(object):
:type history: bool
:type kwargs: dict
:returns: self, so that method chaining is possible.
- :rtype: PapiExecutor
+ :rtype: PapiSocketExecutor
"""
if history:
PapiHistory.add_to_papi_history(
self._node, csit_papi_command, **kwargs)
- self._api_command_list.append(dict(api_name=csit_papi_command,
- api_args=kwargs))
+ self._api_command_list.append(
+ dict(api_name=csit_papi_command, api_args=kwargs))
return self
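# Method chaining as described above, in a hedged sketch; both API names
# are ones exercised elsewhere in this change.
from resources.libraries.python.PapiExecutor import PapiSocketExecutor

def example_chained_requests(node):
    """Queue two requests on one connection and read both replies."""
    with PapiSocketExecutor(node) as papi_exec:
        replies = papi_exec.add("show_version").add("show_threads").get_replies(
            err_msg="Example requests failed.")
    return replies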
- def get_stats(self, err_msg="Failed to get statistics.", timeout=120):
- """Get VPP Stats from VPP Python API.
-
- :param err_msg: The message used if the PAPI command(s) execution fails.
- :param timeout: Timeout in seconds.
- :type err_msg: str
- :type timeout: int
- :returns: Requested VPP statistics.
- :rtype: list of dict
- """
-
- paths = [cmd['api_args']['path'] for cmd in self._api_command_list]
- self._api_command_list = list()
-
- stdout = self._execute_papi(
- paths, method='stats', err_msg=err_msg, timeout=timeout)
-
- return json.loads(stdout)
-
- def get_replies(self, err_msg="Failed to get replies.", timeout=120):
+ def get_replies(self, err_msg="Failed to get replies."):
"""Get replies from VPP Python API.
The replies are parsed into dict-like objects,
"retval" field is guaranteed to be zero on success.
:param err_msg: The message used if the PAPI command(s) execution fails.
- :param timeout: Timeout in seconds.
:type err_msg: str
- :type timeout: int
:returns: Responses, dict objects with fields due to API and "retval".
:rtype: list of dict
:raises RuntimeError: If retval is nonzero, parsing or ssh error.
"""
- return self._execute(method='request', err_msg=err_msg, timeout=timeout)
+ return self._execute(err_msg=err_msg)
- def get_reply(self, err_msg="Failed to get reply.", timeout=120):
+ def get_reply(self, err_msg="Failed to get reply."):
"""Get reply from VPP Python API.
The reply is parsed into dict-like object,
@@ -191,20 +354,18 @@ class PapiExecutor(object):
TODO: Discuss exception types to raise, unify with inner methods.
:param err_msg: The message used if the PAPI command(s) execution fails.
- :param timeout: Timeout in seconds.
:type err_msg: str
- :type timeout: int
:returns: Response, dict object with fields due to API and "retval".
:rtype: dict
:raises AssertionError: If retval is nonzero, parsing or ssh error.
"""
- replies = self.get_replies(err_msg=err_msg, timeout=timeout)
+ replies = self.get_replies(err_msg=err_msg)
if len(replies) != 1:
raise RuntimeError("Expected single reply, got {replies!r}".format(
replies=replies))
return replies[0]
- def get_sw_if_index(self, err_msg="Failed to get reply.", timeout=120):
+ def get_sw_if_index(self, err_msg="Failed to get reply."):
"""Get sw_if_index from reply from VPP Python API.
Frequently, the caller is only interested in sw_if_index field
@@ -213,16 +374,16 @@ class PapiExecutor(object):
TODO: Discuss exception types to raise, unify with inner methods.
:param err_msg: The message used if the PAPI command(s) execution fails.
- :param timeout: Timeout in seconds.
:type err_msg: str
- :type timeout: int
:returns: Response, sw_if_index value of the reply.
:rtype: int
:raises AssertionError: If retval is nonzero, parsing or ssh error.
"""
- return self.get_reply(err_msg=err_msg, timeout=timeout)["sw_if_index"]
+ reply = self.get_reply(err_msg=err_msg)
+ logger.info("Getting index from {reply!r}".format(reply=reply))
+ return reply["sw_if_index"]
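# A hedged sketch of the typical get_sw_if_index() flow, mirroring the
# vhost-user caller touched later in this diff; the socket path is hypothetical.
from resources.libraries.python.PapiExecutor import PapiSocketExecutor

def example_create_vhost(node):
    """Create a vhost-user interface and return its sw_if_index."""
    cmd = "create_vhost_user_if"
    args = dict(sock_filename="/tmp/sock-example")  # hypothetical path
    err_msg = "Failed to create vhost-user interface."
    with PapiSocketExecutor(node) as papi_exec:
        return papi_exec.add(cmd, **args).get_sw_if_index(err_msg)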
- def get_details(self, err_msg="Failed to get dump details.", timeout=120):
+ def get_details(self, err_msg="Failed to get dump details."):
"""Get dump details from VPP Python API.
The details are parsed into dict-like objects.
@@ -233,28 +394,11 @@ class PapiExecutor(object):
it is recommended to call get_details for each dump (type) separately.
:param err_msg: The message used if the PAPI command(s) execution fails.
- :param timeout: Timeout in seconds.
:type err_msg: str
- :type timeout: int
:returns: Details, dict objects with fields due to API without "retval".
:rtype: list of dict
"""
- return self._execute(method='dump', err_msg=err_msg, timeout=timeout)
-
- @staticmethod
- def dump_and_log(node, cmds):
- """Dump and log requested information, return None.
-
- :param node: DUT node.
- :param cmds: Dump commands to be executed.
- :type node: dict
- :type cmds: list of str
- """
- with PapiExecutor(node) as papi_exec:
- for cmd in cmds:
- details = papi_exec.add(cmd).get_details()
- logger.debug("{cmd}:\n{details}".format(
- cmd=cmd, details=pformat(details)))
+ return self._execute(err_msg)
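# For dumps, get_details() returns the parsed *_details messages (no retval);
# a short sketch using a dump command that appears elsewhere in this change.
from resources.libraries.python.PapiExecutor import PapiSocketExecutor

def example_dump_vhost(node):
    """Return the list of vhost-user interface detail dicts."""
    with PapiSocketExecutor(node) as papi_exec:
        return papi_exec.add("sw_interface_vhost_user_dump").get_details()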
@staticmethod
def run_cli_cmd(node, cmd, log=True):
@@ -271,21 +415,194 @@ class PapiExecutor(object):
:returns: CLI output.
:rtype: str
"""
-
cli = 'cli_inband'
args = dict(cmd=cmd)
err_msg = "Failed to run 'cli_inband {cmd}' PAPI command on host " \
"{host}".format(host=node['host'], cmd=cmd)
-
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cli, **args).get_reply(err_msg)["reply"]
-
if log:
logger.info("{cmd}:\n{reply}".format(cmd=cmd, reply=reply))
-
return reply
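# run_cli_cmd() wraps the CLI text in a cli_inband request, so callers reduce
# to a one-liner; this mirrors the VPPUtil and VppCounters call sites below.
from resources.libraries.python.PapiExecutor import PapiSocketExecutor

def example_show_errors(node):
    """Run a VPP CLI command over PAPI and return its output."""
    return PapiSocketExecutor.run_cli_cmd(node, "show errors")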
@staticmethod
+ def dump_and_log(node, cmds):
+ """Dump and log requested information, return None.
+
+ :param node: DUT node.
+ :param cmds: Dump commands to be executed.
+ :type node: dict
+ :type cmds: list of str
+ """
+ with PapiSocketExecutor(node) as papi_exec:
+ for cmd in cmds:
+ dump = papi_exec.add(cmd).get_details()
+ logger.debug("{cmd}:\n{data}".format(
+ cmd=cmd, data=pformat(dump)))
+
+ def _execute(self, err_msg="Undefined error message"):
+ """Turn internal command list into data and execute; return replies.
+
+ This method also clears the internal command list.
+
+ IMPORTANT!
+ Do not use this method in L1 keywords. Use:
+ - get_replies()
+ - get_reply()
+ - get_sw_if_index()
+ - get_details()
+
+ :param err_msg: The message used if the PAPI command(s) execution fails.
+ :type err_msg: str
+ :returns: Papi responses parsed into a dict-like object,
+ with fields due to API (possibly including retval).
+ :rtype: list of dict
+ :raises RuntimeError: If the replies are not all correct.
+ """
+ vpp_instance = self.vpp_instance
+ local_list = self._api_command_list
+ # Clear first as execution may fail.
+ self._api_command_list = list()
+ replies = list()
+ for command in local_list:
+ api_name = command["api_name"]
+ papi_fn = getattr(vpp_instance.api, api_name)
+ try:
+ reply = papi_fn(**command["api_args"])
+ except IOError as err:
+ # Occasionally an error happens, try to reconnect.
+ logger.warn("Reconnect after error: {err!r}".format(err=err))
+ self.vpp_instance.disconnect()
+ # Testing shows that an immediate reconnect fails.
+ time.sleep(1)
+ self.vpp_instance.connect_sync("csit_socket")
+ logger.trace("Reconnected.")
+ reply = papi_fn(**command["api_args"])
+ # *_dump commands return a list of objects; wrap an ordinary reply in a list too.
+ if not isinstance(reply, list):
+ reply = [reply]
+ for item in reply:
+ dict_item = dictize(item)
+ if "retval" in dict_item.keys():
+ # *_details messages do not contain retval.
+ retval = dict_item["retval"]
+ if retval != 0:
+ # TODO: What exactly to log and raise here?
+ err = AssertionError("Retval {rv!r}".format(rv=retval))
+ # Lowering the log level, as some retval!=0 calls are expected.
+ # TODO: Expose level argument so callers can decide?
+ raise_from(AssertionError(err_msg), err, level="DEBUG")
+ replies.append(dict_item)
+ return replies
+
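# Since _execute() raises AssertionError (carrying err_msg) on nonzero retval,
# callers that tolerate failures catch it explicitly; a hedged sketch of the
# pattern used in VPPUtil later in this diff.
from robot.api import logger
from resources.libraries.python.PapiExecutor import PapiSocketExecutor

def example_tolerant_cli(node, fail_on_error=False):
    """Run CLI commands, optionally ignoring failures."""
    for cmd in ("show errors", "show hardware detail"):
        try:
            PapiSocketExecutor.run_cli_cmd(node, cmd)
        except AssertionError:
            if fail_on_error:
                raise
            logger.warn("Command {cmd} failed, continuing.".format(cmd=cmd))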
+
+class PapiExecutor(object):
+ """Contains methods for executing VPP Python API commands on DUTs.
+
+ TODO: Remove .add step, make get_stats accept paths directly.
+
+ This class processes only one type of VPP PAPI method: vpp-stats.
+
+ The recommended ways of use are (examples):
+
+ path = ['^/if', '/err/ip4-input', '/sys/node/ip4-input']
+ with PapiExecutor(node) as papi_exec:
+ stats = papi_exec.add(api_name='vpp-stats', path=path).get_stats()
+
+ print('RX interface core 0, sw_if_index 0:\n{0}'.\
+ format(stats[0]['/if/rx'][0][0]))
+
+ or
+
+ path_1 = ['^/if', ]
+ path_2 = ['^/if', '/err/ip4-input', '/sys/node/ip4-input']
+ with PapiExecutor(node) as papi_exec:
+ stats = papi_exec.add('vpp-stats', path=path_1).\
+ add('vpp-stats', path=path_2).get_stats()
+
+ print('RX interface core 0, sw_if_index 0:\n{0}'.\
+ format(stats[1]['/if/rx'][0][0]))
+
+ Note: In this case, when PapiExecutor method 'add' is used:
+ - its parameter 'csit_papi_command' is used only to keep information
+ that vpp-stats are requested. It is not further processed but it is
+ included in the PAPI history this way:
+ vpp-stats(path=['^/if', '/err/ip4-input', '/sys/node/ip4-input'])
+ Always use csit_papi_command="vpp-stats" if the VPP PAPI method
+ is "stats".
+ - the second parameter must be 'path' as it is used by PapiExecutor
+ method 'add'.
+ """
+
+ def __init__(self, node):
+ """Initialization.
+
+ :param node: Node to run command(s) on.
+ :type node: dict
+ """
+
+ # Node to run command(s) on.
+ self._node = node
+
+ # The list of PAPI commands to be executed on the node.
+ self._api_command_list = list()
+
+ self._ssh = SSH()
+
+ def __enter__(self):
+ try:
+ self._ssh.connect(self._node)
+ except IOError:
+ raise RuntimeError("Cannot open SSH connection to host {host} to "
+ "execute PAPI command(s)".
+ format(host=self._node["host"]))
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._ssh.disconnect(self._node)
+
+ def add(self, csit_papi_command="vpp-stats", history=True, **kwargs):
+ """Add next command to internal command list; return self.
+
+ The argument name 'csit_papi_command' must be unique enough as it cannot
+ be repeated in kwargs.
+
+ :param csit_papi_command: VPP API command.
+ :param history: Enable/disable adding command to PAPI command history.
+ :param kwargs: Optional key-value arguments.
+ :type csit_papi_command: str
+ :type history: bool
+ :type kwargs: dict
+ :returns: self, so that method chaining is possible.
+ :rtype: PapiExecutor
+ """
+ if history:
+ PapiHistory.add_to_papi_history(
+ self._node, csit_papi_command, **kwargs)
+ self._api_command_list.append(dict(api_name=csit_papi_command,
+ api_args=kwargs))
+ return self
+
+ def get_stats(self, err_msg="Failed to get statistics.", timeout=120):
+ """Get VPP Stats from VPP Python API.
+
+ :param err_msg: The message used if the PAPI command(s) execution fails.
+ :param timeout: Timeout in seconds.
+ :type err_msg: str
+ :type timeout: int
+ :returns: Requested VPP statistics.
+ :rtype: list of dict
+ """
+
+ paths = [cmd['api_args']['path'] for cmd in self._api_command_list]
+ self._api_command_list = list()
+
+ stdout = self._execute_papi(
+ paths, method='stats', err_msg=err_msg, timeout=timeout)
+
+ return json.loads(stdout)
+
+ @staticmethod
def _process_api_data(api_d):
"""Process API data for smooth converting to JSON string.
@@ -325,62 +642,6 @@ class PapiExecutor(object):
api_args=api_args_processed))
return api_data_processed
- @staticmethod
- def _revert_api_reply(api_r):
- """Process API reply / a part of API reply.
-
- Apply binascii.unhexlify() method for unicode values.
-
- TODO: Implement complex solution to process of replies.
-
- :param api_r: API reply.
- :type api_r: dict
- :returns: Processed API reply / a part of API reply.
- :rtype: dict
- """
- def process_value(val):
- """Process value.
-
- :param val: Value to be processed.
- :type val: object
- :returns: Processed value.
- :rtype: dict or str or int
- """
- if isinstance(val, dict):
- for val_k, val_v in val.iteritems():
- val[str(val_k)] = process_value(val_v)
- return val
- elif isinstance(val, list):
- for idx, val_l in enumerate(val):
- val[idx] = process_value(val_l)
- return val
- elif isinstance(val, unicode):
- return binascii.unhexlify(val)
- else:
- return val
-
- reply_dict = dict()
- reply_value = dict()
- for reply_key, reply_v in api_r.iteritems():
- for a_k, a_v in reply_v.iteritems():
- reply_value[a_k] = process_value(a_v)
- reply_dict[reply_key] = reply_value
- return reply_dict
-
- def _process_reply(self, api_reply):
- """Process API reply.
-
- :param api_reply: API reply.
- :type api_reply: dict or list of dict
- :returns: Processed API reply.
- :rtype: list or dict
- """
- if isinstance(api_reply, list):
- reverted_reply = [self._revert_api_reply(a_r) for a_r in api_reply]
- else:
- reverted_reply = self._revert_api_reply(api_reply)
- return reverted_reply
-
def _execute_papi(self, api_data, method='request', err_msg="",
timeout=120):
"""Execute PAPI command(s) on remote node and store the result.
@@ -430,66 +691,3 @@ class PapiExecutor(object):
raise AssertionError(err_msg)
return stdout
-
- def _execute(self, method='request', err_msg="", timeout=120):
- """Turn internal command list into data and execute; return replies.
-
- This method also clears the internal command list.
-
- IMPORTANT!
- Do not use this method in L1 keywords. Use:
- - get_stats()
- - get_replies()
- - get_details()
-
- :param method: VPP Python API method. Supported methods are: 'request',
- 'dump' and 'stats'.
- :param err_msg: The message used if the PAPI command(s) execution fails.
- :param timeout: Timeout in seconds.
- :type method: str
- :type err_msg: str
- :type timeout: int
- :returns: Papi responses parsed into a dict-like object,
- with field due to API or stats hierarchy.
- :rtype: list of dict
- :raises KeyError: If the reply is not correct.
- """
-
- local_list = self._api_command_list
-
- # Clear first as execution may fail.
- self._api_command_list = list()
-
- stdout = self._execute_papi(
- local_list, method=method, err_msg=err_msg, timeout=timeout)
- replies = list()
- try:
- json_data = json.loads(stdout)
- except ValueError as err:
- raise_from(RuntimeError(err_msg), err)
- for data in json_data:
- if method == "request":
- api_reply = self._process_reply(data["api_reply"])
- # api_reply contains single key, *_reply.
- obj = api_reply.values()[0]
- retval = obj["retval"]
- if retval != 0:
- # TODO: What exactly to log and raise here?
- err = AssertionError("Got retval {rv!r}".format(rv=retval))
- raise_from(AssertionError(err_msg), err, level="INFO")
- replies.append(obj)
- elif method == "dump":
- api_reply = self._process_reply(data["api_reply"])
- # api_reply is a list where item contas single key, *_details.
- for item in api_reply:
- obj = item.values()[0]
- replies.append(obj)
- else:
- # TODO: Implement support for stats.
- raise RuntimeError("Unsuported method {method}".format(
- method=method))
-
- # TODO: Make logging optional?
- logger.debug("PAPI replies: {replies}".format(replies=replies))
-
- return replies
diff --git a/resources/libraries/python/ProxyArp.py b/resources/libraries/python/ProxyArp.py
index a0a54fb103..e05bfe3a61 100644
--- a/resources/libraries/python/ProxyArp.py
+++ b/resources/libraries/python/ProxyArp.py
@@ -14,7 +14,7 @@
"""Proxy ARP library"""
from resources.libraries.python.InterfaceUtil import InterfaceUtil
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.VatExecutor import VatTerminal
@@ -53,5 +53,5 @@ class ProxyArp(object):
enable_disable=1)
err_msg = 'Failed to enable proxy ARP on interface {ifc}'.format(
ifc=interface)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
diff --git a/resources/libraries/python/QemuUtils.py b/resources/libraries/python/QemuUtils.py
index 6a63798b1e..0895f95409 100644
--- a/resources/libraries/python/QemuUtils.py
+++ b/resources/libraries/python/QemuUtils.py
@@ -222,6 +222,7 @@ class QemuUtils(object):
vpp_config.add_unix_nodaemon()
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_exec(running)
+ vpp_config.add_socksvr()
vpp_config.add_cpu_main_core('0')
if self._opt.get('smp') > 1:
vpp_config.add_cpu_corelist_workers('1-{smp}'.format(
diff --git a/resources/libraries/python/SetupFramework.py b/resources/libraries/python/SetupFramework.py
index a1e4e7a679..28717dca6a 100644
--- a/resources/libraries/python/SetupFramework.py
+++ b/resources/libraries/python/SetupFramework.py
@@ -54,7 +54,7 @@ def pack_framework_dir():
run(["tar", "--sparse", "--exclude-vcs", "--exclude=output*.xml",
"--exclude=./tmp", "-zcf", file_name, "."],
- check=True, msg="Could not pack testing framework")
+ msg="Could not pack testing framework")
return file_name
diff --git a/resources/libraries/python/TestConfig.py b/resources/libraries/python/TestConfig.py
index 284b2e8cf4..5c441df09e 100644
--- a/resources/libraries/python/TestConfig.py
+++ b/resources/libraries/python/TestConfig.py
@@ -19,7 +19,7 @@ from robot.api import logger
from resources.libraries.python.Constants import Constants
from resources.libraries.python.InterfaceUtil import InterfaceUtil
from resources.libraries.python.IPUtil import IPUtil
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import Topology
from resources.libraries.python.VatExecutor import VatExecutor
@@ -169,7 +169,7 @@ class TestConfig(object):
err_msg = 'Failed to create VXLAN and VLAN interfaces on host {host}'.\
format(host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
for i in xrange(0, vxlan_count):
try:
src_ip = src_ip_addr_start + i * ip_step
@@ -261,7 +261,7 @@ class TestConfig(object):
err_msg = 'Failed to put VXLAN and VLAN interfaces up on host {host}'. \
format(host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
for i in xrange(0, vxlan_count):
vxlan_subif_key = Topology.add_new_port(node, 'vxlan_tunnel')
vxlan_subif_name = 'vxlan_tunnel{nr}'.format(nr=i)
@@ -401,7 +401,7 @@ class TestConfig(object):
err_msg = 'Failed to put VXLAN and VLAN interfaces to bridge domain ' \
'on host {host}'.format(host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
for i in xrange(0, vxlan_count):
dst_ip = dst_ip_addr_start + i * ip_step
args1['neighbor']['ip_address'] = str(dst_ip)
diff --git a/resources/libraries/python/Trace.py b/resources/libraries/python/Trace.py
index 6e3ac2a449..5f885d6c60 100644
--- a/resources/libraries/python/Trace.py
+++ b/resources/libraries/python/Trace.py
@@ -13,7 +13,7 @@
"""Packet trace library."""
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import NodeType
@@ -34,8 +34,8 @@ class Trace(object):
for node in nodes.values():
if node['type'] == NodeType.DUT:
- PapiExecutor.run_cli_cmd(node, cmd="show trace {max}".
- format(max=maximum))
+ PapiSocketExecutor.run_cli_cmd(
+ node, cmd="show trace {max}".format(max=maximum))
@staticmethod
def clear_packet_trace_on_all_duts(nodes):
@@ -46,4 +46,4 @@ class Trace(object):
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
- PapiExecutor.run_cli_cmd(node, cmd="clear trace")
+ PapiSocketExecutor.run_cli_cmd(node, cmd="clear trace")
diff --git a/resources/libraries/python/VPPUtil.py b/resources/libraries/python/VPPUtil.py
index 676671f15e..1ae9ca6a40 100644
--- a/resources/libraries/python/VPPUtil.py
+++ b/resources/libraries/python/VPPUtil.py
@@ -13,14 +13,12 @@
"""VPP util library."""
-import binascii
-
from robot.api import logger
from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.L2Util import L2Util
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import exec_cmd_no_error
from resources.libraries.python.topology import NodeType
@@ -136,6 +134,7 @@ class VPPUtil(object):
VPPUtil.verify_vpp_started(node)
# Verify responsivness of PAPI.
VPPUtil.show_log(node)
+ VPPUtil.vpp_show_version(node)
finally:
DUTSetup.get_service_logs(node, Constants.VPP_UNIT)
@@ -162,7 +161,7 @@ class VPPUtil(object):
:returns: VPP version.
:rtype: str
"""
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add('show_version').get_reply()
return_version = reply['version'].rstrip('\0x00')
version = 'VPP version: {ver}\n'.format(ver=return_version)
@@ -197,7 +196,7 @@ class VPPUtil(object):
args = dict(name_filter_valid=0, name_filter='')
err_msg = 'Failed to get interface dump on host {host}'.format(
host=node['host'])
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
for if_dump in details:
@@ -226,7 +225,7 @@ class VPPUtil(object):
for cmd in cmds:
try:
- PapiExecutor.run_cli_cmd(node, cmd)
+ PapiSocketExecutor.run_cli_cmd(node, cmd)
except AssertionError:
if fail_on_error:
raise
@@ -252,7 +251,7 @@ class VPPUtil(object):
:param node: DUT node to set up.
:type node: dict
"""
- PapiExecutor.run_cli_cmd(node, "elog trace api cli barrier")
+ PapiSocketExecutor.run_cli_cmd(node, "elog trace api cli barrier")
@staticmethod
def vpp_enable_elog_traces_on_all_duts(nodes):
@@ -272,7 +271,7 @@ class VPPUtil(object):
:param node: DUT node to show traces on.
:type node: dict
"""
- PapiExecutor.run_cli_cmd(node, "show event-logger")
+ PapiSocketExecutor.run_cli_cmd(node, "show event-logger")
@staticmethod
def show_event_logger_on_all_duts(nodes):
@@ -294,7 +293,7 @@ class VPPUtil(object):
:returns: VPP log data.
:rtype: list
"""
- return PapiExecutor.run_cli_cmd(node, "show log")
+ return PapiSocketExecutor.run_cli_cmd(node, "show log")
@staticmethod
def vpp_show_threads(node):
@@ -305,7 +304,7 @@ class VPPUtil(object):
:returns: VPP thread data.
:rtype: list
"""
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add('show_threads').get_reply()
threads_data = list()
diff --git a/resources/libraries/python/VhostUser.py b/resources/libraries/python/VhostUser.py
index 916d0829ee..a24bc97633 100644
--- a/resources/libraries/python/VhostUser.py
+++ b/resources/libraries/python/VhostUser.py
@@ -15,7 +15,7 @@
from robot.api import logger
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.InterfaceUtil import InterfaceUtil
@@ -34,7 +34,7 @@ class VhostUser(object):
:rtype: list
"""
cmd = "sw_interface_vhost_user_dump"
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd).get_details()
for vhost in details:
@@ -62,7 +62,7 @@ class VhostUser(object):
args = dict(
sock_filename=str(socket)
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
# Update the Topology:
diff --git a/resources/libraries/python/VppConfigGenerator.py b/resources/libraries/python/VppConfigGenerator.py
index e92d674507..7ee3e2ceb0 100644
--- a/resources/libraries/python/VppConfigGenerator.py
+++ b/resources/libraries/python/VppConfigGenerator.py
@@ -11,7 +11,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""VPP Configuration File Generator library."""
+"""VPP Configuration File Generator library.
+
+TODO: Support initialization with default values,
+so that we do not need to have block of 6 "Add Unix" commands
+in 7 various places of CSIT code.
+"""
import re
@@ -191,6 +196,11 @@ class VppConfigGenerator(object):
path = ['unix', 'exec']
self.add_config_item(self._nodeconfig, value, path)
+ def add_socksvr(self, socket="default"):
+ """Add socksvr configuration."""
+ path = ['socksvr', socket]
+ self.add_config_item(self._nodeconfig, '', path)
+
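# A hedged sketch of how the new add_socksvr() is expected to be used when
# composing a startup config; the rendered stanza depends on add_config_item(),
# presumably "socksvr { default }" for the default API socket.
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator

vpp_config = VppConfigGenerator()
vpp_config.add_unix_nodaemon()
vpp_config.add_unix_cli_listen()
vpp_config.add_socksvr()  # enable the API socket used by PapiSocketExecutor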
def add_api_segment_gid(self, value='vpp'):
"""Add API-SEGMENT gid configuration.
diff --git a/resources/libraries/python/VppCounters.py b/resources/libraries/python/VppCounters.py
index dd1553538d..03b40b7a6b 100644
--- a/resources/libraries/python/VppCounters.py
+++ b/resources/libraries/python/VppCounters.py
@@ -19,6 +19,7 @@ from pprint import pformat
from robot.api import logger
from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.topology import NodeType, Topology
@@ -46,7 +47,7 @@ class VppCounters(object):
:param node: Node to run command on.
:type node: dict
"""
- PapiExecutor.run_cli_cmd(node, 'show errors')
+ PapiSocketExecutor.run_cli_cmd(node, 'show errors')
@staticmethod
def vpp_show_errors_verbose(node):
@@ -55,7 +56,7 @@ class VppCounters(object):
:param node: Node to run command on.
:type node: dict
"""
- PapiExecutor.run_cli_cmd(node, 'show errors verbose')
+ PapiSocketExecutor.run_cli_cmd(node, 'show errors verbose')
@staticmethod
def vpp_show_errors_on_all_duts(nodes, verbose=False):
@@ -161,7 +162,7 @@ class VppCounters(object):
:param node: Node to run command on.
:type node: dict
"""
- PapiExecutor.run_cli_cmd(node, 'show hardware detail')
+ PapiSocketExecutor.run_cli_cmd(node, 'show hardware detail')
@staticmethod
def vpp_clear_runtime(node):
@@ -172,7 +173,7 @@ class VppCounters(object):
:returns: Verified data from PAPI response.
:rtype: dict
"""
- return PapiExecutor.run_cli_cmd(node, 'clear runtime', log=False)
+ return PapiSocketExecutor.run_cli_cmd(node, 'clear runtime', log=False)
@staticmethod
def clear_runtime_counters_on_all_duts(nodes):
@@ -194,7 +195,8 @@ class VppCounters(object):
:returns: Verified data from PAPI response.
:rtype: dict
"""
- return PapiExecutor.run_cli_cmd(node, 'clear interfaces', log=False)
+ return PapiSocketExecutor.run_cli_cmd(
+ node, 'clear interfaces', log=False)
@staticmethod
def clear_interface_counters_on_all_duts(nodes):
@@ -216,7 +218,7 @@ class VppCounters(object):
:returns: Verified data from PAPI response.
:rtype: dict
"""
- return PapiExecutor.run_cli_cmd(node, 'clear hardware', log=False)
+ return PapiSocketExecutor.run_cli_cmd(node, 'clear hardware', log=False)
@staticmethod
def clear_hardware_counters_on_all_duts(nodes):
@@ -238,7 +240,7 @@ class VppCounters(object):
:returns: Verified data from PAPI response.
:rtype: dict
"""
- return PapiExecutor.run_cli_cmd(node, 'clear errors', log=False)
+ return PapiSocketExecutor.run_cli_cmd(node, 'clear errors', log=False)
@staticmethod
def clear_error_counters_on_all_duts(nodes):
diff --git a/resources/libraries/python/telemetry/SPAN.py b/resources/libraries/python/telemetry/SPAN.py
index 2033525f55..268a268a0d 100644
--- a/resources/libraries/python/telemetry/SPAN.py
+++ b/resources/libraries/python/telemetry/SPAN.py
@@ -14,7 +14,7 @@
"""SPAN setup library"""
from resources.libraries.python.topology import Topology
-from resources.libraries.python.PapiExecutor import PapiExecutor
+from resources.libraries.python.PapiExecutor import PapiSocketExecutor
class SPAN(object):
@@ -41,7 +41,7 @@ class SPAN(object):
args = dict(
is_l2=1 if is_l2 else 0
)
- with PapiExecutor(node) as papi_exec:
+ with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details()
return details
diff --git a/resources/libraries/robot/honeycomb/performance.robot b/resources/libraries/robot/honeycomb/performance.robot
index bd906e1742..a4d8d101e7 100644
--- a/resources/libraries/robot/honeycomb/performance.robot
+++ b/resources/libraries/robot/honeycomb/performance.robot
@@ -77,6 +77,7 @@
| | Run keyword | VPP_config.Add Unix Log
| | Run keyword | VPP_config.Add Unix CLI Listen
| | Run keyword | VPP_config.Add Unix Nodaemon
+| | Run keyword | VPP_config.Add Socksvr
| | Run keyword | VPP_config.Add CPU Main Core | ${1}
| | Run keyword | VPP_config.Apply Config
diff --git a/resources/libraries/robot/shared/default.robot b/resources/libraries/robot/shared/default.robot
index 89609f563b..0bacb53a5d 100644
--- a/resources/libraries/robot/shared/default.robot
+++ b/resources/libraries/robot/shared/default.robot
@@ -114,6 +114,7 @@
| | | Run keyword | ${dut}.Add Unix CLI Listen
| | | Run keyword | ${dut}.Add Unix Nodaemon
| | | Run keyword | ${dut}.Add Unix Coredump
+| | | Run keyword | ${dut}.Add Socksvr
| | | Run keyword | ${dut}.Add DPDK No Tx Checksum Offload
| | | Run keyword | ${dut}.Add DPDK Log Level | debug
| | | Run keyword | ${dut}.Add DPDK Uio Driver
@@ -429,7 +430,7 @@
| | ... | ${thr_count_int}
| Write startup configuration on all VPP DUTs
-| | [Documentation] | Write VPP startup configuration on all DUTs.
+| | [Documentation] | Write VPP startup configuration without restarting VPP.
| | ...
| | :FOR | ${dut} | IN | @{duts}
| | | Run keyword | ${dut}.Write Config